Example #1
 def testInterface(self):
     self.assertRaises(TypeError, varica.mvarica)
     # simply pass in different data shapes and see if the function runs without error
     varica.mvarica(np.sin(np.arange(30)).reshape((10, 3)),
                    VAR(1))  # 10 samples, 3 channels
     varica.mvarica(np.sin(np.arange(30)).reshape((5, 3, 2)),
                    VAR(1))  # 5 samples, 3 channels, 2 trials
Example #2
    def testModelIdentification(self):
        """ generate VAR signals, mix them, and see if MVARICA can reconstruct the signals
            do this for every backend """

        # original model coefficients
        b0 = np.zeros((3, 6))
        b0[1:3, 2:6] = [[0.4, -0.2, 0.3, 0.0], [-0.7, 0.0, 0.9, 0.0]]
        m0 = b0.shape[0]
        l, t = 1000, 100

        # generate VAR sources with non-gaussian innovation process, otherwise ICA won't work
        noisefunc = lambda: np.random.normal(size=(1, m0))**3

        var = VAR(2)
        var.coef = b0
        sources = var.simulate([l, t], noisefunc)

        # simulate volume conduction... 3 sources measured with 7 channels
        mix = [[0.5, 1.0, 0.5, 0.2, 0.0, 0.0, 0.0],
               [0.0, 0.2, 0.5, 1.0, 0.5, 0.2, 0.0],
               [0.0, 0.0, 0.0, 0.2, 0.5, 1.0, 0.5]]
        data = datatools.dot_special(sources, mix)

        backend_modules = [import_module('scot.' + b) for b in scot.backends]

        for bm in backend_modules:

            # apply MVARICA
            #  - default setting of 0.99 variance should reduce to 3 channels with this data
            #  - automatically determine delta (enough data, so it should most likely be 0)
            result = varica.mvarica(data,
                                    var,
                                    optimize_var=True,
                                    backend=bm.backend)

            # ICA does not define the ordering and sign of components
            # so we need to test all combinations to see if one of them fits the original coefficients
            permutations = np.array([[0, 1, 2, 3, 4, 5], [0, 1, 4, 5, 2, 3],
                                     [2, 3, 4, 5, 0, 1], [2, 3, 0, 1, 4, 5],
                                     [4, 5, 0, 1, 2, 3], [4, 5, 2, 3, 0, 1]])
            signperms = np.array([[1, 1, 1, 1, 1, 1], [1, 1, 1, 1, -1, -1],
                                  [1, 1, -1, -1, 1, 1], [1, 1, -1, -1, -1, -1],
                                  [-1, -1, 1, 1, 1, 1], [-1, -1, 1, 1, -1, -1],
                                  [-1, -1, -1, -1, 1, 1],
                                  [-1, -1, -1, -1, -1, -1]])

            best, d = np.inf, None

            for perm in permutations:
                b = result.b.coef[perm[::2] // 2, :]
                b = b[:, perm]
                for sgn in signperms:
                    c = b * np.repeat([sgn], 3, 0) * np.repeat([sgn[::2]], 6,
                                                               0).T
                    err = np.sum((c - b0)**2)
                    if err < best:
                        best = err
                        d = c

            self.assertTrue(np.all(abs(d - b0) < 0.05))
Example #3
    def testModelIdentification(self):
        """ generate VAR signals, mix them, and see if MVARICA can reconstruct the signals
            do this for every backend """

        # original model coefficients
        b0 = np.zeros((3, 6))
        b0[1:3, 2:6] = [[0.4, -0.2, 0.3, 0.0],
                        [-0.7, 0.0, 0.9, 0.0]]
        m0 = b0.shape[0]
        l, t = 1000, 100

        # generate VAR sources with non-gaussian innovation process, otherwise ICA won't work
        noisefunc = lambda: np.random.normal(size=(1, m0)) ** 3

        var = VAR(2)
        var.coef = b0
        sources = var.simulate([l, t], noisefunc)

        # simulate volume conduction... 3 sources measured with 7 channels
        mix = [[0.5, 1.0, 0.5, 0.2, 0.0, 0.0, 0.0],
               [0.0, 0.2, 0.5, 1.0, 0.5, 0.2, 0.0],
               [0.0, 0.0, 0.0, 0.2, 0.5, 1.0, 0.5]]
        data = datatools.dot_special(sources, mix)

        backend_modules = [import_module('scot.' + b) for b in scot.backends]

        for bm in backend_modules:

            api = scot.Workspace({'model_order': 2}, backend=bm.backend)

            api.set_data(data)

            # apply MVARICA
            #  - default setting of 0.99 variance should reduce to 3 channels with this data
            #  - automatically determine delta (enough data, so it should most likely be 0)
            api.do_mvarica()
            #result = varica.mvarica(data, 2, delta='auto', backend=bm.backend)

            # ICA does not define the ordering and sign of components
            # so we need to test all combinations to see if one of them fits the original coefficients
            permutations = np.array(
                [[0, 1, 2, 3, 4, 5], [0, 1, 4, 5, 2, 3], [2, 3, 4, 5, 0, 1], [2, 3, 0, 1, 4, 5], [4, 5, 0, 1, 2, 3],
                 [4, 5, 2, 3, 0, 1]])
            signperms = np.array(
                [[1, 1, 1, 1, 1, 1], [1, 1, 1, 1, -1, -1], [1, 1, -1, -1, 1, 1], [1, 1, -1, -1, -1, -1],
                 [-1, -1, 1, 1, 1, 1], [-1, -1, 1, 1, -1, -1], [-1, -1, -1, -1, 1, 1], [-1, -1, -1, -1, -1, -1]])

            best, d = np.inf, None

            for perm in permutations:
                b = api.var_.coef[perm[::2] // 2, :]
                b = b[:, perm]
                for sgn in signperms:
                    c = b * np.repeat([sgn], 3, 0) * np.repeat([sgn[::2]], 6, 0).T
                    err = np.sum((c - b0) ** 2)
                    if err < best:
                        best = err
                        d = c

            self.assertTrue(np.all(abs(d - b0) < 0.05))
Example #4
def do_mvar_evaluation(X, morder, whit_max=3., whit_min=1., thr_cons=0.8):
    '''
    Fit an MVAR model to the data using SCoT and do some basic checks.

    X: array (trials, channels, samples)
    morder: the model order

    Returns:
    (is_white, consistency, is_stable); whiteness and stability are
    returned as strings.
    '''
    print('starting checks and MVAR fitting...')
    # tsdata_to_var from MVGC requires sources x samples x trials
    # X is of shape trials x sources x samples (which is what ScoT uses)

    X_trans = X.transpose(1, 2, 0)

    A, SIG, E = _tsdata_to_var(X_trans, morder)
    del A, SIG

    whi = False
    dw, pval = dw_whiteness(X_trans, E)
    if np.all(dw < whit_max) and np.all(dw > whit_min):
        whi = True
    cons = consistency(X_trans, E)
    del dw, pval, E

    from scot.var import VAR
    mvar = VAR(morder)
    mvar.fit(X)  # scot func which requires shape trials x sources x samples
    is_st = mvar.is_stable()
    if cons < thr_cons or is_st is False or whi is False:
        print('ERROR: Model order not ideal - check parameters !!')

    return str(whi), cons, str(is_st)
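A brief usage sketch with toy data; it assumes the helpers _tsdata_to_var, dw_whiteness, and consistency are available from the surrounding module:

import numpy as np

X = np.random.randn(20, 4, 500)  # (trials, channels, samples), toy data
is_white, cons, is_stable = do_mvar_evaluation(X, morder=5)
print('white:', is_white, 'consistency:', cons, 'stable:', is_stable)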
Example #5
    def test_bisection_invalid(self):
        np.random.seed(42)
        x = np.random.randn(10, 100, 10)

        var = VAR(1)
        var.optimize_delta_bisection(x)

        # totally ugly data, should be unable to find reasonable regularization.
        self.assertEqual(var.delta, 0)
Example #6
    def test_bisection_invalid(self):
        np.random.seed(42)
        x = np.random.randn(10, 100, 10)

        var = VAR(1)
        var.optimize_delta_bisection(x)

        # totally ugly data, should be unable to find reasonable regularization.
        self.assertEqual(var.delta, 0)
Example #7
    def test_application(self):
        from scot.var import VAR
        from sklearn.cross_validation import LeaveOneOut, KFold  # removed in scikit-learn 0.20; see sklearn.model_selection
        np.random.seed(42)
        x = np.random.randn(10, 3, 15)

        var = VAR(3, xvschema=lambda n, _: LeaveOneOut(n)).optimize_delta_bisection(x)
        self.assertGreater(var.delta, 0)
        var = VAR(3, xvschema=lambda n, _: KFold(n, 5)).optimize_delta_bisection(x)
        self.assertGreater(var.delta, 0)
Example #8
    def testFit(self):
        """ Test submodel fitting on instationary data
        """
        np.random.seed(42)

        # original model coefficients
        b01 = np.array([[0.0, 0], [0, 0]])
        b02 = np.array([[0.5, 0.3], [0.3, 0.5]])
        b03 = np.array([[0.1, 0.1], [0.1, 0.1]])
        t, m, l = 10, 2, 100

        noisefunc = lambda: np.random.normal(size=(1, m))**3 / 1e3

        var = VAR(1)
        var.coef = b01
        sources1 = var.simulate([l, t], noisefunc)
        var.coef = b02
        sources2 = var.simulate([l, t], noisefunc)
        var.coef = b03
        sources3 = var.simulate([l, t * 2], noisefunc)

        sources = np.vstack([sources1, sources2, sources3])
        cl = [1] * t + [2] * t + [1, 2] * t

        var = VAR(1)
        r_trial = varica.cspvarica(sources,
                                   var,
                                   cl,
                                   reducedim=None,
                                   varfit='trial')
        r_class = varica.cspvarica(sources,
                                   var,
                                   cl,
                                   reducedim=None,
                                   varfit='class')
        r_ensemble = varica.cspvarica(sources,
                                      var,
                                      cl,
                                      reducedim=None,
                                      varfit='ensemble')

        vars = [
            np.var(r.var_residuals) for r in [r_trial, r_class, r_ensemble]
        ]

        # class one consists of trials generated with b01 and b03
        # class two consists of trials generated with b02 and b03
        #
        # ensemble fitting cannot resolve any model -> highest residual variance
        # class fitting can only resolve (b01+b03) vs (b02+b03) -> medium residual variance
        # trial fitting can resolve all three models -> lowest residual variance
        print(vars)

        self.assertLess(vars[0], vars[1])
        self.assertLess(vars[1], vars[2])
Example #9
    def testModelIdentification(self):
        """ generate VAR signals, mix them, and see if MVARICA can reconstruct the signals
            do this for every backend """

        # original model coefficients
        b0 = np.zeros((3, 6))
        b0[1:3, 2:6] = [[0.4, -0.2, 0.3, 0.0],
                        [-0.7, 0.0, 0.9, 0.0]]
        m0 = b0.shape[0]
        l, t = 1000, 100

        # generate VAR sources with non-gaussian innovation process, otherwise ICA won't work
        noisefunc = lambda: np.random.normal(size=(1, m0)) ** 3 / 1e3

        var = VAR(2)
        var.coef = b0
        sources = var.simulate([l, t], noisefunc)

        # simulate volume conduction... 3 sources measured with 7 channels
        mix = [[0.5, 1.0, 0.5, 0.2, 0.0, 0.0, 0.0],
               [0.0, 0.2, 0.5, 1.0, 0.5, 0.2, 0.0],
               [0.0, 0.0, 0.0, 0.2, 0.5, 1.0, 0.5]]
        data = datatools.dot_special(np.transpose(mix), sources)

        for backend_name, backend_gen in scot.backend.items():

            # apply MVARICA
            #  - default setting of 0.99 variance should reduce to 3 channels with this data
            #  - automatically determine delta (enough data, so it should most likely be 0)
            result = varica.mvarica(data, var, optimize_var=True, backend=backend_gen())

            # ICA does not define the ordering and sign of components
            # so we need to test all combinations to see if one of them fits the original coefficients
            permutations = np.array(
                [[0, 1, 2, 3, 4, 5], [0, 1, 4, 5, 2, 3], [2, 3, 4, 5, 0, 1], [2, 3, 0, 1, 4, 5], [4, 5, 0, 1, 2, 3],
                 [4, 5, 2, 3, 0, 1]])
            signperms = np.array(
                [[1, 1, 1, 1, 1, 1], [1, 1, 1, 1, -1, -1], [1, 1, -1, -1, 1, 1], [1, 1, -1, -1, -1, -1],
                 [-1, -1, 1, 1, 1, 1], [-1, -1, 1, 1, -1, -1], [-1, -1, -1, -1, 1, 1], [-1, -1, -1, -1, -1, -1]])

            best, d = np.inf, None

            for perm in permutations:
                b = result.b.coef[perm[::2] // 2, :]
                b = b[:, perm]
                for sgn in signperms:
                    c = b * np.repeat([sgn], 3, 0) * np.repeat([sgn[::2]], 6, 0).T
                    err = np.sum((c - b0) ** 2)
                    if err < best:
                        best = err
                        d = c

            assert_allclose(d, b0, rtol=1e-2, atol=1e-2)
Example #10
    def testModelIdentification(self):
        """ generate independent signals, mix them, and see if ICA can reconstruct the mixing matrix
            do this for every backend """

        # original model coefficients
        b0 = np.zeros((3, 3))  # no connectivity
        m0 = b0.shape[0]
        l, t = 100, 100

        # generate VAR sources with non-gaussian innovation process, otherwise ICA won't work
        noisefunc = lambda: np.random.normal(size=(1, m0))**3

        var = VAR(1)
        var.coef = b0
        sources = var.simulate([l, t], noisefunc)

        # simulate volume conduction... 3 sources measured with 7 channels
        mix = [[0.5, 1.0, 0.5, 0.2, 0.0, 0.0, 0.0],
               [0.0, 0.2, 0.5, 1.0, 0.5, 0.2, 0.0],
               [0.0, 0.0, 0.0, 0.2, 0.5, 1.0, 0.5]]
        data = datatools.dot_special(sources, mix)

        backend_modules = [import_module('scot.' + b) for b in scot.backends]

        for bm in backend_modules:

            result = plainica.plainica(data, backend=bm.backend)

            i = result.mixing.dot(result.unmixing)
            self.assertTrue(
                np.allclose(i, np.eye(i.shape[0]), rtol=1e-6, atol=1e-7))

            permutations = [[0, 1, 2], [0, 2, 1], [1, 0, 2], [1, 2, 0],
                            [2, 0, 1], [2, 1, 0]]

            bestdiff = np.inf
            bestmix = None

            absmix = np.abs(result.mixing)
            absmix /= np.max(absmix)

            for p in permutations:
                estmix = absmix[p, :]
                diff = np.sum((np.abs(estmix) - np.abs(mix))**2)

                if diff < bestdiff:
                    bestdiff = diff
                    bestmix = estmix

            self.assertTrue(np.allclose(bestmix, mix, rtol=1e-1, atol=1e-1))
Example #11
    def test_bootstrap_difference_and_fdr(self):
        # Generate reference data
        np.random.seed(31415)
        x, var0 = self.generate_data()
        a = cs.bootstrap_connectivity('PDC', x, VAR(2), nfft=4, repeats=100)

        # Similar to reference data ==> no significant differences expected
        np.random.seed(12345)
        x, var0 = self.generate_data()
        b = cs.bootstrap_connectivity('PDC', x, VAR(2), nfft=4, repeats=100)
        p = cs.test_bootstrap_difference(a, b)
        self.assertFalse(np.any(p < 0.01))  # TODO: np.all?
        self.assertFalse(np.any(cs.significance_fdr(p, 0.05)))  # TODO: np.all?

        # Trials rearranged ==> no significant differences expected
        np.random.seed(12345)
        x, var0 = self.generate_data()
        b = cs.bootstrap_connectivity('PDC',
                                      x[::-1, :, :],
                                      VAR(2),
                                      nfft=4,
                                      repeats=100)
        p = cs.test_bootstrap_difference(a, b)
        self.assertFalse(np.any(p < 0.01))
        self.assertFalse(np.any(cs.significance_fdr(p, 0.05)))

        # Channels rearranged ==> highly significant differences expected
        np.random.seed(12345)
        x, var0 = self.generate_data()
        b = cs.bootstrap_connectivity('PDC',
                                      x[1, ::-1, :],
                                      VAR(2),
                                      nfft=4,
                                      repeats=100)
        p = cs.test_bootstrap_difference(a, b)
        self.assertTrue(np.all(p < 0.0001))
        self.assertTrue(np.all(cs.significance_fdr(p, 0.01)))

        # Time reversed ==> highly significant differences expected
        np.random.seed(12345)
        x, var0 = self.generate_data()
        b = cs.bootstrap_connectivity('PDC',
                                      x[1, :, ::-1],
                                      VAR(2),
                                      nfft=4,
                                      repeats=100)
        p = cs.test_bootstrap_difference(a, b)
        self.assertTrue(np.all(p < 0.0001))
        self.assertTrue(np.all(cs.significance_fdr(p, 0.01)))
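Condensed, the bootstrap-difference workflow exercised above looks like this; a sketch assuming cs refers to scot.connectivity_statistics and using toy data in place of generate_data():

import numpy as np
from scot.var import VAR
import scot.connectivity_statistics as cs  # assumed import alias

np.random.seed(0)
x1 = np.random.randn(20, 2, 100)  # (trials, channels, samples), toy data
x2 = np.random.randn(20, 2, 100)

a = cs.bootstrap_connectivity('PDC', x1, VAR(2), nfft=4, repeats=100)
b = cs.bootstrap_connectivity('PDC', x2, VAR(2), nfft=4, repeats=100)
p = cs.test_bootstrap_difference(a, b)      # p-values per connection and frequency
significant = cs.significance_fdr(p, 0.05)  # FDR-corrected significance mask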
Example #12
    def test_optimize(self):
        np.random.seed(745)
        var0 = VAR(2)
        var0.coef = np.array([[0.2, 0.1, 0.4, -0.1], [0.3, -0.2, 0.1, 0]])
        l = (100, 10)
        x = var0.simulate(l)

        for n_jobs in [None, -1, 1, 2]:
            var = VAR(-1, n_jobs=n_jobs, verbose=0)

            var.optimize_order(x)
            self.assertEqual(var.p, 2)

            var.optimize_order(x, min_p=1, max_p=1)
            self.assertEqual(var.p, 1)
Example #13
    def testModelIdentification(self):
        """ generate independent signals, mix them, and see if ICA can reconstruct the mixing matrix
            do this for every backend """

        # original model coefficients
        b0 = np.zeros((3, 3))    # no connectivity
        m0 = b0.shape[0]
        l, t = 100, 100

        # generate VAR sources with non-gaussian innovation process, otherwise ICA won't work
        noisefunc = lambda: np.random.normal(size=(1, m0)) ** 3

        var = VAR(1)
        var.coef = b0
        sources = var.simulate([l, t], noisefunc)

        # simulate volume conduction... 3 sources measured with 7 channels
        mix = [[0.5, 1.0, 0.5, 0.2, 0.0, 0.0, 0.0],
               [0.0, 0.2, 0.5, 1.0, 0.5, 0.2, 0.0],
               [0.0, 0.0, 0.0, 0.2, 0.5, 1.0, 0.5]]
        data = datatools.dot_special(sources, mix)

        backend_modules = [import_module('scot.' + b) for b in scot.backends]

        for bm in backend_modules:

            result = plainica.plainica(data, backend=bm.backend)

            i = result.mixing.dot(result.unmixing)
            self.assertTrue(np.allclose(i, np.eye(i.shape[0]), rtol=1e-6, atol=1e-7))

            permutations = [[0, 1, 2], [0, 2, 1], [1, 0, 2], [1, 2, 0], [2, 0, 1], [2, 1, 0]]

            bestdiff = np.inf
            bestmix = None

            absmix = np.abs(result.mixing)
            absmix /= np.max(absmix)

            for p in permutations:
                estmix = absmix[p, :]
                diff = np.sum((np.abs(estmix) - np.abs(mix)) ** 2)

                if diff < bestdiff:
                    bestdiff = diff
                    bestmix = estmix

            self.assertTrue(np.allclose(bestmix, mix, rtol=1e-1, atol=1e-1))
Example #14
def compute_order(X, m_max, verbose=True):
    """
    Estimate VAR order with the Bayesian Information Criterion (BIC).

    Parameters
    ----------
    X : ndarray, shape (trials, n_channels, n_samples)

    m_max : int
        The maximum model order to test

    References
    ----------
    [1] provides the equation: BIC(m) = 2*log[det(Σ)] + 2*(p**2)*m*log(N*n*m)/(N*n*m),
    where Σ is the noise covariance matrix, p is the number of channels, N is the
    number of trials, n is the number of samples, and m is the model order.

    [1] Mingzhou Ding, Yonghong Chen. Granger Causality: Basic Theory and Application
    to Neuroscience. Elsevier Science, 7 February 2008.

    URL: https://gist.github.com/dongqunxi/b23d1679b9bffa8e458c11f93bd8d6ff


    Returns
    -------
    o_m : int
        Estimated order
    bic : list
        List with the BICs for the orders from 1 to m_max.
    """
    from scot.var import VAR
    from scipy import linalg

    N, p, n = X.shape
    bic = []
    for m in range(m_max):
        mvar = VAR(m + 1)
        mvar.fit(X)
        sigma = mvar.rescov
        m_bic = np.log(linalg.det(sigma))
        m_bic += (p**2) * (m + 1) * np.log(N * n) / (N * n)
        bic.append(m_bic)
        if verbose:
            print('model order: %d, BIC value: %.2f' % (m + 1, bic[m]))

    o_m = np.argmin(bic) + 1
    return o_m, bic
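A quick usage sketch with toy data, following the shapes in the docstring above:

import numpy as np

X = np.random.randn(20, 4, 500)  # (trials, n_channels, n_samples), toy data
order, bics = compute_order(X, m_max=10)
print('estimated model order:', order)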
Example #15
    def test_bisection_overdetermined(self):
        np.random.seed(42)
        var0 = VAR(2)
        var0.coef = np.array([[0.2, 0.1, 0.4, -0.1], [0.3, -0.2, 0.1, 0]])
        l = (100, 10)
        x = var0.simulate(l)

        var = VAR(2)
        var.optimize_delta_bisection(x)

        # nice data, so the regularization should not be too strong.
        self.assertLess(var.delta, 10)
Example #16
    def test_source_selection(self):
        var = VAR(2)
        var.coef = np.random.randn(16, 4)
        x = var.simulate([500, 50], lambda: np.random.randn(16).dot(np.eye(16, 16)))
        api = scot.Workspace({"model_order": 2})
        api.set_data(x)
        self.assertRaises(RuntimeError, api.keep_sources, [0, 5, 11, 12])
        self.assertRaises(RuntimeError, api.remove_sources, [1, 2, 8, 14])

        # keep sources
        api.do_mvarica()
        api.keep_sources([0, 5, 11, 12])
        self.assertEqual(api.mixing_.shape, (4, 16))
        self.assertEqual(api.unmixing_.shape, (16, 4))

        # remove sources
        api.do_mvarica()
        api.remove_sources([1, 2, 8, 14])
        self.assertEqual(api.mixing_.shape, (12, 16))
        self.assertEqual(api.unmixing_.shape, (16, 12))
Example #17
    def test_residuals(self):
        l = 100000
        var0 = VAR(2)
        var0.coef = np.array([[0.2, 0.1, 0.4, -0.1], [0.3, -0.2, 0.1, 0]])
        x = var0.simulate(l)

        var = VAR(2)
        var.fit(x)

        self.assertEqual(x.shape, var.residuals.shape)

        self.assertTrue(
            np.allclose(var.rescov, np.eye(var.rescov.shape[0]), 1e-2, 1e-2))
Example #18
    def test_source_selection(self):
        var = VAR(2)
        var.coef = np.random.randn(16, 4)
        x = var.simulate([500, 50],
                         lambda: np.random.randn(16).dot(np.eye(16, 16)))
        api = scot.Workspace({'model_order': 2})
        api.set_data(x)
        self.assertRaises(RuntimeError, api.keep_sources, [0, 5, 11, 12])
        self.assertRaises(RuntimeError, api.remove_sources, [1, 2, 8, 14])

        # keep sources
        api.do_mvarica()
        api.keep_sources([0, 5, 11, 12])
        self.assertEqual(api.mixing_.shape, (4, 16))
        self.assertEqual(api.unmixing_.shape, (16, 4))

        # remove sources
        api.do_mvarica()
        api.remove_sources([1, 2, 8, 14])
        self.assertEqual(api.mixing_.shape, (12, 16))
        self.assertEqual(api.unmixing_.shape, (16, 12))
Example #19
    def test_bisection_underdetermined(self):
        n_trials, n_samples = 10, 10
        np.random.seed(42)
        var0 = VAR(2)
        var0.coef = np.array([[0.2, 0.1, 0.4, -0.1], [0.3, -0.2, 0.1, 0]])
        x = var0.simulate((n_samples, n_trials))
        x = np.concatenate([x, np.random.randn(n_trials, 8, n_samples)],
                           axis=1)

        var = VAR(7)
        var.optimize_delta_bisection(x)

        # nice data, so the regularization should not be too weak.
        self.assertGreater(var.delta, 10)
Example #20
    def test_surrogate(self):
        np.random.seed(31415)
        x, var0 = self.generate_data()

        result = cs.surrogate_connectivity('PDC',
                                           x,
                                           VAR(2),
                                           nfft=4,
                                           repeats=100)
        self.assertEqual(result.shape, (100, 2, 2, 4))

        structure = np.mean(np.mean(result, axis=3), axis=0)
        self.assertTrue(np.all(np.abs(structure - np.eye(2)) < 0.05))
Example #21
    def test_fit(self):
        var0 = VAR(2)
        var0.coef = np.array([[0.2, 0.1, 0.4, -0.1], [0.3, -0.2, 0.1, 0]])
        l = 100000
        x = var0.simulate(l)
        y = x.copy()

        var = VAR(2)
        var.fit(x)

        # make sure the input remains unchanged
        self.assertTrue(np.all(x == y))

        # that limit is rather generous, but we don't want tests to fail due to random variation
        self.assertTrue(np.all(np.abs(var0.coef - var.coef) < 0.02))
Example #22
    def test_optimize(self):
        np.random.seed(745)
        var0 = VAR(2)
        var0.coef = np.array([[0.2, 0.1, 0.4, -0.1], [0.3, -0.2, 0.1, 0]])
        l = (100, 10)
        x = var0.simulate(l)

        for n_jobs in [None, -1, 1, 2]:
            var = VAR(-1, n_jobs=n_jobs, verbose=0)

            var.optimize_order(x)
            self.assertEqual(var.p, 2)

            var.optimize_order(x, min_p=1, max_p=1)
            self.assertEqual(var.p, 1)
Example #23
    def test_bootstrap(self):
        np.random.seed(31415)
        x, var0 = self.generate_data()

        result = cs.bootstrap_connectivity('PDC',
                                           x,
                                           VAR(2),
                                           nfft=4,
                                           repeats=100)
        self.assertEqual(result.shape, (100, 2, 2, 4))

        structure = np.mean(np.mean(result, axis=3), axis=0)
        # make sure result has roughly the correct structure
        self.assertTrue(np.all(np.abs(structure - [[1, 0], [0.5, 1]]) < 0.25))
Example #24
    def test_residuals(self):
        l = 100000
        var0 = VAR(2)
        var0.coef = np.array([[0.2, 0.1, 0.4, -0.1], [0.3, -0.2, 0.1, 0]])
        x = var0.simulate(l)

        var = VAR(2)
        var.fit(x)

        self.assertEqual(x.shape, var.residuals.shape)

        self.assertTrue(np.allclose(var.rescov, np.eye(var.rescov.shape[0]), 1e-2, 1e-2))
Example #25
    def test_bisection_overdetermined(self):
        np.random.seed(42)
        var0 = VAR(2)
        var0.coef = np.array([[0.2, 0.1, 0.4, -0.1], [0.3, -0.2, 0.1, 0]])
        l = (100, 10)
        x = var0.simulate(l)

        var = VAR(2)
        var.optimize_delta_bisection(x)

        # nice data, so the regularization should not be too strong.
        self.assertLess(var.delta, 10)
Example #26
    def test_fit_regularized(self):
        l = 100000
        var0 = VAR(2)
        var0.coef = np.array([[0.2, 0.1, 0.4, -0.1], [0.3, -0.2, 0.1, 0]])
        x = var0.simulate(l)
        y = x.copy()

        var = VAR(10, delta=1)
        var.fit(x)

        # make sure the input remains unchanged
        self.assertTrue(np.all(x == y))

        b0 = np.zeros((2, 20))
        b0[:, 0:2] = var0.coef[:, 0:2]
        b0[:, 10:12] = var0.coef[:, 2:4]

        # that limit is rather generous, but we don't want tests to fail due to random variation
        self.assertTrue(np.all(np.abs(b0 - var.coef) < 0.02))
Example #27
    def test_random_state(self):
        np.random.seed(10)
        api = scot.Workspace(VAR(1), locations=[[0, 0, 1], [1, 0, 0], [0, 1, 0], [-1, 0, 0], [0, -1, 0]], reducedim=None)
        api.set_data(np.random.randn(10, 5, 10), [1, 0] * 5)

        # test MVARICA
        api.do_mvarica(random_state=1)
        mixing1 = api.mixing_
        api.do_mvarica(random_state=1)
        mixing2 = api.mixing_
        assert_array_equal(mixing1, mixing2)

        # test CSPVARICA
        api.do_cspvarica(random_state=1)
        mixing1 = api.mixing_
        api.do_cspvarica(random_state=1)
        mixing2 = api.mixing_
        assert_array_equal(mixing1, mixing2)
Example #28
    def test_bisection_underdetermined(self):
        n_trials, n_samples = 10, 10
        np.random.seed(42)
        var0 = VAR(2)
        var0.coef = np.array([[0.2, 0.1, 0.4, -0.1], [0.3, -0.2, 0.1, 0]])
        x = var0.simulate((n_samples, n_trials))
        x = np.concatenate([x, np.random.randn(n_trials, 8, n_samples)], axis=1)

        var = VAR(7)
        var.optimize_delta_bisection(x)

        # nice data, so the regularization should not be too weak.
        self.assertGreater(var.delta, 10)
Example #29
    def testFit(self):
        """ Test submodel fitting on instationary data
        """
        np.random.seed(42)

        # original model coefficients
        b01 = np.array([[0.0, 0], [0, 0]])
        b02 = np.array([[0.5, 0.3], [0.3, 0.5]])
        b03 = np.array([[0.1, 0.1], [0.1, 0.1]])
        t, m, l = 10, 2, 100

        noisefunc = lambda: np.random.normal(size=(1, m)) ** 3 / 1e3

        var = VAR(1)
        var.coef = b01
        sources1 = var.simulate([l, t], noisefunc)
        var.coef = b02
        sources2 = var.simulate([l, t], noisefunc)
        var.coef = b03
        sources3 = var.simulate([l, t * 2], noisefunc)

        sources = np.vstack([sources1, sources2, sources3])
        cl = [1] * t + [2] * t + [1, 2] * t

        var = VAR(1)
        r_trial = varica.cspvarica(sources, var, cl, reducedim=None, varfit='trial')
        r_class = varica.cspvarica(sources, var, cl, reducedim=None, varfit='class')
        r_ensemble = varica.cspvarica(sources, var, cl, reducedim=None, varfit='ensemble')

        vars = [np.var(r.var_residuals) for r in [r_trial, r_class, r_ensemble]]

        # class one consists of trials generated with b01 and b03
        # class two consists of trials generated with b02 and b03
        #
        # ensemble fitting cannot resolve any model -> highest residual variance
        # class fitting can only resolve (b01+b03) vs (b02+b03) -> medium residual variance
        # trial fitting can resolve all three models -> lowest residual variance
        print(vars)

        self.assertLess(vars[0], vars[1])
        self.assertLess(vars[1], vars[2])
Example #30
    def test_fit(self):
        var0 = VAR(2)
        var0.coef = np.array([[0.2, 0.1, 0.4, -0.1], [0.3, -0.2, 0.1, 0]])
        l = 100000
        x = var0.simulate(l)
        y = x.copy()

        var = VAR(2)
        var.fit(x)

        # make sure the input remains unchanged
        self.assertTrue(np.all(x == y))

        # that limit is rather generous, but we don't want tests to fail due to random variation
        self.assertTrue(np.all(np.abs(var0.coef - var.coef) < 0.02))
Example #31
    def test_plotting(self):
        np.random.seed(3141592)

        api = scot.Workspace(VAR(1), locations=[[0, 0, 1], [1, 0, 0], [0, 1, 0], [-1, 0, 0], [0, -1, 0]])

        api.set_data(np.random.randn(10, 5, 10), [1, 0] * 5)
        api.do_mvarica()

        api.plot_source_topos()

        for diag in ['S', 'fill', 'topo']:
            for outside in [True, False]:
                api.plot_diagonal = diag
                api.plot_outside_topo = outside

                fig = api.plot_connectivity_topos()
                api.get_connectivity('PHI', plot=fig)
                api.get_surrogate_connectivity('PHI', plot=fig, repeats=5)
                api.get_bootstrap_connectivity('PHI', plot=fig, repeats=5)
                api.get_tf_connectivity('PHI', winlen=2, winstep=1, plot=fig)
                api.compare_conditions([0], [1], 'PHI', plot=fig, repeats=5)
Example #32
    def test_fit_regularized(self):
        l = 100000
        var0 = VAR(2)
        var0.coef = np.array([[0.2, 0.1, 0.4, -0.1], [0.3, -0.2, 0.1, 0]])
        x = var0.simulate(l)
        y = x.copy()

        var = VAR(10, delta=1)
        var.fit(x)

        # make sure the input remains unchanged
        self.assertTrue(np.all(x == y))

        b0 = np.zeros((2, 20))
        b0[:, 0:2] = var0.coef[:, 0:2]
        b0[:, 10:12] = var0.coef[:, 2:4]

        # that limit is rather generous, but we don't want tests to fail due to random variation
        self.assertTrue(np.all(np.abs(b0 - var.coef) < 0.02))
Example #33
def compute_order_extended(X,
                           m_max,
                           m_min=1,
                           m_step=1,
                           n_jobs=None,
                           verbose=True):
    """
    Estimate VAR order with the Bayesian Information Criterion (BIC).

    Parameters:
    -----------
    X : ndarray, shape (trials, n_channels, n_samples)

    m_max : int
        The maximum model order to test,
    m_min : int
        The minimum model order to test.
    m_step : int
        The step size for checking the model order interval
        given by m_min and m_max.
    n_jobs : None | int, optional
        Number of jobs to run in parallel for various tasks (e.g. whiteness
        testing). If set to None, joblib is not used at all. Note that the main
        script must be guarded with `if __name__ == '__main__':` when using
        parallelization.
    verbose : bool
        Plot results for other information criteria as well.

    Returns:
    --------
    o_m : int
        Estimated order using BIC2.
    morders : np.array of shape ((m_max - m_min) / m_step, )
        The model orders corresponding to the entries in the following results
        arrays.
    ics : np.array of shape (n_ics, (m_max - m_min) / m_step)
        The information criteria for the different model orders:
        [AIC1, BIC1, AIC2, BIC2, lnFPE, HQIC].
    stability : np.array of shape ((m_max - m_min) / m_step, )
        Indicates if the MVAR model describes a stable process (covariance
        stationary).
    p_white_scot : np.array of shape ((m_max - m_min) / m_step, )
        p-values that the residuals are white based on the Li-McLeod Portmanteau
        test implemented in SCoT. Reject the hypothesis of white residuals if p
        is smaller than the critical p-value.
    p_white_dw : np.array of shape ((m_max - m_min) / m_step, n_rois)
        Uncorrected p-values that the residuals are white based on the
        Durbin-Watson test as implemented by Barnett and Seth (2012). Reject the
        hypothesis of white residuals if all p's are smaller than the critical
        p-value.
    dw : np.array of shape ((m_max - m_min) / m_step, n_rois)
        The Durbin-Watson statistics.
    consistency : np.array of shape ((m_max - m_min) / m_step, )
        Results of the MVAR consistency estimation.

    References:
    -----------
    [1] provides the equation: BIC(m) = 2*log[det(Σ)] + 2*(p**2)*m*log(N*n*m)/(N*n*m),
    where Σ is the noise covariance matrix, p is the number of channels, N is the
    number of trials, n is the number of samples, and m is the model order.

    [1] Mingzhou Ding, Yonghong Chen (2008). "Granger Causality: Basic Theory and Application
    to Neuroscience." Elsevier Science

    [2] Nicoletta Nicolaou and Julius Georgiou (2013). “Autoregressive Model Order Estimation
    Criteria for Monitoring Awareness during Anaesthesia.” IFIP Advances in Information and
    Communication Technology 412

    [3] Helmut Lütkepohl (2005). "New Introduction to Multiple Time Series Analysis."
    1st ed. Berlin: Springer-Verlag Berlin Heidelberg.

    URL: https://gist.github.com/dongqunxi/b23d1679b9bffa8e458c11f93bd8d6ff
    """
    from scot.var import VAR
    from scipy import linalg

    N, p, n = X.shape

    aic1 = []
    bic1 = []
    aic2 = []
    bic2 = []
    lnfpe = []
    hqic = []

    morders = []
    stability = []
    p_white_scot = []
    p_white_dw = []
    dw = []

    consistency = []

    # TODO: should this be n_total = N * n * p ???
    # total number of data points: n_trials * n_samples
    # Esther Florin (2010): N_total is number of time points contained in each time series
    n_total = N * n

    # check model order min/max/step input
    if m_min >= m_max:
        m_min = m_max - 1
    if m_min < 1:
        m_min = 1
    if m_step < 1:
        m_step = 1
    if m_step >= m_max:
        m_step = m_max

    for m in range(m_min, m_max + 1, m_step):
        morders.append(m)
        mvar = VAR(m, n_jobs=n_jobs)
        mvar.fit(X)

        stable = mvar.is_stable()
        stability.append(stable)

        p_white_scot_ = mvar.test_whiteness(h=m,
                                            repeats=100,
                                            get_q=False,
                                            random_state=None)
        white_scot_ = p_white_scot_ >= 0.05

        p_white_scot.append(p_white_scot_)

        white_dw_, cons, dw_, pval = check_whiteness_and_consistency(
            X.transpose(1, 2, 0),
            mvar.residuals.transpose(1, 2, 0),
            alpha=0.05)
        dw.append(dw_)
        p_white_dw.append(pval)
        consistency.append(cons)

        sigma = mvar.rescov

        ########################################################################
        # from [1]
        ########################################################################
        m_aic = 2 * np.log(linalg.det(sigma)) + 2 * (p**2) * m / n_total
        m_bic = 2 * np.log(
            linalg.det(sigma)) + 2 * (p**2) * m / n_total * np.log(n_total)
        aic1.append(m_aic)
        bic1.append(m_bic)

        ########################################################################
        # from [2]
        ########################################################################

        m_aic2 = np.log(linalg.det(sigma)) + 2 * (p**2) * m / n_total
        m_bic2 = np.log(
            linalg.det(sigma)) + (p**2) * m / n_total * np.log(n_total)

        aic2.append(m_aic2)
        bic2.append(m_bic2)

        ########################################################################
        # from [3]
        ########################################################################
        # Akaike's final prediction error
        m_ln_fpe3 = np.log(linalg.det(sigma)) + p * np.log(
            (n_total + m * p + 1) / (n_total - m * p - 1))
        # Hannan-Quinn criterion
        m_hqc3 = np.log(linalg.det(sigma)) + 2 * (p**2) * m / n_total * np.log(
            np.log(n_total))

        lnfpe.append(m_ln_fpe3)
        hqic.append(m_hqc3)

        if verbose:
            results = 'Model order: ' + str(m).zfill(2)
            results += '    AIC1: %.2f' % m_aic
            results += '    BIC1: %.2f' % m_bic
            results += '    AIC2: %.2f' % m_aic2
            results += '    BIC2: %.2f' % m_bic2
            results += '  lnFPE3: %.2f' % m_ln_fpe3
            results += '    HQC3: %.2f' % m_hqc3
            results += '  stable: %s' % str(stable)
            results += '  white1: %s' % str(white_scot_)
            results += '  white2: %s' % str(white_dw_)
            results += '   DWmin: %.2f' % dw_.min()
            results += '   DWmax: %.2f' % dw_.max()
            results += ' consistency: %.4f' % cons

            print(results)

    morders = np.array(morders)
    o_m = morders[np.argmin(bic2)]
    if verbose:
        print('>>> Optimal model order according to BIC2 = %d' % o_m)

    ics = [aic1, bic1, aic2, bic2, lnfpe, hqic]
    ics = np.asarray(ics)

    stability = np.array(stability)
    p_white_scot = np.array(p_white_scot)
    p_white_dw = np.array(p_white_dw)
    dw = np.array(dw)
    consistency = np.array(consistency)

    return o_m, morders, ics, stability, p_white_scot, p_white_dw, dw, consistency
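A usage sketch with toy data; it assumes the helper check_whiteness_and_consistency from the same module is available:

import numpy as np

X = np.random.randn(20, 4, 500)  # (trials, n_channels, n_samples), toy data
o_m, morders, ics, stability, p_scot, p_dw, dw, cons = compute_order_extended(
    X, m_max=10, m_min=1, m_step=1, verbose=True)
print('optimal model order (BIC2):', o_m)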
Example #34
"""
This example demonstrates that it is possible to reconstruct sources even if we
include a PCA step in the process.
"""

from __future__ import print_function

import numpy as np

from scot.pca import pca

from scot.var import VAR

# Generate data from a VAR(1) process
model0 = VAR(1)
model0.coef = np.array([[0.3, -0.6], [0, -0.9]])
x = model0.simulate(10000).squeeze()

# Transform data with PCA
w, v = pca(x)
y = x.dot(w)

print('Covariance of x:\n', np.cov(x.squeeze().T))
print('\nCovariance of y:\n', np.cov(y.squeeze().T))

model1, model2 = VAR(1), VAR(1)

# Fit model1 to the original data
model1.fit(x)

# Fit model2 to the PCA-transformed data
model2.fit(y)
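A minimal way to inspect both fits, assuming SCoT's VAR exposes the fitted coefficients via .coef as used throughout these examples:

print('\nCoefficients fitted on x:\n', model1.coef)
print('\nCoefficients fitted on y:\n', model2.coef)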
Example #35
 def testInterface(self):
     # self.assertRaises(TypeError, varica.cspvarica)
     # simply pass in different data shapes and see if the function runs without error
     self.assertRaises(AttributeError, varica.cspvarica,
                       np.sin(np.arange(30)).reshape((10, 3)), VAR(1), [0])
Example #36
# Assumed imports for this example (paths follow SCoT's examples; adjust if your setup differs)
import time

import numpy as np

from scot.datasets import fetch
from scot.datatools import cut_segments
from scot.var import VAR

# Prevent execution of the main script in worker threads
if __name__ == "__main__":

    midata = fetch("mi")[0]

    raweeg = midata["eeg"]
    triggers = midata["triggers"]
    classes = midata["labels"]
    fs = midata["fs"]
    locs = midata["locations"]

    # Prepare data
    #
    # Here we cut out segments from 3s to 4s after each trigger. This is right
    # in the middle of the motor imagery period.
    data = cut_segments(raweeg, triggers, 3 * fs, 4 * fs)

    # only use every 10th trial to make the example run faster
    data = data[::10]

    var = VAR(model_order=5)
    var.fit(data)
    for n_jobs in [-1, None, 1, 2, 3, 4, 5, 6, 7, 8]:
        # Set random seed for repeatable results
        np.random.seed(42)
        var.n_jobs = n_jobs
        start = time.perf_counter()
        p = var.test_whiteness(10, repeats=1000)
        time1 = time.perf_counter()
        print('n_jobs: {:>4s}, whiteness test: {:.2f}s, p = {}'.format(str(n_jobs), time1 - start, p))
Example #37
 def generate_data():
     var = VAR(2)
     var.coef = np.array([[0.2, 0.1, 0, 0], [0.7, -0.4, 0.1, 0]])
     l = (100, 100)
     x = var.simulate(l)
     return x, var
Example #38
 def test_premixing(self):
     api = scot.Workspace(VAR(1))
     api.set_premixing([[0, 1], [1, 0]])
Example #39
 def testTrivia(self):
     api = scot.Workspace(VAR(1))
     str(api)
Example #40
File: pca.py  Project: cbrnr/scot
"""
This example demonstrates that it is possible to reconstruct sources even if
we include PCA in the process.
"""

from __future__ import print_function

import numpy as np

from scot.pca import pca
from scot.var import VAR


# Set random seed for repeatable results
np.random.seed(42)

# Generate data from a VAR(1) process
model0 = VAR(1)
model0.coef = np.array([[0.3, -0.6], [0, -0.9]])
x = model0.simulate(10000).squeeze()

# Transform data with PCA
w, v = pca(x)
y = np.dot(w.T, x)

# Verify that transformed data y is decorrelated
print("Covariance of x:\n", np.cov(x.squeeze()))
print("\nCovariance of y:\n", np.cov(y.squeeze()))

model1, model2 = VAR(1), VAR(1)

# Fit model1 to the original data
model1.fit(x)

# Fit model2 to the PCA-transformed data
model2.fit(y)
Example #41
 def generate_data():
     var = VAR(2)
     var.coef = np.array([[0.2, 0.1, 0, 0], [0.7, -0.4, 0.1, 0]])
     l = (100, 100)
     x = var.simulate(l)
     return x, var
Example #42
def check_model_order(X, p, whit_min=1.5, whit_max=2.5, check_stability=True):
    """
    Check whiteness, consistency, and stability for all model
    orders k <= p.

    Computationally intensive but for high model orders probably
    faster than do_mvar_evaluation().

    Parameters:
    -----------
    X : ndarray, shape (n_epochs, n_sources, n_times)
        The data to estimate the model order for.
    p : int
        The maximum model order.
    whit_min : float
        Lower boundary for the Durbin-Watson test.
    whit_max : float
        Upper boundary for the Durbin-Watson test.
    check_stability : bool
        Check the stability condition. Time intensive, since it fits a
        second MVAR model using scot.var.VAR.
    Returns:
    --------
    Nothing. Whiteness, consistency, and (optionally) stability are
    printed for each model order k <= p.
    """

    assert p >= 1, "The model order must be greater than or equal to 1."

    from scot.var import VAR

    X_orig = X.copy()
    X = X.transpose(1, 2, 0)

    n, m, N = X.shape
    p1 = p + 1
    q1n = p1 * n
    I = np.eye(n)
    XX = np.zeros((n, p1, m + p, N))
    for k in range(p1):
        XX[:, k, k:k + m, :] = X
    AF = np.zeros((n, q1n))
    AB = np.zeros((n, q1n))
    k = 1
    kn = k * n
    M = N * (m - k)
    kf = list(range(0, kn))
    kb = list(range(q1n - kn, q1n))
    XF = np.reshape(XX[:, 0:k, k:m, :], (kn, M), order='F')
    XB = np.reshape(XX[:, 0:k, k - 1:m - 1, :], (kn, M), order='F')
    CXF = np.linalg.cholesky(XF.dot(XF.T)).T
    CXB = np.linalg.cholesky(XB.dot(XB.T)).T
    AF[:, kf] = np.linalg.solve(CXF.T, I)
    AB[:, kb] = np.linalg.solve(CXB.T, I)

    del p1, XF, XB, CXF, CXB

    while k <= p:

        tempF = np.reshape(XX[:, 0:k, k:m, :], (kn, M), order='F')
        af = AF[:, kf]
        EF = af.dot(tempF)

        del af, tempF

        tempB = np.reshape(XX[:, 0:k, k - 1:m - 1, :], (kn, M), order='F')
        ab = AB[:, kb]
        EB = ab.dot(tempB)

        del ab, tempB

        CEF = np.linalg.cholesky(EF.dot(EF.T)).T
        CEB = np.linalg.cholesky(EB.dot(EB.T)).T
        R = np.dot(np.linalg.solve(CEF.T, EF.dot(EB.T)), np.linalg.inv(CEB))

        del EB, CEF, CEB

        RF = np.linalg.cholesky(I - R.dot(R.T)).T
        RB = np.linalg.cholesky(I - (R.T).dot(R)).T
        k = k + 1
        kn = k * n
        M = N * (m - k)
        kf = np.arange(kn)
        kb = list(range(q1n - kn, q1n))
        AFPREV = AF[:, kf]
        ABPREV = AB[:, kb]
        AF[:, kf] = np.linalg.solve(RF.T, AFPREV - R.dot(ABPREV))
        AB[:, kb] = np.linalg.solve(RB.T, ABPREV - R.T.dot(AFPREV))

        del RF, RB, ABPREV

        # check MVAR model properties

        E = np.linalg.solve(AFPREV[:, :n], EF)
        E = np.reshape(E, (n, m - k + 1, N), order='F')

        if k > 1:

            whi, cons, _, _ = check_whiteness_and_consistency(
                X, E, whit_min, whit_max)

            if check_stability:
                mvar = VAR(k - 1)
                mvar.fit(X_orig)  # SCoT expects shape (trials, sources, samples)
                is_st = mvar.is_stable()

            output = 'morder %d:' % (k - 1)
            output += ' white: %s' % str(whi)
            output += '; consistency: %.4f' % cons
            if check_stability:
                output += '; stable: %s' % str(is_st)
            print(output)
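A usage sketch with toy data; the helper check_whiteness_and_consistency from the same module must be available:

import numpy as np

X = np.random.randn(20, 4, 500)  # (n_epochs, n_sources, n_times), toy data
check_model_order(X, p=10, check_stability=True)  # prints one line per model order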
Example #43
    def testFunctionality(self):
        """ generate VAR signals, and apply the api to them
            do this for every backend """
        np.random.seed(3141592)

        # original model coefficients
        b01 = np.zeros((3, 6))
        b02 = np.zeros((3, 6))
        b01[1:3, 2:6] = [[0.4, -0.2, 0.3, 0.0], [-0.7, 0.0, 0.9, 0.0]]
        b02[0:3, 2:6] = [[0.4, 0.0, 0.0, 0.0], [0.4, 0.0, 0.4, 0.0],
                         [0.0, 0.0, 0.4, 0.0]]
        m0 = b01.shape[0]
        cl = np.array([0, 1, 0, 1, 0, 0, 1, 1, 1, 0])
        l = 200
        t = len(cl)

        # generate VAR sources with non-gaussian innovation process, otherwise ICA won't work
        noisefunc = lambda: np.random.normal(size=(1, m0))**3

        var = VAR(2)
        var.coef = b01
        sources1 = var.simulate([l, sum(cl == 0)], noisefunc)
        var.coef = b02
        sources2 = var.simulate([l, sum(cl == 1)], noisefunc)

        var.fit(sources1)
        var.fit(sources2)

        sources = np.zeros((l, m0, t))

        sources[:, :, cl == 0] = sources1
        sources[:, :, cl == 1] = sources2

        # simulate volume conduction... 3 sources measured with 7 channels
        mix = [[0.5, 1.0, 0.5, 0.2, 0.0, 0.0, 0.0],
               [0.0, 0.2, 0.5, 1.0, 0.5, 0.2, 0.0],
               [0.0, 0.0, 0.0, 0.2, 0.5, 1.0, 0.5]]
        data = datatools.dot_special(sources, mix)

        backend_modules = [import_module('scot.' + b) for b in scot.backends]

        for bm in backend_modules:
            np.random.seed(3141592)  # reset random seed so we're independent of module order

            api = scot.Workspace({'model_order': 2},
                                 reducedim=3,
                                 backend=bm.backend)

            api.set_data(data)

            api.do_ica()

            self.assertEqual(api.mixing_.shape, (3, 7))
            self.assertEqual(api.unmixing_.shape, (7, 3))

            api.do_mvarica()

            self.assertEqual(api.get_connectivity('S').shape, (3, 3, 512))

            self.assertFalse(np.any(np.isnan(api.activations_)))
            self.assertFalse(np.any(np.isinf(api.activations_)))

            api.set_data(data)

            api.fit_var()

            self.assertEqual(api.get_connectivity('S').shape, (3, 3, 512))
            self.assertEqual(
                api.get_tf_connectivity('S', 100, 50).shape,
                (3, 3, 512, (l - 100) // 50))

            api.set_data(data, cl)

            self.assertFalse(np.any(np.isnan(api.data_)))
            self.assertFalse(np.any(np.isinf(api.data_)))

            api.do_cspvarica()

            self.assertEqual(api.get_connectivity('S').shape, (3, 3, 512))

            self.assertFalse(np.any(np.isnan(api.activations_)))
            self.assertFalse(np.any(np.isinf(api.activations_)))

            for c in np.unique(cl):
                api.set_used_labels([c])

                api.fit_var()
                fc = api.get_connectivity('S')
                self.assertEqual(fc.shape, (3, 3, 512))

                tfc = api.get_tf_connectivity('S', 100, 50)
                self.assertEqual(tfc.shape, (3, 3, 512, (l - 100) // 50))

            api.set_data(data)
            api.remove_sources([0, 2])
            api.fit_var()
            self.assertEqual(api.get_connectivity('S').shape, (1, 1, 512))
            self.assertEqual(
                api.get_tf_connectivity('S', 100, 50).shape,
                (1, 1, 512, (l - 100) // 50))

            try:
                api.optimize_var()
            except NotImplementedError:
                pass
            api.fit_var()
            self.assertEqual(api.get_connectivity('S').shape, (1, 1, 512))
            self.assertEqual(
                api.get_tf_connectivity('S', 100, 50).shape,
                (1, 1, 512, (l - 100) // 50))
Example #44
    def testFunctionality(self):
        """ generate VAR signals, and apply the api to them
            do this for every backend """
        np.random.seed(3141592)

        # original model coefficients
        b01 = np.zeros((3, 6))
        b02 = np.zeros((3, 6))
        b01[1:3, 2:6] = [[0.4, -0.2, 0.3, 0.0],
                         [-0.7, 0.0, 0.9, 0.0]]
        b02[0:3, 2:6] = [[0.4, 0.0, 0.0, 0.0],
                         [0.4, 0.0, 0.4, 0.0],
                         [0.0, 0.0, 0.4, 0.0]]
        m0 = b01.shape[0]
        cl = np.array([0, 1, 0, 1, 0, 0, 1, 1, 1, 0])
        l = 200
        t = len(cl)

        # generate VAR sources with non-gaussian innovation process, otherwise ICA won't work
        noisefunc = lambda: np.random.normal(size=(1, m0)) ** 3

        var = VAR(2)
        var.coef = b01
        sources1 = var.simulate([l, sum(cl == 0)], noisefunc)
        var.coef = b02
        sources2 = var.simulate([l, sum(cl == 1)], noisefunc)

        var.fit(sources1)
        var.fit(sources2)

        sources = np.zeros((l, m0, t))

        sources[:, :, cl == 0] = sources1
        sources[:, :, cl == 1] = sources2

        # simulate volume conduction... 3 sources measured with 7 channels
        mix = [[0.5, 1.0, 0.5, 0.2, 0.0, 0.0, 0.0],
               [0.0, 0.2, 0.5, 1.0, 0.5, 0.2, 0.0],
               [0.0, 0.0, 0.0, 0.2, 0.5, 1.0, 0.5]]
        data = datatools.dot_special(sources, mix)

        backend_modules = [import_module('scot.' + b) for b in scot.backends]

        for bm in backend_modules:
            np.random.seed(3141592)  # reset random seed so we're independent of module order

            api = scot.Workspace({'model_order': 2}, reducedim=3, backend=bm.backend)

            api.set_data(data)

            api.do_ica()

            self.assertEqual(api.mixing_.shape, (3, 7))
            self.assertEqual(api.unmixing_.shape, (7, 3))

            api.do_mvarica()

            self.assertEqual(api.get_connectivity('S').shape, (3, 3, 512))

            self.assertFalse(np.any(np.isnan(api.activations_)))
            self.assertFalse(np.any(np.isinf(api.activations_)))

            api.set_data(data)

            api.fit_var()

            self.assertEqual(api.get_connectivity('S').shape, (3, 3, 512))
            self.assertEqual(api.get_tf_connectivity('S', 100, 50).shape, (3, 3, 512, (l-100)//50))

            api.set_data(data, cl)
            
            self.assertFalse(np.any(np.isnan(api.data_)))
            self.assertFalse(np.any(np.isinf(api.data_)))
            
            api.do_cspvarica()
            
            self.assertEqual(api.get_connectivity('S').shape, (3, 3, 512))

            self.assertFalse(np.any(np.isnan(api.activations_)))
            self.assertFalse(np.any(np.isinf(api.activations_)))
            
            for c in np.unique(cl):
                api.set_used_labels([c])

                api.fit_var()
                fc = api.get_connectivity('S')
                self.assertEqual(fc.shape, (3, 3, 512))

                tfc = api.get_tf_connectivity('S', 100, 50)
                self.assertEqual(tfc.shape, (3, 3, 512, (l-100)//50))

            api.set_data(data)
            api.remove_sources([0, 2])
            api.fit_var()
            self.assertEqual(api.get_connectivity('S').shape, (1, 1, 512))
            self.assertEqual(api.get_tf_connectivity('S', 100, 50).shape, (1, 1, 512, (l-100)//50))

            try:
                api.optimize_var()
            except NotImplementedError:
                pass
            api.fit_var()
            self.assertEqual(api.get_connectivity('S').shape, (1, 1, 512))
            self.assertEqual(api.get_tf_connectivity('S', 100, 50).shape, (1, 1, 512, (l-100)//50))
Example #45
    def testFunctionality(self):
        """ generate VAR signals, and apply the api to them
            do this for every backend """
        np.random.seed(3141592)

        # original model coefficients
        b01 = np.zeros((3, 6))
        b02 = np.zeros((3, 6))
        b01[1:3, 2:6] = [[0.4, -0.2, 0.3, 0.0],
                         [-0.7, 0.0, 0.9, 0.0]]
        b02[0:3, 2:6] = [[0.4, 0.0, 0.0, 0.0],
                         [0.4, 0.0, 0.4, 0.0],
                         [0.0, 0.0, 0.4, 0.0]]
        m0 = b01.shape[0]
        cl = np.array([0, 1, 0, 1, 0, 0, 1, 1, 1, 0])
        l = 200
        t = len(cl)

        # generate VAR sources with non-gaussian innovation process, otherwise ICA won't work
        noisefunc = lambda: np.random.normal(size=(1, m0)) ** 3 / 1e3

        var = VAR(2)
        var.coef = b01
        sources1 = var.simulate([l, sum(cl == 0)], noisefunc)
        var.coef = b02
        sources2 = var.simulate([l, sum(cl == 1)], noisefunc)

        var.fit(sources1)
        var.fit(sources2)

        sources = np.zeros((t, m0, l))

        sources[cl == 0, :, :] = sources1
        sources[cl == 1, :, :] = sources2

        # simulate volume conduction... 3 sources smeared over 7 channels
        mix = [[0.5, 1.0, 0.5, 0.2, 0.0, 0.0, 0.0],
               [0.0, 0.2, 0.5, 1.0, 0.5, 0.2, 0.0],
               [0.0, 0.0, 0.0, 0.2, 0.5, 1.0, 0.5]]
        data = datatools.dot_special(np.transpose(mix), sources)
        data += np.random.randn(*data.shape) * 0.001  # add small noise

        for backend_name, backend_gen in scot.backend.items():
            np.random.seed(3141592)  # reset random seed so we're independent of module order

            api = scot.Workspace({'model_order': 2}, reducedim=3, backend=backend_gen())

            api.set_data(data)

            api.do_ica()

            self.assertEqual(api.mixing_.shape, (3, 7))
            self.assertEqual(api.unmixing_.shape, (7, 3))

            api.do_mvarica()

            self.assertEqual(api.get_connectivity('S').shape, (3, 3, 512))

            self.assertFalse(np.any(np.isnan(api.activations_)))
            self.assertFalse(np.any(np.isinf(api.activations_)))

            api.set_data(data)

            api.fit_var()

            self.assertEqual(api.get_connectivity('S').shape, (3, 3, 512))
            self.assertEqual(api.get_tf_connectivity('S', 100, 50).shape, (3, 3, 512, (l-100)//50))
            
            tfc1 = api.get_tf_connectivity('PDC', 100, 5, baseline=None)        # no baseline
            tfc2 = api.get_tf_connectivity('PDC', 100, 5, baseline=[110, -10])  # invalid baseline
            tfc3 = api.get_tf_connectivity('PDC', 100, 5, baseline=[0, 0])      # one-window baseline
            tfc4 = tfc1 - tfc1[:, :, :, [0]]
            tfc5 = api.get_tf_connectivity('PDC', 100, 5, baseline=[-np.inf, np.inf])  # full trial baseline
            tfc6 = tfc1 - np.mean(tfc1, axis=3, keepdims=True)
            self.assertTrue(np.allclose(tfc1, tfc2))
            self.assertTrue(np.allclose(tfc3, tfc4))
            self.assertTrue(np.allclose(tfc5, tfc6, rtol=1e-05, atol=1e-06))

            api.set_data(data, cl)
            
            self.assertFalse(np.any(np.isnan(api.data_)))
            self.assertFalse(np.any(np.isinf(api.data_)))
            
            api.do_cspvarica()
            
            self.assertEqual(api.get_connectivity('S').shape, (3, 3, 512))

            self.assertFalse(np.any(np.isnan(api.activations_)))
            self.assertFalse(np.any(np.isinf(api.activations_)))
            
            for c in np.unique(cl):
                api.set_used_labels([c])

                api.fit_var()
                fc = api.get_connectivity('S')
                self.assertEqual(fc.shape, (3, 3, 512))

                tfc = api.get_tf_connectivity('S', 100, 50)
                self.assertEqual(tfc.shape, (3, 3, 512, (l-100)//50))

            api.set_data(data)
            api.remove_sources([0, 2])
            api.fit_var()
            self.assertEqual(api.get_connectivity('S').shape, (1, 1, 512))
            self.assertEqual(api.get_tf_connectivity('S', 100, 50).shape, (1, 1, 512, (l-100)//50))

            try:
                api.optimize_var()
            except NotImplementedError:
                pass
            api.fit_var()
            self.assertEqual(api.get_connectivity('S').shape, (1, 1, 512))
            self.assertEqual(api.get_tf_connectivity('S', 100, 50).shape, (1, 1, 512, (l-100)//50))
Example #46
    def testFunctionality(self):
        """ generate VAR signals, and apply the api to them
            do this for every backend """
        np.random.seed(3141592)

        # original model coefficients
        b01 = np.zeros((3, 6))
        b02 = np.zeros((3, 6))
        b01[1:3, 2:6] = [[0.4, -0.2, 0.3, 0.0], [-0.7, 0.0, 0.9, 0.0]]
        b02[0:3, 2:6] = [[0.4, 0.0, 0.0, 0.0], [0.4, 0.0, 0.4, 0.0], [0.0, 0.0, 0.4, 0.0]]
        m0 = b01.shape[0]
        cl = np.array([0, 1, 0, 1, 0, 0, 1, 1, 1, 0])
        l = 200
        t = len(cl)

        # generate VAR sources with non-gaussian innovation process, otherwise ICA won't work
        noisefunc = lambda: np.random.normal(size=(1, m0)) ** 3 / 1e3

        var = VAR(2)
        var.coef = b01
        sources1 = var.simulate([l, sum(cl == 0)], noisefunc)
        var.coef = b02
        sources2 = var.simulate([l, sum(cl == 1)], noisefunc)

        var.fit(sources1)
        var.fit(sources2)

        sources = np.zeros((t, m0, l))

        sources[cl == 0, :, :] = sources1
        sources[cl == 1, :, :] = sources2

        # simulate volume conduction... 3 sources smeared over 7 channels
        mix = [
            [0.5, 1.0, 0.5, 0.2, 0.0, 0.0, 0.0],
            [0.0, 0.2, 0.5, 1.0, 0.5, 0.2, 0.0],
            [0.0, 0.0, 0.0, 0.2, 0.5, 1.0, 0.5],
        ]
        data = datatools.dot_special(np.transpose(mix), sources)
        data += np.random.randn(*data.shape) * 0.001  # add small noise

        for backend_name, backend_gen in scot.backend.items():
            np.random.seed(3141592)  # reset random seed so we're independent of module order

            api = scot.Workspace({"model_order": 2}, reducedim=3, backend=backend_gen())

            api.set_data(data)

            api.do_ica()

            self.assertEqual(api.mixing_.shape, (3, 7))
            self.assertEqual(api.unmixing_.shape, (7, 3))

            api.do_mvarica()

            self.assertEqual(api.get_connectivity("S").shape, (3, 3, 512))

            self.assertFalse(np.any(np.isnan(api.activations_)))
            self.assertFalse(np.any(np.isinf(api.activations_)))

            api.set_data(data)

            api.fit_var()

            self.assertEqual(api.get_connectivity("S").shape, (3, 3, 512))
            self.assertEqual(api.get_tf_connectivity("S", 100, 50).shape, (3, 3, 512, (l - 100) // 50))

            tfc1 = api.get_tf_connectivity("PDC", 100, 5, baseline=None)  # no baseline
            tfc2 = api.get_tf_connectivity("PDC", 100, 5, baseline=[110, -10])  # invalid baseline
            tfc3 = api.get_tf_connectivity("PDC", 100, 5, baseline=[0, 0])  # one-window baseline
            tfc4 = tfc1 - tfc1[:, :, :, [0]]
            tfc5 = api.get_tf_connectivity("PDC", 100, 5, baseline=[-np.inf, np.inf])  # full trial baseline
            tfc6 = tfc1 - np.mean(tfc1, axis=3, keepdims=True)
            self.assertTrue(np.allclose(tfc1, tfc2))
            self.assertTrue(np.allclose(tfc3, tfc4))
            self.assertTrue(np.allclose(tfc5, tfc6, rtol=1e-05, atol=1e-06))

            api.set_data(data, cl)

            self.assertFalse(np.any(np.isnan(api.data_)))
            self.assertFalse(np.any(np.isinf(api.data_)))

            api.do_cspvarica()

            self.assertEqual(api.get_connectivity("S").shape, (3, 3, 512))

            self.assertFalse(np.any(np.isnan(api.activations_)))
            self.assertFalse(np.any(np.isinf(api.activations_)))

            for c in np.unique(cl):
                api.set_used_labels([c])

                api.fit_var()
                fc = api.get_connectivity("S")
                self.assertEqual(fc.shape, (3, 3, 512))

                tfc = api.get_tf_connectivity("S", 100, 50)
                self.assertEqual(tfc.shape, (3, 3, 512, (l - 100) // 50))

            api.set_data(data)
            api.remove_sources([0, 2])
            api.fit_var()
            self.assertEqual(api.get_connectivity("S").shape, (1, 1, 512))
            self.assertEqual(api.get_tf_connectivity("S", 100, 50).shape, (1, 1, 512, (l - 100) // 50))

            try:
                api.optimize_var()
            except NotImplementedError:
                pass
            api.fit_var()
            self.assertEqual(api.get_connectivity("S").shape, (1, 1, 512))
            self.assertEqual(api.get_tf_connectivity("S", 100, 50).shape, (1, 1, 512, (l - 100) // 50))