def testModelIdentification(self):
        """ generate VAR signals, mix them, and see if MVARICA can reconstruct the signals
            do this for every backend """

        # original model coefficients
        b0 = np.zeros((3, 6))
        b0[1:3, 2:6] = [[0.4, -0.2, 0.3, 0.0],
                        [-0.7, 0.0, 0.9, 0.0]]
        m0 = b0.shape[0]
        l, t = 1000, 100

        # generate VAR sources with a non-Gaussian innovation process, otherwise ICA won't work
        noisefunc = lambda: np.random.normal(size=(1, m0)) ** 3

        var = VAR(2)
        var.coef = b0
        sources = var.simulate([l, t], noisefunc)

        # simulate volume conduction... 3 sources measured with 7 channels
        mix = [[0.5, 1.0, 0.5, 0.2, 0.0, 0.0, 0.0],
               [0.0, 0.2, 0.5, 1.0, 0.5, 0.2, 0.0],
               [0.0, 0.0, 0.0, 0.2, 0.5, 1.0, 0.5]]
        data = datatools.dot_special(sources, mix)

        backend_modules = [import_module('scot.backend.' + b) for b in scot.backend.__all__]

        for bm in backend_modules:

            api = scot.Workspace({'model_order': 2}, backend=bm.backend)

            api.set_data(data)

            # apply MVARICA
            #  - default setting of 0.99 variance should reduce to 3 channels with this data
            #  - automatically determine delta (enough data, so it should most likely be 0)
            api.do_mvarica()

            # ICA does not determine the ordering and sign of the components,
            # so we need to test all combinations to see whether one of them matches the original coefficients
            permutations = np.array(
                [[0, 1, 2, 3, 4, 5], [0, 1, 4, 5, 2, 3], [2, 3, 4, 5, 0, 1], [2, 3, 0, 1, 4, 5], [4, 5, 0, 1, 2, 3],
                 [4, 5, 2, 3, 0, 1]])
            signperms = np.array(
                [[1, 1, 1, 1, 1, 1], [1, 1, 1, 1, -1, -1], [1, 1, -1, -1, 1, 1], [1, 1, -1, -1, -1, -1],
                 [-1, -1, 1, 1, 1, 1], [-1, -1, 1, 1, -1, -1], [-1, -1, -1, -1, 1, 1], [-1, -1, -1, -1, -1, -1]])

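            # track the smallest squared error and the corresponding sign/permutation-corrected coefficients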
            best, d = np.inf, None

            for perm in permutations:
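                # permute the rows (sources) and the matching column pairs (per-source lag coefficients) of the estimated model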
                b = api.var_.coef[perm[::2] // 2, :]
                b = b[:, perm]
                for sgn in signperms:
                    c = b * np.repeat([sgn], 3, 0) * np.repeat([sgn[::2]], 6, 0).T
                    err = np.sum((c - b0) ** 2)
                    if err < best:
                        best = err
                        d = c

            self.assertTrue(np.all(abs(d - b0) < 0.05))

    def test_predict(self):
        l = 100000
        var = VAR(2)
        var.coef = np.array([[0.2, 0.1, 0.4, -0.1], [0.3, -0.2, 0.1, 0]])

        x = var.simulate(l)
        z = var.predict(x)

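        # the one-step prediction error equals the innovation process, which has unit variance here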
        # that limit is rather generous, but we don't want tests to fail due to random variation
        self.assertTrue(np.abs(np.var(x[100:, :] - z[100:, :]) - 1) < 0.02)

    def test_whiteness(self):
        r = np.random.randn(100, 5, 10)     # gaussian white noise
        r0 = r.copy()

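        # assign residuals directly to an (unfitted) order-0 model so the whiteness test can be checked in isolation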
        var = VAR(0)
        var.residuals = r

        p = var.test_whiteness(20)

        self.assertTrue(np.all(r == r0))    # make sure we don't modify the input
        self.assertTrue(p > 0.05)           # test should be non-significant for white noise

        r[3:, 1, :] = r[:-3, 0, :]          # create cross-correlation at lag 3
        p = var.test_whiteness(20)
        self.assertFalse(p > 0.05)          # now the test should be significant

    def test_simulate(self):
        noisefunc = lambda: [1, 1]   # use deterministic function instead of noise
        num_samples = 100

        b = np.array([[0.2, 0.1, 0.4, -0.1], [0.3, -0.2, 0.1, 0]])

        var = VAR(2)
        var.coef = b

        x = var.simulate(num_samples, noisefunc)

        # make sure we got expected values within reasonable accuracy
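        # with a constant innovation of 1 the process obeys x[n] = 1 + B1 x[n-1] + B2 x[n-2],
        # where B1 = b[:, 0::2] holds the lag-1 and B2 = b[:, 1::2] the lag-2 coefficients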
        for n in range(10, num_samples):
            self.assertTrue(np.all(
                np.abs(x[n, :] - 1 - np.dot(b[:, 0::2], x[n - 1, :]) - np.dot(b[:, 1::2], x[n - 2, :])) < epsilon))

    def testModelIdentification(self):
        """ generate independent signals, mix them, and check whether ICA can reconstruct the mixing matrix;
            do this for every backend """

        # original model coefficients
        b0 = np.zeros((3, 3))    # no connectivity
        m0 = b0.shape[0]
        l, t = 100, 100

        # generate VAR sources with a non-Gaussian innovation process, otherwise ICA won't work
        noisefunc = lambda: np.random.normal(size=(1, m0)) ** 3

        var = VAR(1)
        var.coef = b0
        sources = var.simulate([l, t], noisefunc)

        # simulate volume conduction... 3 sources measured with 7 channels
        mix = [[0.5, 1.0, 0.5, 0.2, 0.0, 0.0, 0.0],
               [0.0, 0.2, 0.5, 1.0, 0.5, 0.2, 0.0],
               [0.0, 0.0, 0.0, 0.2, 0.5, 1.0, 0.5]]
        data = datatools.dot_special(sources, mix)

        backend_modules = [import_module('scot.backend.' + b) for b in scot.backend.__all__]

        for bm in backend_modules:

            result = plainica.plainica(data, backend=bm.backend)

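            # unmixing followed by mixing should recover the identity matrix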
            i = result.mixing.dot(result.unmixing)
            self.assertTrue(np.allclose(i, np.eye(i.shape[0]), rtol=1e-6, atol=1e-7))

            permutations = [[0, 1, 2], [0, 2, 1], [1, 0, 2], [1, 2, 0], [2, 0, 1], [2, 1, 0]]

            bestdiff = np.inf
            bestmix = None

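            # normalize the absolute mixing matrix so its scale is comparable to the ground-truth mix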
            absmix = np.abs(result.mixing)
            absmix /= np.max(absmix)

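            # ICA does not determine the component order, so find the row permutation that best matches the true mixing matrix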
            for p in permutations:
                estmix = absmix[p, :]
                diff = np.sum((np.abs(estmix) - np.abs(mix)) ** 2)

                if diff < bestdiff:
                    bestdiff = diff
                    bestmix = estmix

            self.assertTrue(np.allclose(bestmix, mix, rtol=1e-1, atol=1e-1))

    def test_residuals(self):
        l = 100000
        var0 = VAR(2)
        var0.coef = np.array([[0.2, 0.1, 0.4, -0.1], [0.3, -0.2, 0.1, 0]])
        x = var0.simulate(l)

        var = VAR(2)
        var.fit(x)

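        # residuals must match the input shape, and their covariance should be close to identity
        # because the simulated innovations have unit variance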
        self.assertEqual(x.shape, var.residuals.shape)

        self.assertTrue(np.allclose(var.rescov, np.eye(var.rescov.shape[0]), 1e-2, 1e-2))

    def test_fit(self):
        var0 = VAR(2)
        var0.coef = np.array([[0.2, 0.1, 0.4, -0.1], [0.3, -0.2, 0.1, 0]])
        l = 100000
        x = var0.simulate(l)
        y = x.copy()

        var = VAR(2)
        var.fit(x)

        # make sure the input remains unchanged
        self.assertTrue(np.all(x == y))

        # that limit is rather generous, but we don't want tests to fail due to random variation
        self.assertTrue(np.all(np.abs(var0.coef - var.coef) < 0.02))

    def test_fit_regularized(self):
        l = 100000
        var0 = VAR(2)
        var0.coef = np.array([[0.2, 0.1, 0.4, -0.1], [0.3, -0.2, 0.1, 0]])
        x = var0.simulate(l)
        y = x.copy()

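        # deliberately over-specify the model order and rely on the regularization (delta=1)
        # to shrink the superfluous coefficients towards zero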
        var = VAR(10, delta=1)
        var.fit(x)

        # make sure the input remains unchanged
        self.assertTrue(np.all(x == y))

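        # expected coefficients: the true order-2 values embedded in the order-10 coefficient matrix, all other entries zero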
        b0 = np.zeros((2, 20))
        b0[:, 0:2] = var0.coef[:, 0:2]
        b0[:, 10:12] = var0.coef[:, 2:4]

        # that limit is rather generous, but we don't want tests to fail due to random variation
        self.assertTrue(np.all(np.abs(b0 - var.coef) < 0.02))

    def testFunctionality(self):
        """ generate VAR signals and apply the api to them;
            do this for every backend """

        # original model coefficients
        b01 = np.zeros((3, 6))
        b02 = np.zeros((3, 6))
        b01[1:3, 2:6] = [[0.4, -0.2, 0.3, 0.0],
                         [-0.7, 0.0, 0.9, 0.0]]
        b02[0:3, 2:6] = [[0.4, 0.0, 0.0, 0.0],
                         [0.4, 0.0, 0.4, 0.0],
                         [0.0, 0.0, 0.4, 0.0]]
        m0 = b01.shape[0]
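        # class label for each of the t trials; the two classes use different connectivity patterns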
        cl = np.array([0, 1, 0, 1, 0, 0, 1, 1, 1, 0])
        l = 1000
        t = len(cl)

        # generate VAR sources with a non-Gaussian innovation process, otherwise ICA won't work
        noisefunc = lambda: np.random.normal(size=(1, m0)) ** 3

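        # simulate one set of trials per class, each with its own coefficient matrix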
        var = VAR(2)
        var.coef = b01
        sources1 = var.simulate([l, sum(cl==0)], noisefunc)
        var.coef = b02
        sources2 = var.simulate([l, sum(cl==1)], noisefunc)

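        # sanity check: fit each class separately and print the estimated coefficients for manual inspection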
        var.fit(sources1)
        print(var.coef)
        var.fit(sources2)
        print(var.coef)

        sources = np.zeros((l, m0, t))

        sources[:, :, cl == 0] = sources1
        sources[:, :, cl == 1] = sources2

        # simulate volume conduction... 3 sources measured with 7 channels
        mix = [[0.5, 1.0, 0.5, 0.2, 0.0, 0.0, 0.0],
               [0.0, 0.2, 0.5, 1.0, 0.5, 0.2, 0.0],
               [0.0, 0.0, 0.0, 0.2, 0.5, 1.0, 0.5]]
        data = datatools.dot_special(sources, mix)

        backend_modules = [import_module('scot.backend.' + b) for b in scot.backend.__all__]

        for bm in backend_modules:

            api = scot.Workspace({'model_order': 2}, reducedim=3, backend=bm.backend)

            api.set_data(data)

            api.do_ica()

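            # with reducedim=3 three components are retained: mixing_ is 3x7 and unmixing_ is 7x3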
            self.assertEqual(api.mixing_.shape, (3, 7))
            self.assertEqual(api.unmixing_.shape, (7, 3))

            api.do_mvarica()

            self.assertEqual(api.get_connectivity('S').shape, (3, 3, 512))

            api.set_data(data)

            api.fit_var()

            self.assertEqual(api.get_connectivity('S').shape, (3, 3, 512))
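            # time-frequency connectivity: sliding-window estimates (window 100, step 50) are expected to yield 18 windows for these 1000-sample trials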
            self.assertEqual(api.get_tf_connectivity('S', 100, 50).shape, (3, 3, 512, 18))

            api.set_data(data, cl)
            
            self.assertFalse(np.any(np.isnan(api.data_)))
            self.assertFalse(np.any(np.isinf(api.data_)))
            
            api.do_cspvarica()
            
            self.assertFalse(np.any(np.isnan(api.activations_)))
            self.assertFalse(np.any(np.isinf(api.activations_)))
            
            self.assertEqual(api.get_connectivity('S').shape, (3, 3, 512))

            self.assertFalse(np.any(np.isnan(api.activations_)))
            self.assertFalse(np.any(np.isinf(api.activations_)))
            
            for c in np.unique(cl):
                api.set_used_labels([c])

                api.fit_var()
                fc = api.get_connectivity('S')
                self.assertEqual(fc.shape, (3, 3, 512))

                tfc = api.get_tf_connectivity('S', 100, 50)
                self.assertEqual(tfc.shape, (3, 3, 512, 18))

            api.set_data(data)
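            # removing two of the three sources leaves a single source, hence 1x1 connectivity matrices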
            api.remove_sources([0, 2])
            api.fit_var()
            self.assertEqual(api.get_connectivity('S').shape, (1, 1, 512))
            self.assertEqual(api.get_tf_connectivity('S', 100, 50).shape, (1, 1, 512, 18))