Example #1
    def _discover_structure(data):

        # Add uniformly distributed random noise to avoid singularities
        # when performing the ICA
        data += np.random.random_sample(data.shape)

        # Create the ICA node to get the inverse of the mixing matrix
        k, w, _ = decomposition.fastica(data)

        w = np.dot(w, k)
        n = w.shape[0]
        best_nzd = float("inf")
        best_slt = float("inf")
        best_w_permuted = w
        causality_matrix = None
        causal_perm = None

        # Brute-force search over permutations is only tractable for small n
        if n < 9:
            perm = LiNGAM._perms(n)

            for i in range(perm.shape[1]):
                perm_matrix = np.eye(n)
                perm_matrix = perm_matrix[:, perm[:, i]]
                w_permuted = perm_matrix.dot(w)
                cost = LiNGAM._cost_non_zero_diag(w_permuted)
                if cost < best_nzd:
                    best_nzd = cost
                    best_w_permuted = w_permuted

            w_opt = best_w_permuted

            w_opt = w_opt / np.diag(w_opt).reshape((n, 1))
            b_matrix = np.eye(n) - w_opt
            best_b_permuted = b_matrix
            best_i = 0

            for i in range(perm.shape[1]):
                b_permuted = b_matrix[:, perm[:, i]][perm[:, i], :]
                cost = LiNGAM._cost_strictly_lower_triangular(
                    b_permuted)
                if cost < best_slt:
                    best_slt = cost
                    best_i = i
                    best_b_permuted = b_permuted

            causal_perm = perm[:, best_i]
            causality_matrix = b_matrix

            percent_upper = best_slt / np.sum(best_b_permuted ** 2)

            if percent_upper > 0.2:
                # TODO(David): Change that code to raise an exception instead
                logger.error("LiNGAM failed to run on the data set")
                logger.error(
                    "--> B permuted matrix is at best {:.1f}% lower triangular"
                    .format(100 * (1 - percent_upper)))

        return causality_matrix, causal_perm
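The LiNGAM code above relies on scikit-learn's fastica return convention: `k` is the prewhitening matrix and `w` the rotation estimated in whitened space, so `np.dot(w, k)` is the complete unmixing matrix mapping centered observations to sources. A minimal, self-contained sketch of that convention (synthetic data; all names here are illustrative):

import numpy as np
from sklearn.decomposition import fastica

rng = np.random.RandomState(0)
s = rng.laplace(size=(3, 2000))      # independent non-Gaussian sources
a = rng.randn(3, 3)                  # mixing matrix
x = np.dot(a, s).T                   # observations, (n_samples, n_features)

k, w, s_est = fastica(x, random_state=0)
unmixing = np.dot(w, k)              # the "w = np.dot(w, k)" step above

# Sources are recovered from the centered data via the combined matrix:
x_centered = x - x.mean(axis=0)
assert np.allclose(s_est, np.dot(x_centered, unmixing.T))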
Example #2
    def test_fastica(self):
        iris = datasets.load_iris()
        df = pdml.ModelFrame(iris)

        result = df.decomposition.fastica(random_state=self.random_state)
        expected = decomposition.fastica(iris.data,
                                         random_state=self.random_state)

        self.assertEqual(len(result), 3)
        self.assertIsInstance(result[0], pdml.ModelFrame)
        tm.assert_index_equal(result[0].index, df.data.columns)
        self.assert_numpy_array_almost_equal(result[0].values, expected[0])

        self.assertIsInstance(result[1], pdml.ModelFrame)
        self.assert_numpy_array_almost_equal(result[1].values, expected[1])

        self.assertIsInstance(result[2], pdml.ModelFrame)
        tm.assert_index_equal(result[2].index, df.index)
        self.assert_numpy_array_almost_equal(result[2].values, expected[2])

        result = df.decomposition.fastica(return_X_mean=True,
                                          random_state=self.random_state)
        expected = decomposition.fastica(iris.data, return_X_mean=True,
                                         random_state=self.random_state)

        self.assertEqual(len(result), 4)
        self.assertIsInstance(result[0], pdml.ModelFrame)
        tm.assert_index_equal(result[0].index, df.data.columns)
        self.assert_numpy_array_almost_equal(result[0].values, expected[0])

        self.assertIsInstance(result[1], pdml.ModelFrame)
        self.assert_numpy_array_almost_equal(result[1].values, expected[1])

        self.assertIsInstance(result[2], pdml.ModelFrame)
        tm.assert_index_equal(result[2].index, df.index)
        self.assert_numpy_array_almost_equal(result[2].values, expected[2])

        self.assert_numpy_array_almost_equal(result[3], expected[3])
Example #3
def test_non_square_fastica(add_noise=False):
    """ Test the FastICA algorithm on very simple data.
    """
    rng = np.random.RandomState(0)

    n_samples = 1000
    # Generate two sources:
    t = np.linspace(0, 100, n_samples)
    s1 = np.sin(t)
    s2 = np.ceil(np.sin(np.pi * t))
    s = np.c_[s1, s2].T
    center_and_norm(s)
    s1, s2 = s

    # Mixing matrix
    mixing = rng.randn(6, 2)
    m = np.dot(mixing, s)

    if add_noise:
        m += 0.1 * rng.randn(6, n_samples)

    center_and_norm(m)

    k_, mixing_, s_ = fastica(m.T, n_components=2, random_state=rng)
    s_ = s_.T

    # Check that the mixing model described in the docstring holds:
    assert_almost_equal(s_, np.dot(np.dot(mixing_, k_), m))

    center_and_norm(s_)
    s1_, s2_ = s_
    # Check to see if the sources have been estimated
    # in the wrong order
    if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
        s2_, s1_ = s_
    s1_ *= np.sign(np.dot(s1_, s1))
    s2_ *= np.sign(np.dot(s2_, s2))

    # Check that we have estimated the original sources
    if not add_noise:
        assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=3)
        assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=3)
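In this non-square setting, fastica(m.T, n_components=2) whitens six channels down to two dimensions, so the returned matrices combine into a (2, 6) unmixing map, which is exactly what the assertion s_ = mixing_ @ k_ @ m exercises (the second return value is sklearn's rotation W, despite being bound to the name mixing_ above). A minimal shape check on synthetic data, as a sketch:

import numpy as np
from sklearn.decomposition import fastica

rng = np.random.RandomState(0)
m = rng.laplace(size=(6, 500))             # 6 channels, 500 samples
k_, w_, s_ = fastica(m.T, n_components=2, random_state=0)

assert k_.shape == (2, 6)                  # whitening also reduces dimension
assert w_.shape == (2, 2)                  # rotation in whitened space
assert s_.shape == (500, 2)                # one column per estimated source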
Example #4
def booty(ic, mtx, iternumfactor=4):
    """
    Calculates confidence of IC, and residues in IC via bootstrapping.

    ic is a matrix whose columns are independent components.
    mtx is the pruned alignment matrix.

    booty returns a single matrix of the same size as ic, with each column
    containing per-residue confidence values which, when summed,
    give the confidence of the IC (between zero and one).

    iternumfactor determines the number of bootstrap iterations as
    iternumfactor times the number of columns in mtx.

    The cluster can then be extracted from each column.
    """
    tokeep = ic.shape[1]
    numiter = int(iternumfactor*mtx.shape[1])
    icConf = np.zeros(ic.shape)
    
    # the appended t on variables stands for temp.
    for l in range(numiter):
        # generate new eigenspace.
        wtemp,vtemp = np.linalg.eigh(1.-pinfwrapper.infoDistance(resample(mtx)))
        idx = np.argsort(wtemp)
        vthresh = vtemp[:,idx[-tokeep:]]
        Kt,Wt,ICt = fastica(vthresh, n_components=tokeep, max_iter=20000, tol=.0001)
        
        # Now match and add to average.
        idmatch = matchic(ic,ICt)
        
        for g in range(tokeep):
            # for vectors this should be element-wise multiplications

            toadd = ICt[:,idmatch[g,1]]*ic[:,g]
            icConf[:,g] = icConf[:,g] + toadd*np.sign(np.sum(toadd))
        print("{} % Complete resampling".format(100*(l+1)/float(numiter)))

    return icConf/float(numiter)
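booty depends on two project helpers that are not shown here: resample (a bootstrap resample of the alignment matrix) and matchic (matching bootstrap ICs to the reference ICs). A minimal sketch of what such helpers might look like, assuming resampling is over columns and matching is by absolute correlation; this is illustrative, not the project's actual code:

import numpy as np

def resample(mtx):
    # Hypothetical bootstrap helper: draw columns of mtx with replacement.
    idx = np.random.randint(0, mtx.shape[1], size=mtx.shape[1])
    return mtx[:, idx]

def matchic(ic, ic_boot):
    # Hypothetical matcher: greedily pair each reference IC (column of ic)
    # with the bootstrap IC having the largest absolute correlation.
    k = ic.shape[1]
    corr = np.abs(np.corrcoef(ic.T, ic_boot.T)[:k, k:])
    pairs = np.zeros((k, 2), dtype=int)
    taken = set()
    for g in np.argsort(-corr.max(axis=1)):     # strongest matches first
        j = next(j for j in np.argsort(-corr[g]) if j not in taken)
        taken.add(j)
        pairs[g] = (g, j)
    return pairs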
Example #5
def test_fastica_simple(add_noise, seed):
    # Test the FastICA algorithm on very simple data.
    rng = np.random.RandomState(seed)
    # scipy.stats uses the global RNG:
    n_samples = 1000
    # Generate two sources:
    s1 = (2 * np.sin(np.linspace(0, 100, n_samples)) > 0) - 1
    s2 = stats.t.rvs(1, size=n_samples)
    s = np.c_[s1, s2].T
    center_and_norm(s)
    s1, s2 = s

    # Mixing angle
    phi = 0.6
    mixing = np.array([[np.cos(phi), np.sin(phi)],
                       [np.sin(phi), -np.cos(phi)]])
    m = np.dot(mixing, s)

    if add_noise:
        m += 0.1 * rng.randn(2, 1000)

    center_and_norm(m)

    # Custom contrast function passed as the fun argument: the contract is to
    # return g(x) and the mean of g'(x) over the last axis.
    def g_test(x):
        return x ** 3, (3 * x ** 2).mean(axis=-1)

    algos = ['parallel', 'deflation']
    nls = ['logcosh', 'exp', 'cube', g_test]
    whitening = [True, False]
    for algo, nl, whiten in itertools.product(algos, nls, whitening):
        if whiten:
            k_, mixing_, s_ = fastica(m.T, fun=nl, algorithm=algo,
                                      random_state=rng)
            assert_raises(ValueError, fastica, m.T, fun=np.tanh,
                          algorithm=algo)
        else:
            pca = PCA(n_components=2, whiten=True, random_state=rng)
            X = pca.fit_transform(m.T)
            k_, mixing_, s_ = fastica(X, fun=nl, algorithm=algo, whiten=False,
                                      random_state=rng)
            assert_raises(ValueError, fastica, X, fun=np.tanh,
                          algorithm=algo)
        s_ = s_.T
        # Check that the mixing model described in the docstring holds:
        if whiten:
            assert_almost_equal(s_, np.dot(np.dot(mixing_, k_), m))

        center_and_norm(s_)
        s1_, s2_ = s_
        # Check to see if the sources have been estimated
        # in the wrong order
        if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
            s2_, s1_ = s_
        s1_ *= np.sign(np.dot(s1_, s1))
        s2_ *= np.sign(np.dot(s2_, s2))

        # Check that we have estimated the original sources
        if not add_noise:
            assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=2)
            assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=2)
        else:
            assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=1)
            assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=1)

    # Test FastICA class (nl and algo keep their last values from the loop)
    _, _, sources_fun = fastica(m.T, fun=nl, algorithm=algo,
                                random_state=seed)
    ica = FastICA(fun=nl, algorithm=algo, random_state=seed)
    sources = ica.fit_transform(m.T)
    assert_equal(ica.components_.shape, (2, 2))
    assert_equal(sources.shape, (1000, 2))

    assert_array_almost_equal(sources_fun, sources)
    assert_array_almost_equal(sources, ica.transform(m.T))

    assert_equal(ica.mixing_.shape, (2, 2))

    for fn in [np.tanh, "exp(-.5(x^2))"]:
        ica = FastICA(fun=fn, algorithm=algo)
        assert_raises(ValueError, ica.fit, m.T)

    assert_raises(TypeError, FastICA(fun=range(10)).fit, m.T)
Example #6
def test_fastica(add_noise=False):
    """ Test the FastICA algorithm on very simple data.
    """
    # scipy.stats uses the global RNG:
    rng = np.random.RandomState(0)
    n_samples = 1000
    # Generate two sources:
    s1 = (2 * np.sin(np.linspace(0, 100, n_samples)) > 0) - 1
    s2 = stats.t.rvs(1, size=n_samples)
    s = np.c_[s1, s2].T
    center_and_norm(s)
    s1, s2 = s

    # Mixing angle
    phi = 0.6
    mixing = np.array([[np.cos(phi),  np.sin(phi)],
                       [np.sin(phi), -np.cos(phi)]])
    m = np.dot(mixing, s)

    if add_noise:
        m += 0.1 * rng.randn(2, 1000)

    center_and_norm(m)

    algos = ['parallel', 'deflation']
    nls = ['logcosh', 'exp', 'cube']
    whitening = [True, False]
    for algo, nl, whiten in itertools.product(algos, nls, whitening):
        if whiten:
            k_, mixing_, s_ = fastica(m.T, fun=nl, algorithm=algo)
        else:
            X = PCA(n_components=2, whiten=True).fit_transform(m.T)
            k_, mixing_, s_ = fastica(X, fun=nl, algorithm=algo,
                                      whiten=False)
        s_ = s_.T
        # Check that the mixing model described in the docstring holds:
        if whiten:
            assert_almost_equal(s_, np.dot(np.dot(mixing_, k_), m))

        center_and_norm(s_)
        s1_, s2_ = s_
        # Check to see if the sources have been estimated
        # in the wrong order
        if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
            s2_, s1_ = s_
        s1_ *= np.sign(np.dot(s1_, s1))
        s2_ *= np.sign(np.dot(s2_, s2))

        # Check that we have estimated the original sources
        if not add_noise:
            assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=2)
            assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=2)
        else:
            assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=1)
            assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=1)

    # Test FastICA class (nl and algo keep their last values from the loop)
    ica = FastICA(fun=nl, algorithm=algo, random_state=0)
    ica.fit(m.T)
    # get_mixing_matrix() is the old accessor; newer scikit-learn releases
    # expose the same matrix as the mixing_ attribute.
    ica.get_mixing_matrix()
    assert_true(ica.components_.shape == (2, 2))
    assert_true(ica.sources_.shape == (1000, 2))
Example #7
    def evaluate(self):
        """
        Compute the independent sources 
        """
        cls_attr_name = self.__class__.__name__+".time_series"
        self.time_series.trait["data"].log_debug(owner = cls_attr_name)
        
        ts_shape = self.time_series.data.shape
        
        #Need more observations than variables
        if ts_shape[0] < ts_shape[2]:
            msg = "ICA requires a longer timeseries (tpts > number of nodes)."
            LOG.error(msg)
            raise Exception(msg)

        # Default to one component per node before validating n_components
        if self.n_components is None:
            self.n_components = ts_shape[2]

        #Need more variables than components
        if self.n_components > ts_shape[2]:
            msg = "ICA requires more variables than components to extract (number of nodes > number of components)."
            LOG.error(msg)
            raise Exception(msg)
        
        #(n_components, n_components, state-variables, modes) --  unmixing matrix
        unmixing_matrix_shape = (self.n_components, self.n_components, ts_shape[1], ts_shape[3])
        LOG.info("unmixing matrix shape will be: %s" % str(unmixing_matrix_shape))
        
        # (n_components, nodes, state_variables, modes) -- prewhitening matrix
        prewhitening_matrix_shape = (self.n_components, ts_shape[2], ts_shape[1], ts_shape[3])
        LOG.info("prewhitening matrix shape will be: %s" % str(prewhitening_matrix_shape))
        
        
        unmixing_matrix = numpy.zeros(unmixing_matrix_shape)
        prewhitening_matrix = numpy.zeros(prewhitening_matrix_shape)
        
        
        #(tpts, n_components, state_variables, modes) -- unmixed sources time series
        data_ica = numpy.zeros((ts_shape[0], self.n_components, ts_shape[1], ts_shape[3]))
        
        #One un/mixing matrix for each state-var & mode.
        for mode in range(ts_shape[3]):
            for var in range(ts_shape[1]):
                # Assumes data must be whitened
                ica = fastica(self.time_series.data[:, var, :, mode], 
                                            n_components = self.n_components,
                                            whiten = True)
                # unmixed sources - component_time_series
                data_ica[:, :, var, mode] = ica[2]
                # prewhitening matrix
                prewhitening_matrix[:, :, var, mode] = ica[0]
                # unmixing matrix
                unmixing_matrix[:, :, var, mode] = ica[1]
        
        util.log_debug_array(LOG, prewhitening_matrix, "whitening_matrix")
        util.log_debug_array(LOG, unmixing_matrix, "unmixing_matrix")

        ica_result = mode_decompositions.IndependentComponents(source = self.time_series,
                                         component_time_series = data_ica, 
                                         #mixing_matrix = mixing_matrix,
                                         prewhitening_matrix = prewhitening_matrix,
                                         unmixing_matrix = unmixing_matrix,
                                         n_components = self.n_components, 
                                         use_storage = False)
        
        return ica_result
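fastica expects input of shape (n_samples, n_features); the slice self.time_series.data[:, var, :, mode] is (tpts, nodes), which is why the guard above compares ts_shape[0] against ts_shape[2], and why ica[0] and ica[1] fill matrices of shape (n_components, nodes) and (n_components, n_components). A small self-contained sketch of those shapes (values are illustrative):

import numpy as np
from sklearn.decomposition import fastica

tpts, nvar, nodes, nmodes = 500, 2, 16, 1      # illustrative 4-D layout
data = np.random.randn(tpts, nvar, nodes, nmodes)

slice_ = data[:, 0, :, 0]          # (tpts, nodes) == (n_samples, n_features)
k, w, s = fastica(slice_, n_components=4, random_state=0)
assert k.shape == (4, nodes)       # prewhitening matrix, ica[0] above
assert w.shape == (4, 4)           # unmixing matrix, ica[1] above
assert s.shape == (tpts, 4)        # component time series, ica[2] above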
Example #8
def test_fastica_simple(add_noise, seed):
    # Test the FastICA algorithm on very simple data.
    rng = np.random.RandomState(seed)
    # scipy.stats uses the global RNG:
    n_samples = 1000
    # Generate two sources:
    s1 = (2 * np.sin(np.linspace(0, 100, n_samples)) > 0) - 1
    s2 = stats.t.rvs(1, size=n_samples)
    s = np.c_[s1, s2].T
    center_and_norm(s)
    s1, s2 = s

    # Mixing angle
    phi = 0.6
    mixing = np.array([[np.cos(phi), np.sin(phi)], [np.sin(phi),
                                                    -np.cos(phi)]])
    m = np.dot(mixing, s)

    if add_noise:
        m += 0.1 * rng.randn(2, 1000)

    center_and_norm(m)

    # Custom contrast function passed as the fun argument: the contract is to
    # return g(x) and the mean of g'(x) over the last axis.
    def g_test(x):
        return x**3, (3 * x**2).mean(axis=-1)

    algos = ['parallel', 'deflation']
    nls = ['logcosh', 'exp', 'cube', g_test]
    whitening = [True, False]
    for algo, nl, whiten in itertools.product(algos, nls, whitening):
        if whiten:
            k_, mixing_, s_ = fastica(m.T,
                                      fun=nl,
                                      algorithm=algo,
                                      random_state=rng)
            assert_raises(ValueError,
                          fastica,
                          m.T,
                          fun=np.tanh,
                          algorithm=algo)
        else:
            pca = PCA(n_components=2, whiten=True, random_state=rng)
            X = pca.fit_transform(m.T)
            k_, mixing_, s_ = fastica(X,
                                      fun=nl,
                                      algorithm=algo,
                                      whiten=False,
                                      random_state=rng)
            assert_raises(ValueError, fastica, X, fun=np.tanh, algorithm=algo)
        s_ = s_.T
        # Check that the mixing model described in the docstring holds:
        if whiten:
            assert_almost_equal(s_, np.dot(np.dot(mixing_, k_), m))

        center_and_norm(s_)
        s1_, s2_ = s_
        # Check to see if the sources have been estimated
        # in the wrong order
        if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
            s2_, s1_ = s_
        s1_ *= np.sign(np.dot(s1_, s1))
        s2_ *= np.sign(np.dot(s2_, s2))

        # Check that we have estimated the original sources
        if not add_noise:
            assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=2)
            assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=2)
        else:
            assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=1)
            assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=1)

    # Test FastICA class (nl and algo keep their last values from the loop)
    _, _, sources_fun = fastica(m.T, fun=nl, algorithm=algo, random_state=seed)
    ica = FastICA(fun=nl, algorithm=algo, random_state=seed)
    sources = ica.fit_transform(m.T)
    assert ica.components_.shape == (2, 2)
    assert sources.shape == (1000, 2)

    assert_array_almost_equal(sources_fun, sources)
    assert_array_almost_equal(sources, ica.transform(m.T))

    assert ica.mixing_.shape == (2, 2)

    for fn in [np.tanh, "exp(-.5(x^2))"]:
        ica = FastICA(fun=fn, algorithm=algo)
        assert_raises(ValueError, ica.fit, m.T)

    assert_raises(TypeError, FastICA(fun=range(10)).fit, m.T)
Example #9
File: ica.py Project: VolkanChen/jumeg
def ica_array(data_orig,
              explainedVar=1.0,
              overwrite=None,
              max_pca_components=None,
              method='infomax',
              cost_func='logcosh',
              weights=None,
              lrate=None,
              block=None,
              wchange=1e-16,
              annealdeg=60.,
              annealstep=0.9,
              n_subgauss=1,
              kurt_size=6000,
              maxsteps=200,
              verbose=True):
    """
    interface to perform (extended) Infomax ICA on a data array

        Parameters
        ----------
        data_orig : array of data to be decomposed [nchan, ntsl].
        explainedVar : float
            Value between 0 and 1; components will be selected by the
            cumulative percentage of explained variance.
        overwrite : if set the data array will be overwritten
            (this saves memory)
            default: overwrite=None
        max_pca_components : int | None
            The number of components used for PCA decomposition. If None, no
            dimension reduction will be applied and max_pca_components will equal
            the number of channels supplied on decomposing data.
        method : {'fastica', 'infomax', 'extended-infomax'}
          The ICA method to use. Defaults to 'infomax'.


        FastICA parameter:
        -----------------------------
        cost_func : String
             Cost function to use in FastICA algorithm. Could be
             either 'logcosh', 'exp' or 'cube'.


        (Extended) Infomax parameter:
        -----------------------------
        weights : initialize weights matrix
            default: None --> identity matrix is used
        lrate : initial learning rate (for most applications 1e-3 is
            a good start)
            --> smaller learning rates slow down the convergence;
            it merely indicates the relative size of the change in weights
            default: lrate = 0.010d/alog(nchan^2.0)
        block : the block size used to randomly extract (in time) a chop
            of data
            default: block = floor(sqrt(ntsl/3d))
        wchange : iteration stops when weight changes are smaller than this
            number
            default: wchange = 1e-16
        annealdeg : if the angle delta is larger than annealdeg (in degrees)
            the learning rate will be reduced
            default: annealdeg = 60
        annealstep : the learning rate will be reduced by this factor:
            lrate *= annealstep
            default: annealstep = 0.9
        extended : if set, extended Infomax ICA is performed
            default: None
        n_subgauss : int
            The number of subgaussian components. Only considered for extended
            Infomax.
            default: n_subgauss=1
        kurt_size : int
            The window size for kurtosis estimation. Only considered for extended
            Infomax.
            default: kurt_size=6000
        maxsteps : maximum number of iterations to be done
            default:  maxsteps = 200


        Returns
        -------
        weights : un-mixing matrix
        pca : instance of PCA
            Returns the instance of PCA where all information about the
            PCA decomposition are stored.
        activations : underlying sources
    """

    # -------------------------------------------
    # check overwrite option
    # -------------------------------------------
    if overwrite is None:
        data = data_orig.copy()
    else:
        data = data_orig

    # -------------------------------------------
    # perform centering and whitening of the data
    # -------------------------------------------
    if verbose:
        print "     ... perform centering and whitening ..."
    data, pca = whitening(data.T,
                          npc=max_pca_components,
                          explainedVar=explainedVar)

    # -------------------------------------------
    # now call the ICA algorithm
    # -------------------------------------------
    # FastICA
    if method == 'fastica':
        from sklearn.decomposition import fastica
        _, unmixing_, sources_ = fastica(data,
                                         fun=cost_func,
                                         max_iter=maxsteps,
                                         tol=1e-4,
                                         whiten=True)
        activations = sources_.T
        weights = unmixing_

    # Infomax or extended Infomax
    else:
        if method == 'infomax':
            extended = False
        elif method == 'extended-infomax':
            extended = True
        else:
            print ">>>> WARNING: Entered ICA method not found!"
            print ">>>>          Allowed are fastica, extended-infomax and infomax"
            print ">>>>          Using now the default ICA method which is Infomax"
            extended = False

        weights = infomax(data,
                          weights=weights,
                          l_rate=lrate,
                          block=block,
                          w_change=wchange,
                          anneal_deg=annealdeg,
                          anneal_step=annealstep,
                          extended=extended,
                          n_subgauss=n_subgauss,
                          kurt_size=kurt_size,
                          max_iter=maxsteps,
                          verbose=verbose)
        activations = np.dot(weights, data.T)

    # return results
    return weights, pca, activations
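A hypothetical call to ica_array, assuming the jumeg environment where its helpers (whitening, infomax) are importable; the data and parameter values below are illustrative only:

import numpy as np

nchan, ntsl = 32, 10000
data = np.random.randn(nchan, ntsl)            # stand-in for real recordings

weights, pca, activations = ica_array(data,
                                      explainedVar=0.95,
                                      method='fastica',
                                      cost_func='logcosh',
                                      maxsteps=500)
# weights: un-mixing matrix; pca: fitted PCA used for whitening;
# activations: estimated source time courses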
Example #10
    def evaluate(self):
        """
        Compute the independent sources 
        """
        cls_attr_name = self.__class__.__name__ + ".time_series"
        self.time_series.trait["data"].log_debug(owner=cls_attr_name)

        ts_shape = self.time_series.data.shape

        #Need more observations than variables
        if ts_shape[0] < ts_shape[2]:
            msg = "ICA requires a longer timeseries (tpts > number of nodes)."
            LOG.error(msg)
            raise Exception(msg)

        # Default to one component per node before validating n_components
        if self.n_components is None:
            self.n_components = ts_shape[2]

        #Need more variables than components
        if self.n_components > ts_shape[2]:
            msg = "ICA requires more variables than components to extract (number of nodes > number of components)."
            LOG.error(msg)
            raise Exception(msg)

        #(n_components, n_components, state-variables, modes) --  unmixing matrix
        unmixing_matrix_shape = (self.n_components, self.n_components,
                                 ts_shape[1], ts_shape[3])
        LOG.info("unmixing matrix shape will be: %s" %
                 str(unmixing_matrix_shape))

        # (n_components, nodes, state_variables, modes) -- prewhitening matrix
        prewhitening_matrix_shape = (self.n_components, ts_shape[2],
                                     ts_shape[1], ts_shape[3])
        LOG.info("prewhitening matrix shape will be: %s" %
                 str(prewhitening_matrix_shape))

        unmixing_matrix = numpy.zeros(unmixing_matrix_shape)
        prewhitening_matrix = numpy.zeros(prewhitening_matrix_shape)

        #(tpts, n_components, state_variables, modes) -- unmixed sources time series
        data_ica = numpy.zeros(
            (ts_shape[0], self.n_components, ts_shape[1], ts_shape[3]))

        #One un/mixing matrix for each state-var & mode.
        for mode in range(ts_shape[3]):
            for var in range(ts_shape[1]):
                # Assumes data must be whitened
                ica = fastica(self.time_series.data[:, var, :, mode],
                              n_components=self.n_components,
                              whiten=True)
                # unmixed sources - component_time_series
                data_ica[:, :, var, mode] = ica[2]
                # prewhitening matrix
                prewhitening_matrix[:, :, var, mode] = ica[0]
                # unmixing matrix
                unmixing_matrix[:, :, var, mode] = ica[1]

        util.log_debug_array(LOG, prewhitening_matrix, "whitening_matrix")
        util.log_debug_array(LOG, unmixing_matrix, "unmixing_matrix")

        ica_result = mode_decompositions.IndependentComponents(
            source=self.time_series,
            component_time_series=data_ica,
            #mixing_matrix = mixing_matrix,
            prewhitening_matrix=prewhitening_matrix,
            unmixing_matrix=unmixing_matrix,
            n_components=self.n_components,
            use_storage=False)

        return ica_result
Example #11
File: ica.py Project: l11x0m7/Paper
    totalSig = [gsigSin, gsigRect, gsigAngle, gsigNoise]
    # Mixed signals X
    mixSig = []
    for i, majorSig in enumerate(totalSig):
        curSig = ica.mixSignal(majorSig,
                               *(totalSig[:i] + totalSig[i + 1:]),
                               drawable=False)
        mixSig.append(curSig)
    mixSig.append(mixSig[0] + np.random.random(mixSig[0].shape))
    mixSig = np.asarray(mixSig)

    # Call our own fastICA implementation; whitening is applied by default
    # (skipping the whitening step seems to work noticeably worse).
    xWhiten, V = ica.whiten(mixSig)
    # The choice of fun depends on the assumed probability distribution of S;
    # it is usually assumed to be a sigmoid, which corresponds to tanh.
    W = ica.fastICA(xWhiten, fun='tanh', n_component=4)
    recoverSig = np.dot(np.dot(W, V), mixSig)
    ica.draw(totalSig, 1)
    ica.draw(mixSig, 2)
    ica.draw(recoverSig, 3)
    ica.show()

    # The following calls fastica from the sklearn package
    # V is the whitening transform, i.e. Z = V * X; W satisfies S = W * Z
    V, W, S = fastica(mixSig.T, 4)
    # Without whitening there would be no need to multiply by the whitening matrix
    # assert ((np.dot(np.dot(W, V), mixSig) - S.T) < 0.001).all()
    ica.draw(totalSig, 1)
    ica.draw(mixSig, 2)
    ica.draw(S.T, 3)
    ica.show()
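The commented-out assertion above compares np.dot(np.dot(W, V), mixSig) with S.T directly, but sklearn's fastica centers the data internally, so the unmixing identity only holds after subtracting the per-channel mean. A corrected sketch of that check, reusing the variables from this snippet:

    # Corrected consistency check for the sklearn call above (illustrative):
    mixCentered = mixSig - mixSig.mean(axis=1, keepdims=True)
    assert np.allclose(np.dot(np.dot(W, V), mixCentered), S.T)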
Example #13
# W and K come from an earlier decomposition not shown in this fragment
picard_mix = np.linalg.pinv(W @ K)
fitted_A__ = smica.A.dot(picard_mix)


brain_sources = ica_mne.compute_sources(
    raw.get_data(picks=picks), method="pinv"
)
K, W, _ = picard(brain_sources)
picard_mix = np.linalg.pinv(W @ K)
fitted_A_ = smica.A.dot(picard_mix)


brain_sources = ica_mne.compute_sources(
    raw.get_data(picks=picks), method="pinv"
)
K, W, _ = fastica(brain_sources.T)
picard_mix = np.linalg.pinv(W @ K)   # name reused from the picard fit above
fastica_ = smica.A.dot(picard_mix)


gofs = dipolarity(smica.A, raw, picks)[0]
gofj = dipolarity(jdiag.A, raw, picks)[0]
gofi = dipolarity(ica_mne.A, raw, picks)[0]
gofsi = dipolarity(Asi, raw, picks)[0]
gofp = dipolarity(fitted_A_, raw, picks)[0]
gof_ss = dipolarity(fitted_A__, raw, picks)[0]
goff = dipolarity(fastica_, raw, picks)[0]
gof_subspace = dipolarity(fitted_A, raw, picks)[0]
plt.figure()
# plt.plot(np.sort(gofs), label='smica')
plt.plot(np.sort(gofj), label="jdiag")