Code Example #1
def compute_decomposition(algo_name, subject, n_components, decomp_num,
                          n_decomp):
    runs = [6, 10, 14]  # motor imagery: hands vs feet
    filename = '/home/pierre/work/smica/dataset/%s.set' % SUBJECTS[subject]
    epochs = mne.io.read_epochs_eeglab(filename, verbose='CRITICAL')
    epochs.set_channel_types({'LEYE': 'eog', 'REYE': 'eog'})
    epochs.filter(2, 60)
    picks = mne.pick_types(epochs.info,
                           meg=False,
                           eeg=True,
                           eog=False,
                           stim=False,
                           exclude='bads')
    n_bins = 40
    freqs = np.linspace(2, 60, n_bins + 1)
    if algo_name == 'smica':
        algorithm = ICA(n_components=n_components, freqs=freqs, rng=0)
        algo_args = dict(em_it=100000, tol=1e-8, n_it_min=100000)
        mixing = standard_mixing(epochs, picks, algorithm, algo_args)
    if algo_name == 'jdiag':
        algorithm = JDIAG_mne(n_components=n_components, freqs=freqs, rng=0)
        algo_args = dict(max_iter=1000, tol=1e-10)
        mixing = standard_mixing(epochs, picks, algorithm, algo_args)
    if algo_name == 'sobi':
        algorithm = SOBI_mne(1000,
                             n_components=n_components,
                             freqs=freqs,
                             rng=0)
        algo_args = dict()
        mixing = standard_mixing(epochs, picks, algorithm, algo_args)
    if algo_name == 'infomax':
        jdiag = JDIAG_mne(n_components=n_components, freqs=freqs, rng=0)
        jdiag.fit(epochs, picks, max_iter=10)
        sources = jdiag.compute_sources(method='pinv')
        K, W, _ = picard(sources, max_iter=1000, ortho=False)
        picard_mix = np.linalg.pinv(np.dot(W, K))
        mixing = np.dot(jdiag.A, picard_mix)
    if algo_name in ['pinv_infomax', 'wiener_infomax']:
        smica = ICA(n_components=n_components, freqs=freqs, rng=0)
        algo_args = dict(em_it=100000, tol=1e-8, n_it_min=100000)
        smica.fit(epochs, picks, corr=False, **algo_args)
        method = {
            'pinv_infomax': 'pinv',
            'wiener_infomax': 'wiener'
        }[algo_name]
        sources = smica.compute_sources(method=method)
        K, W, _ = picard(sources, max_iter=1000, ortho=False)
        picard_mix = np.linalg.pinv(np.dot(W, K))
        mixing = np.dot(smica.A, picard_mix)
    gof, _, _ = dipolarity(mixing, epochs, picks)
    print(decomp_num, n_decomp)
    return gof
Code Example #2
File: test_solver.py Project: RomainBrault/picard
def test_bad_custom_density():
    class CustomDensity(object):
        def log_lik(self, Y):
            return Y**4 / 4

        def score_and_der(self, Y):
            return Y**3, 3 * Y**2 + 2.

    fun = CustomDensity()
    X = np.random.randn(2, 10)
    try:
        picard(X, fun=fun, random_state=0)
    except AssertionError:
        pass
    else:
        raise AssertionError('Bad function undetected')
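For contrast, a consistent density passes picard's check: the score must be the derivative of the log-likelihood, and the reported score derivative must match. A minimal sketch (mirroring picard's custom-density example; same imports as the test above):

class ValidCustomDensity(object):
    # log-likelihood Y**4 / 4  =>  score Y**3  =>  score derivative 3 * Y**2
    def log_lik(self, Y):
        return Y ** 4 / 4

    def score_and_der(self, Y):
        return Y ** 3, 3 * Y ** 2


X = np.random.randn(2, 10)
K, W, Y = picard(X, fun=ValidCustomDensity(), random_state=0)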
Code Example #3
File: base.py Project: Risitop/Stabilized_ICA
def _ICA_decomposition(X, dict_params, method, max_iter):
    """ Apply FastICA or infomax (picard package) algorithm to the matrix X to solve the ICA problem.
               
    Parameters
    ----------
    X : 2D array-like, shape (n_observations, n_components)
        Whitened matrix.

    dict_params : dict
        Dictionary of keyword arguments for the functions FastICA or picard.
        See _check_algorithm.

    method : str {'picard', 'fastica'}
        Python algorithm to solve the ICA problem. Either FastICA from
        scikit-learn or infomax and its extensions from the picard package.
        See _check_algorithm.
        
    max_iter : int
        see https://scikit-learn.org/stable/modules/generated/sklearn.decomposition.FastICA.html

    Returns
    -------
    2D array, shape (n_components, n_observations)
        Components obtained from the ICA decomposition of X.

    """
    if method == 'picard':
        _, _, S = picard(X.T,
                         max_iter=max_iter,
                         whiten=False,
                         centering=False,
                         **dict_params)
    else:
        ica = FastICA(max_iter=max_iter, whiten=False, **dict_params)
        S = ica.fit_transform(X).T
    return S
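A minimal sketch of how this helper might be driven, with the whitening done beforehand via scikit-learn's PCA (the driver code below is hypothetical, not part of the project):

import numpy as np
from sklearn.decomposition import PCA

rng = np.random.RandomState(0)
X_raw = rng.laplace(size=(1000, 5))            # (n_observations, n_features)
X_white = PCA(n_components=3, whiten=True).fit_transform(X_raw)
S = _ICA_decomposition(X_white, dict_params={}, method='picard', max_iter=200)
print(S.shape)                                 # (n_components, n_observations)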
Code Example #4
File: test_solver.py Project: RomainBrault/picard
def test_dots():
    N, T = 5, 100
    rng = np.random.RandomState(42)
    S = rng.laplace(size=(N, T))
    A = rng.randn(N, N)
    X = np.dot(A, S)
    n_components = [N, 3]
    tf = [False, True]
    w_inits = [None, 'id']
    for n_component, ortho, whiten, w_init in product(n_components, tf, tf,
                                                      w_inits):
        if w_init == 'id':
            if whiten:
                w_init = np.eye(n_component)
            else:
                w_init = np.eye(N)
        with warnings.catch_warnings(record=True):
            K, W, Y, X_mean = picard(X.copy(),
                                     ortho=ortho,
                                     whiten=whiten,
                                     return_X_mean=True,
                                     w_init=w_init,
                                     n_components=n_component,
                                     random_state=rng,
                                     max_iter=2,
                                     verbose=False)
        if not whiten:
            K = np.eye(N)
        if ortho and whiten:
            assert_allclose(Y.dot(Y.T) / T, np.eye(n_component), atol=1e-8)
        Y_prime = np.dot(W, K).dot(X - X_mean[:, None])
        assert_allclose(Y, Y_prime, atol=1e-7)
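The identity checked above, Y = W K (X - X_mean), also gives the inverse direction. A minimal sketch of reconstructing the data from the sources (assuming picard's default whiten=True):

rng = np.random.RandomState(0)
X = np.dot(rng.randn(3, 3), rng.laplace(size=(3, 1000))) + 1.0
K, W, Y, X_mean = picard(X, return_X_mean=True, random_state=0)
X_rec = np.linalg.pinv(W.dot(K)).dot(Y) + X_mean[:, None]  # undo unmixing, re-add mean
assert np.allclose(X, X_rec)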
Code Example #5
File: test_solver.py Project: RomainBrault/picard
def test_pre_fastica():
    N, T = 3, 1000
    rng = np.random.RandomState(42)
    names = ['tanh', 'cube']
    for j, fun in enumerate([Tanh(params=dict(alpha=0.5)), 'cube']):
        if j == 0:
            S = rng.laplace(size=(N, T))
        else:
            S = rng.uniform(low=-1, high=1, size=(N, T))
        A = rng.randn(N, N)
        X = np.dot(A, S)
        K, W, Y = picard(X.copy(),
                         fun=fun,
                         ortho=False,
                         random_state=0,
                         fastica_it=10)
        if fun == 'tanh':
            fun = Tanh()
        elif fun == 'exp':
            fun = Exp()
        elif fun == 'cube':
            fun = Cube()
        # Get the final gradient norm
        psiY = fun.score_and_der(Y)[0]
        G = np.inner(psiY, Y) / float(T) - np.eye(N)
        err_msg = 'fun %s, gradient norm greater than tol' % names[j]
        assert_allclose(G, np.zeros((N, N)), atol=1e-7, err_msg=err_msg)
        assert_equal(Y.shape, X.shape)
        assert_equal(W.shape, A.shape)
        assert_equal(K.shape, A.shape)
        WA = W.dot(K).dot(A)
        WA = permute(WA)  # Permute and scale
        err_msg = 'fun %s, wrong unmixing matrix' % names[j]
        assert_allclose(WA, np.eye(N), rtol=0, atol=1e-1, err_msg=err_msg)
Code Example #6
File: test_solver.py Project: RomainBrault/picard
def test_picardo():
    N, T = 3, 2000
    rng = np.random.RandomState(4)
    S = rng.laplace(size=(N, T))
    A = rng.randn(N, N)
    X = np.dot(A, S)
    names = ['tanh', 'exp', 'cube']
    for fastica_it in [None, 2]:
        for fun in names:
            print(fun)
            K, W, Y = picard(X.copy(),
                             fun=fun,
                             ortho=True,
                             random_state=rng,
                             fastica_it=fastica_it,
                             verbose=True)
            if fun == 'tanh':
                fun = Tanh()
            elif fun == 'exp':
                fun = Exp()
            elif fun == 'cube':
                fun = Cube()
            # Get the final gradient norm
            psiY = fun.score_and_der(Y)[0]
            G = np.inner(psiY, Y) / float(T) - np.eye(N)
            G = (G - G.T) / 2.  # take skew-symmetric part
            err_msg = 'fun %s, gradient norm greater than tol' % fun
            assert_allclose(G, np.zeros((N, N)), atol=1e-7, err_msg=err_msg)
            assert_equal(Y.shape, X.shape)
            assert_equal(W.shape, A.shape)
            assert_equal(K.shape, A.shape)
            WA = W.dot(K).dot(A)
            WA = permute(WA)  # Permute and scale
            err_msg = 'fun %s, wrong unmixing matrix' % fun
            assert_allclose(WA, np.eye(N), rtol=0, atol=0.1, err_msg=err_msg)
Code Example #7
File: test_solver.py Project: arokem/picard
def test_no_regression():
    n_tests = 10
    baseline = {}
    baseline['lap', True] = 17.
    baseline['lap', False] = 23.
    baseline['gauss', True] = 58.
    baseline['gauss', False] = 60.
    N, T = 10, 1000
    for mode in ['lap', 'gauss']:
        for ortho in [True, False]:
            n_iters = []
            for i in range(n_tests):
                rng = np.random.RandomState(i)
                if mode == 'lap':
                    S = rng.laplace(size=(N, T))
                else:
                    S = rng.randn(N, T)
                A = rng.randn(N, N)
                X = np.dot(A, S)
                _, _, _, n_iter = picard(X,
                                         return_n_iter=True,
                                         ortho=ortho,
                                         random_state=rng)
                n_iters.append(n_iter)
            n_mean = np.mean(n_iters)
            nb_mean = baseline[mode, ortho]
            err_msg = 'mode=%s, ortho=%s. %d iterations, expecting <%d.'
            assert n_mean < nb_mean, err_msg % (mode, ortho, n_mean, nb_mean)
Code Example #8
File: groupica.py Project: DawnSmithaa/mvlearn
    def fit(self, Xs, y=None):
        r"""Fit to the data.

        Estimate the parameters of the model

        Parameters
        ----------
        Xs : list of array-likes or numpy.ndarray
            - Xs length: n_views
            - Xs[i] shape: (n_samples, n_features_i)

        y : None
            Ignored variable.

        Returns
        -------
        self : object
            Returns the instance itself.
        """
        Xs = check_Xs(Xs, copy=True)
        self.means_ = [np.mean(X, axis=0) for X in Xs]
        gpca = GroupPCA(
            n_components=self.n_components,
            n_individual_components=self.n_individual_components,
            prewhiten=self.prewhiten,
            whiten=True,
            multiview_output=False,
            random_state=self.random_state,
        )
        X_pca = gpca.fit_transform(Xs)
        self.grouppca_ = gpca
        if self.solver == "fastica":
            K, W, sources = fastica(X_pca,
                                    **self.ica_kwargs,
                                    random_state=self.random_state)
        else:
            K, W, sources = picard(X_pca.T,
                                   **self.ica_kwargs,
                                   random_state=self.random_state)
            sources = sources.T
        if K is not None:
            self.components_ = np.dot(W, K)
        else:
            self.components_ = W
        self.mixing_ = linalg.pinv(self.components_)
        # Compute individual unmixing matrices by least-squares
        self.individual_mixing_ = []
        self.individual_components_ = []
        sources_pinv = linalg.pinv(sources)
        for X, mean in zip(Xs, self.means_):
            lstq_solution = np.dot(sources_pinv, X - mean)
            self.individual_components_.append(linalg.pinv(lstq_solution).T)
            self.individual_mixing_.append(lstq_solution.T)
        self.n_components_ = gpca.n_components_
        self.n_features_ = gpca.n_features_
        self.n_samples_ = gpca.n_samples_
        self.n_views_ = gpca.n_views_
        return self
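A hedged usage sketch of this estimator (assuming mvlearn's import path, and that the constructor exposes n_components, solver and random_state as the fit code above suggests):

import numpy as np
from mvlearn.decomposition import GroupICA

rng = np.random.RandomState(0)
S = rng.laplace(size=(1000, 3))                    # shared sources
Xs = [S.dot(rng.randn(3, 5)) for _ in range(4)]    # one mixed view per group
ica = GroupICA(n_components=3, solver='picard', random_state=0).fit(Xs)
print(ica.mixing_.shape)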
Code Example #9
File: test_solver.py Project: RomainBrault/picard
def test_dimension_reduction():
    N, T = 5, 1000
    n_components = 3
    rng = np.random.RandomState(42)
    S = rng.laplace(size=(N, T))
    A = rng.randn(N, N)
    X = np.dot(A, S)
    K, W, Y = picard(X.copy(),
                     n_components=n_components,
                     ortho=False,
                     random_state=rng,
                     max_iter=2)
    assert_equal(K.shape, (n_components, N))
    assert_equal(W.shape, (n_components, n_components))
    assert_equal(Y.shape, (n_components, T))
    with warnings.catch_warnings(record=True) as w:
        K, W, Y = picard(X.copy(),
                         n_components=n_components,
                         ortho=False,
                         whiten=False,
                         max_iter=1)
        assert len(w) == 2
Code Example #10
File: test_solver.py Project: RomainBrault/picard
def test_shift():
    N, T = 5, 1000
    rng = np.random.RandomState(42)
    S = rng.laplace(size=(N, T))
    A = rng.randn(N, N)
    offset = rng.randn(N)
    X = np.dot(A, S) + offset[:, None]
    _, W, Y, X_mean = picard(X.copy(),
                             ortho=False,
                             whiten=False,
                             return_X_mean=True,
                             random_state=rng)
    assert_allclose(offset, X_mean, rtol=0, atol=0.2)
    WA = W.dot(A)
    WA = permute(WA)
    assert_allclose(WA, np.eye(N), rtol=0, atol=0.2)
    _, W, Y, X_mean = picard(X.copy(),
                             ortho=False,
                             whiten=False,
                             centering=False,
                             return_X_mean=True,
                             random_state=rng)
    assert_allclose(X_mean, 0)
Code Example #11
    def _single_view_fit(self, X):
        Ki, Wi, Si = picard(
            X,
            ortho=False,
            extended=False,
            centering=False,
            max_iter=self.max_iter,
            tol=self.tol,
            random_state=self.random_state,
        )
        scale = np.linalg.norm(Si, axis=1)
        Si = Si / scale[:, None]
        Wi = np.dot(Wi, Ki) / scale[:, None]

        return Si, Wi
Code Example #12
File: test_solver.py Project: agramfort/picard
def test_picard():
    N, T = 2, 10000
    rng = np.random.RandomState(42)
    S = rng.laplace(size=(N, T))
    A = rng.randn(N, N)
    X = np.dot(A, S)
    for precon in [1, 2]:
        Y, W = picard(X, precon=precon, verbose=True)
        # Get the final gradient norm
        G = np.inner(np.tanh(Y / 2.), Y) / float(T) - np.eye(N)
        assert_allclose(G, np.zeros((N, N)), atol=1e-7)
        assert_equal(Y.shape, X.shape)
        assert_equal(W.shape, A.shape)
        WA = np.dot(W, A)
        WA = get_perm(WA)[1]  # Permute and scale
        assert_allclose(WA, np.eye(N), rtol=1e-2, atol=1e-2)
Code Example #13
    def fit(self, Xs, y=None):
        r"""
        Fits the model to the views Xs.

        Parameters
        ----------
        Xs : list of array-likes or numpy.ndarray
            - Xs length: n_views
            - Xs[i] shape: (n_samples, n_features_i)
            Training data to recover a source and unmixing matrices from.
        y : ignored

        Returns
        -------
        self : returns an instance of itself.
        """
        P, Xs = _reduce_data(
            Xs, self.n_components, self.n_jobs
        )
        Xs = np.asarray([X.T for X in Xs])
        n_pb, p, n = Xs.shape
        Xs_concat = np.vstack(Xs)
        U, S, V = np.linalg.svd(Xs_concat, full_matrices=False)
        U = U[:, :p]
        S = S[:p]
        V = V[:p]
        Xs_reduced = np.diag(S).dot(V)
        K, W, S = picard(
            Xs_reduced,
            ortho=False,
            extended=False,
            centering=False,
            max_iter=self.max_iter,
            tol=self.tol,
            random_state=self.random_state,
        )
        scale = np.linalg.norm(S, axis=1)
        S = S / scale[:, None]
        W = np.array([S.dot(np.linalg.pinv(X)) for X in Xs])
        self.components_ = P
        self.unmixings_ = np.swapaxes(W, 1, 2)
        self.source_ = S.T

        return self
Code Example #14
File: test_solver.py Project: arokem/picard
def test_extended():
    N, T = 4, 2000
    n = N // 2
    rng = np.random.RandomState(42)

    S = np.concatenate(
        (rng.laplace(size=(n, T)), rng.uniform(low=-1, high=1, size=(n, T))),
        axis=0)
    print(S.shape)
    A = rng.randn(N, N)
    X = np.dot(A, S)
    K, W, Y = picard(X, ortho=False, random_state=0, extended=True)
    assert Y.shape == X.shape
    assert W.shape == A.shape
    assert K.shape == A.shape
    WA = W.dot(K).dot(A)
    WA = permute(WA)  # Permute and scale
    err_msg = 'wrong unmixing matrix'
    assert_allclose(WA, np.eye(N), rtol=0, atol=1e-1, err_msg=err_msg)
Code Example #15
def ica_find_rotation(basis, n_subjects_ica, random_state):
    """
    Finds rotation r such that
    r.dot(srm.basis_list[0]) is the appropriate basis

    Parameters
    ----------

    basis: list of array

    n_subjects_ica: int
        Number of randomly selected subject used to fit ica

    random_state : RandomState instance
        Random number generator used to select the subjects fitted by ICA.
    """
    if n_subjects_ica == 0:
        raise ValueError("ICA is used to find optimal rotation but "
                         "n_subjects_ica == 0. Please set a positive "
                         "value for n_subjects_ica")

    if n_subjects_ica is None:
        index = np.arange(len(basis))
        n_subjects_ica = len(basis)
    else:
        index = random_state.choice(np.arange(len(basis)),
                                    size=n_subjects_ica,
                                    replace=False)

    used_basis = []
    for i in index:
        basis_i = safe_load(basis[i])
        used_basis.append(basis_i)

    used_basis = np.concatenate(used_basis, axis=1) / np.sqrt(n_subjects_ica)

    n_features, n_samples = used_basis.shape
    used_basis = used_basis * np.sqrt(n_samples)
    K, W, Y = picard(used_basis, whiten=False, max_iter=1000)
    return W
Code Example #16
File: plot_ica_synth.py Project: stralu/picard
n_samples = 2000
time = np.linspace(0, 8, n_samples)

s1 = np.sin(2 * time) * np.sin(40 * time)
s2 = np.sin(3 * time)**5
s3 = np.random.laplace(size=s1.shape)

S = np.c_[s1, s2, s3].T

S /= S.std(axis=1)[:, np.newaxis]  # Standardize data
# Mix data
A = np.array([[1, 1, 1], [0.5, 2, 1.0], [1.5, 1.0, 2.0]])  # Mixing matrix
X = np.dot(A, S)  # Generate observations

# Compute ICA
_, _, Y_picard = picard(X, ortho=False, random_state=0)
_, _, Y_picardo = picard(X, ortho=True, random_state=0)

###############################################################################
# Plot results

models = [X, S, Y_picard, Y_picardo]
names = [
    'Observations (mixed signal)', 'True Sources',
    'ICA recovered signals with Picard', 'ICA recovered signals with Picard-O'
]
colors = ['red', 'steelblue', 'orange']

for ii, (model, name) in enumerate(zip(models, names), 1):
    fig, axes = plt.subplots(3, 1, figsize=(6, 4), sharex=True, sharey=True)
    plt.suptitle(name)
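The snippet is cut off mid-loop by the extractor; a plausible completion (hypothetical, not the upstream code) draws each of the model's three rows on its own stacked panel:

    for ax, sig, color in zip(axes, model, colors):
        ax.plot(sig, color=color)  # one source/observation per panel

plt.show()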
Code Example #17
File: plot_ica_eeg.py Project: kwayeke/picard
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'

raw = mne.io.read_raw_fif(raw_fname, preload=True)
raw.filter(1, 40, n_jobs=1)  # 1Hz high pass is often helpful for fitting ICA

picks = mne.pick_types(raw.info, meg=False, eeg=True, eog=False,
                       stim=False, exclude='bads')

random_state = 0
data = raw[picks, :][0]
data = data[:, ::2]  # decimate a bit

###############################################################################
# Run ICA on data, after reducing the dimension

K, W, Y = picard(data, n_components=30, ortho=True, random_state=0)

###############################################################################
# Plot results

n_plots = 10
T_plots = 1000
order = np.argsort(kurtosis(Y[:, :T_plots], axis=1))[::-1]
models = [data[:n_plots], Y[order[:n_plots][::-1]]]
names = ['Observations (raw EEG)',
         'ICA recovered sources']

fig, axes = plt.subplots(2, 1, figsize=(7, 7))
for ii, (model, name, ax) in enumerate(zip(models, names, axes)):
    ax.set_title(name)
    ax.get_xaxis().set_visible(False)
Code Example #18
File: ica.py Project: Kotzly/BCI_MsC
    def _fit(self, data, fit_type):
        """Aux function."""
        random_state = check_random_state(self.random_state)
        n_channels, n_samples = data.shape
        self._compute_pre_whitener(data)
        data = self._pre_whiten(data)

        pca = _PCA(n_components=self._max_pca_components, whiten=True)
        data = pca.fit_transform(data.T)
        use_ev = pca.explained_variance_ratio_
        n_pca = self.n_pca_components
        if isinstance(n_pca, float):
            n_pca = int(_exp_var_ncomp(use_ev, n_pca)[0])
        elif n_pca is None:
            n_pca = len(use_ev)
        assert isinstance(n_pca, (int, np.int_))

        # If user passed a float, select the PCA components explaining the
        # given cumulative variance. This information will later be used to
        # only submit the corresponding parts of the data to ICA.
        if self.n_components is None:
            # None case: check if n_pca_components or 0.999999 yields smaller
            msg = "Selecting by non-zero PCA components"
            self.n_components_ = min(n_pca,
                                     _exp_var_ncomp(use_ev, 0.999999)[0])
        elif isinstance(self.n_components, float):
            self.n_components_, ev = _exp_var_ncomp(use_ev, self.n_components)
            if self.n_components_ == 1:
                raise RuntimeError(
                    "One PCA component captures most of the "
                    f"explained variance ({100 * ev}%), your threshold "
                    "results in 1 component. You should select "
                    "a higher value.")
            msg = "Selecting by explained variance"
        else:
            msg = "Selecting by number"
            self.n_components_ = _ensure_int(self.n_components)
        # check to make sure something okay happened
        if self.n_components_ > n_pca:
            ev = np.cumsum(use_ev)
            ev /= ev[-1]
            evs = 100 * ev[[self.n_components_ - 1, n_pca - 1]]
            raise RuntimeError(
                f"n_components={self.n_components} requires "
                f"{self.n_components_} PCA values (EV={evs[0]:0.1f}%) but "
                f"n_pca_components ({self.n_pca_components}) results in "
                f"only {n_pca} components (EV={evs[1]:0.1f}%)")
        logger.info("%s: %s components" % (msg, self.n_components_))

        # the things to store for PCA
        self.pca_mean_ = pca.mean_
        self.pca_components_ = pca.components_
        self.pca_explained_variance_ = pca.explained_variance_
        del pca
        # update number of components
        self._update_ica_names()
        if self.n_pca_components is not None and self.n_pca_components > len(
                self.pca_components_):
            raise ValueError(
                f"n_pca_components ({self.n_pca_components}) is greater than "
                f"the number of PCA components ({len(self.pca_components_)})")

        # take care of ICA
        sel = slice(0, self.n_components_)
        if self.method == "fastica":
            from sklearn.decomposition import FastICA

            ica = FastICA(whiten=False,
                          random_state=random_state,
                          **self.fit_params)
            ica.fit(data[:, sel])
            self.unmixing_matrix_ = ica.components_
            self.n_iter_ = ica.n_iter_
        elif self.method in ("infomax", "extended-infomax"):
            unmixing_matrix, n_iter = infomax(
                data[:, sel],
                random_state=random_state,
                return_n_iter=True,
                **self.fit_params,
            )
            self.unmixing_matrix_ = unmixing_matrix
            self.n_iter_ = n_iter
            del unmixing_matrix, n_iter
        elif self.method == "picard":
            from picard import picard

            _, W, _, n_iter = picard(
                data[:, sel].T,
                whiten=False,
                return_n_iter=True,
                random_state=random_state,
                **self.fit_params,
            )
            self.unmixing_matrix_ = W
            self.n_iter_ = n_iter + 1  # picard() starts counting at 0
            del _, n_iter
        elif self.method in _coro_kwargs_dict:
            kwargs = _coro_kwargs_dict[self.method]
            coroica_constructor = UwedgeICA if self.method != "coro" else CoroICA
            coroica = coroica_constructor(n_components=self.n_components,
                                          **kwargs)
            coroica.fit(data[:, sel])
            self.unmixing_matrix_ = coroica.V_
            self.n_iter_ = coroica.n_iter_ + 1

        elif self.method in _jade_kwargs_dict:
            kwargs = _jade_kwargs_dict[self.method]
            jade_ica = JadeICA(self.n_components, **kwargs)
            jade_ica.fit(data[:, sel].T)
            self.unmixing_matrix_ = jade_ica.B
            self.n_iter_ = jade_ica.n_iter + 1

        assert self.unmixing_matrix_.shape == (self.n_components_, ) * 2
        norms = self.pca_explained_variance_
        stable = norms / norms[0] > 1e-6  # to be stable during pinv
        norms = norms[:self.n_components_]
        if not stable[self.n_components_ - 1]:
            max_int = np.where(stable)[0][-1] + 1
            warn(f"Using n_components={self.n_components} (resulting in "
                 f"n_components_={self.n_components_}) may lead to an "
                 f"unstable mixing matrix estimation because the ratio "
                 f"between the largest ({norms[0]:0.2g}) and smallest "
                 f"({norms[-1]:0.2g}) variances is too large (> 1e6); "
                 f"consider setting n_components=0.999999 or an "
                 f"integer <= {max_int}")
        norms = np.sqrt(norms)
        norms[norms == 0] = 1.0
        self.unmixing_matrix_ /= norms  # whitening
        self._update_mixing_matrix()
        self.current_fit = fit_type
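End users reach this private path through MNE's public class; a minimal hedged sketch (assuming raw is an mne.io.Raw object loaded elsewhere; fit_params is forwarded to picard):

from mne.preprocessing import ICA

ica = ICA(n_components=15, method='picard',
          fit_params=dict(ortho=True, extended=True),  # Picard-O, extended
          random_state=0)
ica.fit(raw)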
Code Example #19
File: matDecomposition.py Project: MLBeginners/spkit
    def fit(self, X, normalize=False):
        """Run the ICA decomposition on X.


        X = array like: Shape (nf,ns) or (nCh, nSamples)

        """

        if self.max_pca_components is None:
            self.max_pca_components = X.shape[0]

        self.n_samples_ = X.shape[1]

        Xw, self.whitener_ = self.whitening(X)

        from sklearn.decomposition import PCA

        if not check_version('sklearn', '0.18'):
            pca = PCA(n_components=self.max_pca_components,
                      whiten=True,
                      copy=True)
        else:
            pca = PCA(n_components=self.max_pca_components,
                      whiten=True,
                      copy=True,
                      svd_solver='full')

        Xpca = pca.fit_transform(Xw.T)

        self.pca_mean_ = pca.mean_
        self.pca_components_ = pca.components_
        self.pca_explained_variance_ = exp_var = pca.explained_variance_
        if not check_version('sklearn', '0.16'):
            # sklearn < 0.16 did not apply whitening to the components, so we
            # need to do this manually
            self.pca_components_ *= np.sqrt(exp_var[:, None])
        del pca

        if self.method == 'fastica':
            from sklearn.decomposition import FastICA
            ica = FastICA(whiten=False,
                          random_state=self.random_state,
                          **self.fit_params)
            ica.fit(Xpca)
            self.unmixing_matrix_ = ica.components_

        elif self.method in ('infomax', 'extended-infomax'):
            self.unmixing_matrix_ = infomax(Xpca,
                                            random_state=self.random_state,
                                            **self.fit_params)

        elif self.method == 'picard':
            from picard import picard
            _, W, _ = picard(Xpca.T,
                             whiten=False,
                             random_state=self.random_state,
                             **self.fit_params)
            del _

            self.unmixing_matrix_ = W

        self.unmixing_matrix_ /= np.sqrt(exp_var)[None, :]  # whitening
        self.mixing_matrix_ = np.linalg.pinv(self.unmixing_matrix_)

        nf, ns = X.shape
        var = np.sum(self.mixing_matrix_**2, axis=0) * np.sum(
            X**2, axis=1) / (nf * ns - 1)
        if normalize:
            var /= var.sum()

        order = var.argsort()[::-1]
        self.mixing_matrix_ = self.mixing_matrix_[:, order]
        self.unmixing_matrix_ = self.unmixing_matrix_[order, :]
Code Example #20
def groupica(
    X,
    n_components=None,
    dimension_reduction="pca",
    max_iter=1000,
    random_state=None,
    tol=1e-7,
    ortho=False,
    extended=False,
):
    """
    Performs PCA on data concatenated across groups (e.g. subjects)
    and applies ICA to the reduced data.

    Parameters
    ----------
    X : np array of shape (n_groups, n_features, n_samples)
        Training data, where n_groups is the number of groups,
        n_features is the number of features and
        n_samples is the number of samples.
    n_components : int, optional
        Number of components to extract.
        If None, no dimension reduction is performed
    dimension_reduction: str, optional
        if srm: use srm to reduce the data
        if pca: use group specific pca to reduce the data
    max_iter : int, optional
        Maximum number of iterations to perform
    random_state : int, RandomState instance or None, optional (default=None)
        Used to perform a random initialization. If int, random_state is
        the seed used by the random number generator; If RandomState
        instance, random_state is the random number generator; If
        None, the random number generator is the RandomState instance
        used by np.random.
    tol : float, optional
        A positive scalar giving the tolerance at which
        the un-mixing matrices are considered to have converged.
    ortho: bool, optional
        If True, uses Picard-O. Otherwise, uses the standard Picard.
    extended: None or bool, optional
        If True, uses the extended algorithm to separate sub and
        super-Gaussian sources.
        By default, True if ortho == True, False otherwise.

    Returns
    -------
    P : np array of shape (n_groups, n_components, n_features)
        P is the projection matrix that projects the data into the reduced space
    W : np array of shape (n_groups, n_components, n_components)
        Estimated un-mixing matrices
    S : np array of shape (n_components, n_samples)
        Estimated source


    See also
    --------
    permica
    multiviewica
    """
    P, X = reduce_data(X,
                       n_components=n_components,
                       dimension_reduction=dimension_reduction)
    n_pb, p, n = X.shape
    X_concat = np.vstack(X)
    U, S, V = randomized_svd(X_concat, n_components=p)
    X_reduced = np.diag(S).dot(V)
    U = np.split(U, n_pb, axis=0)
    K, W, S = picard(
        X_reduced,
        ortho=ortho,
        extended=extended,
        centering=False,
        max_iter=max_iter,
        tol=tol,
        random_state=random_state,
    )
    scale = np.linalg.norm(S, axis=1)
    S = S / scale[:, None]
    W = np.array([S.dot(np.linalg.pinv(x)) for x in X])
    return P, W, S
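A hedged usage sketch (assuming groupica and its reduce_data helper are in scope, e.g. from the multiviewica package):

import numpy as np

rng = np.random.RandomState(0)
S_true = rng.laplace(size=(4, 1000))          # shared sources
A = rng.randn(3, 5, 4)                        # one mixing matrix per group
X = np.array([a.dot(S_true) for a in A])      # (n_groups, n_features, n_samples)
P, W, S_est = groupica(X, n_components=4, random_state=0)
print(P.shape, W.shape, S_est.shape)          # (3, 4, 5), (3, 4, 4), (4, 1000)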
Code Example #21
# #
# #
# sobi = SOBI_mne(100, n_components, freqs, rng=0)
# sobi.fit(raw, picks=picks)
raw.filter(2, 70)
ica = ICA_mne(n_components=n_components, method='fastica', random_state=0)
ica.fit(raw, picks=picks)

ica_mne = transfer_to_ica(raw, picks, freqs,
                          ica.get_sources(raw).get_data(),
                          ica.get_components())

brain_sources = smica.compute_sources()
K, W, _ = picard(brain_sources,
                 ortho=False,
                 verbose=True,
                 random_state=0,
                 max_iter=1000)
picard_mix = np.linalg.pinv(W @ K)
A_wiener = smica.A.dot(picard_mix)
gof_wiener = dipolarity(A_wiener, raw, picks, fname_bem, n_jobs=3)[0]

brain_sources = smica.compute_sources(method='pinv')
K, W, _ = picard(brain_sources,
                 ortho=False,
                 verbose=True,
                 random_state=0,
                 max_iter=1000)
picard_mix = np.linalg.pinv(W @ K)
A_pinv = smica.A.dot(picard_mix)
gof_pinv = dipolarity(A_pinv, raw, picks, fname_bem, n_jobs=3)[0]
Code Example #22
data = data[:, ::2]  # decimate a bit

# Center
data -= np.mean(data, axis=1, keepdims=True)

# Apply PCA for dimension reduction and whitening.

n_components = 30
pca = PCA(n_components=n_components, whiten=True, svd_solver='full')
pca.fit(data)

X = pca.components_ * np.sqrt(data.shape[1])

# Run ICA on X

Y, W = picard(X)

###############################################################################
# Plot results
###############################################################################

n_plots = 10
order = np.argsort(kurtosis(Y[:, :1000], axis=1))[::-1]
models = [data[:n_plots], Y[order[:n_plots][::-1]]]
names = ['Observations (raw EEG)', 'ICA recovered sources']
fig, axes = plt.subplots(2, 1, figsize=(7, 7))
for ii, (model, name, ax) in enumerate(zip(models, names, axes)):
    ax.set_title(name)
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)
    offsets = np.max(model, axis=1) - np.min(model, axis=1)
Code Example #23
File: lab02.py Project: HappyButter-WFiIS/WFiIS-IMN
import picard
import newton
import rk2

if __name__ == "__main__":
    picard.picard()
    newton.newton()
    rk2.rk2()
Code Example #24
###############################################################################
# Plot the corresponding functions

x = np.linspace(-2, 2, 100)
log_likelihood = custom_density.log_lik(x)
psi, psi_der = custom_density.score_and_der(x)

names = ['log-likelihood', 'score', 'score derivative']

plt.figure()
for values, name in zip([log_likelihood, psi, psi_der], names):
    plt.plot(x, values, label=name)
plt.legend()
plt.title("Custom density")
plt.show()

###############################################################################
# Run Picard on toy dataset using this density

rng = np.random.RandomState(0)
N, T = 5, 1000
S = rng.laplace(size=(N, T))
A = rng.randn(N, N)
X = np.dot(A, S)
K, W, Y = picard(X, fun=custom_density, random_state=0)
plt.figure()
plt.imshow(permute(W.dot(K).dot(A)), interpolation='nearest')
plt.title('Product between the estimated unmixing matrix and the mixing '
          'matrix')
plt.show()
Code Example #25
def ortho(Wi):
    K, U, W_i = picard(Wi.T, centering=False)
    a = np.sqrt(W_i.dot(W_i.T)[0, 0])
    W_i = W_i / a
    return W_i.T
Code Example #26
def permica(
    X,
    n_components=None,
    dimension_reduction="pca",
    max_iter=1000,
    random_state=None,
    tol=1e-7,
):
    """
    Performs one ICA per group (e.g. subject) and aligns the sources
    using the Hungarian algorithm.

    Parameters
    ----------
    X : np array of shape (n_groups, n_features, n_samples)
        Training data, where n_groups is the number of groups,
        n_features is the number of features and
        n_samples is the number of samples.
    n_components : int, optional
        Number of components to extract.
        If None, no dimension reduction is performed
    dimension_reduction: str, optional
        if srm: use srm to reduce the data
        if pca: use group specific pca to reduce the data
    max_iter : int, optional
        Maximum number of iterations to perform
    random_state : int, RandomState instance or None, optional (default=None)
        Used to perform a random initialization. If int, random_state is
        the seed used by the random number generator; If RandomState
        instance, random_state is the random number generator; If
        None, the random number generator is the RandomState instance
        used by np.random.
    tol : float, optional
        A positive scalar giving the tolerance at which
        the un-mixing matrices are considered to have converged.

    Returns
    -------
    P : np array of shape (n_groups, n_components, n_features)
        P is the projection matrix that projects the data into the reduced space
    W : np array of shape (n_groups, n_components, n_components)
        Estimated un-mixing matrices
    S : np array of shape (n_components, n_samples)
        Estimated source
    """
    P, X = reduce_data(X,
                       n_components=n_components,
                       dimension_reduction=dimension_reduction)
    n_pb, p, n = X.shape
    W = np.zeros((n_pb, p, p))
    S = np.zeros((n_pb, p, n))
    for i, x in enumerate(X):
        Ki, Wi, Si = picard(
            x,
            ortho=False,
            extended=False,
            centering=False,
            max_iter=max_iter,
            tol=tol,
            random_state=random_state,
        )
        scale = np.linalg.norm(Si, axis=1)
        S[i] = Si / scale[:, None]
        W[i] = np.dot(Wi, Ki) / scale[:, None]
    orders, signs, S = _find_ordering(S)
    for i, (order, sign) in enumerate(zip(orders, signs)):
        W[i] = sign[:, None] * W[i][order, :]
    return P, W, S
Code Example #27
# #
# #
raw.filter(2, 70)
ica = ICA_mne(n_components=n_components, method='fastica', random_state=0)
ica.fit(raw, picks=picks)

ica_mne = transfer_to_ica(raw, picks, freqs,
                          ica.get_sources(raw).get_data(),
                          ica.get_components())

smica.plot_clusters(16)

source_clusters = [0, 2]
idx = np.where(np.logical_or(smica.labels == 0, smica.labels == 2))[0]
brain_sources = smica.compute_sources()[idx]
K, W, _ = picard(brain_sources)
picard_mix = np.linalg.pinv(W @ K)
brain_A = smica.A[:, idx]
fitted_A = brain_A.dot(picard_mix)
Asi = smica.A.copy()
Asi[:, idx] = fitted_A

brain_sources = smica.compute_sources()
K, W, _ = picard(brain_sources)
picard_mix = np.linalg.pinv(W @ K)
fitted_A = smica.A.dot(picard_mix)
brain_sources = smica.compute_sources(raw.get_data(picks=picks), method='pinv')
K, W, _ = picard(brain_sources)
picard_mix = np.linalg.pinv(W @ K)
fitted_A__ = smica.A.dot(picard_mix)