Code example #1
File: test_infomax.py Project: jdammers/mne-python
def test_infomax_weights_ini():
    """Test the infomax algorithm w/initial weights matrix."""
    X = np.random.random((3, 100))
    weights = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float64)

    w1 = infomax(X, max_iter=0, weights=weights, extended=True)
    w2 = infomax(X, max_iter=0, weights=weights, extended=False)

    assert_almost_equal(w1, weights)
    assert_almost_equal(w2, weights)
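
These snippets omit their module-level setup. A minimal sketch of the imports and the center_and_norm helper they rely on (the helper body mirrors scikit-learn's test utility of the same name and is an assumption, not the verbatim mne-python source):

import numpy as np
from scipy import linalg, stats
from numpy.testing import assert_almost_equal
from mne.preprocessing import infomax


def center_and_norm(x, axis=-1):
    """Center x and scale it to unit standard deviation, in place (sketch)."""
    x = np.rollaxis(x, axis)
    x -= x.mean(axis=0)
    x /= x.std(axis=0)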
Code example #2
def test_non_square_infomax():
    """ Test non-square infomax
    """
    from sklearn.decomposition import RandomizedPCA

    rng = np.random.RandomState(0)

    n_samples = 200
    # Generate two sources:
    t = np.linspace(0, 100, n_samples)
    s1 = np.sin(t)
    s2 = np.ceil(np.sin(np.pi * t))
    s = np.c_[s1, s2].T
    center_and_norm(s)
    s1, s2 = s

    # Mixing matrix
    n_observed = 6
    mixing = rng.randn(n_observed, 2)
    for add_noise in (False, True):
        m = np.dot(mixing, s)

        if add_noise:
            m += 0.1 * rng.randn(n_observed, n_samples)

        center_and_norm(m)
        pca = RandomizedPCA(n_components=2, whiten=True, random_state=rng)
        m = m.T
        m = pca.fit_transform(m)
        # we need extended since input signals are sub-gaussian
        unmixing_ = infomax(m, random_state=rng, extended=True)
        s_ = np.dot(unmixing_, m.T)
        # Check that the mixing model described in the docstring holds:
        mixing_ = linalg.pinv(unmixing_.T)

        assert_almost_equal(m, s_.T.dot(mixing_))

        center_and_norm(s_)
        s1_, s2_ = s_
        # Check to see if the sources have been estimated
        # in the wrong order
        if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
            s2_, s1_ = s_
        s1_ *= np.sign(np.dot(s1_, s1))
        s2_ *= np.sign(np.dot(s2_, s2))

        # Check that we have estimated the original sources
        if not add_noise:
            assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=2)
            assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=2)
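
Note that RandomizedPCA was deprecated in scikit-learn 0.18 and later removed. Under a current scikit-learn, the whitening step above can be written as follows (a drop-in sketch, not part of the original test):

from sklearn.decomposition import PCA

# The randomized solver plus whiten=True reproduces RandomizedPCA's behavior
pca = PCA(n_components=2, whiten=True, svd_solver='randomized',
          random_state=rng)
m = pca.fit_transform(m.T)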
Code example #3
File: test_infomax.py Project: agramfort/mne-python
def test_infomax_simple(add_noise=False):
    """ Test the infomax algorithm on very simple data.
    """
    from sklearn.decomposition import RandomizedPCA

    rng = np.random.RandomState(0)
    # scipy.stats uses the global RNG:
    np.random.seed(0)
    n_samples = 1000
    # Generate two sources:
    s1 = (2 * np.sin(np.linspace(0, 100, n_samples)) > 0) - 1
    s2 = stats.t.rvs(1, size=n_samples)
    s = np.c_[s1, s2].T
    center_and_norm(s)
    s1, s2 = s

    # Mixing angle
    phi = 0.6
    mixing = np.array([[np.cos(phi), np.sin(phi)], [np.sin(phi), -np.cos(phi)]])
    m = np.dot(mixing, s)

    if add_noise:
        m += 0.1 * rng.randn(2, n_samples)

    center_and_norm(m)

    algos = [True, False]
    for algo in algos:
        X = RandomizedPCA(n_components=2, whiten=True).fit_transform(m.T)
        k_ = infomax(X, extended=algo)
        s_ = np.dot(k_, X.T)

        center_and_norm(s_)
        s1_, s2_ = s_
        # Check to see if the sources have been estimated
        # in the wrong order
        if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
            s2_, s1_ = s_
        s1_ *= np.sign(np.dot(s1_, s1))
        s2_ *= np.sign(np.dot(s2_, s2))

        # Check that we have estimated the original sources
        if not add_noise:
            assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=2)
            assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=2)
        else:
            assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=1)
            assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=1)
Code example #4
def test_non_square_infomax():
    """Test non-square infomax."""
    rng = np.random.RandomState(0)

    n_samples = 200
    # Generate two sources:
    t = np.linspace(0, 100, n_samples)
    s1 = np.sin(t)
    s2 = np.ceil(np.sin(np.pi * t))
    s = np.c_[s1, s2].T
    center_and_norm(s)
    s1, s2 = s

    # Mixing matrix
    n_observed = 6
    mixing = rng.randn(n_observed, 2)
    for add_noise in (False, True):
        m = np.dot(mixing, s)

        if add_noise:
            m += 0.1 * rng.randn(n_observed, n_samples)

        center_and_norm(m)
        m = m.T
        m = _get_pca(rng).fit_transform(m)
        # we need extended since input signals are sub-gaussian
        unmixing_ = infomax(m, random_state=rng, extended=True)
        s_ = np.dot(unmixing_, m.T)
        # Check that the mixing model described in the docstring holds:
        mixing_ = linalg.pinv(unmixing_.T)

        assert_almost_equal(m, s_.T.dot(mixing_))

        center_and_norm(s_)
        s1_, s2_ = s_
        # Check to see if the sources have been estimated
        # in the wrong order
        if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
            s2_, s1_ = s_
        s1_ *= np.sign(np.dot(s1_, s1))
        s2_ *= np.sign(np.dot(s2_, s2))

        # Check that we have estimated the original sources
        if not add_noise:
            assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=2)
            assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=2)
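
The _get_pca helper is referenced but not defined in this excerpt. A plausible sketch consistent with how the tests call it (an assumption based on usage, not the verbatim helper):

def _get_pca(rng=None):
    """Return a whitening PCA, mirroring the old RandomizedPCA (sketch)."""
    from sklearn.decomposition import PCA
    return PCA(n_components=2, whiten=True, svd_solver='randomized',
               random_state=rng)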
Code example #5
def test_infomax_simple():
    """ Test the infomax algorithm on very simple data.
    """
    rng = np.random.RandomState(0)
    # scipy.stats uses the global RNG:
    np.random.seed(0)
    n_samples = 500
    # Generate two sources:
    s1 = (2 * np.sin(np.linspace(0, 100, n_samples)) > 0) - 1
    s2 = stats.t.rvs(1, size=n_samples)
    s = np.c_[s1, s2].T
    center_and_norm(s)
    s1, s2 = s

    # Mixing angle
    phi = 0.6
    mixing = np.array([[np.cos(phi),  np.sin(phi)],
                       [np.sin(phi), -np.cos(phi)]])
    for add_noise in (False, True):
        m = np.dot(mixing, s)
        if add_noise:
            m += 0.1 * rng.randn(2, n_samples)
        center_and_norm(m)

        algos = [True, False]
        for algo in algos:
            X = _get_pca().fit_transform(m.T)
            k_ = infomax(X, extended=algo)
            s_ = np.dot(k_, X.T)

            center_and_norm(s_)
            s1_, s2_ = s_
            # Check to see if the sources have been estimated
            # in the wrong order
            if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
                s2_, s1_ = s_
            s1_ *= np.sign(np.dot(s1_, s1))
            s2_ *= np.sign(np.dot(s2_, s2))

            # Check that we have estimated the original sources
            if not add_noise:
                assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=2)
                assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=2)
            else:
                assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=1)
                assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=1)
Code example #6
def test_infomax_blowup():
    """ Test the infomax algorithm blowup condition
    """
    from sklearn.decomposition import RandomizedPCA
    # scipy.stats uses the global RNG:
    np.random.seed(0)
    n_samples = 100
    # Generate two sources:
    s1 = (2 * np.sin(np.linspace(0, 100, n_samples)) > 0) - 1
    s2 = stats.t.rvs(1, size=n_samples)
    s = np.c_[s1, s2].T
    center_and_norm(s)
    s1, s2 = s

    # Mixing angle
    phi = 0.6
    mixing = np.array([[np.cos(phi), np.sin(phi)], [np.sin(phi),
                                                    -np.cos(phi)]])
    m = np.dot(mixing, s)

    center_and_norm(m)

    X = RandomizedPCA(n_components=2, whiten=True).fit_transform(m.T)
    k_ = infomax(X, extended=True, l_rate=0.1)
    s_ = np.dot(k_, X.T)

    center_and_norm(s_)
    s1_, s2_ = s_
    # Check to see if the sources have been estimated
    # in the wrong order
    if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
        s2_, s1_ = s_
    s1_ *= np.sign(np.dot(s1_, s1))
    s2_ *= np.sign(np.dot(s2_, s2))

    # Check that we have estimated the original sources
    assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=2)
    assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=2)
Code example #7
def test_infomax_blowup():
    """ Test the infomax algorithm blowup condition
    """

    # scipy.stats uses the global RNG:
    np.random.seed(0)
    n_samples = 100
    # Generate two sources:
    s1 = (2 * np.sin(np.linspace(0, 100, n_samples)) > 0) - 1
    s2 = stats.t.rvs(1, size=n_samples)
    s = np.c_[s1, s2].T
    center_and_norm(s)
    s1, s2 = s

    # Mixing angle
    phi = 0.6
    mixing = np.array([[np.cos(phi),  np.sin(phi)],
                       [np.sin(phi), -np.cos(phi)]])
    m = np.dot(mixing, s)

    center_and_norm(m)

    X = _get_pca().fit_transform(m.T)
    k_ = infomax(X, extended=True, l_rate=0.1)
    s_ = np.dot(k_, X.T)

    center_and_norm(s_)
    s1_, s2_ = s_
    # Check to see if the sources have been estimated
    # in the wrong order
    if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
        s2_, s1_ = s_
    s1_ *= np.sign(np.dot(s1_, s1))
    s2_ *= np.sign(np.dot(s2_, s2))

    # Check that we have estimated the original sources
    assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=2)
    assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=2)
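
The order-and-sign check repeated in these tests exists because ICA recovers sources only up to permutation and sign. Since center_and_norm gives each row unit standard deviation, the dot products behave like scaled correlations. A hypothetical helper factoring out that repeated step:

def _align_sources(s_, s1, s2):
    """Match estimated sources to (s1, s2) in order and sign (sketch)."""
    s1_, s2_ = s_
    if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
        s2_, s1_ = s_  # the estimates came out in swapped order
    s1_ *= np.sign(np.dot(s1_, s1))  # resolve the sign indeterminacy
    s2_ *= np.sign(np.dot(s2_, s2))
    return s1_, s2_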
Code example #8
def test_mne_python_vs_eeglab():
    """ Test eeglab vs mne_python infomax code.
    """
    random_state = 42

    list_ch_types = ['eeg', 'mag']

    for ch_type in list_ch_types:

        if ch_type == 'eeg':
            eeglab_results_file = 'eeglab_infomax_results_eeg_data.mat'
        elif ch_type == 'mag':
            eeglab_results_file = 'eeglab_infomax_results_meg_data.mat'

        Y = generate_data_for_comparing_against_eeglab_infomax(ch_type,
                                                               random_state)
        N = Y.shape[0]
        T = Y.shape[1]

        # For comparison against eeglab, make sure the following
        # parameters have the same value in mne_python and eeglab:
        #
        # - starting point
        # - random state
        # - learning rate
        # - block size
        # - blowup parameter
        # - blowup_fac parameter
        # - tolerance for stopping the algorithm
        # - number of iterations
        #
        # Notes:
        # * By default, eeglab whitens the data using the "sphering
        #   transform" instead of pca. The mne_python infomax code does not
        #   whiten the data. To make sure both mne_python and eeglab start
        #   from the same point (i.e., the same matrix), we need to whiten
        #   the data outside and pass this whitened data to both mne_python
        #   and eeglab. Finally, we need to tell eeglab that the input data
        #   is already whitened; this can be done by calling eeglab with the
        #   following syntax:
        #
        #   [unmixing,sphere,meanvar,bias,signs,lrates,sources,y] = ...
        #       runica( Y, 'sphering', 'none');
        #
        #   By calling eeglab with the code above, we are using its default
        #   parameters, which are specified below in the section
        #   "EEGLAB default parameters".
        #
        # * eeglab does not expose a parameter for fixing the random state.
        #   Therefore, to accomplish this, we need to edit the runica.m
        #   file located at /path_to_eeglab/functions/sigprocfunc/runica.m
        #
        #   i) Comment out the line related to the random number generator
        #      (line 812).
        #   ii) Then, add the following line just below line 812:
        #       rng(42); %use 42 as random seed.
        #
        # * eeglab does not have the parameter "n_small_angle",
        #   so we need to disable it to make a fair comparison.
        #
        # * Finally, we need to take the unmixing matrix estimated by the
        #   mne_python infomax implementation and order the components
        #   in the same way that eeglab does. This is done below in the section
        #   "Order the components in the same way that eeglab does".

        ###############################################################
        # EEGLAB default parameters
        ###############################################################
        l_rate_eeglab = 0.00065 / np.log(N)
        block_eeglab = int(np.ceil(np.min([5 * np.log(T), 0.3 * T])))
        blowup_eeglab = 1e9
        blowup_fac_eeglab = 0.8
        max_iter_eeglab = 512

        if N > 32:
            w_change_eeglab = 1e-7
        else:
            w_change_eeglab = 1e-6
        ###############################################################

        # Call the mne_python infomax version using the following syntax
        # to obtain the same result as the eeglab version
        unmixing = infomax(Y.T, extended=False,
                           random_state=random_state,
                           max_iter=max_iter_eeglab,
                           l_rate=l_rate_eeglab,
                           block=block_eeglab,
                           w_change=w_change_eeglab,
                           blowup=blowup_eeglab,
                           blowup_fac=blowup_fac_eeglab,
                           n_small_angle=None,
                           )

        #######################################################################
        # Order the components in the same way that eeglab does
        #######################################################################

        sources = np.dot(unmixing, Y)
        mixing = pinv(unmixing)

        mvar = np.sum(mixing ** 2, axis=0) * \
            np.sum(sources ** 2, axis=1) / (N * T - 1)
        windex = np.argsort(mvar)[::-1]

        unmixing_ordered = unmixing[windex, :]
        #######################################################################

        #######################################################################
        # Load the eeglab results, then compare the unmixing matrices estimated
        # by mne_python and eeglab. To make the comparison use the
        # \ell_inf norm:
        # ||unmixing_mne_python - unmixing_eeglab||_inf
        #######################################################################

        eeglab_data = sio.loadmat(op.join(base_dir, eeglab_results_file))
        unmixing_eeglab = eeglab_data['unmixing_eeglab']

        maximum_difference = np.max(np.abs(unmixing_ordered - unmixing_eeglab))

        assert_almost_equal(maximum_difference, 1e-12, decimal=10)
Code example #9
def test_mne_python_vs_eeglab():
    """ Test eeglab vs mne_python infomax code."""
    random_state = 42

    methods = ['infomax', 'extended_infomax']
    ch_types = ['eeg', 'mag']
    for ch_type in ch_types:
        Y = generate_data_for_comparing_against_eeglab_infomax(
            ch_type, random_state)
        N, T = Y.shape
        for method in methods:
            eeglab_results_file = (
                'eeglab_%s_results_%s_data.mat' %
                (method, dict(eeg='eeg', mag='meg')[ch_type]))

            # For comparison against eeglab, make sure the following
            # parameters have the same value in mne_python and eeglab:
            #
            # - starting point
            # - random state
            # - learning rate
            # - block size
            # - blowup parameter
            # - blowup_fac parameter
            # - tolerance for stopping the algorithm
            # - number of iterations
            # - anneal_step parameter
            #
            # Notes:
            # * By default, eeglab whitens the data using the "sphering
            #   transform" instead of pca. The mne_python infomax code does
            #   not whiten the data. To make sure both mne_python and eeglab
            #   start from the same point (i.e., the same matrix), we need to
            #   whiten the data outside and pass this whitened data to both
            #   mne_python and eeglab. Finally, we need to tell eeglab that
            #   the input data is already whitened; this can be done by
            #   calling eeglab with the following syntax:
            #
            #   % Run infomax
            #   [unmixing,sphere,meanvar,bias,signs,lrates,sources,y] = ...
            #       runica( Y, 'sphering', 'none');
            #
            #   % Run extended infomax
            #   [unmixing,sphere,meanvar,bias,signs,lrates,sources,y]  = ...
            #       runica( Y, 'sphering', 'none', 'extended', 1);
            #
            #   By calling eeglab with the code above, we are using its
            #   default parameters, which are specified below in the section
            #   "EEGLAB default parameters".
            #
            # * eeglab does not expose a parameter for fixing the random state.
            #   Therefore, to accomplish this, we need to edit the runica.m
            #   file located at /path_to_eeglab/functions/sigprocfunc/runica.m
            #
            #   i) Comment out the line related to the random number generator
            #      (line 812).
            #   ii) Then, add the following line just below line 812:
            #       rng(42); %use 42 as random seed.
            #
            # * eeglab does not have the parameter "n_small_angle",
            #   so we need to disable it to make a fair comparison.
            #
            # * Finally, we need to take the unmixing matrix estimated by the
            #   mne_python infomax implementation and order the components
            #   in the same way that eeglab does. This is done below in the
            #   section "Order the components in the same way that eeglab does"

            # EEGLAB default parameters
            l_rate_eeglab = 0.00065 / np.log(N)
            block_eeglab = int(np.ceil(np.min([5 * np.log(T), 0.3 * T])))
            blowup_eeglab = 1e9
            blowup_fac_eeglab = 0.8
            max_iter_eeglab = 512

            if method == 'infomax':
                anneal_step_eeglab = 0.9
                use_extended = False

            elif method == 'extended_infomax':
                anneal_step_eeglab = 0.98
                use_extended = True

            w_change_eeglab = 1e-7 if N > 32 else 1e-6

            # Call the mne_python infomax version using the following syntax
            # to obtain the same result as the eeglab version
            unmixing = infomax(Y.T,
                               extended=use_extended,
                               random_state=random_state,
                               max_iter=max_iter_eeglab,
                               l_rate=l_rate_eeglab,
                               block=block_eeglab,
                               w_change=w_change_eeglab,
                               blowup=blowup_eeglab,
                               blowup_fac=blowup_fac_eeglab,
                               n_small_angle=None,
                               anneal_step=anneal_step_eeglab)

            # Order the components in the same way that eeglab does
            sources = np.dot(unmixing, Y)
            mixing = pinv(unmixing)

            mvar = np.sum(mixing ** 2, axis=0) * \
                np.sum(sources ** 2, axis=1) / (N * T - 1)
            windex = np.argsort(mvar)[::-1]

            unmixing_ordered = unmixing[windex, :]

            # Load the eeglab results, then compare the unmixing matrices
            # estimated by mne_python and eeglab. To make the comparison use
            # the \ell_inf norm:
            # ||unmixing_mne_python - unmixing_eeglab||_inf

            eeglab_data = sio.loadmat(op.join(base_dir, eeglab_results_file))
            unmixing_eeglab = eeglab_data['unmixing_eeglab']

            maximum_difference = np.max(
                np.abs(unmixing_ordered - unmixing_eeglab))

            assert_almost_equal(maximum_difference, 1e-12, decimal=10)
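
The ordering step above mimics how EEGLAB's runica sorts components, i.e. by descending mean projected variance. A hypothetical standalone version of that step, for reference:

def order_components_like_eeglab(unmixing, Y):
    """Sort unmixing rows by descending mean projected variance (sketch)."""
    N, T = Y.shape
    sources = np.dot(unmixing, Y)
    mixing = pinv(unmixing)
    mvar = (np.sum(mixing ** 2, axis=0) *
            np.sum(sources ** 2, axis=1) / (N * T - 1))
    return unmixing[np.argsort(mvar)[::-1], :]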
Code example #10
File: backend_mne.py Project: cbrnr/scot
def wrapper_infomax(data, random_state=None):
    """Call Infomax for ICA calculation."""
    u = infomax(datatools.cat_trials(data).T, extended=True,
                random_state=random_state).T
    m = sp.linalg.pinv(u)
    return m, u
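
A hypothetical usage sketch (the trials x channels x samples layout and the imports are assumptions based on SCoT's conventions; cat_trials concatenates trials along the time axis, so infomax sees a samples x channels array):

import numpy as np
import scipy as sp
import scipy.linalg
from mne.preprocessing import infomax
from scot import datatools

data = np.random.randn(20, 4, 256)  # 20 trials, 4 channels, 256 samples
m, u = wrapper_infomax(data, random_state=42)
# m = pinv(u), so for a square, full-rank unmixing matrix m @ u ~ identity
print(np.abs(np.dot(m, u) - np.eye(4)).max())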