Code example #1
File: test_evoked.py Project: pombreda/mne-python
def test_evoked_arithmetic():
    """Test evoked arithmetic
    """
    ev = read_evokeds(fname, condition=0)
    ev1 = EvokedArray(np.ones_like(ev.data), ev.info, ev.times[0], nave=20)
    ev2 = EvokedArray(-np.ones_like(ev.data), ev.info, ev.times[0], nave=10)

    # combine_evoked([ev1, ev2]) should be the same as ev1 + ev2:
    # data should be added according to their `nave` weights
    # nave = ev1.nave + ev2.nave
    ev = ev1 + ev2
    assert_equal(ev.nave, ev1.nave + ev2.nave)
    assert_allclose(ev.data, 1. / 3. * np.ones_like(ev.data))
    ev = ev1 - ev2
    assert_equal(ev.nave, ev1.nave + ev2.nave)
    assert_equal(ev.comment, ev1.comment + ' - ' + ev2.comment)
    assert_allclose(ev.data, np.ones_like(ev1.data))
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        ev = merge_evoked([ev1, ev2])
    assert_true(len(w) >= 1)
    assert_allclose(ev.data, 1. / 3. * np.ones_like(ev.data))

    # default comment behavior if evoked.comment is None
    old_comment1 = ev1.comment
    old_comment2 = ev2.comment
    ev1.comment = None
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        ev = ev1 - ev2
        assert_equal(ev.comment, 'unknown')
    ev1.comment = old_comment1
    ev2.comment = old_comment2

    # equal weighting
    ev = combine_evoked([ev1, ev2], weights='equal')
    assert_allclose(ev.data, np.zeros_like(ev1.data))

    # combine_evoked([ev1, ev2], weights=[1, 0]) should yield the same as ev1
    ev = combine_evoked([ev1, ev2], weights=[1, 0])
    assert_equal(ev.nave, ev1.nave)
    assert_allclose(ev.data, ev1.data)

    # simple subtraction (like in oddball)
    ev = combine_evoked([ev1, ev2], weights=[1, -1])
    assert_allclose(ev.data, 2 * np.ones_like(ev1.data))

    assert_raises(ValueError, combine_evoked, [ev1, ev2], weights='foo')
    assert_raises(ValueError, combine_evoked, [ev1, ev2], weights=[1])

    # grand average
    evoked1, evoked2 = read_evokeds(fname, condition=[0, 1], proj=True)
    ch_names = evoked1.ch_names[2:]
    evoked1.info['bads'] = ['EEG 008']  # test interpolation
    evoked1.drop_channels(evoked1.ch_names[:1])
    evoked2.drop_channels(evoked2.ch_names[1:2])
    gave = grand_average([evoked1, evoked2])
    assert_equal(gave.data.shape, [len(ch_names), evoked1.data.shape[1]])
    assert_equal(ch_names, gave.ch_names)
    assert_equal(gave.nave, 2)
Code example #2
def test_decim():
    """Test evoked decimation."""
    rng = np.random.RandomState(0)
    n_channels, n_times = 10, 20
    dec_1, dec_2 = 2, 3
    decim = dec_1 * dec_2
    sfreq = 10.
    sfreq_new = sfreq / decim
    data = rng.randn(n_channels, n_times)
    info = create_info(n_channels, sfreq, 'eeg')
    with info._unlock():
        info['lowpass'] = sfreq_new / float(decim)
    evoked = EvokedArray(data, info, tmin=-1)
    evoked_dec = evoked.copy().decimate(decim)
    evoked_dec_2 = evoked.copy().decimate(decim, offset=1)
    evoked_dec_3 = evoked.decimate(dec_1).decimate(dec_2)
    assert_array_equal(evoked_dec.data, data[:, ::decim])
    assert_array_equal(evoked_dec_2.data, data[:, 1::decim])
    assert_array_equal(evoked_dec.data, evoked_dec_3.data)

    # Check proper updating of various fields
    assert evoked_dec.first == -2
    assert evoked_dec.last == 1
    assert_array_equal(evoked_dec.times, [-1, -0.4, 0.2, 0.8])
    assert evoked_dec_2.first == -2
    assert evoked_dec_2.last == 1
    assert_array_equal(evoked_dec_2.times, [-0.9, -0.3, 0.3, 0.9])
    assert evoked_dec_3.first == -2
    assert evoked_dec_3.last == 1
    assert_array_equal(evoked_dec_3.times, [-1, -0.4, 0.2, 0.8])

    # make sure the time nearest zero is also sample number 0.
    for ev in (evoked_dec, evoked_dec_2, evoked_dec_3):
        lowest_index = np.argmin(np.abs(np.arange(ev.first, ev.last)))
        idxs_of_times_nearest_zero = \
            np.where(np.abs(ev.times) == np.min(np.abs(ev.times)))[0]
        # we use `in` here in case two times are equidistant from 0.
        assert lowest_index in idxs_of_times_nearest_zero
        assert len(idxs_of_times_nearest_zero) in (1, 2)

    # Now let's do it with some real data
    raw = read_raw_fif(raw_fname)
    events = read_events(event_name)
    sfreq_new = raw.info['sfreq'] / decim
    with raw.info._unlock():
        raw.info['lowpass'] = sfreq_new / 4.  # suppress aliasing warnings
    picks = pick_types(raw.info, meg=True, eeg=True, exclude=())
    epochs = Epochs(raw, events, 1, -0.2, 0.5, picks=picks, preload=True)
    for offset in (0, 1):
        ev_ep_decim = epochs.copy().decimate(decim, offset).average()
        ev_decim = epochs.average().decimate(decim, offset)
        expected_times = epochs.times[offset::decim]
        assert_allclose(ev_decim.times, expected_times)
        assert_allclose(ev_ep_decim.times, expected_times)
        expected_data = epochs.get_data()[:, :, offset::decim].mean(axis=0)
        assert_allclose(ev_decim.data, expected_data)
        assert_allclose(ev_ep_decim.data, expected_data)
        assert_equal(ev_decim.info['sfreq'], sfreq_new)
        assert_array_equal(ev_decim.times, expected_times)
Code example #3
File: test_evoked.py Project: annapasca/mne-python
def test_evoked_baseline():
    """Test evoked baseline."""
    evoked = read_evokeds(fname, condition=0, baseline=None)

    # Here we create a data_set with constant data.
    evoked = EvokedArray(np.ones_like(evoked.data), evoked.info,
                         evoked.times[0])

    # Mean baseline correction is applied; since the data is equal to its
    # mean, the resulting data should be a matrix of zeros.
    evoked.apply_baseline((None, None))

    assert_allclose(evoked.data, np.zeros_like(evoked.data))
Code example #4
File: test_evoked.py Project: yop0/mne-python
def test_evoked_baseline():
    """Test evoked baseline."""
    evoked = read_evokeds(fname, condition=0, baseline=None)

    # Here we create a data_set with constant data.
    evoked = EvokedArray(np.ones_like(evoked.data), evoked.info,
                         evoked.times[0])

    # Mean baseline correction is applied; since the data is equal to its
    # mean, the resulting data should be a matrix of zeros.
    evoked.apply_baseline((None, None))

    assert_allclose(evoked.data, np.zeros_like(evoked.data))
Code example #5
File: test_evoked.py Project: bpinsard/mne-python
def test_evoked_arithmetic():
    """Test evoked arithmetic
    """
    ev = read_evokeds(fname, condition=0)
    ev1 = EvokedArray(np.ones_like(ev.data), ev.info, ev.times[0], nave=20)
    ev2 = EvokedArray(-np.ones_like(ev.data), ev.info, ev.times[0], nave=10)

    # combine_evoked([ev1, ev2]) should be the same as ev1 + ev2:
    # data should be added according to their `nave` weights
    # nave = ev1.nave + ev2.nave
    ev = ev1 + ev2
    assert_equal(ev.nave, ev1.nave + ev2.nave)
    assert_allclose(ev.data, 1. / 3. * np.ones_like(ev.data))
    ev = ev1 - ev2
    assert_equal(ev.nave, ev1.nave + ev2.nave)
    assert_allclose(ev.data, np.ones_like(ev1.data))
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        ev = merge_evoked([ev1, ev2])
    assert_true(len(w) >= 1)
    assert_allclose(ev.data, 1. / 3. * np.ones_like(ev.data))

    # equal weighting
    ev = combine_evoked([ev1, ev2], weights='equal')
    assert_allclose(ev.data, np.zeros_like(ev1.data))

    # combine_evoked([ev1, ev2], weights=[1, 0]) should yield the same as ev1
    ev = combine_evoked([ev1, ev2], weights=[1, 0])
    assert_equal(ev.nave, ev1.nave)
    assert_allclose(ev.data, ev1.data)

    # simple subtraction (like in oddball)
    ev = combine_evoked([ev1, ev2], weights=[1, -1])
    assert_allclose(ev.data, 2 * np.ones_like(ev1.data))

    assert_raises(ValueError, combine_evoked, [ev1, ev2], weights='foo')
    assert_raises(ValueError, combine_evoked, [ev1, ev2], weights=[1])

    # grand average
    evoked1, evoked2 = read_evokeds(fname, condition=[0, 1], proj=True)
    ch_names = evoked1.ch_names[2:]
    evoked1.info['bads'] = ['EEG 008']  # test interpolation
    evoked1.drop_channels(evoked1.ch_names[:1])
    evoked2.drop_channels(evoked2.ch_names[1:2])
    gave = grand_average([evoked1, evoked2])
    assert_equal(gave.data.shape, [len(ch_names), evoked1.data.shape[1]])
    assert_equal(ch_names, gave.ch_names)
    assert_equal(gave.nave, 2)
Code example #6
File: test_evoked.py Project: sbp894/mne-python
def test_decim():
    """Test evoked decimation."""
    rng = np.random.RandomState(0)
    n_channels, n_times = 10, 20
    dec_1, dec_2 = 2, 3
    decim = dec_1 * dec_2
    sfreq = 10.
    sfreq_new = sfreq / decim
    data = rng.randn(n_channels, n_times)
    info = create_info(n_channels, sfreq, 'eeg')
    info['lowpass'] = sfreq_new / float(decim)
    evoked = EvokedArray(data, info, tmin=-1)
    evoked_dec = evoked.copy().decimate(decim)
    evoked_dec_2 = evoked.copy().decimate(decim, offset=1)
    evoked_dec_3 = evoked.decimate(dec_1).decimate(dec_2)
    assert_array_equal(evoked_dec.data, data[:, ::decim])
    assert_array_equal(evoked_dec_2.data, data[:, 1::decim])
    assert_array_equal(evoked_dec.data, evoked_dec_3.data)

    # Check proper updating of various fields
    assert evoked_dec.first == -1
    assert evoked_dec.last == 2
    assert_array_equal(evoked_dec.times, [-1, -0.4, 0.2, 0.8])
    assert evoked_dec_2.first == -1
    assert evoked_dec_2.last == 2
    assert_array_equal(evoked_dec_2.times, [-0.9, -0.3, 0.3, 0.9])
    assert evoked_dec_3.first == -1
    assert evoked_dec_3.last == 2
    assert_array_equal(evoked_dec_3.times, [-1, -0.4, 0.2, 0.8])

    # Now let's do it with some real data
    raw = read_raw_fif(raw_fname)
    events = read_events(event_name)
    sfreq_new = raw.info['sfreq'] / decim
    raw.info['lowpass'] = sfreq_new / 4.  # suppress aliasing warnings
    picks = pick_types(raw.info, meg=True, eeg=True, exclude=())
    epochs = Epochs(raw, events, 1, -0.2, 0.5, picks=picks, preload=True)
    for offset in (0, 1):
        ev_ep_decim = epochs.copy().decimate(decim, offset).average()
        ev_decim = epochs.average().decimate(decim, offset)
        expected_times = epochs.times[offset::decim]
        assert_allclose(ev_decim.times, expected_times)
        assert_allclose(ev_ep_decim.times, expected_times)
        expected_data = epochs.get_data()[:, :, offset::decim].mean(axis=0)
        assert_allclose(ev_decim.data, expected_data)
        assert_allclose(ev_ep_decim.data, expected_data)
        assert_equal(ev_decim.info['sfreq'], sfreq_new)
        assert_array_equal(ev_decim.times, expected_times)
Code example #7
File: sesame.py Project: sarasommariva/sesameeg
    def _prepare_epochs_data(self, epochs, top_min, top_max):
        ep_data = epochs.get_data()
        evoked = EvokedArray(ep_data[0], epochs.info)
        if self.fourier is False:
            self._get_topographies(evoked, top_min, top_max)
            temp_list = list()
            for ie, _e in enumerate(ep_data):
                if self.subsample is not None:
                    if ie == 0:
                        print('Subsampling data with step {0}'.format(
                            self.subsample))
                    temp_list.append(_e[:, self.s_min:self.s_max +
                                        1:self.subsample])
                else:
                    temp_list.append(_e[:, self.s_min:self.s_max + 1])
            return np.hstack(temp_list)
        elif self.fourier is True:
            tstep = 1 / evoked.info['sfreq']
            evoked_f = evoked.copy()
            evoked_f.data *= np.hamming(evoked.data.shape[1])
            evoked_f.data = (np.fft.rfft(evoked_f.data))
            freqs = np.fft.rfftfreq(evoked.data.shape[1], tstep)
            print('Data have been converted to the frequency domain.')

            self._get_topographies(evoked_f, top_min, top_max, freqs=freqs)

            temp_list = list()
            temp_list2 = list()
            for ie, _e in enumerate(ep_data):
                _e *= np.hamming(_e.shape[1])
                _e_f = np.fft.rfft(_e)
                if self.subsample is not None:
                    if ie == 0:
                        print('Subsampling data with step {0}'.format(
                            self.subsample))
                    temp_list.append(_e_f[:, self.s_min:self.s_max +
                                          1:self.subsample])
                else:
                    temp_list.append(_e_f[:, self.s_min:self.s_max + 1])

            for _data_temp in temp_list:
                for _l in _data_temp.T:
                    temp_list2.append(
                        np.vstack([np.real(_l), np.imag(_l)]).T)
            return np.hstack(temp_list2)
        else:
            raise ValueError
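The Fourier branch above windows each epoch with a Hamming taper, applies a real FFT, and stores the real and imaginary parts of each frequency bin as separate columns. A minimal standalone sketch of that transformation (synthetic data, plain NumPy only; not part of the sesameeg source) looks like this:

import numpy as np

sfreq = 100.
x = np.random.randn(3, 200)                   # 3 channels, 200 samples
xw = x * np.hamming(x.shape[1])               # taper each channel
xf = np.fft.rfft(xw)                          # complex spectrum, shape (3, 101)
freqs = np.fft.rfftfreq(x.shape[1], 1. / sfreq)
# keep real and imaginary parts side by side, as in the loop above
stacked = np.hstack([np.vstack([np.real(c), np.imag(c)]).T for c in xf.T])
print(xf.shape, freqs.shape, stacked.shape)   # (3, 101) (101,) (3, 202)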
Code example #8
File: test_evoked.py Project: adykstra/mne-python
def test_array_epochs():
    """Test creating evoked from array."""
    tempdir = _TempDir()

    # creating
    rng = np.random.RandomState(42)
    data1 = rng.randn(20, 60)
    sfreq = 1e3
    ch_names = ['EEG %03d' % (i + 1) for i in range(20)]
    types = ['eeg'] * 20
    info = create_info(ch_names, sfreq, types)
    evoked1 = EvokedArray(data1, info, tmin=-0.01)

    # save, read, and compare evokeds
    tmp_fname = op.join(tempdir, 'evkdary-ave.fif')
    evoked1.save(tmp_fname)
    evoked2 = read_evokeds(tmp_fname)[0]
    data2 = evoked2.data
    assert_allclose(data1, data2)
    assert_allclose(evoked1.times, evoked2.times)
    assert_equal(evoked1.first, evoked2.first)
    assert_equal(evoked1.last, evoked2.last)
    assert_equal(evoked1.kind, evoked2.kind)
    assert_equal(evoked1.nave, evoked2.nave)

    # now compare with EpochsArray (with single epoch)
    data3 = data1[np.newaxis, :, :]
    events = np.c_[10, 0, 1]
    evoked3 = EpochsArray(data3, info, events=events, tmin=-0.01).average()
    assert_allclose(evoked1.data, evoked3.data)
    assert_allclose(evoked1.times, evoked3.times)
    assert_equal(evoked1.first, evoked3.first)
    assert_equal(evoked1.last, evoked3.last)
    assert_equal(evoked1.kind, evoked3.kind)
    assert_equal(evoked1.nave, evoked3.nave)

    # test kind check
    with pytest.raises(ValueError, match='Invalid value'):
        EvokedArray(data1, info, tmin=0, kind=1)
    with pytest.raises(ValueError, match='Invalid value'):
        EvokedArray(data1, info, kind='mean')

    # test match between channels info and data
    ch_names = ['EEG %03d' % (i + 1) for i in range(19)]
    types = ['eeg'] * 19
    info = create_info(ch_names, sfreq, types)
    pytest.raises(ValueError, EvokedArray, data1, info, tmin=-0.01)
Code example #9
def test_apply_function_evk():
    """Check the apply_function method for evoked data."""
    # create fake evoked data to use for checking apply_function
    data = np.random.rand(10, 1000)
    info = create_info(10, 1000., 'eeg')
    evoked = EvokedArray(data, info)
    evoked_data = evoked.data.copy()
    # check apply_function channel-wise
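    # NOTE: `fun` is not defined in this snippet; it is presumably a simple
    # module-level helper, e.g. (hypothetical):
    #     def fun(data, mult_param):
    #         return data * mult_param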
    mult_param = -1
    kwargs = dict(mult_param=mult_param)
    applied = evoked.apply_function(fun,
                                    picks=None,
                                    dtype=None,
                                    n_jobs=1,
                                    **kwargs)
    assert np.shape(applied.data) == np.shape(evoked_data)
    assert np.equal(applied.data, evoked_data * mult_param).all()
Code example #10
def test_array_epochs():
    """Test creating evoked from array
    """
    tempdir = _TempDir()

    # creating
    rng = np.random.RandomState(42)
    data1 = rng.randn(20, 60)
    sfreq = 1e3
    ch_names = ['EEG %03d' % (i + 1) for i in range(20)]
    types = ['eeg'] * 20
    info = create_info(ch_names, sfreq, types)
    evoked1 = EvokedArray(data1, info, tmin=-0.01)

    # save, read, and compare evokeds
    tmp_fname = op.join(tempdir, 'evkdary-ave.fif')
    evoked1.save(tmp_fname)
    evoked2 = read_evokeds(tmp_fname)[0]
    data2 = evoked2.data
    assert_allclose(data1, data2)
    assert_allclose(evoked1.times, evoked2.times)
    assert_equal(evoked1.first, evoked2.first)
    assert_equal(evoked1.last, evoked2.last)
    assert_equal(evoked1.kind, evoked2.kind)
    assert_equal(evoked1.nave, evoked2.nave)

    # now compare with EpochsArray (with single epoch)
    data3 = data1[np.newaxis, :, :]
    events = np.c_[10, 0, 1]
    evoked3 = EpochsArray(data3, info, events=events, tmin=-0.01).average()
    assert_allclose(evoked1.data, evoked3.data)
    assert_allclose(evoked1.times, evoked3.times)
    assert_equal(evoked1.first, evoked3.first)
    assert_equal(evoked1.last, evoked3.last)
    assert_equal(evoked1.kind, evoked3.kind)
    assert_equal(evoked1.nave, evoked3.nave)

    # test kind check
    assert_raises(TypeError, EvokedArray, data1, info, tmin=0, kind=1)
    assert_raises(ValueError, EvokedArray, data1, info, tmin=0, kind='mean')

    # test match between channels info and data
    ch_names = ['EEG %03d' % (i + 1) for i in range(19)]
    types = ['eeg'] * 19
    info = create_info(ch_names, sfreq, types)
    assert_raises(ValueError, EvokedArray, data1, info, tmin=-0.01)
Code example #11
def test_apply_function_evk():
    """Check the apply_function method for evoked data."""
    # create fake evoked data to use for checking apply_function
    data = np.random.rand(10, 1000)
    info = create_info(10, 1000., 'eeg')
    evoked = EvokedArray(data, info)
    evoked_data = evoked.data.copy()

    # check apply_function channel-wise

    def fun(data, multiplier):
        return data * multiplier

    mult = -1
    applied = evoked.apply_function(fun, n_jobs=None, multiplier=mult)
    assert np.shape(applied.data) == np.shape(evoked_data)
    assert np.equal(applied.data, evoked_data * mult).all()
Code example #12
File: plot_meg.py Project: SherazKhan/kaggleMEG
from mne.io.meas_info import create_info
print(data.shape)

sfreq = d['sfreq']
ch_names = layout.names
print(ch_names)
types = ['mag'] * len(ch_names)
info = create_info(ch_names, sfreq, types)
events = np.vstack(list(zip([125 for _ in range(len(labels))],
                            [0 for _ in range(len(labels))], labels)))
print(events.shape)

#help(EpochsArray)
ea = EpochsArray(data, info, events, tmin=d['tmin'])

from mne.evoked import EvokedArray
evoked = EvokedArray(data[labels==1].mean(axis=0), info, tmin=d['tmin'])
print(evoked)
print(info)
times = np.arange(0.05, 0.15, 0.01)
print(len(info['ch_names']))
evoked.plot_topomap(0.15, ch_type='mag', layout=layout, size=10, colorbar=False)
#evoked.plot_topomap(times, ch_type='mag', layout=layout, size=10, colorbar=False)

evoked = EvokedArray(data[labels==0].mean(axis=0), info, tmin=d['tmin'])
evoked.plot_topomap(0.15, ch_type='mag', layout=layout, size=10, colorbar=False)
#evoked.plot_topomap(times, ch_type='mag', layout=layout, size=10, colorbar=False)

evoked = EvokedArray(data[labels==1].mean(axis=0) - data[labels==0].mean(axis=0), info, tmin=d['tmin'])
evoked.plot_topomap(0.25, ch_type='mag', layout=layout, size=10, colorbar=False)

Code example #13
File: test_evoked.py Project: yop0/mne-python
def test_arithmetic():
    """Test evoked arithmetic."""
    ev = read_evokeds(fname, condition=0)
    ev20 = EvokedArray(np.ones_like(ev.data), ev.info, ev.times[0], nave=20)
    ev30 = EvokedArray(np.ones_like(ev.data), ev.info, ev.times[0], nave=30)

    tol = dict(rtol=1e-9, atol=0)
    # test subtraction
    sub1 = combine_evoked([ev, ev], weights=[1, -1])
    sub2 = combine_evoked([ev, -ev], weights=[1, 1])
    assert np.allclose(sub1.data, np.zeros_like(sub1.data), atol=1e-20)
    assert np.allclose(sub2.data, np.zeros_like(sub2.data), atol=1e-20)
    # test nave weighting. Expect signal ampl.: 1*(20/50) + 1*(30/50) == 1
    # and expect nave == ev1.nave + ev2.nave
    ev = combine_evoked([ev20, ev30], weights='nave')
    assert np.allclose(ev.nave, ev20.nave + ev30.nave)
    assert np.allclose(ev.data, np.ones_like(ev.data), **tol)
    # test equal-weighted sum. Expect signal ampl. == 2
    # and expect nave == 1/sum(1/naves) == 1/(1/20 + 1/30) == 12
    ev = combine_evoked([ev20, ev30], weights=[1, 1])
    assert np.allclose(ev.nave, 12.)
    assert np.allclose(ev.data, ev20.data + ev30.data, **tol)
    # test equal-weighted average. Expect signal ampl. == 1
    # and expect nave == 1/sum(weights²/naves) == 1/(0.5²/20 + 0.5²/30) == 48
    ev = combine_evoked([ev20, ev30], weights='equal')
    assert np.allclose(ev.nave, 48.)
    assert np.allclose(ev.data, np.mean([ev20.data, ev30.data], axis=0), **tol)
    # test zero weights
    ev = combine_evoked([ev20, ev30], weights=[1, 0])
    assert ev.nave == ev20.nave
    assert np.allclose(ev.data, ev20.data, **tol)

    # default comment behavior if evoked.comment is None
    old_comment1 = ev20.comment
    ev20.comment = None
    ev = combine_evoked([ev20, -ev30], weights=[1, -1])
    assert_equal(ev.comment.count('unknown'), 2)
    assert ('-unknown' in ev.comment)
    assert (' + ' in ev.comment)
    ev20.comment = old_comment1

    with pytest.raises(ValueError, match="Invalid value for the 'weights'"):
        combine_evoked([ev20, ev30], weights='foo')
    with pytest.raises(ValueError, match='weights must be the same size as'):
        combine_evoked([ev20, ev30], weights=[1])

    # grand average
    evoked1, evoked2 = read_evokeds(fname, condition=[0, 1], proj=True)
    ch_names = evoked1.ch_names[2:]
    evoked1.info['bads'] = ['EEG 008']  # test interpolation
    evoked1.drop_channels(evoked1.ch_names[:1])
    evoked2.drop_channels(evoked2.ch_names[1:2])
    gave = grand_average([evoked1, evoked2])
    assert_equal(gave.data.shape, [len(ch_names), evoked1.data.shape[1]])
    assert_equal(ch_names, gave.ch_names)
    assert_equal(gave.nave, 2)
    with pytest.raises(TypeError, match='All elements must be an instance of'):
        grand_average([1, evoked1])
    gave = grand_average([ev20, ev20, -ev30])  # (1 + 1 + -1) / 3  =  1/3
    assert_allclose(gave.data, np.full_like(gave.data, 1. / 3.))

    # test channel (re)ordering
    evoked1, evoked2 = read_evokeds(fname, condition=[0, 1], proj=True)
    data2 = evoked2.data  # assumes everything is ordered to the first evoked
    data = (evoked1.data + evoked2.data) / 2.
    evoked2.reorder_channels(evoked2.ch_names[::-1])
    assert not np.allclose(data2, evoked2.data)
    with pytest.warns(RuntimeWarning, match='reordering'):
        evoked3 = combine_evoked([evoked1, evoked2], weights=[0.5, 0.5])
    assert np.allclose(evoked3.data, data)
    assert evoked1.ch_names != evoked2.ch_names
    assert evoked1.ch_names == evoked3.ch_names
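The nave bookkeeping asserted above follows the effective-sample-size formula quoted in the comments, nave = 1 / sum(weights**2 / naves). A quick standalone check of those numbers (plain NumPy, values taken from the comments above):

import numpy as np

naves = np.array([20., 30.])
w_sum = np.array([1., 1.])     # equal-weighted sum
w_avg = np.array([0.5, 0.5])   # equal-weighted average

print(1. / np.sum(w_sum ** 2 / naves))   # 12.0
print(1. / np.sum(w_avg ** 2 / naves))   # 48.0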
Code example #14
    stderr = sqrt_mse * error_terms[ind]

    # compute t values
    t_val = beta / stderr
    # and p-values
    p_val = 2 * stats.t.sf(np.abs(t_val), dfs)

    # project results back to channels x time points space
    beta = beta.reshape((n_channels, n_times))
    stderr = stderr.reshape((n_channels, n_times))
    t_val = t_val.reshape((n_channels, n_times))
    # replace p-values == 0 with asymptotic value `tiny`
    p_val = np.clip(p_val, tiny, 1.).reshape((n_channels, n_times))

    # create evoked object for plotting
    lm_betas[predictor] = EvokedArray(beta, epochs_info, tmin)
    stderrs[predictor] = EvokedArray(stderr, epochs_info, tmin)
    t_vals[predictor] = EvokedArray(t_val, epochs_info, tmin)
    p_vals[predictor] = p_val

    # For better interpretation, we'll transform p-values to
    # Shannon information values (i.e., surprise values) by taking the
    # negative log2 of the p-value. In contrast to the p-value, the resulting
    # "s-value" is not a probability. Rather, it constitutes a continuous
    # measure of information (in bits) against the test hypothesis (see [1]
    # above for further details).
    s_vals[predictor] = EvokedArray(-np.log2(p_val) * 1e-6, epochs_info, tmin)

###############################################################################
# plot inference results for predictor "phase-coherence"
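The comment above describes converting p-values to Shannon information ("surprise") values by taking the negative log2. A tiny standalone illustration (plain NumPy; the p-values are chosen only for illustration):

import numpy as np

p = np.array([0.5, 0.05, 0.001])
s = -np.log2(p)
print(s)  # approx. [1.0, 4.32, 9.97] bits of information against the test hypothesis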
Code example #15
File: test_evoked.py Project: yu2shi4/mne-python
def test_arithmetic():
    """Test evoked arithmetic."""
    ev = read_evokeds(fname, condition=0)
    ev1 = EvokedArray(np.ones_like(ev.data), ev.info, ev.times[0], nave=20)
    ev2 = EvokedArray(-np.ones_like(ev.data), ev.info, ev.times[0], nave=10)

    # combine_evoked([ev1, ev2]) should be the same as ev1 + ev2:
    # data should be added according to their `nave` weights
    # nave = ev1.nave + ev2.nave
    ev = combine_evoked([ev1, ev2], weights='nave')
    assert_allclose(ev.nave, ev1.nave + ev2.nave)
    assert_allclose(ev.data, 1. / 3. * np.ones_like(ev.data))

    # with same trial counts, a bunch of things should be equivalent
    for weights in ('nave', [0.5, 0.5]):
        ev = combine_evoked([ev1, ev1], weights=weights)
        assert_allclose(ev.data, ev1.data)
        assert_allclose(ev.nave, 2 * ev1.nave)
        ev = combine_evoked([ev1, -ev1], weights=weights)
        assert_allclose(ev.data, 0., atol=1e-20)
        assert_allclose(ev.nave, 2 * ev1.nave)
    # adding evoked to itself
    ev = combine_evoked([ev1, ev1], weights='equal')
    assert_allclose(ev.data, 2 * ev1.data)
    assert_allclose(ev.nave, ev1.nave / 2)
    # subtracting evoked from itself
    ev = combine_evoked([ev1, -ev1], weights='equal')
    assert_allclose(ev.data, 0., atol=1e-20)
    assert_allclose(ev.nave, ev1.nave / 2)
    # subtracting different evokeds
    ev = combine_evoked([ev1, -ev2], weights='equal')
    assert_allclose(ev.data, 2., atol=1e-20)
    expected_nave = 1. / (1. / ev1.nave + 1. / ev2.nave)
    assert_allclose(ev.nave, expected_nave)

    # default comment behavior if evoked.comment is None
    old_comment1 = ev1.comment
    old_comment2 = ev2.comment
    ev1.comment = None
    ev = combine_evoked([ev1, -ev2], weights=[1, -1])
    assert_equal(ev.comment.count('unknown'), 2)
    assert ('-unknown' in ev.comment)
    assert (' + ' in ev.comment)
    ev1.comment = old_comment1
    ev2.comment = old_comment2

    # equal weighting
    ev = combine_evoked([ev1, ev2], weights='equal')
    assert_allclose(ev.data, np.zeros_like(ev1.data))

    # combine_evoked([ev1, ev2], weights=[1, 0]) should yield the same as ev1
    ev = combine_evoked([ev1, ev2], weights=[1, 0])
    assert_allclose(ev.nave, ev1.nave)
    assert_allclose(ev.data, ev1.data)

    # simple subtraction (like in oddball)
    ev = combine_evoked([ev1, ev2], weights=[1, -1])
    assert_allclose(ev.data, 2 * np.ones_like(ev1.data))

    pytest.raises(ValueError, combine_evoked, [ev1, ev2], weights='foo')
    pytest.raises(ValueError, combine_evoked, [ev1, ev2], weights=[1])

    # grand average
    evoked1, evoked2 = read_evokeds(fname, condition=[0, 1], proj=True)
    ch_names = evoked1.ch_names[2:]
    evoked1.info['bads'] = ['EEG 008']  # test interpolation
    evoked1.drop_channels(evoked1.ch_names[:1])
    evoked2.drop_channels(evoked2.ch_names[1:2])
    gave = grand_average([evoked1, evoked2])
    assert_equal(gave.data.shape, [len(ch_names), evoked1.data.shape[1]])
    assert_equal(ch_names, gave.ch_names)
    assert_equal(gave.nave, 2)
    pytest.raises(TypeError, grand_average, [1, evoked1])
    gave = grand_average([ev1, ev1, ev2])  # (1 + 1 + -1) / 3  =  1/3
    assert_allclose(gave.data, np.full_like(gave.data, 1. / 3.))

    # test channel (re)ordering
    evoked1, evoked2 = read_evokeds(fname, condition=[0, 1], proj=True)
    data2 = evoked2.data  # assumes everything is ordered to the first evoked
    data = (evoked1.data + evoked2.data) / 2.
    evoked2.reorder_channels(evoked2.ch_names[::-1])
    assert not np.allclose(data2, evoked2.data)
    with pytest.warns(RuntimeWarning, match='reordering'):
        ev3 = combine_evoked([evoked1, evoked2], weights=[0.5, 0.5])
    assert np.allclose(ev3.data, data)
    assert evoked1.ch_names != evoked2.ch_names
    assert evoked1.ch_names == ev3.ch_names
Code example #16
epochs_info = stroop_epo_nb.info
n_channels = len(epochs_info['ch_names'])
n_times = len(stroop_epo_nb.times)
times = stroop_epo_nb.times
tmin = stroop_epo_nb.tmin

# placeholder for results
betas_evoked = dict()
r2_evoked = dict()

# ###############################################################################
# 2) loop through subjects and extract betas
for n_subj, subj in enumerate(subjects):
    subj_beta = betas[n_subj, :]
    subj_beta = subj_beta.reshape((n_channels, n_times))
    betas_evoked[str(subj)] = EvokedArray(subj_beta, epochs_info, tmin)

    subj_r2 = r2[n_subj, :]
    subj_r2 = subj_r2.reshape((n_channels, n_times))
    r2_evoked[str(subj)] = EvokedArray(subj_r2, epochs_info, tmin)

effect_of_condition = grand_average(
    [betas_evoked[str(subj)] for subj in subjects])
cue_r2 = grand_average([r2_evoked[str(subj)] for subj in subjects])

###############################################################################
# 3) Plot beta weights for the effect of condition

# arguments for the time-series maps
ts_args = dict(gfp=False,
               time_unit='s',
Code example #17
File: test_evoked.py Project: ahoejlund/mne-python
def test_arithmetic():
    """Test evoked arithmetic"""
    ev = read_evokeds(fname, condition=0)
    ev1 = EvokedArray(np.ones_like(ev.data), ev.info, ev.times[0], nave=20)
    ev2 = EvokedArray(-np.ones_like(ev.data), ev.info, ev.times[0], nave=10)

    # combine_evoked([ev1, ev2]) should be the same as ev1 + ev2:
    # data should be added according to their `nave` weights
    # nave = ev1.nave + ev2.nave
    with warnings.catch_warnings(record=True):  # deprecation no weights
        ev = combine_evoked([ev1, ev2])
    assert_equal(ev.nave, ev1.nave + ev2.nave)
    assert_allclose(ev.data, 1. / 3. * np.ones_like(ev.data))

    # with same trial counts, a bunch of things should be equivalent
    for weights in ('nave', 'equal', [0.5, 0.5]):
        ev = combine_evoked([ev1, ev1], weights=weights)
        assert_allclose(ev.data, ev1.data)
        assert_equal(ev.nave, 2 * ev1.nave)
        ev = combine_evoked([ev1, -ev1], weights=weights)
        assert_allclose(ev.data, 0., atol=1e-20)
        assert_equal(ev.nave, 2 * ev1.nave)
    ev = combine_evoked([ev1, -ev1], weights='equal')
    assert_allclose(ev.data, 0., atol=1e-20)
    assert_equal(ev.nave, 2 * ev1.nave)
    ev = combine_evoked([ev1, -ev2], weights='equal')
    expected = int(round(1. / (0.25 / ev1.nave + 0.25 / ev2.nave)))
    assert_equal(expected, 27)  # this is reasonable
    assert_equal(ev.nave, expected)

    # default comment behavior if evoked.comment is None
    old_comment1 = ev1.comment
    old_comment2 = ev2.comment
    ev1.comment = None
    ev = combine_evoked([ev1, -ev2], weights=[1, -1])
    assert_equal(ev.comment.count('unknown'), 2)
    assert_true('-unknown' in ev.comment)
    assert_true(' + ' in ev.comment)
    ev1.comment = old_comment1
    ev2.comment = old_comment2

    # equal weighting
    ev = combine_evoked([ev1, ev2], weights='equal')
    assert_allclose(ev.data, np.zeros_like(ev1.data))

    # combine_evoked([ev1, ev2], weights=[1, 0]) should yield the same as ev1
    ev = combine_evoked([ev1, ev2], weights=[1, 0])
    assert_equal(ev.nave, ev1.nave)
    assert_allclose(ev.data, ev1.data)

    # simple subtraction (like in oddball)
    ev = combine_evoked([ev1, ev2], weights=[1, -1])
    assert_allclose(ev.data, 2 * np.ones_like(ev1.data))

    assert_raises(ValueError, combine_evoked, [ev1, ev2], weights='foo')
    assert_raises(ValueError, combine_evoked, [ev1, ev2], weights=[1])

    # grand average
    evoked1, evoked2 = read_evokeds(fname, condition=[0, 1], proj=True)
    ch_names = evoked1.ch_names[2:]
    evoked1.info['bads'] = ['EEG 008']  # test interpolation
    evoked1.drop_channels(evoked1.ch_names[:1])
    evoked2.drop_channels(evoked2.ch_names[1:2])
    gave = grand_average([evoked1, evoked2])
    assert_equal(gave.data.shape, [len(ch_names), evoked1.data.shape[1]])
    assert_equal(ch_names, gave.ch_names)
    assert_equal(gave.nave, 2)
    assert_raises(ValueError, grand_average, [1, evoked1])
Code example #18
def _get_matrix_from_inverse_operator(inverse_operator,
                                      forward,
                                      labels=None,
                                      method='dSPM',
                                      lambda2=1. / 9.,
                                      pick_ori=None,
                                      mode='mean',
                                      n_svd_comp=1):
    """Get inverse matrix from an inverse operator
    Currently works only for fixed/loose orientation constraints
    For loose orientation constraint, the CTFs are computed for the radial
    component (pick_ori='normal').
    Parameters
    ----------
    inverse_operator : instance of InverseOperator
        The inverse operator.
    forward : dict
        The forward operator.
    method : 'MNE' | 'dSPM' | 'sLORETA'
        Inverse methods (for apply_inverse).
    labels : list of Label | None
        Labels for which CTFs shall be computed. If None, inverse matrix for
        all vertices will be returned.
    lambda2 : float
        The regularization parameter (for apply_inverse).
    pick_ori : None | "normal"
        If "normal", rather than pooling the orientations by taking the norm,
        only the radial component is kept. This is only implemented
        when working with loose orientations (for apply_inverse).
        Determines whether whole inverse matrix G will have one or three rows
        per vertex. This will also affect summary measures for labels.
    mode : 'mean' | 'sum' | 'svd'
        CTFs can be computed for different summary measures with labels:
        'sum' or 'mean': sum or means of sub-inverse for labels
        This corresponds to situations where labels can be assumed to be
        homogeneously activated.
        'svd': SVD components of sub-inverse for labels
        This is better suited for situations where activation patterns are
        assumed to be more variable.
        "sub-inverse" is the part of the inverse matrix that belongs to
        vertices within individual labels.
    n_svd_comp : int
        Number of SVD components for which CTFs will be computed and output
        (irrelevant for 'sum' and 'mean'). Explained variances within
        sub-inverses are shown in screen output.
    Returns
    -------
    invmat : ndarray
        Inverse matrix associated with inverse operator and specified
        parameters.
    label_singvals : list of dict
        Singular values of SVD for sub-matrices of inverse operator
        (only if mode='svd').
        Different list entries per label. Since SVD is applied separately for
        channel types, dictionaries may contain keys 'mag', 'grad' or 'eeg'.
    """
    mode = mode.lower()

    # apply_inverse cannot produce 3 separate orientations
    # therefore 'force_fixed=True' is required
    if not forward['surf_ori']:
        raise RuntimeError('Forward has to be surface oriented and '
                           'force_fixed=True.')
    if not (forward['source_ori'] == 1):
        raise RuntimeError('Forward has to be surface oriented and '
                           'force_fixed=True.')

    if labels:
        logger.info("About to process %d labels" % len(labels))
    else:
        logger.info("Computing whole inverse operator.")

    info_inv = _prepare_info(inverse_operator)

    info_fwd = forward['info']

    # only use channels that are good for inverse operator and forward sol
    ch_names_inv = info_inv['ch_names']
    n_chs_inv = len(ch_names_inv)
    bads_inv = inverse_operator['info']['bads']

    # good channels
    ch_names = [c for c in ch_names_inv if (c not in bads_inv)]

    n_chs = len(ch_names)  # number of good channels in inv_op

    # indices of bad channels
    ch_idx_bads = np.array([ch_names_inv.index(ch) for ch in bads_inv])

    # create identity matrix as input for inverse operator
    # set elements to zero for non-selected channels
    id_mat = np.eye(n_chs_inv)

    # convert identity matrix to evoked data type (pretending it's an epoch)
    ev_id = EvokedArray(id_mat, info=info_inv, tmin=0.)

    # apply inverse operator to identity matrix in order to get inverse matrix
    # free orientation constraint not possible because apply_inverse would
    # combine components

    # pick_ori='normal' required because apply_inverse won't give separate
    # orientations
    if inverse_operator['source_ori'] != FIFF.FIFFV_MNE_FIXED_ORI:
        pick_ori = 'normal'
    else:
        pick_ori = None

    # columns for bad channels will be zero
    invmat_mat_op = apply_inverse(ev_id,
                                  inverse_operator,
                                  lambda2=lambda2,
                                  method=method,
                                  pick_ori=pick_ori)

    # turn source estimate into a numpy array
    invmat_mat = invmat_mat_op.data

    # remove columns for bad channels (better for SVD)
    invmat_mat = np.delete(invmat_mat, ch_idx_bads, axis=1)

    logger.info("Dimension of inverse matrix: %s" % str(invmat_mat.shape))

    invmat_summary = []

    # if mode='svd', label_singvals will collect all SVD singular values for
    # labels
    label_singvals = []

    if labels:  # if labels specified, get summary of inverse matrix for labels
        for ll in labels:
            if ll.hemi == 'rh':
                # for RH labels, add number of LH vertices
                offset = forward['src'][0]['vertno'].shape[0]
                # remember whether we are in the LH or RH
                this_hemi = 1
            elif ll.hemi == 'lh':
                offset = 0
                this_hemi = 0
            else:
                raise RuntimeError("Cannot determine hemisphere of label.")

            # get vertices on cortical surface inside label
            idx = np.intersect1d(ll.vertices,
                                 forward['src'][this_hemi]['vertno'])

            # get vertices in source space inside label
            fwd_idx = np.searchsorted(forward['src'][this_hemi]['vertno'], idx)

            # get sub-inverse for label vertices, one row per vertex
            # TO BE CHANGED: assumes that both fwd and inv have fixed
            # orientations
            # i.e. one source per vertex
            invmat_lbl = invmat_mat[fwd_idx + offset, :]

            # compute summary data for labels
            if mode == 'sum':  # takes sum across estimators in label
                logger.info("Computing sums within labels")
                this_invmat_summary = invmat_lbl.sum(axis=0)
                this_invmat_summary = np.vstack(this_invmat_summary).T
            elif mode == 'mean':
                logger.info("Computing means within labels")
                this_invmat_summary = invmat_lbl.mean(axis=0)
                this_invmat_summary = np.vstack(this_invmat_summary).T
            elif mode == 'svd':  # takes svd of sub-inverse in label
                logger.info("Computing SVD within labels, using %d "
                            "component(s)" % n_svd_comp)

                this_invmat_summary, s_svd_types = _label_svd(
                    invmat_lbl.T, n_svd_comp, info_inv)

                this_invmat_summary = this_invmat_summary.T
                label_singvals.append(s_svd_types)

            invmat_summary.append(this_invmat_summary)

        invmat = np.concatenate(invmat_summary, axis=0)

    else:  # no labels provided: return whole matrix
        invmat = invmat_mat

    return invmat, label_singvals
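A hypothetical usage sketch for the helper above (file names, subject, and parcellation are placeholders, not taken from the original code). It assumes a surface-oriented, fixed-orientation forward solution, as required by the checks at the top of the function:

from mne import (read_forward_solution, convert_forward_solution,
                 read_labels_from_annot)
from mne.minimum_norm import read_inverse_operator

fwd = read_forward_solution('sample-fwd.fif')                 # placeholder file name
fwd = convert_forward_solution(fwd, surf_ori=True, force_fixed=True)
inv = read_inverse_operator('sample-inv.fif')                 # placeholder file name
labels = read_labels_from_annot('sample', parc='aparc')[:2]   # placeholder subject

invmat, label_singvals = _get_matrix_from_inverse_operator(
    inv, fwd, labels=labels, method='dSPM', mode='svd', n_svd_comp=1)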
Code example #19
def test_evoked_baseline(tmp_path):
    """Test evoked baseline."""
    evoked = read_evokeds(fname, condition=0, baseline=None)

    # Here we create a data_set with constant data.
    evoked = EvokedArray(np.ones_like(evoked.data), evoked.info,
                         evoked.times[0])
    assert evoked.baseline is None

    evoked_baselined = EvokedArray(np.ones_like(evoked.data),
                                   evoked.info,
                                   evoked.times[0],
                                   baseline=(None, 0))
    assert_allclose(evoked_baselined.baseline, (evoked_baselined.tmin, 0))
    del evoked_baselined

    # Mean baseline correction is applied; since the data is equal to its
    # mean, the resulting data should be a matrix of zeros.
    baseline = (None, None)
    evoked.apply_baseline(baseline)
    assert_allclose(evoked.baseline, (evoked.tmin, evoked.tmax))
    assert_allclose(evoked.data, np.zeros_like(evoked.data))

    # Test that the .baseline attribute changes if we apply a different
    # baseline now.
    baseline = (None, 0)
    evoked.apply_baseline(baseline)
    assert_allclose(evoked.baseline, (evoked.tmin, 0))

    # By default for our test file, no baseline should be set upon reading
    evoked = read_evokeds(fname, condition=0)
    assert evoked.baseline is None

    # Test that the .baseline attribute is set when we call read_evokeds()
    # with a `baseline` parameter.
    baseline = (-0.2, -0.1)
    evoked = read_evokeds(fname, condition=0, baseline=baseline)
    assert_allclose(evoked.baseline, baseline)

    # Test that the .baseline attribute survives an I/O roundtrip.
    evoked = read_evokeds(fname, condition=0)
    baseline = (-0.2, -0.1)
    evoked.apply_baseline(baseline)
    assert_allclose(evoked.baseline, baseline)

    tmp_fname = tmp_path / 'test-ave.fif'
    evoked.save(tmp_fname)
    evoked_read = read_evokeds(tmp_fname, condition=0)
    assert_allclose(evoked_read.baseline, evoked.baseline)

    # We shouldn't be able to remove a baseline correction after it has been
    # applied.
    evoked = read_evokeds(fname, condition=0)
    baseline = (-0.2, -0.1)
    evoked.apply_baseline(baseline)
    with pytest.raises(ValueError, match='already been baseline-corrected'):
        evoked.apply_baseline(None)
Code example #20
times = cue_epo_nb.times
tmin = cue_epo_nb.tmin

# split channels into ROIs for results section
selections = make_1020_channel_selections(epochs_info, midline='12z')

# placeholder for results
betas_evoked = dict()
r2_evoked = dict()

# ###############################################################################
# 2) loop through subjects and extract betas
for n_subj, subj in enumerate(subjects):
    subj_beta = betas[n_subj, :]
    subj_beta = subj_beta.reshape((n_channels, n_times))
    betas_evoked[str(subj)] = EvokedArray(subj_beta, epochs_info, tmin)

    subj_r2 = r2[n_subj, :]
    subj_r2 = subj_r2.reshape((n_channels, n_times))
    r2_evoked[str(subj)] = EvokedArray(subj_r2, epochs_info, tmin)

effect_of_cue = grand_average([betas_evoked[str(subj)] for subj in subjects])
cue_r2 = grand_average([r2_evoked[str(subj)] for subj in subjects])

###############################################################################
# 3) Plot beta weights for the effect of condition

# arguments for the time-series maps
ts_args = dict(gfp=False,
               time_unit='s',
               ylim=dict(eeg=[-6.5, 6.5]),
Code example #21
# extract the coefficients for linear model estimator
betas = get_coef(linear_model, 'coef_')

# project coefficients back to a channels x time points space.
lm_betas = dict()
ci = dict(lower_bound=dict(), upper_bound=dict())
# loop through predictors
for ind, predictor in enumerate(predictors):
    # extract coefficients and CI for predictor in question
    # and project back to channels x time points
    beta = betas[:, ind].reshape((n_channels, n_times))
    lower_bound = lower[:, ind].reshape((n_channels, n_times))
    upper_bound = upper[:, ind].reshape((n_channels, n_times))
    # create evoked object containing the back projected coefficients
    # for each predictor
    lm_betas[predictor] = EvokedArray(beta, epochs_info, tmin)
    # dictionary containing upper and lower confidence boundaries
    ci['lower_bound'][predictor] = lower_bound
    ci['upper_bound'][predictor] = upper_bound

###############################################################################
# plot results of linear regression

# only show -250 to 500 ms
ts_args = dict(xlim=(-.25, 0.5))

# predictor to plot
predictor = 'phase-coherence'
# electrode to plot
pick = epochs.info['ch_names'].index('B8')
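Across these regression examples the same pattern recurs: a flat coefficient vector is reshaped back to (n_channels, n_times) and wrapped in an EvokedArray so that MNE's Evoked plotting functions can be reused for model output. A minimal self-contained sketch with synthetic data (all names and numbers are assumed for illustration):

import numpy as np
from mne import create_info
from mne.evoked import EvokedArray

n_channels, n_times, sfreq, tmin = 4, 50, 250., -0.2
info = create_info(n_channels, sfreq, 'eeg')

flat_beta = np.random.randn(n_channels * n_times)    # one predictor's coefficients
beta = flat_beta.reshape((n_channels, n_times))       # back to channels x time points
beta_evoked = EvokedArray(beta, info, tmin)
print(beta_evoked)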
Code example #22
# thus we'll call LinearRegression with fit_intercept=False

# set up and fit model
linear_model = LinearRegression(fit_intercept=False)
linear_model.fit(group_design, betas)

# extract group-level beta coefficients
group_coefs = get_coef(linear_model, 'coef_')

# only keep relevant predictor
group_betas = group_coefs[:, group_pred_col]

# back projection to channels x time points
group_betas = group_betas.reshape((n_channels, n_times))
# create evoked object containing the back projected coefficients
group_betas_evoked = EvokedArray(group_betas, epochs_info, tmin)

###############################################################################
# plot the modulating effect of age on the phase-coherence predictor (i.e.,
# how the effect of phase coherence varies as a function of subject age),
# using the full electrode montage and the same physical electrodes across
# subjects

# index of B8 in array
electrode = 'B8'
pick = group_betas_evoked.ch_names.index(electrode)

# create figure
fig, ax = plt.subplots(figsize=(7, 4))
ax = plot_compare_evokeds(group_betas_evoked,
                          ylim=dict(eeg=[-3, 3]),
Code example #23
###############################################################################
# fit linear model with sklearn

# set up model and fit linear model
linear_model = LinearRegression(fit_intercept=False)
linear_model.fit(design, Y)

# extract the coefficients for linear model estimator
betas = get_coef(linear_model, 'coef_')

# calculate coefficient of determination (r-squared)
r_squared = r2_score(Y, linear_model.predict(design), multioutput='raw_values')
# project r-squared back to channels by times space
r_squared = r_squared.reshape((n_channels, n_times))
r_squared = EvokedArray(r_squared, epochs_info, tmin)

###############################################################################
# plot model r-squared

# only show -250 to 500 ms
ts_args = dict(xlim=(-.25, 0.5), unit=False, ylim=dict(eeg=[0, 0.8]))
topomap_args = dict(cmap='Reds',
                    scalings=dict(eeg=1),
                    vmin=0,
                    vmax=0.8,
                    average=0.05)
# create plot
fig = r_squared.plot_joint(ts_args=ts_args,
                           topomap_args=topomap_args,
                           title='Proportion of variance explained by '
Code example #24
    # only keep relevant predictor
    betas[iteration, :] = coefs[:, pred_col]

    # the matrix of coefficients has a shape of number of observations in
    # the vectorized channel data by number of predictors;
    # thus, we can loop through the columns (i.e., the predictors)
    # of the coefficient matrix and extract coefficients for each predictor
    # in order to project them back to a channels x time points space.
    lm_betas = dict()

    # extract coefficients
    beta = betas[iteration, :]
    # back projection to channels x time points
    beta = beta.reshape((n_channels, n_times))
    # create evoked object containing the back projected coefficients
    lm_betas['phase-coherence'] = EvokedArray(beta, epochs_info, tmin)

    # save results
    betas_evoked[str(subjects[iteration])] = lm_betas

    # clean up
    del linear_model

###############################################################################
# compute mean beta-coefficient for predictor phase-coherence

# subject ids
subjects = [str(subj) for subj in subjects]

# extract phase-coherence betas for each subject
phase_coherence = [betas_evoked[subj]['phase-coherence'] for subj in subjects]
Code example #25
File: test_evoked.py Project: annapasca/mne-python
def test_arithmetic():
    """Test evoked arithmetic."""
    ev = read_evokeds(fname, condition=0)
    ev1 = EvokedArray(np.ones_like(ev.data), ev.info, ev.times[0], nave=20)
    ev2 = EvokedArray(-np.ones_like(ev.data), ev.info, ev.times[0], nave=10)

    # combine_evoked([ev1, ev2]) should be the same as ev1 + ev2:
    # data should be added according to their `nave` weights
    # nave = ev1.nave + ev2.nave
    ev = combine_evoked([ev1, ev2], weights='nave')
    assert_equal(ev.nave, ev1.nave + ev2.nave)
    assert_allclose(ev.data, 1. / 3. * np.ones_like(ev.data))

    # with same trial counts, a bunch of things should be equivalent
    for weights in ('nave', 'equal', [0.5, 0.5]):
        ev = combine_evoked([ev1, ev1], weights=weights)
        assert_allclose(ev.data, ev1.data)
        assert_equal(ev.nave, 2 * ev1.nave)
        ev = combine_evoked([ev1, -ev1], weights=weights)
        assert_allclose(ev.data, 0., atol=1e-20)
        assert_equal(ev.nave, 2 * ev1.nave)
    ev = combine_evoked([ev1, -ev1], weights='equal')
    assert_allclose(ev.data, 0., atol=1e-20)
    assert_equal(ev.nave, 2 * ev1.nave)
    ev = combine_evoked([ev1, -ev2], weights='equal')
    expected = int(round(1. / (0.25 / ev1.nave + 0.25 / ev2.nave)))
    assert_equal(expected, 27)  # this is reasonable
    assert_equal(ev.nave, expected)

    # default comment behavior if evoked.comment is None
    old_comment1 = ev1.comment
    old_comment2 = ev2.comment
    ev1.comment = None
    ev = combine_evoked([ev1, -ev2], weights=[1, -1])
    assert_equal(ev.comment.count('unknown'), 2)
    assert_true('-unknown' in ev.comment)
    assert_true(' + ' in ev.comment)
    ev1.comment = old_comment1
    ev2.comment = old_comment2

    # equal weighting
    ev = combine_evoked([ev1, ev2], weights='equal')
    assert_allclose(ev.data, np.zeros_like(ev1.data))

    # combine_evoked([ev1, ev2], weights=[1, 0]) should yield the same as ev1
    ev = combine_evoked([ev1, ev2], weights=[1, 0])
    assert_equal(ev.nave, ev1.nave)
    assert_allclose(ev.data, ev1.data)

    # simple subtraction (like in oddball)
    ev = combine_evoked([ev1, ev2], weights=[1, -1])
    assert_allclose(ev.data, 2 * np.ones_like(ev1.data))

    assert_raises(ValueError, combine_evoked, [ev1, ev2], weights='foo')
    assert_raises(ValueError, combine_evoked, [ev1, ev2], weights=[1])

    # grand average
    evoked1, evoked2 = read_evokeds(fname, condition=[0, 1], proj=True)
    ch_names = evoked1.ch_names[2:]
    evoked1.info['bads'] = ['EEG 008']  # test interpolation
    evoked1.drop_channels(evoked1.ch_names[:1])
    evoked2.drop_channels(evoked2.ch_names[1:2])
    gave = grand_average([evoked1, evoked2])
    assert_equal(gave.data.shape, [len(ch_names), evoked1.data.shape[1]])
    assert_equal(ch_names, gave.ch_names)
    assert_equal(gave.nave, 2)
    assert_raises(ValueError, grand_average, [1, evoked1])
Code example #26
    # only keep relevant predictor
    betas[iteration, :] = coefs[:, pred_col]

    # the matrix of coefficients has a shape of number of observations in
    # the vectorized channel data by number of predictors;
    # thus, we can loop through the columns (i.e., the predictors)
    # of the coefficient matrix and extract coefficients for each predictor
    # in order to project them back to a channels x time points space.
    lm_betas = dict()

    # extract coefficients
    beta = betas[iteration, :]
    # back projection to channels x time points
    beta = beta.reshape((n_channels, n_times))
    # create evoked object containing the back projected coefficients
    lm_betas['phase-coherence'] = EvokedArray(beta, epochs_info, tmin)

    # save results
    betas_evoked[str(subjects[iteration])] = lm_betas

    # clean up
    del linear_model

###############################################################################
# compute mean beta-coefficient for predictor phase-coherence

# subject ids
subjects = [str(subj) for subj in subjects]

# extract phase-coherence betas for each subject
phase_coherence = [betas_evoked[subj]['phase-coherence'] for subj in subjects]