def test_transform_data():
    """Test applying linear (time) transform to data"""
    # make up some data
    n_sensors, n_vertices, n_times = 10, 20, 4
    kernel = np.random.randn(n_vertices, n_sensors)
    sens_data = np.random.randn(n_sensors, n_times)

    vertices = np.arange(n_vertices)
    data = np.dot(kernel, sens_data)

    for idx, tmin_idx, tmax_idx in \
            zip([None, np.arange(n_vertices // 2, n_vertices)],
                [None, 1], [None, 3]):

        if idx is None:
            idx_use = slice(None, None)
        else:
            idx_use = idx

        data_f, _ = _my_trans(data[idx_use, tmin_idx:tmax_idx])

        for stc_data in (data, (kernel, sens_data)):
            stc = SourceEstimate(stc_data, vertices=vertices,
                                 tmin=0., tstep=1.)
            stc_data_t = stc.transform_data(_my_trans, idx=idx,
                                            tmin_idx=tmin_idx,
                                            tmax_idx=tmax_idx)
            assert_allclose(data_f, stc_data_t)
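

# The ``_my_trans`` helper used by the transform tests in this collection is
# never shown. The sketch below is an assumption, consistent with how it is
# called: it must return a ``(data, extra)`` tuple and add a trailing
# dimension, so that stc.transform() yields a list of SourceEstimates.
from scipy.fftpack import fft  # assumed import for this sketch


def _my_trans(data):
    """FFT the data and stack two copies along a new third axis (sketch)."""
    data_t = fft(data)
    data_t = np.concatenate([data_t[:, :, None], data_t[:, :, None]], axis=2)
    return data_t, None
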
def test_volume_stc():
    """Test reading and writing volume STCs
    """
    N = 100
    data = np.arange(N)[:, np.newaxis]
    datas = [data, data, np.arange(2)[:, np.newaxis]]
    vertno = np.arange(N)
    vertnos = [vertno, vertno[:, np.newaxis], np.arange(2)[:, np.newaxis]]
    vertno_reads = [vertno, vertno, np.arange(2)]
    for data, vertno, vertno_read in zip(datas, vertnos, vertno_reads):
        stc = SourceEstimate(data, vertno, 0, 1)
        assert_true(stc.is_surface() is False)
        fname_temp = op.join(tempdir, 'temp-vl.stc')
        stc_new = stc
        for _ in range(2):
            stc_new.save(fname_temp)
            stc_new = read_source_estimate(fname_temp)
            assert_true(stc_new.is_surface() is False)
            assert_array_equal(vertno_read, stc_new.vertno)
            assert_array_almost_equal(stc.data, stc_new.data)
    # now let's actually read a MNE-C processed file
    stc = read_source_estimate(fname_vol, 'sample')
    assert_true('sample' in repr(stc))
    stc_new = stc
    assert_raises(ValueError, stc.save, fname_vol, ftype='whatever')
    for _ in range(2):
        fname_temp = op.join(tempdir, 'temp-vol.w')
        stc_new.save(fname_temp, ftype='w')
        stc_new = read_source_estimate(fname_temp)
        assert_true(stc_new.is_surface() is False)
        assert_array_equal(stc.vertno, stc_new.vertno)
        assert_array_almost_equal(stc.data, stc_new.data)
def test_stc_arithmetic():
    """Test arithmetic for STC files
    """
    fname = op.join(data_path, 'MEG', 'sample', 'sample_audvis-meg')
    stc = SourceEstimate(fname)
    data = stc.data.copy()

    out = list()
    for a in [data, stc]:
        a = a + a * 3 + 3 * a - a ** 2 / 2

        a += a
        a -= a
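        # note: ``a -= a`` zeroes the data, so the next division is 0/0 and
        # fills ``a`` with NaNs; assert_array_equal treats NaNs as equal, so
        # the final check still verifies ndarray and STC behave identically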
        a /= 2 * a
        a *= -a

        a += 2
        a -= 1
        a *= -1
        a /= 2
        a **= 3
        out.append(a)

    assert_array_equal(out[0], out[1].data)
    assert_array_equal(stc.sqrt().data, np.sqrt(stc.data))

# Example #4

def test_stc_mpl():
    """Test plotting source estimates with matplotlib."""
    sample_src = read_source_spaces(src_fname)

    vertices = [s['vertno'] for s in sample_src]
    n_time = 5
    n_verts = sum(len(v) for v in vertices)
    stc_data = np.ones((n_verts * n_time))
    stc_data.shape = (n_verts, n_time)
    stc = SourceEstimate(stc_data, vertices, 1, 1, 'sample')
    with pytest.warns(RuntimeWarning, match='not included'):
        stc.plot(subjects_dir=subjects_dir, time_unit='s', views='ven',
                 hemi='rh', smoothing_steps=2, subject='sample',
                 backend='matplotlib', spacing='oct1', initial_time=0.001,
                 colormap='Reds')
        fig = stc.plot(subjects_dir=subjects_dir, time_unit='ms', views='dor',
                       hemi='lh', smoothing_steps=2, subject='sample',
                       backend='matplotlib', spacing='ico2', time_viewer=True,
                       colormap='mne')
        time_viewer = fig.time_viewer
        _fake_click(time_viewer, time_viewer.axes[0], (0.5, 0.5))  # change t
        time_viewer.canvas.key_press_event('ctrl+right')
        time_viewer.canvas.key_press_event('left')
    pytest.raises(ValueError, stc.plot, subjects_dir=subjects_dir,
                  hemi='both', subject='sample', backend='matplotlib')
    pytest.raises(ValueError, stc.plot, subjects_dir=subjects_dir,
                  time_unit='ss', subject='sample', backend='matplotlib')
    plt.close('all')
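

# The tests below use a module-level RNG fixture named ``rng``; a minimal
# stand-in (assumption) would be:
rng = np.random.RandomState(0)
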
def test_transform():
    """Test applying linear (time) transform to data."""
    # make up some data
    n_verts_lh, n_verts_rh, n_times = 10, 10, 10
    vertices = [np.arange(n_verts_lh), n_verts_lh + np.arange(n_verts_rh)]
    data = rng.randn(n_verts_lh + n_verts_rh, n_times)
    stc = SourceEstimate(data, vertices=vertices, tmin=-0.1, tstep=0.1)

    # data_t.ndim > 2 & copy is True
    stcs_t = stc.transform(_my_trans, copy=True)
    assert (isinstance(stcs_t, list))
    assert_array_equal(stc.times, stcs_t[0].times)
    assert_equal(stc.vertices, stcs_t[0].vertices)

    data = np.concatenate((stcs_t[0].data[:, :, None],
                           stcs_t[1].data[:, :, None]), axis=2)
    data_t = stc.transform_data(_my_trans)
    assert_array_equal(data, data_t)  # check against stc.transform_data()

    # data_t.ndim > 2 & copy is False
    pytest.raises(ValueError, stc.transform, _my_trans, copy=False)

    # data_t.ndim = 2 & copy is True
    tmp = deepcopy(stc)
    stc_t = stc.transform(np.abs, copy=True)
    assert (isinstance(stc_t, SourceEstimate))
    assert_array_equal(stc.data, tmp.data)  # xfrm doesn't modify original?

    # data_t.ndim = 2 & copy is False
    times = np.round(1000 * stc.times)
    verts = np.arange(len(stc.lh_vertno),
                      len(stc.lh_vertno) + len(stc.rh_vertno), 1)
    verts_rh = stc.rh_vertno
    tmin_idx = np.searchsorted(times, 0)
    tmax_idx = np.searchsorted(times, 501)  # Include 500ms in the range
    data_t = stc.transform_data(np.abs, idx=verts, tmin_idx=tmin_idx,
                                tmax_idx=tmax_idx)
    stc.transform(np.abs, idx=verts, tmin=-50, tmax=500, copy=False)
    assert (isinstance(stc, SourceEstimate))
    assert_equal(stc.tmin, 0.)
    assert_equal(stc.times[-1], 0.5)
    assert_equal(len(stc.vertices[0]), 0)
    assert_equal(stc.vertices[1], verts_rh)
    assert_array_equal(stc.data, data_t)

    times = np.round(1000 * stc.times)
    tmin_idx, tmax_idx = np.searchsorted(times, 0), np.searchsorted(times, 250)
    data_t = stc.transform_data(np.abs, tmin_idx=tmin_idx, tmax_idx=tmax_idx)
    stc.transform(np.abs, tmin=0, tmax=250, copy=False)
    assert_equal(stc.tmin, 0.)
    assert_equal(stc.times[-1], 0.2)
    assert_array_equal(stc.data, data_t)
def test_transform():
    """Test applying linear (time) transform to data"""
    # make up some data
    n_verts_lh, n_verts_rh, n_times = 10, 10, 10
    vertices = [np.arange(n_verts_lh), n_verts_lh + np.arange(n_verts_rh)]
    data = np.random.randn(n_verts_lh + n_verts_rh, n_times)
    stc = SourceEstimate(data, vertices=vertices, tmin=-0.1, tstep=0.1)

    # data_t.ndim > 2 & copy is True
    stcs_t = stc.transform(_my_trans, copy=True)
    assert_true(isinstance(stcs_t, list))
    assert_array_equal(stc.times, stcs_t[0].times)
    assert_equal(stc.vertno, stcs_t[0].vertno)

    data = np.concatenate((stcs_t[0].data[:, :, None],
                           stcs_t[1].data[:, :, None]), axis=2)
    data_t = stc.transform_data(_my_trans)
    assert_array_equal(data, data_t)  # check against stc.transform_data()

    # data_t.ndim > 2 & copy is False
    assert_raises(ValueError, stc.transform, _my_trans, copy=False)

    # data_t.ndim = 2 & copy is True
    tmp = deepcopy(stc)
    stc_t = stc.transform(np.abs, copy=True)
    assert_true(isinstance(stc_t, SourceEstimate))
    assert_array_equal(stc.data, tmp.data)  # xfrm doesn't modify original?

    # data_t.ndim = 2 & copy is False
    times = np.round(1000 * stc.times)
    verts = np.arange(len(stc.lh_vertno),
                      len(stc.lh_vertno) + len(stc.rh_vertno), 1)
    verts_rh = stc.rh_vertno
    t_idx = [np.where(times >= -50)[0][0], np.where(times <= 500)[0][-1]]
    data_t = stc.transform_data(np.abs, idx=verts, tmin_idx=t_idx[0],
                                tmax_idx=t_idx[-1])
    stc.transform(np.abs, idx=verts, tmin=-50, tmax=500, copy=False)
    assert_true(isinstance(stc, SourceEstimate))
    assert_true((stc.tmin == 0.) & (stc.times[-1] == 0.5))
    assert_true(len(stc.vertno[0]) == 0)
    assert_equal(stc.vertno[1], verts_rh)
    assert_array_equal(stc.data, data_t)

    times = np.round(1000 * stc.times)
    t_idx = [np.where(times >= 0)[0][0], np.where(times <= 250)[0][-1]]
    data_t = stc.transform_data(np.abs, tmin_idx=t_idx[0], tmax_idx=t_idx[-1])
    stc.transform(np.abs, tmin=0, tmax=250, copy=False)
    assert_true((stc.tmin == 0.) & (stc.times[-1] == 0.2))
    assert_array_equal(stc.data, data_t)
def test_io_w():
    """Test IO for w files
    """
    w_fname = op.join(data_path, 'MEG', 'sample',
                      'sample_audvis-meg-oct-6-fwd-sensmap')

    src = SourceEstimate(w_fname)

    src.save('tmp', ftype='w')

    src2 = SourceEstimate('tmp-lh.w')

    assert_array_almost_equal(src.data, src2.data)
    assert_array_almost_equal(src.lh_vertno, src2.lh_vertno)
    assert_array_almost_equal(src.rh_vertno, src2.rh_vertno)
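
# note: saving with ftype='w' writes one file per hemisphere, appending
# '-lh.w' / '-rh.w' to the stem, which is why 'tmp-lh.w' is read back above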

# Example #8

def test_limits_to_control_points():
    """Test functionality for determing control points
    """
    sample_src = read_source_spaces(src_fname)

    vertices = [s['vertno'] for s in sample_src]
    n_time = 5
    n_verts = sum(len(v) for v in vertices)
    stc_data = np.random.rand((n_verts * n_time))
    stc_data.shape = (n_verts, n_time)
    stc = SourceEstimate(stc_data, vertices, 1, 1, 'sample')

    # Test for simple use cases
    from mayavi import mlab
    stc.plot(subjects_dir=subjects_dir)
    stc.plot(clim=dict(pos_lims=(10, 50, 90)), subjects_dir=subjects_dir)
    stc.plot(clim=dict(kind='value', lims=(10, 50, 90)), figure=99,
             subjects_dir=subjects_dir)
    stc.plot(colormap='hot', clim='auto', subjects_dir=subjects_dir)
    stc.plot(colormap='mne', clim='auto', subjects_dir=subjects_dir)
    figs = [mlab.figure(), mlab.figure()]
    assert_raises(RuntimeError, stc.plot, clim='auto', figure=figs)

    # Test both types of incorrect limits key (lims/pos_lims)
    assert_raises(KeyError, plot_source_estimates, stc, colormap='mne',
                  clim=dict(kind='value', lims=(5, 10, 15)))
    assert_raises(KeyError, plot_source_estimates, stc, colormap='hot',
                  clim=dict(kind='value', pos_lims=(5, 10, 15)))

    # Test for correct clim values
    colormap = 'mne'
    assert_raises(ValueError, stc.plot, colormap=colormap,
                  clim=dict(pos_lims=(5, 10, 15, 20)))
    assert_raises(ValueError, stc.plot, colormap=colormap,
                  clim=dict(pos_lims=(5, 10, 15), kind='foo'))
    assert_raises(ValueError, stc.plot, colormap=colormap, clim='foo')
    assert_raises(ValueError, stc.plot, colormap=colormap, clim=(5, 10, 15))
    assert_raises(ValueError, plot_source_estimates, 'foo', clim='auto')
    assert_raises(ValueError, stc.plot, hemi='foo', clim='auto')

    # Test that stc.data contains enough unique values to use percentages
    clim = 'auto'
    stc._data = np.zeros_like(stc.data)
    assert_raises(ValueError, plot_source_estimates, stc,
                  colormap=colormap, clim=clim)
    mlab.close()
def test_limits_to_control_points():
    """Test functionality for determing control points."""
    sample_src = read_source_spaces(src_fname)
    kwargs = dict(subjects_dir=subjects_dir, smoothing_steps=1)

    vertices = [s['vertno'] for s in sample_src]
    n_time = 5
    n_verts = sum(len(v) for v in vertices)
    stc_data = np.random.RandomState(0).rand((n_verts * n_time))
    stc_data.shape = (n_verts, n_time)
    stc = SourceEstimate(stc_data, vertices, 1, 1, 'sample')

    # Test for simple use cases
    mlab = _import_mlab()
    stc.plot(**kwargs)
    stc.plot(clim=dict(pos_lims=(10, 50, 90)), **kwargs)
    stc.plot(colormap='hot', clim='auto', **kwargs)
    stc.plot(colormap='mne', clim='auto', **kwargs)
    figs = [mlab.figure(), mlab.figure()]
    stc.plot(clim=dict(kind='value', lims=(10, 50, 90)), figure=99, **kwargs)
    assert_raises(ValueError, stc.plot, clim='auto', figure=figs, **kwargs)

    # Test both types of incorrect limits key (lims/pos_lims)
    assert_raises(KeyError, plot_source_estimates, stc, colormap='mne',
                  clim=dict(kind='value', lims=(5, 10, 15)), **kwargs)
    assert_raises(KeyError, plot_source_estimates, stc, colormap='hot',
                  clim=dict(kind='value', pos_lims=(5, 10, 15)), **kwargs)

    # Test for correct clim values
    assert_raises(ValueError, stc.plot,
                  clim=dict(kind='value', pos_lims=[0, 1, 0]), **kwargs)
    assert_raises(ValueError, stc.plot, colormap='mne',
                  clim=dict(pos_lims=(5, 10, 15, 20)), **kwargs)
    assert_raises(ValueError, stc.plot,
                  clim=dict(pos_lims=(5, 10, 15), kind='foo'), **kwargs)
    assert_raises(ValueError, stc.plot, colormap='mne', clim='foo', **kwargs)
    assert_raises(ValueError, stc.plot, clim=(5, 10, 15), **kwargs)
    assert_raises(ValueError, plot_source_estimates, 'foo', clim='auto',
                  **kwargs)
    assert_raises(ValueError, stc.plot, hemi='foo', clim='auto', **kwargs)

    # Test handling of degenerate data
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        # thresholded maps
        stc._data.fill(0.)
        plot_source_estimates(stc, **kwargs)
        assert any('All data were zero' in str(ww.message) for ww in w)
    mlab.close(all=True)
def test_morphed_source_space_return():
    """Test returning a morphed source space to the original subject"""
    # let's create some random data on fsaverage
    data = rng.randn(20484, 1)
    tmin, tstep = 0, 1.
    src_fs = read_source_spaces(fname_fs)
    stc_fs = SourceEstimate(data, [s['vertno'] for s in src_fs],
                            tmin, tstep, 'fsaverage')

    # Create our morph source space
    src_morph = morph_source_spaces(src_fs, 'sample',
                                    subjects_dir=subjects_dir)

    # Morph the data over using standard methods
    stc_morph = stc_fs.morph('sample', [s['vertno'] for s in src_morph],
                             smooth=1, subjects_dir=subjects_dir)

    # We can now pretend like this was real data we got e.g. from an inverse.
    # To be complete, let's remove some vertices
    keeps = [np.sort(rng.permutation(np.arange(len(v)))[:len(v) - 10])
             for v in stc_morph.vertices]
    stc_morph = SourceEstimate(
        np.concatenate([stc_morph.lh_data[keeps[0]],
                        stc_morph.rh_data[keeps[1]]]),
        [v[k] for v, k in zip(stc_morph.vertices, keeps)], tmin, tstep,
        'sample')

    # Return it to the original subject
    stc_morph_return = stc_morph.to_original_src(
        src_fs, subjects_dir=subjects_dir)

    # Compare to the original data
    stc_morph_morph = stc_morph.morph('fsaverage', stc_morph_return.vertices,
                                      smooth=1,
                                      subjects_dir=subjects_dir)
    assert_equal(stc_morph_return.subject, stc_morph_morph.subject)
    for ii in range(2):
        assert_array_equal(stc_morph_return.vertices[ii],
                           stc_morph_morph.vertices[ii])
    # These will not match perfectly because morphing pushes data around
    corr = np.corrcoef(stc_morph_return.data[:, 0],
                       stc_morph_morph.data[:, 0])[0, 1]
    assert_true(corr > 0.99, corr)

    # Degenerate cases
    stc_morph.subject = None  # no .subject provided
    assert_raises(ValueError, stc_morph.to_original_src,
                  src_fs, subject_orig='fsaverage', subjects_dir=subjects_dir)
    stc_morph.subject = 'sample'
    del src_fs[0]['subject_his_id']  # no name in src_fsaverage
    assert_raises(ValueError, stc_morph.to_original_src,
                  src_fs, subjects_dir=subjects_dir)
    src_fs[0]['subject_his_id'] = 'fsaverage'  # name mismatch
    assert_raises(ValueError, stc_morph.to_original_src,
                  src_fs, subject_orig='foo', subjects_dir=subjects_dir)
    src_fs[0]['subject_his_id'] = 'sample'
    src = read_source_spaces(fname)  # wrong source space
    assert_raises(RuntimeError, stc_morph.to_original_src,
                  src, subjects_dir=subjects_dir)
def test_morph_data():
    """Test morphing of data
    """
    subject_from = 'sample'
    subject_to = 'fsaverage'
    fname = op.join(data_path, 'MEG', 'sample', 'sample_audvis-meg')
    stc_from = SourceEstimate(fname)
    stc_from.crop(0.09, 0.1)  # for faster computation
    stc_to = morph_data(subject_from, subject_to, stc_from,
                        grade=3, smooth=12, buffer_size=1000)
    stc_to.save('%s_audvis-meg' % subject_to)

    stc_to2 = morph_data(subject_from, subject_to, stc_from,
                         grade=3, smooth=12, buffer_size=3)
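    # chunked (buffer_size=3) and unchunked (buffer_size=1000) results agree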
    assert_array_almost_equal(stc_to.data, stc_to2.data)

    mean_from = stc_from.data.mean(axis=0)
    mean_to = stc_to.data.mean(axis=0)
    assert_true(np.corrcoef(mean_to, mean_from).min() > 0.999)

# Example #12

def test_limits_to_control_points():
    """Test functionality for determing control points
    """
    sample_src = read_source_spaces(src_fname)

    vertices = [s['vertno'] for s in sample_src]
    n_time = 5
    n_verts = sum(len(v) for v in vertices)
    stc_data = np.zeros((n_verts * n_time))
    stc_data[(np.random.rand(20) * n_verts * n_time).astype(int)] = 1
    stc_data.shape = (n_verts, n_time)
    stc = SourceEstimate(stc_data, vertices, 1, 1)

    # Test both types of incorrect limits key (lims/pos_lims)
    clim = dict(kind='value', lims=(5, 10, 15))
    colormap = 'mne_analyze'
    assert_raises(KeyError, plot_source_estimates, stc, 'sample',
                  colormap=colormap, clim=clim)

    clim = dict(kind='value', pos_lims=(5, 10, 15))
    colormap = 'hot'
    assert_raises(KeyError, plot_source_estimates, stc, 'sample',
                  colormap=colormap, clim=clim)

    # Test for correct clim values
    clim['pos_lims'] = (5, 10, 15, 20)
    colormap = 'mne_analyze'
    assert_raises(ValueError, plot_source_estimates, stc, 'sample',
                  colormap=colormap, clim=clim)
    clim = 'foo'
    assert_raises(ValueError, plot_source_estimates, stc, 'sample',
                  colormap=colormap, clim=clim)
    clim = (5, 10, 15)
    assert_raises(ValueError, plot_source_estimates, stc, 'sample',
                  colormap=colormap, clim=clim)

    # Test that stc.data contains enough unique values to use percentages
    clim = 'auto'
    stc._data = np.zeros_like(stc.data)
    assert_raises(ValueError, plot_source_estimates, stc, 'sample',
                  colormap=colormap, clim=clim)

# Example #13

def test_morphed_source_space_return():
    """Test returning a morphed source space to the original subject."""
    # let's create some random data on fsaverage
    data = rng.randn(20484, 1)
    tmin, tstep = 0, 1.
    src_fs = read_source_spaces(fname_fs)
    stc_fs = SourceEstimate(data, [s['vertno'] for s in src_fs], tmin, tstep,
                            'fsaverage')
    n_verts_fs = sum(len(s['vertno']) for s in src_fs)

    # Create our morph source space
    src_morph = morph_source_spaces(src_fs,
                                    'sample',
                                    subjects_dir=subjects_dir)
    n_verts_sample = sum(len(s['vertno']) for s in src_morph)
    assert n_verts_fs == n_verts_sample

    # Morph the data over using standard methods
    stc_morph = compute_source_morph(src_fs,
                                     'fsaverage',
                                     'sample',
                                     spacing=[s['vertno'] for s in src_morph],
                                     smooth=1,
                                     subjects_dir=subjects_dir,
                                     warn=False).apply(stc_fs)
    assert stc_morph.data.shape[0] == n_verts_sample

    # We can now pretend like this was real data we got e.g. from an inverse.
    # To be complete, let's remove some vertices
    keeps = [
        np.sort(rng.permutation(np.arange(len(v)))[:len(v) - 10])
        for v in stc_morph.vertices
    ]
    stc_morph = SourceEstimate(
        np.concatenate([
            stc_morph.lh_data[keeps[0]], stc_morph.rh_data[keeps[1]]
        ]), [v[k] for v, k in zip(stc_morph.vertices, keeps)], tmin, tstep,
        'sample')

    # Return it to the original subject
    stc_morph_return = stc_morph.to_original_src(src_fs,
                                                 subjects_dir=subjects_dir)

    # This should fail (has too many verts in SourceMorph)
    with pytest.warns(RuntimeWarning, match='vertices not included'):
        morph = compute_source_morph(src_morph,
                                     subject_from='sample',
                                     spacing=stc_morph_return.vertices,
                                     smooth=1,
                                     subjects_dir=subjects_dir)
    with pytest.raises(ValueError, match='vertices do not match'):
        morph.apply(stc_morph)

    # Compare to the original data
    with pytest.warns(RuntimeWarning, match='vertices not included'):
        stc_morph_morph = compute_source_morph(
            src=stc_morph,
            subject_from='sample',
            spacing=stc_morph_return.vertices,
            smooth=1,
            subjects_dir=subjects_dir).apply(stc_morph)

    assert_equal(stc_morph_return.subject, stc_morph_morph.subject)
    for ii in range(2):
        assert_array_equal(stc_morph_return.vertices[ii],
                           stc_morph_morph.vertices[ii])
    # These will not match perfectly because morphing pushes data around
    corr = np.corrcoef(stc_morph_return.data[:, 0],
                       stc_morph_morph.data[:, 0])[0, 1]
    assert corr > 0.99, corr

    # Explicitly test having two vertices map to the same target vertex. We
    # simulate this by having two vertices be at the same position.
    src_fs2 = src_fs.copy()
    vert1, vert2 = src_fs2[0]['vertno'][:2]
    src_fs2[0]['rr'][vert1] = src_fs2[0]['rr'][vert2]
    stc_morph_return = stc_morph.to_original_src(src_fs2,
                                                 subjects_dir=subjects_dir)

    # test to_original_src method result equality
    for ii in range(2):
        assert_array_equal(stc_morph_return.vertices[ii],
                           stc_morph_morph.vertices[ii])

    # These will not match perfectly because morphing pushes data around
    corr = np.corrcoef(stc_morph_return.data[:, 0],
                       stc_morph_morph.data[:, 0])[0, 1]
    assert corr > 0.99, corr

    # Degenerate cases
    stc_morph.subject = None  # no .subject provided
    pytest.raises(ValueError,
                  stc_morph.to_original_src,
                  src_fs,
                  subject_orig='fsaverage',
                  subjects_dir=subjects_dir)
    stc_morph.subject = 'sample'
    del src_fs[0]['subject_his_id']  # no name in src_fsaverage
    pytest.raises(ValueError,
                  stc_morph.to_original_src,
                  src_fs,
                  subjects_dir=subjects_dir)
    src_fs[0]['subject_his_id'] = 'fsaverage'  # name mismatch
    pytest.raises(ValueError,
                  stc_morph.to_original_src,
                  src_fs,
                  subject_orig='foo',
                  subjects_dir=subjects_dir)
    src_fs[0]['subject_his_id'] = 'sample'
    src = read_source_spaces(fname)  # wrong source space
    pytest.raises(RuntimeError,
                  stc_morph.to_original_src,
                  src,
                  subjects_dir=subjects_dir)

# Example #14

def test_dipole_fitting():
    """Test dipole fitting."""
    amp = 100e-9
    tempdir = _TempDir()
    rng = np.random.RandomState(0)
    fname_dtemp = op.join(tempdir, 'test.dip')
    fname_sim = op.join(tempdir, 'test-ave.fif')
    fwd = convert_forward_solution(read_forward_solution(fname_fwd),
                                   surf_ori=False, force_fixed=True,
                                   use_cps=True)
    evoked = read_evokeds(fname_evo)[0]
    cov = read_cov(fname_cov)
    n_per_hemi = 5
    vertices = [np.sort(rng.permutation(s['vertno'])[:n_per_hemi])
                for s in fwd['src']]
    nv = sum(len(v) for v in vertices)
    stc = SourceEstimate(amp * np.eye(nv), vertices, 0, 0.001)
    evoked = simulate_evoked(fwd, stc, evoked.info, cov, nave=evoked.nave,
                             random_state=rng)
    # For speed, let's use a subset of channels (strange but works)
    picks = np.sort(np.concatenate([
        pick_types(evoked.info, meg=True, eeg=False)[::2],
        pick_types(evoked.info, meg=False, eeg=True)[::2]]))
    evoked.pick_channels([evoked.ch_names[p] for p in picks])
    evoked.add_proj(make_eeg_average_ref_proj(evoked.info))
    write_evokeds(fname_sim, evoked)

    # Run MNE-C version
    run_subprocess([
        'mne_dipole_fit', '--meas', fname_sim, '--meg', '--eeg',
        '--noise', fname_cov, '--dip', fname_dtemp,
        '--mri', fname_fwd, '--reg', '0', '--tmin', '0',
    ])
    dip_c = read_dipole(fname_dtemp)

    # Run mne-python version
    sphere = make_sphere_model(head_radius=0.1)
    with warnings.catch_warnings(record=True):
        dip, residuals = fit_dipole(evoked, cov, sphere, fname_fwd)

    # Sanity check: do our residuals have less power than orig data?
    data_rms = np.sqrt(np.sum(evoked.data ** 2, axis=0))
    resi_rms = np.sqrt(np.sum(residuals ** 2, axis=0))
    assert_true((data_rms > resi_rms * 0.95).all(),
                msg='%s (factor: %s)' % ((data_rms / resi_rms).min(), 0.95))

    # Compare to original points
    transform_surface_to(fwd['src'][0], 'head', fwd['mri_head_t'])
    transform_surface_to(fwd['src'][1], 'head', fwd['mri_head_t'])
    assert_equal(fwd['src'][0]['coord_frame'], FIFF.FIFFV_COORD_HEAD)
    src_rr = np.concatenate([s['rr'][v] for s, v in zip(fwd['src'], vertices)],
                            axis=0)
    src_nn = np.concatenate([s['nn'][v] for s, v in zip(fwd['src'], vertices)],
                            axis=0)

    # MNE-C skips the last "time" point :(
    out = dip.crop(dip_c.times[0], dip_c.times[-1])
    assert_true(dip is out)
    src_rr, src_nn = src_rr[:-1], src_nn[:-1]

    # check that we did about as well
    corrs, dists, gc_dists, amp_errs, gofs = [], [], [], [], []
    for d in (dip_c, dip):
        new = d.pos
        diffs = new - src_rr
        corrs += [np.corrcoef(src_rr.ravel(), new.ravel())[0, 1]]
        dists += [np.sqrt(np.mean(np.sum(diffs * diffs, axis=1)))]
        gc_dists += [180 / np.pi * np.mean(np.arccos(np.sum(src_nn * d.ori,
                                                     axis=1)))]
        amp_errs += [np.sqrt(np.mean((amp - d.amplitude) ** 2))]
        gofs += [np.mean(d.gof)]
    factor = 0.8
    assert_true(dists[0] / factor >= dists[1], 'dists: %s' % dists)
    assert_true(corrs[0] * factor <= corrs[1], 'corrs: %s' % corrs)
    assert_true(gc_dists[0] / factor >= gc_dists[1] * 0.8,
                'gc-dists (ori): %s' % gc_dists)
    assert_true(amp_errs[0] / factor >= amp_errs[1],
                'amplitude errors: %s' % amp_errs)
    # This one is weird because our cov/sim/picking is weird
    assert_true(gofs[0] * factor <= gofs[1] * 2, 'gof: %s' % gofs)
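

# The next test assumes module-level fixtures not shown in this collection:
# ``subject_id`` and ``surf`` (e.g. 'sample' and 'inflated'), plus the
# private helpers ``_Brain``, ``_TimeViewer`` and ``_LinkViewer``
# (presumably imported from mne.viz._brain).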
def test_brain_timeviewer(renderer):
    """Test _TimeViewer primitives."""
    if renderer.get_3d_backend() == "mayavi":
        pytest.skip()  # Skip PySurfer.TimeViewer
    elif renderer.get_3d_backend() == "pyvista":
        # Widgets are not available offscreen
        import pyvista
        orig_offscreen = pyvista.OFF_SCREEN
        pyvista.OFF_SCREEN = False
        # Disable testing to allow interactive window
        renderer.MNE_3D_BACKEND_TESTING = False

    sample_src = read_source_spaces(src_fname)

    # dense version
    vertices = [s['vertno'] for s in sample_src]
    n_time = 5
    n_verts = sum(len(v) for v in vertices)
    stc_data = np.zeros((n_verts * n_time))
    stc_size = stc_data.size
    stc_data[(np.random.rand(stc_size // 20) * stc_size).astype(int)] = \
        np.random.RandomState(0).rand(stc_data.size // 20)
    stc_data.shape = (n_verts, n_time)
    stc = SourceEstimate(stc_data, vertices, 1, 1)

    fmin = stc.data.min()
    fmax = stc.data.max()
    brain_data = _Brain(subject_id,
                        'split',
                        surf,
                        size=300,
                        subjects_dir=subjects_dir)
    for hemi in ['lh', 'rh']:
        hemi_idx = 0 if hemi == 'lh' else 1
        data = getattr(stc, hemi + '_data')
        vertices = stc.vertices[hemi_idx]
        brain_data.add_data(data,
                            fmin=fmin,
                            hemi=hemi,
                            fmax=fmax,
                            colormap='hot',
                            vertices=vertices,
                            colorbar=True)

    time_viewer = _TimeViewer(brain_data)
    time_viewer.time_call(value=0)
    time_viewer.orientation_call(value='lat', update_widget=True)
    time_viewer.orientation_call(value='medial', update_widget=True)
    time_viewer.smoothing_call(value=1)
    time_viewer.fmin_call(value=12.0)
    time_viewer.fmax_call(value=4.0)
    time_viewer.fmid_call(value=6.0)
    time_viewer.fmid_call(value=4.0)
    time_viewer.fscale_call(value=1.1)
    time_viewer.toggle_interface()
    time_viewer.playback_speed_call(value=0.1)
    time_viewer.toggle_playback()
    time_viewer.apply_auto_scaling()
    time_viewer.restore_user_scaling()

    link_viewer = _LinkViewer([brain_data])
    link_viewer.set_time_point(value=0)
    link_viewer.set_playback_speed(value=0.1)
    link_viewer.toggle_playback()

    if renderer.get_3d_backend() == "pyvista":
        pyvista.OFF_SCREEN = orig_offscreen

# Example #16

def test_simulate_round_trip(raw_data):
    """Test simulate_raw round trip calculations."""
    # Check a diagonal round-trip
    raw, src, stc, trans, sphere = raw_data
    raw.pick_types(meg=True, stim=True)
    bem = read_bem_solution(bem_1_fname)
    old_bem = bem.copy()
    old_src = src.copy()
    old_trans = trans.copy()
    fwd = make_forward_solution(raw.info, trans, src, bem)
    # no omissions
    assert (sum(len(s['vertno'])
                for s in src) == sum(len(s['vertno'])
                                     for s in fwd['src']) == 36)
    # make sure things were not modified
    assert (
        old_bem['surfs'][0]['coord_frame'] == bem['surfs'][0]['coord_frame'])
    assert trans == old_trans
    _compare_source_spaces(src, old_src)
    data = np.eye(fwd['nsource'])
    raw.crop(0, len(data) / raw.info['sfreq'], include_tmax=False)
    stc = SourceEstimate(data, [s['vertno'] for s in fwd['src']], 0,
                         1. / raw.info['sfreq'])
    for use_fwd in (None, fwd):
        if use_fwd is None:
            use_trans, use_src, use_bem = trans, src, bem
        else:
            use_trans = use_src = use_bem = None
        this_raw = simulate_raw(raw.info,
                                stc,
                                use_trans,
                                use_src,
                                use_bem,
                                forward=use_fwd)
        this_raw.pick_types(meg=True, eeg=True)
        assert (old_bem['surfs'][0]['coord_frame'] == bem['surfs'][0]
                ['coord_frame'])
        assert trans == old_trans
        _compare_source_spaces(src, old_src)
        this_fwd = convert_forward_solution(fwd, force_fixed=True)
        assert_allclose(this_raw[:][0],
                        this_fwd['sol']['data'],
                        atol=1e-12,
                        rtol=1e-6)
    with pytest.raises(ValueError, match='If forward is not None then'):
        simulate_raw(raw.info, stc, trans, src, bem, forward=fwd)
    # Not iterable
    with pytest.raises(TypeError, match='SourceEstimate, tuple, or iterable'):
        simulate_raw(raw.info, 0., trans, src, bem, None)
    # STC with a source that `src` does not have
    assert 0 not in src[0]['vertno']
    vertices = [[0, fwd['src'][0]['vertno'][0]], []]
    stc_bad = SourceEstimate(data[:2], vertices, 0, 1. / raw.info['sfreq'])
    with pytest.warns(RuntimeWarning, match='1 of 2 SourceEstimate vertices'):
        simulate_raw(raw.info, stc_bad, trans, src, bem)
    assert 0 not in fwd['src'][0]['vertno']
    with pytest.warns(RuntimeWarning, match='1 of 2 SourceEstimate vertices'):
        simulate_raw(raw.info, stc_bad, None, None, None, forward=fwd)
    # dev_head_t mismatch
    fwd['info']['dev_head_t']['trans'][0, 0] = 1.
    with pytest.raises(ValueError, match='dev_head_t.*does not match'):
        simulate_raw(raw.info, stc, None, None, None, forward=fwd)

# Example #17

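# Fragment: visualize significant spatio-temporal clusters. It assumes the
# outputs of a cluster test (e.g. mne.stats.spatio_temporal_cluster_1samp_test):
# ``T_obs``, ``clusters`` and ``good_cluster_inds``, plus preallocated
# ``data``/``data_summary`` arrays, ``fsave_vertices``, and a ``dataface``
# object providing ``tmin``/``tstep``.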
for ii, cluster_ind in enumerate(good_cluster_inds):
    data.fill(0)
    v_inds = clusters[cluster_ind][1]
    t_inds = clusters[cluster_ind][0]
    data[v_inds, t_inds] = T_obs[t_inds, v_inds]
    # Store a nice visualization of the cluster by summing across time (in ms)
    data = np.sign(data) * np.logical_not(data == 0) * dataface.tstep
    data_summary[:, ii + 1] = 1e3 * np.sum(data, axis=1)

# save as stc
for i, cluster_ind in enumerate(good_cluster_inds):
    v_inds = clusters[cluster_ind][1]
    t_inds = clusters[cluster_ind][0]
    data[v_inds, t_inds] = T_obs[t_inds, v_inds]

stc_cluster_vis = SourceEstimate(data, fsave_vertices, tmin=dataface.tmin,
                                 tstep=dataface.tstep)
stc_cluster_vis.save('/neurospin/meg/meg_tmp/ResonanceMeg_Baptiste_2009/MEG/'
                     'inter_subject/processed/STC_face_vs_house_clust')

########################################################################################



# Fill the grand-average ('gave') variables
erfAAgave[s] = np.mean(erfAA_trial, axis=0)
erfAVgave[s] = np.mean(erfAV_trial, axis=0)
erfVAgave[s] = np.mean(erfVA_trial, axis=0)
erfVVgave[s] = np.mean(erfVV_trial, axis=0)

# Compute statistic between subjects
#threshold = 6.0
T_obs_Aon_inter, clusters_Aon_inter, cluster_p_values_Aon_inter, H0_Aon_inter = \

# Example #18

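# Script fragment: simulate a square-pulse source in bilateral auditory
# cortex (transverse temporal gyrus). It assumes earlier definitions of
# ``subj``, ``subject``, ``data_dir``, ``subjects_dir`` and
# ``pulse_tmin``/``pulse_tmax`` (not shown in this capture).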
bem_dir = op.join(subjects_dir, subject, 'bem')
fname_raw = op.join(data_dir, 'raw_fif', '%s_funloc_raw.fif' % subj)
trans = op.join(data_dir, 'trans', '%s-trans.fif' % subj)
bem = op.join(bem_dir, '%s-5120-5120-5120-bem-sol.fif' % subject)
src = read_source_spaces(op.join(bem_dir, '%s-oct-6-src.fif' % subject))
sfreq = read_info(fname_raw, verbose=False)['sfreq']

# ############################################################################
# construct appropriate brain activity

print('Constructing original (simulated) sources')
tmin, tmax = -0.2, 0.8
vertices = [s['vertno'] for s in src]
n_vertices = sum(s['nuse'] for s in src)
data = np.zeros((n_vertices, int((tmax - tmin) * sfreq)))
stc = SourceEstimate(data, vertices, -0.2, 1. / sfreq, subject)

# limit activation to a square pulse in time at two vertices in space
labels = [
    read_labels_from_annot(subject,
                           'aparc.a2009s',
                           hemi,
                           regexp='G_temp_sup-G_T_transv')[0]
    for hi, hemi in enumerate(('lh', 'rh'))
]
stc = stc.in_label(labels[0] + labels[1])
stc.data.fill(0)
stc.data[:, (stc.times >= pulse_tmin) & (stc.times <= pulse_tmax)] = 10e-9

# ############################################################################
# Simulate data
def test_extract_label_time_course():
    """Test extraction of label time courses from stc."""
    n_stcs = 3
    n_times = 50

    src = read_inverse_operator(fname_inv)['src']
    vertices = [src[0]['vertno'], src[1]['vertno']]
    n_verts = len(vertices[0]) + len(vertices[1])

    # get some labels
    labels_lh = read_labels_from_annot('sample',
                                       hemi='lh',
                                       subjects_dir=subjects_dir)
    labels_rh = read_labels_from_annot('sample',
                                       hemi='rh',
                                       subjects_dir=subjects_dir)
    labels = list()
    labels.extend(labels_lh[:5])
    labels.extend(labels_rh[:4])

    n_labels = len(labels)

    label_means = np.arange(n_labels)[:, None] * np.ones((n_labels, n_times))
    label_maxs = np.arange(n_labels)[:, None] * np.ones((n_labels, n_times))

    # compute the mean with sign flip
    label_means_flipped = np.zeros_like(label_means)
    for i, label in enumerate(labels):
        label_means_flipped[i] = i * np.mean(label_sign_flip(label, src))

    # generate some stc's with known data
    stcs = list()
    for i in range(n_stcs):
        data = np.zeros((n_verts, n_times))
        # set the value of the stc within each label
        for j, label in enumerate(labels):
            if label.hemi == 'lh':
                idx = np.intersect1d(vertices[0], label.vertices)
                idx = np.searchsorted(vertices[0], idx)
            elif label.hemi == 'rh':
                idx = np.intersect1d(vertices[1], label.vertices)
                idx = len(vertices[0]) + np.searchsorted(vertices[1], idx)
            data[idx] = label_means[j]

        this_stc = SourceEstimate(data, vertices, 0, 1)
        stcs.append(this_stc)

    # test some invalid inputs
    assert_raises(ValueError,
                  extract_label_time_course,
                  stcs,
                  labels,
                  src,
                  mode='notamode')

    # have an empty label
    empty_label = labels[0].copy()
    empty_label.vertices += 1000000
    assert_raises(ValueError,
                  extract_label_time_course,
                  stcs,
                  empty_label,
                  src,
                  mode='mean')

    # but this works:
    with warnings.catch_warnings(record=True):  # empty label
        tc = extract_label_time_course(stcs,
                                       empty_label,
                                       src,
                                       mode='mean',
                                       allow_empty=True)
    for arr in tc:
        assert_true(arr.shape == (1, n_times))
        assert_array_equal(arr, np.zeros((1, n_times)))

    # test the different modes
    modes = ['mean', 'mean_flip', 'pca_flip', 'max']

    for mode in modes:
        label_tc = extract_label_time_course(stcs, labels, src, mode=mode)
        label_tc_method = [
            stc.extract_label_time_course(labels, src, mode=mode)
            for stc in stcs
        ]
        assert_true(len(label_tc) == n_stcs)
        assert_true(len(label_tc_method) == n_stcs)
        for tc1, tc2 in zip(label_tc, label_tc_method):
            assert_true(tc1.shape == (n_labels, n_times))
            assert_true(tc2.shape == (n_labels, n_times))
            assert_true(np.allclose(tc1, tc2, rtol=1e-8, atol=1e-16))
            if mode == 'mean':
                assert_array_almost_equal(tc1, label_means)
            if mode == 'mean_flip':
                assert_array_almost_equal(tc1, label_means_flipped)
            if mode == 'max':
                assert_array_almost_equal(tc1, label_maxs)

    # test label with very few vertices (check SVD conditionals)
    label = Label(vertices=src[0]['vertno'][:2], hemi='lh')
    x = label_sign_flip(label, src)
    assert_true(len(x) == 2)
    label = Label(vertices=[], hemi='lh')
    x = label_sign_flip(label, src)
    assert_true(x.size == 0)
def plot_visualize_mft_sources(fwdmag, stcdata, tmin, tstep,
                               subject, subjects_dir):
    '''
    Plot the MFT sources at time point of peak.
    '''
    print "##### Attempting to plot:"
    # cf. decoding/plot_decoding_spatio_temporal_source.py
    vertices = [s['vertno'] for s in fwdmag['src']]
    if len(vertices) == 1:
        # split a single source space into pseudo-hemispheres at x = 0
        lh_mask = fwdmag['src'][0]['rr'][fwdmag['src'][0]['vertno']][:, 0] <= -0.
        vertices = [fwdmag['src'][0]['vertno'][lh_mask],
                    fwdmag['src'][0]['vertno'][~lh_mask]]

    stc_feat = SourceEstimate(stcdata, vertices=vertices,
                              tmin=-0.2, tstep=tstep, subject=subject)
    for hemi in ['lh', 'rh']:
        brain = stc_feat.plot(surface='white', hemi=hemi, subjects_dir=subjects_dir,
                              transparent=True, clim='auto')
        brain.show_view('lateral')
        # use peak getter to move visualization to the time point of the peak
        tmin = 0.095
        tmax = 0.10
        print "Restricting peak search to [%fs, %fs]" % (tmin, tmax)
        if hemi == 'both':
            vertno_max, time_idx = stc_feat.get_peak(hemi='rh', time_as_index=True,
                                                     tmin=tmin, tmax=tmax)
        else:
            vertno_max, time_idx = stc_feat.get_peak(hemi=hemi, time_as_index=True,
                                                     tmin=tmin, tmax=tmax)
        if hemi == 'lh':
            comax = fwdmag['src'][0]['rr'][vertno_max]
            print "hemi=%s: vertno_max=%d, time_idx=%d fwdmag['src'][0]['rr'][vertno_max] = " %\
                  (hemi, vertno_max, time_idx), comax
        elif len(fwdmag['src']) > 1:
            comax = fwdmag['src'][1]['rr'][vertno_max]
            print "hemi=%s: vertno_max=%d, time_idx=%d fwdmag['src'][1]['rr'][vertno_max] = " %\
                  (hemi, vertno_max, time_idx), comax

        print "hemi=%s: setting time_idx=%d" % (hemi, time_idx)
        brain.set_data_time_index(time_idx)
        # draw marker at maximum peaking vertex
        brain.add_foci(vertno_max, coords_as_verts=True, hemi=hemi, color='blue',
                       scale_factor=0.6)
        offsets = np.append([0], [s['nuse'] for s in fwdmag['src']])
        if hemi == 'lh':
            ifoci = [np.nonzero([stcdata[0:offsets[1], time_idx] >=
                                 0.25 * np.max(stcdata[:, time_idx])][0])]
            vfoci = fwdmag['src'][0]['vertno'][ifoci[0][0]]
            cfoci = fwdmag['src'][0]['rr'][vfoci]
            print("Coords of %d sel. vfoci: " % cfoci.shape[0])
            print(cfoci)
            print("vfoci: ")
            print(vfoci)
            print("brain.geo['lh'].coords[vfoci] : ")
            print(brain.geo['lh'].coords[vfoci])
        elif len(fwdmag['src']) > 1:
            ifoci = [np.nonzero([stcdata[offsets[1]:, time_idx] >=
                                 0.25 * np.max(stcdata[:, time_idx])][0])]
            vfoci = fwdmag['src'][1]['vertno'][ifoci[0][0]]
            cfoci = fwdmag['src'][1]['rr'][vfoci]
            print("Coords of %d sel. vfoci: " % cfoci.shape[0])
            print(cfoci)
            print("vfoci: ")
            print(vfoci)
            print("brain.geo['rh'].coords[vfoci] : ")
            print(brain.geo['rh'].coords[vfoci])

        invmri_head_t = invert_transform(fwdmag['info']['mri_head_t'])
        mrfoci = apply_trans(invmri_head_t['trans'], cfoci, move=True)
        print("mrfoci: ")
        print(mrfoci)

        # Just some blobs:
        bloblist = np.zeros((300, 3))
        for i in range(100):
            bloblist[i, 0] = float(i)
            bloblist[i + 100, 1] = float(i)
            bloblist[i + 200, 2] = float(i)
        mrblobs = apply_trans(invmri_head_t['trans'], bloblist, move=True)
        brain.save_image('testfig_map_%s.png' % hemi)
        brain.close()

# Example #21

def test_simulate_raw_bem(raw_data):
    """Test simulation of raw data with BEM."""
    raw, src_ss, stc, trans, sphere = raw_data
    src = setup_source_space('sample', 'oct1', subjects_dir=subjects_dir)
    for s in src:
        s['nuse'] = 3
        s['vertno'] = src[1]['vertno'][:3]
        s['inuse'].fill(0)
        s['inuse'][s['vertno']] = 1
    # use different / more complete STC here
    vertices = [s['vertno'] for s in src]
    stc = SourceEstimate(np.eye(sum(len(v) for v in vertices)), vertices, 0,
                         1. / raw.info['sfreq'])
    stcs = [stc] * 15
    raw_sim_sph = simulate_raw(raw.info, stcs, trans, src, sphere)
    raw_sim_bem = simulate_raw(raw.info, stcs, trans, src, bem_fname)
    # some components (especially radial) might not match that well,
    # so just make sure that most components have high correlation
    assert_array_equal(raw_sim_sph.ch_names, raw_sim_bem.ch_names)
    picks = pick_types(raw.info, meg=True, eeg=True)
    n_ch = len(picks)
    corr = np.corrcoef(raw_sim_sph[picks][0], raw_sim_bem[picks][0])
    assert_array_equal(corr.shape, (2 * n_ch, 2 * n_ch))
    med_corr = np.median(np.diag(corr[:n_ch, -n_ch:]))
    assert med_corr > 0.65
    # do some round-trip localization
    for s in src:
        transform_surface_to(s, 'head', trans)
    locs = np.concatenate([s['rr'][s['vertno']] for s in src])
    tmax = (len(locs) - 1) / raw.info['sfreq']
    cov = make_ad_hoc_cov(raw.info)
    # The tolerance for the BEM is surprisingly high (28) but I get the same
    # result when using MNE-C and Xfit, even when using a proper 5120 BEM :(
    for use_raw, bem, tol in ((raw_sim_sph, sphere, 2), (raw_sim_bem,
                                                         bem_fname, 31)):
        events = find_events(use_raw, 'STI 014')
        assert len(locs) == 6
        evoked = Epochs(use_raw, events, 1, 0, tmax, baseline=None).average()
        assert len(evoked.times) == len(locs)
        fits = fit_dipole(evoked, cov, bem, trans, min_dist=1.)[0].pos
        diffs = np.sqrt(np.sum((locs - fits)**2, axis=-1)) * 1000
        med_diff = np.median(diffs)
        assert med_diff < tol, '%s: %s' % (bem, med_diff)
    # also test event timings with SourceSimulator
    first_samp = raw.first_samp
    events = find_events(raw, initial_event=True, verbose=False)
    evt_times = events[:, 0]
    assert len(events) == 3
    labels_sim = [[], [], []]  # random l+r hemisphere points
    labels_sim[0] = Label([src_ss[0]['vertno'][1]], hemi='lh')
    labels_sim[1] = Label([src_ss[0]['vertno'][4]], hemi='lh')
    labels_sim[2] = Label([src_ss[1]['vertno'][2]], hemi='rh')
    wf_sim = np.array([2, 1, 0])
    for this_fs in (0, first_samp):
        ss = SourceSimulator(src_ss,
                             1. / raw.info['sfreq'],
                             first_samp=this_fs)
        for i in range(3):
            ss.add_data(labels_sim[i], wf_sim, events[np.newaxis, i])
        assert ss.n_times == evt_times[-1] + len(wf_sim) - this_fs
    raw_sim = simulate_raw(raw.info,
                           ss,
                           src=src_ss,
                           bem=bem_fname,
                           first_samp=first_samp)
    data = raw_sim.get_data()
    amp0 = data[:, evt_times - first_samp].max()
    amp1 = data[:, evt_times + 1 - first_samp].max()
    amp2 = data[:, evt_times + 2 - first_samp].max()
    assert_allclose(amp0 / amp1, wf_sim[0] / wf_sim[1], rtol=1e-5)
    assert amp2 == 0
    assert raw_sim.n_times == ss.n_times
def test_stc_attributes():
    """Test STC attributes."""
    stc = _fake_stc(n_time=10)
    vec_stc = _fake_vec_stc(n_time=10)

    _test_stc_integrety(stc)
    assert_array_almost_equal(
        stc.times, [0., 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9])

    def attempt_times_mutation(stc):
        stc.times -= 1

    def attempt_assignment(stc, attr, val):
        setattr(stc, attr, val)

    # .times is read-only
    pytest.raises(ValueError, attempt_times_mutation, stc)
    pytest.raises(ValueError, attempt_assignment, stc, 'times', [1])

    # Changing .tmin or .tstep re-computes .times
    stc.tmin = 1
    assert (type(stc.tmin) == float)
    assert_array_almost_equal(
        stc.times, [1., 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8, 1.9])

    stc.tstep = 1
    assert (type(stc.tstep) == float)
    assert_array_almost_equal(stc.times,
                              [1., 2., 3., 4., 5., 6., 7., 8., 9., 10.])

    # tstep <= 0 is not allowed
    pytest.raises(ValueError, attempt_assignment, stc, 'tstep', 0)
    pytest.raises(ValueError, attempt_assignment, stc, 'tstep', -1)

    # Changing .data re-computes .times
    stc.data = np.random.rand(100, 5)
    assert_array_almost_equal(stc.times, [1., 2., 3., 4., 5.])

    # .data must match the number of vertices
    pytest.raises(ValueError, attempt_assignment, stc, 'data', [[1]])
    pytest.raises(ValueError, attempt_assignment, stc, 'data', None)

    # .data must match the number of dimensions
    pytest.raises(ValueError, attempt_assignment, stc, 'data', np.arange(100))
    pytest.raises(ValueError, attempt_assignment, vec_stc, 'data',
                  [np.arange(100)])
    pytest.raises(ValueError, attempt_assignment, vec_stc, 'data',
                  [[[np.arange(100)]]])

    # .shape attribute must also work when ._data is None
    stc._kernel = np.zeros((2, 2))
    stc._sens_data = np.zeros((2, 3))
    stc._data = None
    assert_equal(stc.shape, (2, 3))

    # bad size of data
    stc = _fake_stc()
    data = stc.data[:, np.newaxis, :]
    with pytest.raises(ValueError, match='2 dimensions for SourceEstimate'):
        SourceEstimate(data, stc.vertices)
    stc = SourceEstimate(data[:, 0, 0], stc.vertices, 0, 1)
    assert stc.data.shape == (len(data), 1)
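

# The ``_fake_stc``/``_fake_vec_stc`` fixtures used above are not shown in
# this collection. A sketch of ``_fake_stc`` consistent with the assertions
# (100 vertices, tmin=0, tstep=0.1) might be:
def _fake_stc(n_time=10):
    """Return a surface SourceEstimate with random data (assumed shapes)."""
    verts = [np.arange(10), np.arange(90)]
    return SourceEstimate(np.random.rand(100, n_time), verts, 0, 1e-1, 'foo')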

# Example #23

def test_dipole_fitting(tmp_path):
    """Test dipole fitting."""
    amp = 100e-9
    tempdir = str(tmp_path)
    rng = np.random.RandomState(0)
    fname_dtemp = op.join(tempdir, 'test.dip')
    fname_sim = op.join(tempdir, 'test-ave.fif')
    fwd = convert_forward_solution(read_forward_solution(fname_fwd),
                                   surf_ori=False, force_fixed=True,
                                   use_cps=True)
    evoked = read_evokeds(fname_evo)[0]
    cov = read_cov(fname_cov)
    n_per_hemi = 5
    vertices = [np.sort(rng.permutation(s['vertno'])[:n_per_hemi])
                for s in fwd['src']]
    nv = sum(len(v) for v in vertices)
    stc = SourceEstimate(amp * np.eye(nv), vertices, 0, 0.001)
    evoked = simulate_evoked(fwd, stc, evoked.info, cov, nave=evoked.nave,
                             random_state=rng)
    # For speed, let's use a subset of channels (strange but works)
    picks = np.sort(np.concatenate([
        pick_types(evoked.info, meg=True, eeg=False)[::2],
        pick_types(evoked.info, meg=False, eeg=True)[::2]]))
    evoked.pick_channels([evoked.ch_names[p] for p in picks])
    evoked.add_proj(make_eeg_average_ref_proj(evoked.info))
    write_evokeds(fname_sim, evoked)

    # Run MNE-C version
    run_subprocess([
        'mne_dipole_fit', '--meas', fname_sim, '--meg', '--eeg',
        '--noise', fname_cov, '--dip', fname_dtemp,
        '--mri', fname_fwd, '--reg', '0', '--tmin', '0',
    ])
    dip_c = read_dipole(fname_dtemp)

    # Run mne-python version
    sphere = make_sphere_model(head_radius=0.1)
    with pytest.warns(RuntimeWarning, match='projection'):
        dip, residual = fit_dipole(evoked, cov, sphere, fname_fwd,
                                   rank='info')  # just to test rank support
    assert isinstance(residual, Evoked)

    # Test conversion of dip.pos to MNI coordinates.
    dip_mni_pos = dip.to_mni('sample', fname_trans,
                             subjects_dir=subjects_dir)
    head_to_mni_dip_pos = head_to_mni(dip.pos, 'sample', fwd['mri_head_t'],
                                      subjects_dir=subjects_dir)
    assert_allclose(dip_mni_pos, head_to_mni_dip_pos, rtol=1e-3, atol=0)

    # Test finding label for dip.pos in an aseg, also tests `to_mri`
    target_labels = ['Left-Cerebral-Cortex', 'Unknown', 'Left-Cerebral-Cortex',
                     'Right-Cerebral-Cortex', 'Left-Cerebral-Cortex',
                     'Unknown', 'Unknown', 'Unknown',
                     'Right-Cerebral-White-Matter', 'Right-Cerebral-Cortex']
    labels = dip.to_volume_labels(fname_trans, subject='fsaverage',
                                  aseg="aseg", subjects_dir=subjects_dir)
    assert labels == target_labels

    # Sanity check: do our residuals have less power than orig data?
    data_rms = np.sqrt(np.sum(evoked.data ** 2, axis=0))
    resi_rms = np.sqrt(np.sum(residual.data ** 2, axis=0))
    assert (data_rms > resi_rms * 0.95).all(), \
        '%s (factor: %s)' % ((data_rms / resi_rms).min(), 0.95)

    # Compare to original points
    transform_surface_to(fwd['src'][0], 'head', fwd['mri_head_t'])
    transform_surface_to(fwd['src'][1], 'head', fwd['mri_head_t'])
    assert fwd['src'][0]['coord_frame'] == FIFF.FIFFV_COORD_HEAD
    src_rr = np.concatenate([s['rr'][v] for s, v in zip(fwd['src'], vertices)],
                            axis=0)
    src_nn = np.concatenate([s['nn'][v] for s, v in zip(fwd['src'], vertices)],
                            axis=0)

    # MNE-C skips the last "time" point :(
    out = dip.crop(dip_c.times[0], dip_c.times[-1])
    assert (dip is out)
    src_rr, src_nn = src_rr[:-1], src_nn[:-1]

    # check that we did about as well
    corrs, dists, gc_dists, amp_errs, gofs = [], [], [], [], []
    for d in (dip_c, dip):
        new = d.pos
        diffs = new - src_rr
        corrs += [np.corrcoef(src_rr.ravel(), new.ravel())[0, 1]]
        dists += [np.sqrt(np.mean(np.sum(diffs * diffs, axis=1)))]
        gc_dists += [180 / np.pi * np.mean(np.arccos(np.sum(src_nn * d.ori,
                                                            axis=1)))]
        amp_errs += [np.sqrt(np.mean((amp - d.amplitude) ** 2))]
        gofs += [np.mean(d.gof)]
    # XXX possibly some OpenBLAS numerical differences make
    # things slightly worse for us
    factor = 0.7
    assert dists[0] / factor >= dists[1], 'dists: %s' % dists
    assert corrs[0] * factor <= corrs[1], 'corrs: %s' % corrs
    assert gc_dists[0] / factor >= gc_dists[1] * 0.8, \
        'gc-dists (ori): %s' % gc_dists
    assert amp_errs[0] / factor >= amp_errs[1],\
        'amplitude errors: %s' % amp_errs
    # This one is weird because our cov/sim/picking is weird
    assert gofs[0] * factor <= gofs[1] * 2, 'gof: %s' % gofs

# Example #24

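# Fragment (truncated): the enclosing ``for ii, cluster_ind in
# enumerate(good_cluster_inds):`` header is missing from this capture; the
# cluster-test outputs and buffers are assumed as in Example #17 above.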
    data.fill(0)
    v_inds = clusters[cluster_ind][1]
    t_inds = clusters[cluster_ind][0]
    data[v_inds, t_inds] = T_obs[t_inds, v_inds]
    # Store a nice visualization of the cluster by summing across time (in ms)
    data = np.sign(data) * np.logical_not(data == 0) * data1.tstep
    data_summary[:, ii + 1] = 0.25e3 * np.sum(data, axis=1)

    # save as stc
for i, cluster_ind in enumerate(good_cluster_inds):
    v_inds = clusters[cluster_ind][1]
    t_inds = clusters[cluster_ind][0]
    data[v_inds, t_inds] = T_obs[t_inds, v_inds]

stc_cluster_vis = SourceEstimate(data,
                                 fsave_vertices,
                                 tmin=dataface.tmin,
                                 tstep=dataface.tstep)
stc_cluster_vis.save(
    '/neurospin/meg/meg_tmp/MTT_MEG_Baptiste/MEG/GROUP/plots/clusters/test_west_par'
)

########################################################################################

# Fill the grand-average ('gave') variables
erfAAgave[s] = np.mean(erfAA_trial, axis=0)
erfAVgave[s] = np.mean(erfAV_trial, axis=0)
erfVAgave[s] = np.mean(erfVA_trial, axis=0)
erfVVgave[s] = np.mean(erfVV_trial, axis=0)

# Compute statistic between subjects
#threshold = 6.0

# Example #25

def test_dipole_fitting():
    """Test dipole fitting"""
    amp = 10e-9
    tempdir = _TempDir()
    rng = np.random.RandomState(0)
    fname_dtemp = op.join(tempdir, 'test.dip')
    fname_sim = op.join(tempdir, 'test-ave.fif')
    fwd = convert_forward_solution(read_forward_solution(fname_fwd),
                                   surf_ori=False, force_fixed=True)
    evoked = read_evokeds(fname_evo)[0]
    cov = read_cov(fname_cov)
    n_per_hemi = 5
    vertices = [np.sort(rng.permutation(s['vertno'])[:n_per_hemi])
                for s in fwd['src']]
    nv = sum(len(v) for v in vertices)
    stc = SourceEstimate(amp * np.eye(nv), vertices, 0, 0.001)
    evoked = simulate_evoked(fwd, stc, evoked.info, cov, snr=20,
                             random_state=rng)
    # For speed, let's use a subset of channels (strange but works)
    picks = np.sort(np.concatenate([
        pick_types(evoked.info, meg=True, eeg=False)[::2],
        pick_types(evoked.info, meg=False, eeg=True)[::2]]))
    evoked.pick_channels([evoked.ch_names[p] for p in picks])
    evoked.add_proj(make_eeg_average_ref_proj(evoked.info))
    write_evokeds(fname_sim, evoked)

    # Run MNE-C version
    run_subprocess([
        'mne_dipole_fit', '--meas', fname_sim, '--meg', '--eeg',
        '--noise', fname_cov, '--dip', fname_dtemp,
        '--mri', fname_fwd, '--reg', '0', '--tmin', '0',
    ])
    dip_c = read_dipole(fname_dtemp)

    # Run mne-python version
    sphere = make_sphere_model(head_radius=0.1)
    dip, residuals = fit_dipole(evoked, fname_cov, sphere, fname_fwd)

    # Sanity check: do our residuals have less power than orig data?
    data_rms = np.sqrt(np.sum(evoked.data ** 2, axis=0))
    resi_rms = np.sqrt(np.sum(residuals ** 2, axis=0))
    factor = 1.
    # XXX weird, inexplicable difference for the 3.5 build; we'll assume it is
    # due to an Anaconda bug for now...
    if os.getenv('TRAVIS', 'false') == 'true' and \
            sys.version[:3] in ('3.5', '2.7'):
        factor = 0.8
    assert_true((data_rms > factor * resi_rms).all(),
                msg='%s (factor: %s)' % ((data_rms / resi_rms).min(), factor))

    # Compare to original points
    transform_surface_to(fwd['src'][0], 'head', fwd['mri_head_t'])
    transform_surface_to(fwd['src'][1], 'head', fwd['mri_head_t'])
    src_rr = np.concatenate([s['rr'][v] for s, v in zip(fwd['src'], vertices)],
                            axis=0)
    src_nn = np.concatenate([s['nn'][v] for s, v in zip(fwd['src'], vertices)],
                            axis=0)

    # MNE-C skips the last "time" point :(
    dip.crop(dip_c.times[0], dip_c.times[-1])
    src_rr, src_nn = src_rr[:-1], src_nn[:-1]

    # check that we did at least as well
    corrs, dists, gc_dists, amp_errs, gofs = [], [], [], [], []
    for d in (dip_c, dip):
        new = d.pos
        diffs = new - src_rr
        corrs += [np.corrcoef(src_rr.ravel(), new.ravel())[0, 1]]
        dists += [np.sqrt(np.mean(np.sum(diffs * diffs, axis=1)))]
        gc_dists += [180 / np.pi * np.mean(np.arccos(np.sum(src_nn * d.ori,
                                                     axis=1)))]
        amp_errs += [np.sqrt(np.mean((amp - d.amplitude) ** 2))]
        gofs += [np.mean(d.gof)]
    assert_true(dists[0] >= dists[1] * factor, 'dists: %s' % dists)
    assert_true(corrs[0] <= corrs[1] / factor, 'corrs: %s' % corrs)
    assert_true(gc_dists[0] >= gc_dists[1] * factor,
                'gc-dists (ori): %s' % gc_dists)
    assert_true(amp_errs[0] >= amp_errs[1] * factor,
                'amplitude errors: %s' % amp_errs)
    assert_true(gofs[0] <= gofs[1] / factor, 'gof: %s' % gofs)
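# A standalone toy illustration of the great-circle orientation error used
# above: the arccos of the dot product between unit vectors, in degrees.
# The vectors here are made up for the example.
import numpy as np

nn = np.array([[1., 0., 0.]])   # true source normal
ori = np.array([[0., 1., 0.]])  # fitted dipole orientation
ang_err = 180. / np.pi * np.arccos(np.sum(nn * ori, axis=1))
print(ang_err)  # [90.] degrees for perpendicular orientations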
Example #26
def plot_visualize_mft_sources(fwdmag, stcdata, tmin, tstep, subject,
                               subjects_dir):
    '''
    Plot the MFT sources at the time point of the peak.
    '''
    print "##### Attempting to plot:"
    # cf. decoding/plot_decoding_spatio_temporal_source.py
    vertices = [s['vertno'] for s in fwdmag['src']]
    if len(vertices) == 1:
        vertices = [
            fwdmag['src'][0]['vertno']
            [fwdmag['src'][0]['rr'][fwdmag['src'][0]['vertno']][:, 0] <= -0.],
            fwdmag['src'][0]['vertno'][
                fwdmag['src'][0]['rr'][fwdmag['src'][0]['vertno']][:, 0] > -0.]
        ]

    stc_feat = SourceEstimate(stcdata,
                              vertices=vertices,
                              tmin=-0.2,
                              tstep=tstep,
                              subject=subject)
    for hemi in ['lh', 'rh']:
        brain = stc_feat.plot(surface='white',
                              hemi=hemi,
                              subjects_dir=subjects_dir,
                              transparent=True,
                              clim='auto')
        brain.show_view('lateral')
        # use peak getter to move visualization to the time point of the peak
        tmin = 0.095
        tmax = 0.10
        print "Restricting peak search to [%fs, %fs]" % (tmin, tmax)
        if hemi == 'both':
            vertno_max, time_idx = stc_feat.get_peak(hemi='rh',
                                                     time_as_index=True,
                                                     tmin=tmin,
                                                     tmax=tmax)
        else:
            vertno_max, time_idx = stc_feat.get_peak(hemi=hemi,
                                                     time_as_index=True,
                                                     tmin=tmin,
                                                     tmax=tmax)
        if hemi == 'lh':
            comax = fwdmag['src'][0]['rr'][vertno_max]
            print("hemi=%s: vertno_max=%d, time_idx=%d "
                  "fwdmag['src'][0]['rr'][vertno_max] = " %
                  (hemi, vertno_max, time_idx), comax)
        elif len(fwdmag['src']) > 1:
            comax = fwdmag['src'][1]['rr'][vertno_max]
            print("hemi=%s: vertno_max=%d, time_idx=%d "
                  "fwdmag['src'][1]['rr'][vertno_max] = " %
                  (hemi, vertno_max, time_idx), comax)

        print "hemi=%s: setting time_idx=%d" % (hemi, time_idx)
        brain.set_data_time_index(time_idx)
        # draw marker at maximum peaking vertex
        brain.add_foci(vertno_max,
                       coords_as_verts=True,
                       hemi=hemi,
                       color='blue',
                       scale_factor=0.6)
        offsets = np.append([0], [s['nuse'] for s in fwdmag['src']])
        if hemi == 'lh':
            ifoci = [
                np.nonzero([
                    stcdata[0:offsets[1], time_idx] >=
                    0.25 * np.max(stcdata[:, time_idx])
                ][0])
            ]
            vfoci = fwdmag['src'][0]['vertno'][ifoci[0][0]]
            cfoci = fwdmag['src'][0]['rr'][vfoci]
            print "Coords  of %d sel. vfoci: " % cfoci.shape[0]
            print cfoci
            print "vfoci: "
            print vfoci
            print "brain.geo['lh'].coords[vfoci] : "
            print brain.geo['lh'].coords[vfoci]
        elif len(fwdmag['src']) > 1:
            ifoci = [
                np.nonzero([
                    stcdata[offsets[1]:, time_idx] >=
                    0.25 * np.max(stcdata[:, time_idx])
                ][0])
            ]
            vfoci = fwdmag['src'][1]['vertno'][ifoci[0][0]]
            cfoci = fwdmag['src'][1]['rr'][vfoci]
            print "Coords  of %d sel. vfoci: " % cfoci.shape[0]
            print cfoci
            print "vfoci: "
            print vfoci
            print "brain.geo['rh'].coords[vfoci] : "
            print brain.geo['rh'].coords[vfoci]

        mrfoci = np.zeros(cfoci.shape)
        invmri_head_t = invert_transform(fwdmag['info']['mri_head_t'])
        mrfoci = apply_trans(invmri_head_t['trans'], cfoci, move=True)
        print "mrfoci: "
        print mrfoci

        # Just some blops:
        bloblist = np.zeros((300, 3))
        for i in range(100):
            bloblist[i, 0] = float(i)
            bloblist[i + 100, 1] = float(i)
            bloblist[i + 200, 2] = float(i)
        mrblobs = apply_trans(invmri_head_t['trans'], bloblist, move=True)
        brain.save_image('testfig_map_%s.png' % hemi)
        brain.close()
def _fake_stc(n_time=10):
    verts = [np.arange(10), np.arange(90)]
    return SourceEstimate(np.random.rand(100, n_time), verts, 0, 1e-1, 'foo')
Example #28
def test_limits_to_control_points():
    """Test functionality for determining control points."""
    sample_src = read_source_spaces(src_fname)
    kwargs = dict(subjects_dir=subjects_dir, smoothing_steps=1)

    vertices = [s['vertno'] for s in sample_src]
    n_time = 5
    n_verts = sum(len(v) for v in vertices)
    stc_data = np.random.RandomState(0).rand((n_verts * n_time))
    stc_data.shape = (n_verts, n_time)
    stc = SourceEstimate(stc_data, vertices, 1, 1, 'sample')

    # Test for simple use cases
    mlab = _import_mlab()
    stc.plot(**kwargs)
    stc.plot(clim=dict(pos_lims=(10, 50, 90)), **kwargs)
    stc.plot(colormap='hot', clim='auto', **kwargs)
    stc.plot(colormap='mne', clim='auto', **kwargs)
    figs = [mlab.figure(), mlab.figure()]
    stc.plot(clim=dict(kind='value', lims=(10, 50, 90)), figure=99, **kwargs)
    pytest.raises(ValueError, stc.plot, clim='auto', figure=figs, **kwargs)

    # Test for correct clim values
    with pytest.raises(ValueError, match='monotonically'):
        stc.plot(clim=dict(kind='value', pos_lims=[0, 1, 0]), **kwargs)
    with pytest.raises(ValueError, match=r'.*must be \(3,\)'):
        stc.plot(colormap='mne', clim=dict(pos_lims=(5, 10, 15, 20)), **kwargs)
    with pytest.raises(ValueError, match='must be "value" or "percent"'):
        stc.plot(clim=dict(pos_lims=(5, 10, 15), kind='foo'), **kwargs)
    with pytest.raises(ValueError, match='must be "auto" or dict'):
        stc.plot(colormap='mne', clim='foo', **kwargs)
    with pytest.raises(TypeError, match='must be an instance of'):
        plot_source_estimates('foo', clim='auto', **kwargs)
    with pytest.raises(ValueError, match='hemi'):
        stc.plot(hemi='foo', clim='auto', **kwargs)
    with pytest.raises(ValueError, match='Exactly one'):
        stc.plot(clim=dict(lims=[0, 1, 2], pos_lims=[0, 1, 2], kind='value'),
                 **kwargs)

    # Test handling of degenerate data: thresholded maps
    stc._data.fill(0.)
    with pytest.warns(RuntimeWarning, match='All data were zero'):
        plot_source_estimates(stc, **kwargs)
    mlab.close(all=True)
def test_transform():
    """Test applying linear (time) transform to data."""
    # make up some data
    n_verts_lh, n_verts_rh, n_times = 10, 10, 10
    vertices = [np.arange(n_verts_lh), n_verts_lh + np.arange(n_verts_rh)]
    data = rng.randn(n_verts_lh + n_verts_rh, n_times)
    stc = SourceEstimate(data, vertices=vertices, tmin=-0.1, tstep=0.1)

    # data_t.ndim > 2 & copy is True
    stcs_t = stc.transform(_my_trans, copy=True)
    assert_true(isinstance(stcs_t, list))
    assert_array_equal(stc.times, stcs_t[0].times)
    assert_equal(stc.vertices, stcs_t[0].vertices)

    data = np.concatenate(
        (stcs_t[0].data[:, :, None], stcs_t[1].data[:, :, None]), axis=2)
    data_t = stc.transform_data(_my_trans)
    assert_array_equal(data, data_t)  # check against stc.transform_data()

    # data_t.ndim > 2 & copy is False
    assert_raises(ValueError, stc.transform, _my_trans, copy=False)

    # data_t.ndim = 2 & copy is True
    tmp = deepcopy(stc)
    stc_t = stc.transform(np.abs, copy=True)
    assert_true(isinstance(stc_t, SourceEstimate))
    assert_array_equal(stc.data, tmp.data)  # xfrm doesn't modify original?

    # data_t.ndim = 2 & copy is False
    times = np.round(1000 * stc.times)
    verts = np.arange(len(stc.lh_vertno),
                      len(stc.lh_vertno) + len(stc.rh_vertno), 1)
    verts_rh = stc.rh_vertno
    tmin_idx = np.searchsorted(times, 0)
    tmax_idx = np.searchsorted(times, 501)  # Include 500ms in the range
    data_t = stc.transform_data(np.abs,
                                idx=verts,
                                tmin_idx=tmin_idx,
                                tmax_idx=tmax_idx)
    stc.transform(np.abs, idx=verts, tmin=-50, tmax=500, copy=False)
    assert_true(isinstance(stc, SourceEstimate))
    assert_equal(stc.tmin, 0.)
    assert_equal(stc.times[-1], 0.5)
    assert_equal(len(stc.vertices[0]), 0)
    assert_equal(stc.vertices[1], verts_rh)
    assert_array_equal(stc.data, data_t)

    times = np.round(1000 * stc.times)
    tmin_idx, tmax_idx = np.searchsorted(times, 0), np.searchsorted(times, 250)
    data_t = stc.transform_data(np.abs, tmin_idx=tmin_idx, tmax_idx=tmax_idx)
    stc.transform(np.abs, tmin=0, tmax=250, copy=False)
    assert_equal(stc.tmin, 0.)
    assert_equal(stc.times[-1], 0.2)
    assert_array_equal(stc.data, data_t)
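# The transform tests rely on a `_my_trans` helper defined elsewhere; a
# minimal sketch consistent with how it is used here (the output gains a
# trailing axis of length 2, so transform() returns two SourceEstimates).
# This is an assumption -- the original helper may differ (e.g. an FFT-based
# variant):
import numpy as np

def _my_trans(data):
    # Stack two copies of the input along a new last axis.
    return np.concatenate([data[:, :, None], data[:, :, None]], axis=-1)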
Example #30
fname_raw = op.join(data_dir, 'raw_fif', '%s_funloc_raw.fif' % subj)
fname_erm = op.join(data_dir, 'raw_fif', '%s_erm_raw.fif' % subj)
trans = op.join(data_dir, 'trans', '%s-trans.fif' % subj)
bem = op.join(bem_dir, '%s-5120-5120-5120-bem-sol.fif' % subject)
src = read_source_spaces(op.join(bem_dir, '%s-oct-6-src.fif' % subject))
sfreq = read_info(fname_raw, verbose=False)['sfreq']

# ############################################################################
# construct appropriate brain activity

print('Constructing original (simulated) sources')
tmin, tmax = -0.2, 0.8
vertices = [s['vertno'] for s in src]
n_vertices = sum(s['nuse'] for s in src)
data = np.ones((n_vertices, int((tmax - tmin) * sfreq)))
stc = SourceEstimate(data, vertices, -0.2, 1. / sfreq, subject)

# limit activation to a square pulse in time at two vertices in space
labels = [read_labels_from_annot(subject, 'aparc.a2009s', hemi,
                                 regexp='G_temp_sup-G_T_transv')[0]
          for hi, hemi in enumerate(('lh', 'rh'))]
stc = stc.in_label(labels[0] + labels[1])
stc.data.fill(0)
stc.data[:, np.where(np.logical_and(stc.times >= pulse_tmin,
                                    stc.times <= pulse_tmax))[0]] = 10e-9

# ############################################################################
# Simulate data

# Simulate data with movement
with warnings.catch_warnings(record=True):
Example #31
def test_process_clim_plot(renderer_interactive):
    """Test functionality for determining control points with stc.plot."""
    sample_src = read_source_spaces(src_fname)
    kwargs = dict(subjects_dir=subjects_dir, smoothing_steps=1)

    vertices = [s['vertno'] for s in sample_src]
    n_time = 5
    n_verts = sum(len(v) for v in vertices)
    stc_data = np.random.RandomState(0).rand((n_verts * n_time))
    stc_data.shape = (n_verts, n_time)
    stc = SourceEstimate(stc_data, vertices, 1, 1, 'sample')

    # Test for simple use cases
    stc.plot(**kwargs)
    stc.plot(clim=dict(pos_lims=(10, 50, 90)), **kwargs)
    stc.plot(colormap='hot', clim='auto', **kwargs)
    stc.plot(colormap='mne', clim='auto', **kwargs)
    stc.plot(clim=dict(kind='value', lims=(10, 50, 90)), figure=99, **kwargs)
    pytest.raises(TypeError, stc.plot, clim='auto', figure=[0], **kwargs)

    # Test for correct clim values
    with pytest.raises(ValueError, match='monotonically'):
        stc.plot(clim=dict(kind='value', pos_lims=[0, 1, 0]), **kwargs)
    with pytest.raises(ValueError, match=r'.*must be \(3,\)'):
        stc.plot(colormap='mne', clim=dict(pos_lims=(5, 10, 15, 20)), **kwargs)
    with pytest.raises(ValueError, match="'value', 'values' and 'percent'"):
        stc.plot(clim=dict(pos_lims=(5, 10, 15), kind='foo'), **kwargs)
    with pytest.raises(ValueError, match='must be "auto" or dict'):
        stc.plot(colormap='mne', clim='foo', **kwargs)
    with pytest.raises(TypeError, match='must be an instance of'):
        plot_source_estimates('foo', clim='auto', **kwargs)
    with pytest.raises(ValueError, match='hemi'):
        stc.plot(hemi='foo', clim='auto', **kwargs)
    with pytest.raises(ValueError, match='Exactly one'):
        stc.plot(clim=dict(lims=[0, 1, 2], pos_lims=[0, 1, 2], kind='value'),
                 **kwargs)

    # Test handling of degenerate data: thresholded maps
    stc._data.fill(0.)
    with pytest.warns(RuntimeWarning, match='All data were zero'):
        plot_source_estimates(stc, **kwargs)
Example #32
brain.add_foci(coords=simulated_stc.lh_vertno,
               coords_as_verts=True,
               hemi='lh',
               map_surface=surf,
               color='red')
brain.add_foci(coords=simulated_stc.rh_vertno,
               coords_as_verts=True,
               hemi='rh',
               map_surface=surf,
               color='red')

# TODO: this outputs some vtk error, not sure why. It seems to work anyway

input('press enter to continue')
"""Visualize the inverse modeled data."""
vertices = [fwd_fixed['src'][0]['vertno'], fwd_fixed['src'][1]['vertno']]

stc = SourceEstimate(np.abs(source_data), vertices, tmin=0.0,
                     tstep=0.001)  # weighted
stc_orig = apply_inverse(evoked, inv, 1 / 9., inversion_method)  # original

stc.plot(subject=subject,
         subjects_dir=subjects_dir,
         hemi='both',
         time_viewer=True,
         colormap='mne',
         alpha=0.5,
         transparent=True)
"""Compute the parcel time-series."""
parcel_series = apply_weighting_evoked(evoked, fwd_fixed, inv, labels,
                                       inversion_method)

input('press enter to exit')
def resolution_metrics(resmat,
                       src,
                       function='psf',
                       metric='peak_err',
                       threshold=0.5,
                       verbose=None):
    """Compute spatial resolution metrics for linear solvers.

    Parameters
    ----------
    resmat : array, shape (n_orient * n_vertices, n_vertices)
        The resolution matrix.
        If not a square matrix and if the number of rows is a multiple of
        number of columns (e.g. free or loose orientations), then the Euclidean
        length per source location is computed (e.g. if inverse operator with
        free orientations was applied to forward solution with fixed
        orientations).
    src : instance of SourceSpaces
        Source space object from forward or inverse operator.
    function : 'psf' | 'ctf'
        Whether to compute metrics for columns (point-spread functions, PSFs)
        or rows (cross-talk functions, CTFs) of the resolution matrix.
    metric : str
        The resolution metric to compute. Allowed options are:

        Localization-based metrics:

        - ``'peak_err'`` Peak localization error (PLE), Euclidean distance
          between peak and true source location.
        - ``'cog_err'`` Centre-of-gravity localisation error (CoG), Euclidean
          distance between CoG and true source location.

        Spatial-extent-based metrics:

        - ``'sd_ext'`` Spatial deviation (e.g. [1]_ [2]_).
        - ``'maxrad_ext'`` maximum radius to 50%% of max amplitude.

        Amplitude-based metrics:

        - ``'peak_amp'`` Ratio between absolute maximum amplitudes of peaks per
          location and maximum peak across locations.
        - ``'sum_amp'`` Ratio between sums of absolute amplitudes.

    threshold : float
        Amplitude fraction threshold for spatial extent metric 'maxrad_ext'.
        Defaults to 0.5.
    %(verbose)s

    Returns
    -------
    resolution_metric : instance of SourceEstimate
        The resolution metric.

    Notes
    -----
    For details, see [1]_ [2]_.

    .. versionadded:: 0.20

    References
    ----------
    .. [1] Molins A, Stufflebeam S M, Brown E N, Hämäläinen M S (2008).
           Quantification of the benefit from integrating MEG and EEG data in
           minimum l2-norm estimation. Neuroimage, 42(3):1069-77.
    .. [2] Hauk O, Stenroos M, Treder M (2019). "Towards an Objective
           Evaluation of EEG/MEG Source Estimation Methods: The Linear Tool
           Kit", bioRxiv, doi: https://doi.org/10.1101/672956.
    """
    # Check if input options are valid
    metrics = ('peak_err', 'cog_err', 'sd_ext', 'maxrad_ext', 'peak_amp',
               'sum_amp')
    if metric not in metrics:
        raise ValueError('"%s" is not a recognized metric.' % metric)

    if function not in ['psf', 'ctf']:
        raise ValueError('Not a recognised resolution function: %s.' %
                         function)

    if metric in ('peak_err', 'cog_err'):
        resolution_metric = _localisation_error(resmat,
                                                src,
                                                function=function,
                                                metric=metric)

    elif metric in ('sd_ext', 'maxrad_ext'):
        resolution_metric = _spatial_extent(resmat,
                                            src,
                                            function=function,
                                            metric=metric,
                                            threshold=threshold)

    elif metric in ('peak_amp', 'sum_amp'):
        resolution_metric = _relative_amplitude(resmat,
                                                src,
                                                function=function,
                                                metric=metric)

    # get vertices from source space
    vertno_lh = src[0]['vertno']
    vertno_rh = src[1]['vertno']
    vertno = [vertno_lh, vertno_rh]

    # Convert array to source estimate
    resolution_metric = SourceEstimate(resolution_metric,
                                       vertno,
                                       tmin=0.,
                                       tstep=1.)

    return resolution_metric
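# A hedged usage sketch: `fwd`, `inv_op` and `subjects_dir` are assumed to
# exist (illustrative names, not from this snippet). Build a resolution
# matrix, then map the peak localization error of the point-spread functions
# onto the cortex.
from mne.minimum_norm import (make_inverse_resolution_matrix,
                              resolution_metrics)

resmat = make_inverse_resolution_matrix(fwd, inv_op, method='MNE',
                                        lambda2=1. / 9.)  # assumed inputs
ple_psf = resolution_metrics(resmat, fwd['src'], function='psf',
                             metric='peak_err')
ple_psf.plot('sample', hemi='both', subjects_dir=subjects_dir)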
Example #34
def test_limits_to_control_points():
    """Test functionality for determing control points."""
    sample_src = read_source_spaces(src_fname)
    kwargs = dict(subjects_dir=subjects_dir, smoothing_steps=1)

    vertices = [s['vertno'] for s in sample_src]
    n_time = 5
    n_verts = sum(len(v) for v in vertices)
    stc_data = np.random.RandomState(0).rand((n_verts * n_time))
    stc_data.shape = (n_verts, n_time)
    stc = SourceEstimate(stc_data, vertices, 1, 1, 'sample')

    # Test for simple use cases
    mlab = _import_mlab()
    stc.plot(**kwargs)
    stc.plot(clim=dict(pos_lims=(10, 50, 90)), **kwargs)
    stc.plot(colormap='hot', clim='auto', **kwargs)
    stc.plot(colormap='mne', clim='auto', **kwargs)
    figs = [mlab.figure(), mlab.figure()]
    stc.plot(clim=dict(kind='value', lims=(10, 50, 90)), figure=99, **kwargs)
    assert_raises(ValueError, stc.plot, clim='auto', figure=figs, **kwargs)

    # Test both types of incorrect limits key (lims/pos_lims)
    assert_raises(KeyError,
                  plot_source_estimates,
                  stc,
                  colormap='mne',
                  clim=dict(kind='value', lims=(5, 10, 15)),
                  **kwargs)
    assert_raises(KeyError,
                  plot_source_estimates,
                  stc,
                  colormap='hot',
                  clim=dict(kind='value', pos_lims=(5, 10, 15)),
                  **kwargs)

    # Test for correct clim values
    assert_raises(ValueError,
                  stc.plot,
                  clim=dict(kind='value', pos_lims=[0, 1, 0]),
                  **kwargs)
    assert_raises(ValueError,
                  stc.plot,
                  colormap='mne',
                  clim=dict(pos_lims=(5, 10, 15, 20)),
                  **kwargs)
    assert_raises(ValueError,
                  stc.plot,
                  clim=dict(pos_lims=(5, 10, 15), kind='foo'),
                  **kwargs)
    assert_raises(ValueError, stc.plot, colormap='mne', clim='foo', **kwargs)
    assert_raises(ValueError, stc.plot, clim=(5, 10, 15), **kwargs)
    assert_raises(ValueError,
                  plot_source_estimates,
                  'foo',
                  clim='auto',
                  **kwargs)
    assert_raises(ValueError, stc.plot, hemi='foo', clim='auto', **kwargs)

    # Test handling of degenerate data
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        # thresholded maps
        stc._data.fill(0.)
        plot_source_estimates(stc, **kwargs)
        assert_equal(len(w), 1)
    mlab.close(all=True)
data = np.zeros((n_vertices_fsave, n_times))
data_summary = np.zeros((n_vertices_fsave, len(good_cluster_inds) + 1))
tstep = stc.tstep
for ii, cluster_ind in enumerate(good_cluster_inds):
    data.fill(0)
    v_inds = clusters[cluster_ind][1]
    t_inds = clusters[cluster_ind][0]
    data[v_inds, t_inds] = T_obs[t_inds, v_inds]
    # Store a nice visualization of the cluster by summing across time (in ms)
    data = np.sign(data) * np.logical_not(data == 0) * tstep
    data_summary[:, ii + 1] = 1e3 * np.sum(data, axis=1)

#    Make the first "time point" a sum across all clusters for easy
#    visualization
data_summary[:, 0] = np.sum(data_summary, axis=1)
fsave_vertices = [np.arange(10242), np.arange(10242)]
stc_all_cluster_vis = SourceEstimate(data_summary, fsave_vertices, tmin=0,
                                     tstep=1e-3, subject='fsaverage')

#    Let's actually plot the first "time point" in the SourceEstimate, which
#    shows all the clusters, weighted by duration
subjects_dir = op.join(data_path, 'subjects')
# blue blobs are for condition A != condition B
brains = stc_all_cluster_vis.plot('fsaverage', 'inflated', 'both',
                                  subjects_dir=subjects_dir,
                                  time_label='Duration significant (ms)',
                                  fmin=0, fmid=25, fmax=50)
for idx, brain in enumerate(brains):
    brain.set_data_time_index(0)
    brain.scale_data_colormap(fmin=0, fmid=25, fmax=50, transparent=True)
    brain.show_view('lateral')
    brain.save_image('clusters-%s.png' % ('lh' if idx == 0 else 'rh'))
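# Toy sanity check of the duration encoding above: a cluster spanning three
# time samples at tstep=1e-3 s should show up as 3 ms at its vertices. All
# values here are made up for the illustration.
import numpy as np

T_obs = np.ones((5, 4))  # (n_times, n_vertices), all positive
data = np.zeros((4, 5))  # (n_vertices, n_times)
v_inds = np.array([1, 1, 1])
t_inds = np.array([0, 1, 2])
data[v_inds, t_inds] = T_obs[t_inds, v_inds]
tstep = 1e-3
summary = 1e3 * np.sum(np.sign(data) * np.logical_not(data == 0) * tstep,
                       axis=1)
print(summary)  # [0. 3. 0. 0.] -> 3 ms of significance at vertex 1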
def test_epochs_tmin_tmax(kind):
    """Test spectral.spectral_connectivity with epochs and arrays."""
    rng = np.random.RandomState(0)
    n_epochs, n_chs, n_times, sfreq, f = 10, 2, 2000, 1000., 20.
    data = rng.randn(n_epochs, n_chs, n_times)
    sig = np.sin(2 * np.pi * f * np.arange(1000) / sfreq) * np.hanning(1000)
    data[:, :, 500:1500] += sig
    info = create_info(n_chs, sfreq, 'eeg')
    if kind == 'epochs':
        tmin = -1
        X = EpochsArray(data, info, tmin=tmin)
    elif kind == 'stc':
        tmin = -1
        X = [SourceEstimate(d, [[0], [0]], tmin, 1. / sfreq) for d in data]
    elif kind == 'combo':
        tmin = -1
        X = [(d[[0]], SourceEstimate(d[[1]], [[0], []], tmin, 1. / sfreq))
             for d in data]
    else:
        assert kind == 'ndarray'
        tmin = 0
        X = data
    want_times = np.arange(n_times) / sfreq + tmin

    # Parameters for computing connectivity
    fmin, fmax = f - 2, f + 2
    kwargs = {
        'method': 'coh',
        'mode': 'multitaper',
        'sfreq': sfreq,
        'fmin': fmin,
        'fmax': fmax,
        'faverage': True,
        'mt_adaptive': False,
        'n_jobs': 1
    }

    # Check the entire interval
    conn = spectral_connectivity(X, **kwargs)
    assert 0.89 < conn[0][1, 0] < 0.91
    assert_allclose(conn[2], want_times)
    # Check a time interval before the sinusoid
    conn = spectral_connectivity(X, tmax=tmin + 0.5, **kwargs)
    assert 0 < conn[0][1, 0] < 0.15
    # Check a time during the sinusoid
    conn = spectral_connectivity(X, tmin=tmin + 0.5, tmax=tmin + 1.5, **kwargs)
    assert 0.93 < conn[0][1, 0] <= 0.94
    # Check a time interval after the sinusoid
    conn = spectral_connectivity(X, tmin=tmin + 1.5, tmax=tmin + 1.9, **kwargs)
    assert 0 < conn[0][1, 0] < 0.15

    # Check for warning if tmin, tmax is outside of the time limits of data
    with pytest.warns(RuntimeWarning, match='start time tmin'):
        spectral_connectivity(X, **kwargs, tmin=tmin - 0.1)

    with pytest.warns(RuntimeWarning, match='stop time tmax'):
        spectral_connectivity(X, **kwargs, tmax=tmin + 2.5)

    # make one with mismatched times
    if kind != 'combo':
        return
    X = [(SourceEstimate(d[[0]], [[0], []], tmin - 1, 1. / sfreq),
          SourceEstimate(d[[1]], [[0], []], tmin, 1. / sfreq)) for d in data]
    with pytest.warns(RuntimeWarning, match='time scales of input') as w:
        spectral_connectivity(X, **kwargs)

    # Two warnings total: the 'time scales' warning is emitted just once even
    # though there were multiple epochs, plus one DeprecationWarning
    assert len(w) == 2
def test_localization_bias():
    """Test inverse localization bias for minimum-norm solvers."""
    # Identity input
    evoked = _get_evoked()
    evoked.pick_types(meg=True, eeg=True, exclude=())
    evoked = EvokedArray(np.eye(len(evoked.data)), evoked.info)
    noise_cov = read_cov(fname_cov)
    # restrict to limited set of verts (small src here) and one hemi for speed
    fwd_orig = read_forward_solution(fname_fwd)
    vertices = [fwd_orig['src'][0]['vertno'].copy(), []]
    stc = SourceEstimate(np.zeros((sum(len(v) for v in vertices), 1)),
                         vertices, 0., 1.)
    fwd_orig = restrict_forward_to_stc(fwd_orig, stc)

    #
    # Fixed orientation (not very different)
    #
    fwd = fwd_orig.copy()
    inv_fixed = make_inverse_operator(evoked.info,
                                      fwd,
                                      noise_cov,
                                      loose=0.,
                                      depth=0.8)
    fwd = convert_forward_solution(fwd, force_fixed=True, surf_ori=True)
    fwd = fwd['sol']['data']
    want = np.arange(fwd.shape[1])
    for method, lower, upper in (('MNE', 83, 87), ('dSPM', 96, 98),
                                 ('sLORETA', 100, 100), ('eLORETA', 100, 100)):
        inv_op = apply_inverse(evoked, inv_fixed, lambda2, method).data
        loc = np.abs(np.dot(inv_op, fwd))
        # Compute the percentage of sources for which there is no localization
        # bias:
        perc = (want == np.argmax(loc, axis=0)).mean() * 100
        assert lower <= perc <= upper, method

    #
    # Loose orientation
    #
    fwd = fwd_orig.copy()
    inv_free = make_inverse_operator(evoked.info,
                                     fwd,
                                     noise_cov,
                                     loose=0.2,
                                     depth=0.8)
    fwd = fwd['sol']['data']
    want = np.arange(fwd.shape[1]) // 3
    for method, lower, upper in (('MNE', 25, 35), ('dSPM', 25, 35),
                                 ('sLORETA', 35, 40), ('eLORETA', 40, 45)):
        inv_op = apply_inverse(evoked,
                               inv_free,
                               lambda2,
                               method,
                               pick_ori='vector').data
        loc = np.linalg.norm(np.einsum('vos,sx->vxo', inv_op, fwd), axis=-1)
        # Compute the percentage of sources for which there is no localization
        # bias:
        perc = (want == np.argmax(loc, axis=0)).mean() * 100
        assert lower <= perc <= upper, method

    #
    # Free orientation
    #
    fwd = fwd_orig.copy()
    inv_free = make_inverse_operator(evoked.info,
                                     fwd,
                                     noise_cov,
                                     loose=1.,
                                     depth=0.8)
    fwd = fwd['sol']['data']
    want = np.arange(fwd.shape[1]) // 3
    force_kwargs = dict(method_params=dict(force_equal=True))
    for method, lower, upper, kwargs in (
        ('MNE', 45, 55, {}),
        ('dSPM', 40, 45, {}),
        ('sLORETA', 90, 95, {}),
        ('eLORETA', 90, 95, force_kwargs),
        ('eLORETA', 100, 100, {}),
    ):
        inv_op = apply_inverse(evoked,
                               inv_free,
                               lambda2,
                               method,
                               pick_ori='vector',
                               **kwargs).data
        loc = np.linalg.norm(np.einsum('vos,sx->vxo', inv_op, fwd), axis=-1)
        # Compute the percentage of sources for which there is no localization
        # bias:
        perc = (want == np.argmax(loc, axis=0)).mean() * 100
        assert lower <= perc <= upper, method
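# Toy illustration of the zero-localization-bias check above: with a
# resolution matrix R = inv_op @ gain, source j is unbiased when the peak of
# |R[:, j]| falls on row j. The matrix here is synthetic.
import numpy as np

rng = np.random.RandomState(0)
R = np.abs(rng.randn(4, 4)) + 10 * np.eye(4)  # strongly diagonal resolution
want = np.arange(R.shape[1])
perc = (want == np.argmax(np.abs(R), axis=0)).mean() * 100
print(perc)  # 100.0 for this diagonally dominant toy matrix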
def test_morphed_source_space_return():
    """Test returning a morphed source space to the original subject."""
    # let's create some random data on fsaverage
    data = rng.randn(20484, 1)
    tmin, tstep = 0, 1.
    src_fs = read_source_spaces(fname_fs)
    stc_fs = SourceEstimate(data, [s['vertno'] for s in src_fs], tmin, tstep,
                            'fsaverage')

    # Create our morph source space
    src_morph = morph_source_spaces(src_fs,
                                    'sample',
                                    subjects_dir=subjects_dir)

    # Morph the data over using standard methods
    stc_morph = stc_fs.morph('sample', [s['vertno'] for s in src_morph],
                             smooth=1,
                             subjects_dir=subjects_dir)

    # We can now pretend like this was real data we got e.g. from an inverse.
    # To be complete, let's remove some vertices
    keeps = [
        np.sort(rng.permutation(np.arange(len(v)))[:len(v) - 10])
        for v in stc_morph.vertices
    ]
    stc_morph = SourceEstimate(
        np.concatenate([
            stc_morph.lh_data[keeps[0]], stc_morph.rh_data[keeps[1]]
        ]), [v[k] for v, k in zip(stc_morph.vertices, keeps)], tmin, tstep,
        'sample')

    # Return it to the original subject
    stc_morph_return = stc_morph.to_original_src(src_fs,
                                                 subjects_dir=subjects_dir)

    # Compare to the original data
    stc_morph_morph = stc_morph.morph('fsaverage',
                                      stc_morph_return.vertices,
                                      smooth=1,
                                      subjects_dir=subjects_dir)
    assert_equal(stc_morph_return.subject, stc_morph_morph.subject)
    for ii in range(2):
        assert_array_equal(stc_morph_return.vertices[ii],
                           stc_morph_morph.vertices[ii])
    # These will not match perfectly because morphing pushes data around
    corr = np.corrcoef(stc_morph_return.data[:, 0],
                       stc_morph_morph.data[:, 0])[0, 1]
    assert_true(corr > 0.99, corr)

    # Degenerate cases
    stc_morph.subject = None  # no .subject provided
    assert_raises(ValueError,
                  stc_morph.to_original_src,
                  src_fs,
                  subject_orig='fsaverage',
                  subjects_dir=subjects_dir)
    stc_morph.subject = 'sample'
    del src_fs[0]['subject_his_id']  # no name in src_fsaverage
    assert_raises(ValueError,
                  stc_morph.to_original_src,
                  src_fs,
                  subjects_dir=subjects_dir)
    src_fs[0]['subject_his_id'] = 'fsaverage'  # name mismatch
    assert_raises(ValueError,
                  stc_morph.to_original_src,
                  src_fs,
                  subject_orig='foo',
                  subjects_dir=subjects_dir)
    src_fs[0]['subject_his_id'] = 'sample'
    src = read_source_spaces(fname)  # wrong source space
    assert_raises(RuntimeError,
                  stc_morph.to_original_src,
                  src,
                  subjects_dir=subjects_dir)
Example #39
def test_limits_to_control_points():
    """Test functionality for determing control points
    """
    sample_src = read_source_spaces(src_fname)

    vertices = [s['vertno'] for s in sample_src]
    n_time = 5
    n_verts = sum(len(v) for v in vertices)
    stc_data = np.random.RandomState(0).rand((n_verts * n_time))
    stc_data.shape = (n_verts, n_time)
    stc = SourceEstimate(stc_data, vertices, 1, 1, 'sample')

    # Test for simple use cases
    from mayavi import mlab
    stc.plot(subjects_dir=subjects_dir)
    stc.plot(clim=dict(pos_lims=(10, 50, 90)), subjects_dir=subjects_dir)
    stc.plot(clim=dict(kind='value', lims=(10, 50, 90)), figure=99,
             subjects_dir=subjects_dir)
    stc.plot(colormap='hot', clim='auto', subjects_dir=subjects_dir)
    stc.plot(colormap='mne', clim='auto', subjects_dir=subjects_dir)
    figs = [mlab.figure(), mlab.figure()]
    assert_raises(RuntimeError, stc.plot, clim='auto', figure=figs,
                  subjects_dir=subjects_dir)

    # Test both types of incorrect limits key (lims/pos_lims)
    assert_raises(KeyError, plot_source_estimates, stc, colormap='mne',
                  clim=dict(kind='value', lims=(5, 10, 15)),
                  subjects_dir=subjects_dir)
    assert_raises(KeyError, plot_source_estimates, stc, colormap='hot',
                  clim=dict(kind='value', pos_lims=(5, 10, 15)),
                  subjects_dir=subjects_dir)

    # Test for correct clim values
    assert_raises(ValueError, stc.plot,
                  clim=dict(kind='value', pos_lims=[0, 1, 0]),
                  subjects_dir=subjects_dir)
    assert_raises(ValueError, stc.plot, colormap='mne',
                  clim=dict(pos_lims=(5, 10, 15, 20)),
                  subjects_dir=subjects_dir)
    assert_raises(ValueError, stc.plot,
                  clim=dict(pos_lims=(5, 10, 15), kind='foo'),
                  subjects_dir=subjects_dir)
    assert_raises(ValueError, stc.plot, colormap='mne', clim='foo',
                  subjects_dir=subjects_dir)
    assert_raises(ValueError, stc.plot, clim=(5, 10, 15),
                  subjects_dir=subjects_dir)
    assert_raises(ValueError, plot_source_estimates, 'foo', clim='auto',
                  subjects_dir=subjects_dir)
    assert_raises(ValueError, stc.plot, hemi='foo', clim='auto',
                  subjects_dir=subjects_dir)

    # Test handling of degenerate data
    stc.plot(clim=dict(kind='value', lims=[0, 0, 1]),
             subjects_dir=subjects_dir)  # ok
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        # thresholded maps
        stc._data.fill(1.)
        plot_source_estimates(stc, subjects_dir=subjects_dir)
        assert_equal(len(w), 0)
        stc._data[0].fill(0.)
        plot_source_estimates(stc, subjects_dir=subjects_dir)
        assert_equal(len(w), 0)
        stc._data.fill(0.)
        plot_source_estimates(stc, subjects_dir=subjects_dir)
        assert_equal(len(w), 1)
    mlab.close()
Example #40
def test_apply_forward():
    """Test projection of source space data to sensor space."""
    start = 0
    stop = 5
    n_times = stop - start - 1
    sfreq = 10.0
    t_start = 0.123

    fwd = read_forward_solution(fname_meeg)
    fwd = convert_forward_solution(fwd,
                                   surf_ori=True,
                                   force_fixed=True,
                                   use_cps=True)
    fwd = pick_types_forward(fwd, meg=True)
    assert isinstance(fwd, Forward)

    vertno = [fwd['src'][0]['vertno'], fwd['src'][1]['vertno']]
    stc_data = np.ones((len(vertno[0]) + len(vertno[1]), n_times))
    stc = SourceEstimate(stc_data, vertno, tmin=t_start, tstep=1.0 / sfreq)

    gain_sum = np.sum(fwd['sol']['data'], axis=1)

    # Evoked
    evoked = read_evokeds(fname_evoked, condition=0)
    evoked.pick_types(meg=True)
    with pytest.warns(RuntimeWarning, match='only .* positive values'):
        evoked = apply_forward(fwd, stc, evoked.info, start=start, stop=stop)
    data = evoked.data
    times = evoked.times

    # do some tests
    assert_array_almost_equal(evoked.info['sfreq'], sfreq)
    assert_array_almost_equal(np.sum(data, axis=1), n_times * gain_sum)
    assert_array_almost_equal(times[0], t_start)
    assert_array_almost_equal(times[-1], t_start + (n_times - 1) / sfreq)

    # vector
    stc_vec = VectorSourceEstimate(
        fwd['source_nn'][:, :, np.newaxis] * stc.data[:, np.newaxis],
        stc.vertices, stc.tmin, stc.tstep)
    with pytest.warns(RuntimeWarning, match='very large'):
        evoked_2 = apply_forward(fwd, stc_vec, evoked.info)
    assert np.abs(evoked_2.data).mean() > 1e-5
    assert_allclose(evoked.data, evoked_2.data, atol=1e-10)

    # Raw
    with pytest.warns(RuntimeWarning, match='only .* positive values'):
        raw_proj = apply_forward_raw(fwd,
                                     stc,
                                     evoked.info,
                                     start=start,
                                     stop=stop)
    data, times = raw_proj[:, :]

    # do some tests
    assert_array_almost_equal(raw_proj.info['sfreq'], sfreq)
    assert_array_almost_equal(np.sum(data, axis=1), n_times * gain_sum)
    atol = 1. / sfreq
    assert_allclose(raw_proj.first_samp / sfreq, t_start, atol=atol)
    assert_allclose(raw_proj.last_samp / sfreq,
                    t_start + (n_times - 1) / sfreq,
                    atol=atol)
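# Toy illustration of why np.sum(data, axis=1) == n_times * gain_sum above:
# with all-ones source data, every sensor sample equals the row-sum of the
# gain matrix. The numbers below are made up.
import numpy as np

gain = np.array([[1., 2.], [3., 4.]])  # (n_sensors, n_sources)
stc_data = np.ones((2, 3))             # (n_sources, n_times)
sens = gain @ stc_data                 # each column equals gain.sum(axis=1)
print(np.sum(sens, axis=1))            # [ 9. 21.] == 3 * gain.sum(axis=1)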
#    Build a convenient representation where each cluster becomes a "time
#    point" in the SourceEstimate
data = np.zeros((n_vertices_fsave, n_times))
data_summary = np.zeros((n_vertices_fsave, len(good_cluster_inds) + 1))
for ii, cluster_ind in enumerate(good_cluster_inds):
    data.fill(0)
    v_inds = clusters[cluster_ind][1]
    t_inds = clusters[cluster_ind][0]
    data[v_inds, t_inds] = T_obs[t_inds, v_inds]
    # Store a nice visualization of the cluster by summing across time (in ms)
    data = np.sign(data) * np.logical_not(data == 0) * tstep
    data_summary[:, ii + 1] = 1e3 * np.sum(data, axis=1)

#    Make the first "time point" a sum across all clusters for easy
#    visualization
data_summary[:, 0] = np.sum(data_summary, axis=1)
stc_all_cluster_vis = SourceEstimate(data_summary, fsave_vertices, tmin=0,
                                     tstep=1e-3)

#    Let's actually plot the first "time point" in the SourceEstimate, which
#    shows all the clusters, weighted by duration
colormap = mne_analyze_colormap(limits=[0, 10, 50])
subjects_dir = op.join(data_path, 'subjects')
# blue blobs are for condition A < condition B, red for A > B
brain = stc_all_cluster_vis.plot('fsaverage', 'inflated', 'rh', colormap,
                                 subjects_dir=subjects_dir,
                                 time_label='Duration significant (ms)')
brain.set_data_time_index(0)
# The colormap requires brain data to be scaled -fmax -> fmax
brain.scale_data_colormap(fmin=-50, fmid=0, fmax=50, transparent=False)
brain.show_view('lateral')
brain.save_image('clusters.png')
def test_transform():
    """Test applying linear (time) transform to data"""
    # make up some data
    n_sensors, n_verts_lh, n_verts_rh, n_times = 10, 10, 10, 10
    vertices = [np.arange(n_verts_lh), n_verts_lh + np.arange(n_verts_rh)]
    data = np.random.randn(n_verts_lh + n_verts_rh, n_times)
    stc = SourceEstimate(data, vertices=vertices, tmin=-0.1, tstep=0.1)

    # data_t.ndim > 2 & copy is True
    stcs_t = stc.transform(_my_trans, copy=True)
    assert_true(isinstance(stcs_t, list))
    assert_array_equal(stc.times, stcs_t[0].times)
    assert_equal(stc.vertno, stcs_t[0].vertno)

    data = np.concatenate(
        (stcs_t[0].data[:, :, None], stcs_t[1].data[:, :, None]), axis=2)
    data_t = stc.transform_data(_my_trans)
    assert_array_equal(data, data_t)  # check against stc.transform_data()

    # data_t.ndim > 2 & copy is False
    assert_raises(ValueError, stc.transform, _my_trans, copy=False)

    # data_t.ndim = 2 & copy is True
    tmp = deepcopy(stc)
    stc_t = stc.transform(np.abs, copy=True)
    assert_true(isinstance(stc_t, SourceEstimate))
    assert_array_equal(stc.data, tmp.data)  # xfrm doesn't modify original?

    # data_t.ndim = 2 & copy is False
    times = np.round(1000 * stc.times)
    verts = np.arange(len(stc.lh_vertno),
                      len(stc.lh_vertno) + len(stc.rh_vertno), 1)
    verts_rh = stc.rh_vertno
    t_idx = [np.where(times >= -50)[0][0], np.where(times <= 500)[0][-1]]
    data_t = stc.transform_data(np.abs,
                                idx=verts,
                                tmin_idx=t_idx[0],
                                tmax_idx=t_idx[-1])
    stc.transform(np.abs, idx=verts, tmin=-50, tmax=500, copy=False)
    assert_true(isinstance(stc, SourceEstimate))
    assert_true((stc.tmin == 0.) & (stc.times[-1] == 0.5))
    assert_true(len(stc.vertno[0]) == 0)
    assert_equal(stc.vertno[1], verts_rh)
    assert_array_equal(stc.data, data_t)

    times = np.round(1000 * stc.times)
    t_idx = [np.where(times >= 0)[0][0], np.where(times <= 250)[0][-1]]
    data_t = stc.transform_data(np.abs, tmin_idx=t_idx[0], tmax_idx=t_idx[-1])
    stc.transform(np.abs, tmin=0, tmax=250, copy=False)
    assert_true((stc.tmin == 0.) & (stc.times[-1] == 0.2))
    assert_array_equal(stc.data, data_t)
def test_morphed_source_space_return():
    """Test returning a morphed source space to the original subject."""
    # let's create some random data on fsaverage
    data = rng.randn(20484, 1)
    tmin, tstep = 0, 1.
    src_fs = read_source_spaces(fname_fs)
    stc_fs = SourceEstimate(data, [s['vertno'] for s in src_fs],
                            tmin, tstep, 'fsaverage')
    n_verts_fs = sum(len(s['vertno']) for s in src_fs)

    # Create our morph source space
    src_morph = morph_source_spaces(src_fs, 'sample',
                                    subjects_dir=subjects_dir)
    n_verts_sample = sum(len(s['vertno']) for s in src_morph)
    assert n_verts_fs == n_verts_sample

    # Morph the data over using standard methods
    stc_morph = compute_source_morph(
        src_fs, 'fsaverage', 'sample',
        spacing=[s['vertno'] for s in src_morph], smooth=1,
        subjects_dir=subjects_dir, warn=False).apply(stc_fs)
    assert stc_morph.data.shape[0] == n_verts_sample

    # We can now pretend like this was real data we got e.g. from an inverse.
    # To be complete, let's remove some vertices
    keeps = [np.sort(rng.permutation(np.arange(len(v)))[:len(v) - 10])
             for v in stc_morph.vertices]
    stc_morph = SourceEstimate(
        np.concatenate([stc_morph.lh_data[keeps[0]],
                        stc_morph.rh_data[keeps[1]]]),
        [v[k] for v, k in zip(stc_morph.vertices, keeps)], tmin, tstep,
        'sample')

    # Return it to the original subject
    stc_morph_return = stc_morph.to_original_src(
        src_fs, subjects_dir=subjects_dir)

    # This should fail (has too many verts in SourceMorph)
    with pytest.warns(RuntimeWarning, match='vertices not included'):
        morph = compute_source_morph(
            src_morph, subject_from='sample',
            spacing=stc_morph_return.vertices, smooth=1,
            subjects_dir=subjects_dir)
    with pytest.raises(ValueError, match='vertices do not match'):
        morph.apply(stc_morph)

    # Compare to the original data
    with pytest.warns(RuntimeWarning, match='vertices not included'):
        stc_morph_morph = compute_source_morph(
            src=stc_morph, subject_from='sample',
            spacing=stc_morph_return.vertices, smooth=1,
            subjects_dir=subjects_dir).apply(stc_morph)

    assert_equal(stc_morph_return.subject, stc_morph_morph.subject)
    for ii in range(2):
        assert_array_equal(stc_morph_return.vertices[ii],
                           stc_morph_morph.vertices[ii])
    # These will not match perfectly because morphing pushes data around
    corr = np.corrcoef(stc_morph_return.data[:, 0],
                       stc_morph_morph.data[:, 0])[0, 1]
    assert corr > 0.99, corr

    # Explicitly test having two vertices map to the same target vertex. We
    # simulate this by having two vertices be at the same position.
    src_fs2 = src_fs.copy()
    vert1, vert2 = src_fs2[0]['vertno'][:2]
    src_fs2[0]['rr'][vert1] = src_fs2[0]['rr'][vert2]
    stc_morph_return = stc_morph.to_original_src(
        src_fs2, subjects_dir=subjects_dir)

    # test to_original_src method result equality
    for ii in range(2):
        assert_array_equal(stc_morph_return.vertices[ii],
                           stc_morph_morph.vertices[ii])

    # These will not match perfectly because morphing pushes data around
    corr = np.corrcoef(stc_morph_return.data[:, 0],
                       stc_morph_morph.data[:, 0])[0, 1]
    assert corr > 0.99, corr

    # Degenerate cases
    stc_morph.subject = None  # no .subject provided
    pytest.raises(ValueError, stc_morph.to_original_src,
                  src_fs, subject_orig='fsaverage', subjects_dir=subjects_dir)
    stc_morph.subject = 'sample'
    del src_fs[0]['subject_his_id']  # no name in src_fsaverage
    pytest.raises(ValueError, stc_morph.to_original_src,
                  src_fs, subjects_dir=subjects_dir)
    src_fs[0]['subject_his_id'] = 'fsaverage'  # name mismatch
    pytest.raises(ValueError, stc_morph.to_original_src,
                  src_fs, subject_orig='foo', subjects_dir=subjects_dir)
    src_fs[0]['subject_his_id'] = 'sample'
    src = read_source_spaces(fname)  # wrong source space
    pytest.raises(RuntimeError, stc_morph.to_original_src,
                  src, subjects_dir=subjects_dir)
Example #44
def plot_visualize_mft_sources(fwdmag, stcdata, tmin, tstep, subject,
                               subjects_dir):
    """
    Plot the MFT sources at the time point of the peak.

    Parameters
    ----------
    fwdmag : forward solution
    stcdata : stc with ||cdv|| (point sequence as in fwdmag['source_rr'])
    tmin, tstep, subject : passed to mne.SourceEstimate()
    """
    print("##### Attempting to plot:")
    # cf. decoding/plot_decoding_spatio_temporal_source.py
    vertices = [s['vertno'] for s in fwdmag['src']]
    if len(vertices) == 1:
        vertices = [
            fwdmag['src'][0]['vertno']
            [fwdmag['src'][0]['rr'][fwdmag['src'][0]['vertno']][:, 0] <= -0.],
            fwdmag['src'][0]['vertno'][
                fwdmag['src'][0]['rr'][fwdmag['src'][0]['vertno']][:, 0] > -0.]
        ]
    elif len(vertices) > 2:
        warnings.warn(
            'plot_visualize_mft_sources(): Cannot handle more than two source spaces'
        )
        return

    stc_feat = SourceEstimate(stcdata,
                              vertices=vertices,
                              tmin=tmin,
                              tstep=tstep,
                              subject=subject)
    itmaxsum = np.argmax(np.sum(stcdata, axis=0))
    twmin = tmin + tstep * float(itmaxsum - stcdata.shape[1] / 20)
    twmax = tmin + tstep * float(itmaxsum + stcdata.shape[1] / 20)
    for ihemi, hemi in enumerate(['lh', 'rh', 'both']):
        brain = stc_feat.plot(surface='white',
                              hemi=hemi,
                              subjects_dir=subjects_dir,
                              transparent=True,
                              clim='auto')
        # use peak getter to move visualization to the time point of the peak
        print("Restricting peak search to [%fs, %fs]" % (twmin, twmax))
        if hemi == 'both':
            brain.show_view('parietal')
            vertno_max, time_idx = stc_feat.get_peak(hemi=None,
                                                     time_as_index=True,
                                                     tmin=twmin,
                                                     tmax=twmax)
        else:
            brain.show_view('lateral')
            vertno_max, time_idx = stc_feat.get_peak(hemi=hemi,
                                                     time_as_index=True,
                                                     tmin=twmin,
                                                     tmax=twmax)
        print("hemi=%s: setting time_idx=%d" % (hemi, time_idx))
        brain.set_data_time_index(time_idx)
        if hemi == 'lh' or hemi == 'rh':
            # draw marker at maximum peaking vertex
            brain.add_foci(vertno_max,
                           coords_as_verts=True,
                           hemi=hemi,
                           color='blue',
                           scale_factor=0.6)

        if len(fwdmag['src']) > ihemi:
            fwds = fwdmag['src'][ihemi]
            comax = fwds['rr'][vertno_max]
            print("hemi=%s: vertno_max=%d, time_idx=%d fwdmag['src'][%d]['rr'][vertno_max] = " % \
                  (hemi, vertno_max, time_idx, ihemi), comax)

            offsets = np.append([0], [s['nuse'] for s in fwdmag['src']])
            if hemi == 'lh':
                ifoci = [
                    np.nonzero([
                        stcdata[0:offsets[1], time_idx] >=
                        0.25 * np.max(stcdata[:, time_idx])
                    ][0])
                ]
            elif len(fwdmag['src']) > 1:
                ifoci = [
                    np.nonzero([
                        stcdata[offsets[1]:, time_idx] >=
                        0.25 * np.max(stcdata[:, time_idx])
                    ][0])
                ]
            vfoci = fwds['vertno'][ifoci[0][0]]
            cfoci = fwds['rr'][vfoci]
            print("Coords  of %d sel. vfoci: " % cfoci.shape[0])
            print(cfoci)
            print("vfoci: ")
            print(vfoci)
            print("brain.geo[%s].coords[vfoci] : " % hemi)
            print(brain.geo[hemi].coords[vfoci])

            mrfoci = np.zeros(cfoci.shape)
            invmri_head_t = invert_transform(fwdmag['info']['mri_head_t'])
            mrfoci = apply_trans(invmri_head_t['trans'], cfoci, move=True)
            print("mrfoci: ")
            print(mrfoci)

            # Just some blops along the coordinate axis:
            # This will not yield reasonable results w an inflated brain.
            # bloblist = np.zeros((300,3))
            # for i in xrange(100):
            #    bloblist[i,0] = float(i)
            #    bloblist[i+100,1] = float(i)
            #    bloblist[i+200,2] = float(i)
            # mrblobs = apply_trans(invmri_head_t['trans'], bloblist, move=True)
            # brain.add_foci(mrblobs, coords_as_verts=False, hemi=hemi, color='yellow', scale_factor=0.3)
        brain.save_image('testfig_map_%s.png' % hemi)
        brain.close()