Example #1
def test_stc_mpl():
    """Test plotting source estimates with matplotlib."""
    sample_src = read_source_spaces(src_fname)

    vertices = [s['vertno'] for s in sample_src]
    n_time = 5
    n_verts = sum(len(v) for v in vertices)
    stc_data = np.ones((n_verts * n_time))
    stc_data.shape = (n_verts, n_time)
    stc = SourceEstimate(stc_data, vertices, 1, 1, 'sample')
    with pytest.warns(RuntimeWarning, match='not included'):
        stc.plot(subjects_dir=subjects_dir, time_unit='s', views='ven',
                 hemi='rh', smoothing_steps=2, subject='sample',
                 backend='matplotlib', spacing='oct1', initial_time=0.001,
                 colormap='Reds')
        fig = stc.plot(subjects_dir=subjects_dir, time_unit='ms', views='dor',
                       hemi='lh', smoothing_steps=2, subject='sample',
                       backend='matplotlib', spacing='ico2', time_viewer=True,
                       colormap='mne')
        time_viewer = fig.time_viewer
        _fake_click(time_viewer, time_viewer.axes[0], (0.5, 0.5))  # change t
        time_viewer.canvas.key_press_event('ctrl+right')
        time_viewer.canvas.key_press_event('left')
    pytest.raises(ValueError, stc.plot, subjects_dir=subjects_dir,
                  hemi='both', subject='sample', backend='matplotlib')
    pytest.raises(ValueError, stc.plot, subjects_dir=subjects_dir,
                  time_unit='ss', subject='sample', backend='matplotlib')
    plt.close('all')
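# A minimal, self-contained sketch of the pattern exercised above (not part of
# the test itself): build a SourceEstimate on top of an existing source space
# and plot it with the matplotlib backend. It assumes the standard MNE sample
# dataset layout and a recent MNE where data_path() returns a pathlib.Path.
import numpy as np
import mne

data_path = mne.datasets.sample.data_path()
subjects_dir = data_path / 'subjects'
src = mne.read_source_spaces(
    subjects_dir / 'sample' / 'bem' / 'sample-oct-6-src.fif')
vertices = [s['vertno'] for s in src]          # [lh_vertno, rh_vertno]
n_verts = sum(len(v) for v in vertices)
data = np.random.rand(n_verts, 10)             # (n_sources, n_times)
stc = mne.SourceEstimate(data, vertices, tmin=0., tstep=0.01, subject='sample')
stc.plot(subjects_dir=subjects_dir, subject='sample', hemi='lh', views='lat',
         backend='matplotlib', spacing='oct1', initial_time=0.05)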
Example #2
def test_stc_mpl():
    """Test plotting source estimates with matplotlib."""
    import matplotlib.pyplot as plt
    sample_src = read_source_spaces(src_fname)

    vertices = [s['vertno'] for s in sample_src]
    n_time = 5
    n_verts = sum(len(v) for v in vertices)
    stc_data = np.ones((n_verts * n_time))
    stc_data.shape = (n_verts, n_time)
    stc = SourceEstimate(stc_data, vertices, 1, 1, 'sample')
    with warnings.catch_warnings(record=True):  # vertices not included
        stc.plot(subjects_dir=subjects_dir, time_unit='s', views='ven',
                 hemi='rh', smoothing_steps=2, subject='sample',
                 backend='matplotlib', spacing='oct1', initial_time=0.001,
                 colormap='Reds')
        fig = stc.plot(subjects_dir=subjects_dir, time_unit='ms', views='dor',
                       hemi='lh', smoothing_steps=2, subject='sample',
                       backend='matplotlib', spacing='ico2', time_viewer=True)
        time_viewer = fig.time_viewer
        _fake_click(time_viewer, time_viewer.axes[0], (0.5, 0.5))  # change t
        time_viewer.canvas.key_press_event('ctrl+right')
        time_viewer.canvas.key_press_event('left')
    assert_raises(ValueError, stc.plot, subjects_dir=subjects_dir,
                  hemi='both', subject='sample', backend='matplotlib')
    assert_raises(ValueError, stc.plot, subjects_dir=subjects_dir,
                  time_unit='ss', subject='sample', backend='matplotlib')
    plt.close('all')
Example #3
def test_single_hemi(hemi, renderer_interactive_pyvistaqt, brain_gc):
    """Test single hemi support."""
    stc = read_source_estimate(fname_stc)
    idx, order = (0, 1) if hemi == 'lh' else (1, -1)
    stc = SourceEstimate(getattr(stc, f'{hemi}_data'),
                         [stc.vertices[idx], []][::order], 0, 1, 'sample')
    brain = stc.plot(subjects_dir=subjects_dir, hemi='both', size=300)
    brain.close()

    # test skipping when len(vertices) == 0
    stc.vertices[1 - idx] = np.array([])
    brain = stc.plot(subjects_dir=subjects_dir, hemi=hemi, size=300)
    brain.close()
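# The test above relies on the convention that a surface SourceEstimate always
# takes a two-element vertices list [lh, rh]; a hemisphere with no sources is
# given an empty array. A minimal sketch (values are illustrative only):
import numpy as np
import mne

rh_vertno = np.array([0, 1, 2])            # right-hemisphere source vertices
data = np.random.rand(len(rh_vertno), 4)   # (n_sources, n_times)
stc_rh = mne.SourceEstimate(data, [np.array([], dtype=int), rh_vertno],
                            tmin=0., tstep=0.1, subject='sample')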
Example #4
def test_limits_to_control_points():
    """Test functionality for determing control points
    """
    sample_src = read_source_spaces(src_fname)

    vertices = [s['vertno'] for s in sample_src]
    n_time = 5
    n_verts = sum(len(v) for v in vertices)
    stc_data = np.random.rand((n_verts * n_time))
    stc_data.shape = (n_verts, n_time)
    stc = SourceEstimate(stc_data, vertices, 1, 1, 'sample')

    # Test for simple use cases
    from mayavi import mlab
    mlab.close()
    stc.plot(clim='auto', subjects_dir=subjects_dir)
    stc.plot(clim=dict(pos_lims=(10, 50, 90)), subjects_dir=subjects_dir)
    stc.plot(clim=dict(kind='value', lims=(10, 50, 90)), figure=99,
             subjects_dir=subjects_dir)
    with warnings.catch_warnings(record=True):  # dep
        stc.plot(fmin=1, subjects_dir=subjects_dir)
    stc.plot(colormap='hot', clim='auto', subjects_dir=subjects_dir)
    stc.plot(colormap='mne', clim='auto', subjects_dir=subjects_dir)
    figs = [mlab.figure(), mlab.figure()]
    assert_raises(RuntimeError, stc.plot, clim='auto', figure=figs)

    # Test both types of incorrect limits key (lims/pos_lims)
    assert_raises(KeyError, plot_source_estimates, stc, colormap='mne',
                  clim=dict(kind='value', lims=(5, 10, 15)))
    assert_raises(KeyError, plot_source_estimates, stc, colormap='hot',
                  clim=dict(kind='value', pos_lims=(5, 10, 15)))

    # Test for correct clim values
    colormap = 'mne'
    assert_raises(ValueError, stc.plot, colormap=colormap,
                  clim=dict(pos_lims=(5, 10, 15, 20)))
    assert_raises(ValueError, stc.plot, colormap=colormap,
                  clim=dict(pos_lims=(5, 10, 15), kind='foo'))
    assert_raises(ValueError, stc.plot, colormap=colormap,
                  clim=dict(kind='value', pos_lims=(5, 10, 15)), fmin=1)
    assert_raises(ValueError, stc.plot, colormap=colormap, clim='foo')
    assert_raises(ValueError, stc.plot, colormap=colormap, clim=(5, 10, 15))
    assert_raises(ValueError, plot_source_estimates, 'foo', clim='auto')
    assert_raises(ValueError, stc.plot, hemi='foo', clim='auto')

    # Test that stc.data contains enough unique values to use percentages
    clim = 'auto'
    stc._data = np.zeros_like(stc.data)
    assert_raises(ValueError, plot_source_estimates, stc,
                  colormap=colormap, clim=clim)
    mlab.close()
Example #5
def tiny(tmpdir):
    """Create a tiny fake brain."""
    # This is a minimal version of what we need for our viz-with-timeviewer
    # support currently
    subject = 'test'
    subject_dir = tmpdir.mkdir(subject)
    surf_dir = subject_dir.mkdir('surf')
    rng = np.random.RandomState(0)
    rr = rng.randn(4, 3)
    tris = np.array([[0, 1, 2], [2, 1, 3]])
    curv = rng.randn(len(rr))
    with open(surf_dir.join('lh.curv'), 'wb') as fid:
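        # FreeSurfer "new" curv format (assumed here): three 0xFF magic bytes,
        # then n_vertices, n_faces and vals_per_vertex as big-endian int32,
        # followed by one big-endian float32 value per vertex.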
        fid.write(np.array([255, 255, 255], dtype=np.uint8))
        fid.write(np.array([len(rr), 0, 1], dtype='>i4'))
        fid.write(curv.astype('>f4'))
    write_surface(surf_dir.join('lh.white'), rr, tris)
    write_surface(surf_dir.join('rh.white'), rr, tris)  # needed for vertex tc
    vertices = [np.arange(len(rr)), []]
    data = rng.randn(len(rr), 10)
    stc = SourceEstimate(data, vertices, 0, 1, subject)
    brain = stc.plot(subjects_dir=tmpdir,
                     hemi='lh',
                     surface='white',
                     size=_TINY_SIZE)
    # in principle this should be sufficient:
    #
    # ratio = brain.mpl_canvas.canvas.window().devicePixelRatio()
    #
    # but in practice VTK can mess up sizes, so let's just calculate it.
    sz = brain.plotter.size()
    sz = (sz.width(), sz.height())
    sz_ren = brain.plotter.renderer.GetSize()
    ratio = np.median(np.array(sz_ren) / np.array(sz))
    return brain, ratio
Example #6
def test_limits_to_control_points():
    """Test functionality for determing control points."""
    sample_src = read_source_spaces(src_fname)
    kwargs = dict(subjects_dir=subjects_dir, smoothing_steps=1)

    vertices = [s['vertno'] for s in sample_src]
    n_time = 5
    n_verts = sum(len(v) for v in vertices)
    stc_data = np.random.RandomState(0).rand((n_verts * n_time))
    stc_data.shape = (n_verts, n_time)
    stc = SourceEstimate(stc_data, vertices, 1, 1, 'sample')

    # Test for simple use cases
    mlab = _import_mlab()
    stc.plot(**kwargs)
    stc.plot(clim=dict(pos_lims=(10, 50, 90)), **kwargs)
    stc.plot(colormap='hot', clim='auto', **kwargs)
    stc.plot(colormap='mne', clim='auto', **kwargs)
    figs = [mlab.figure(), mlab.figure()]
    stc.plot(clim=dict(kind='value', lims=(10, 50, 90)), figure=99, **kwargs)
    assert_raises(ValueError, stc.plot, clim='auto', figure=figs, **kwargs)

    # Test both types of incorrect limits key (lims/pos_lims)
    assert_raises(KeyError, plot_source_estimates, stc, colormap='mne',
                  clim=dict(kind='value', lims=(5, 10, 15)), **kwargs)
    assert_raises(KeyError, plot_source_estimates, stc, colormap='hot',
                  clim=dict(kind='value', pos_lims=(5, 10, 15)), **kwargs)

    # Test for correct clim values
    assert_raises(ValueError, stc.plot,
                  clim=dict(kind='value', pos_lims=[0, 1, 0]), **kwargs)
    assert_raises(ValueError, stc.plot, colormap='mne',
                  clim=dict(pos_lims=(5, 10, 15, 20)), **kwargs)
    assert_raises(ValueError, stc.plot,
                  clim=dict(pos_lims=(5, 10, 15), kind='foo'), **kwargs)
    assert_raises(ValueError, stc.plot, colormap='mne', clim='foo', **kwargs)
    assert_raises(ValueError, stc.plot, clim=(5, 10, 15), **kwargs)
    assert_raises(ValueError, plot_source_estimates, 'foo', clim='auto',
                  **kwargs)
    assert_raises(ValueError, stc.plot, hemi='foo', clim='auto', **kwargs)

    # Test handling of degenerate data
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        # thresholded maps
        stc._data.fill(0.)
        plot_source_estimates(stc, **kwargs)
        assert_equal(len(w), 1)
    mlab.close(all=True)
Example #7
def test_limits_to_control_points():
    """Test functionality for determing control points."""
    sample_src = read_source_spaces(src_fname)
    kwargs = dict(subjects_dir=subjects_dir, smoothing_steps=1)

    vertices = [s['vertno'] for s in sample_src]
    n_time = 5
    n_verts = sum(len(v) for v in vertices)
    stc_data = np.random.RandomState(0).rand((n_verts * n_time))
    stc_data.shape = (n_verts, n_time)
    stc = SourceEstimate(stc_data, vertices, 1, 1, 'sample')

    # Test for simple use cases
    mlab = _import_mlab()
    stc.plot(**kwargs)
    stc.plot(clim=dict(pos_lims=(10, 50, 90)), **kwargs)
    stc.plot(colormap='hot', clim='auto', **kwargs)
    stc.plot(colormap='mne', clim='auto', **kwargs)
    figs = [mlab.figure(), mlab.figure()]
    stc.plot(clim=dict(kind='value', lims=(10, 50, 90)), figure=99, **kwargs)
    assert_raises(ValueError, stc.plot, clim='auto', figure=figs, **kwargs)

    # Test both types of incorrect limits key (lims/pos_lims)
    assert_raises(KeyError, plot_source_estimates, stc, colormap='mne',
                  clim=dict(kind='value', lims=(5, 10, 15)), **kwargs)
    assert_raises(KeyError, plot_source_estimates, stc, colormap='hot',
                  clim=dict(kind='value', pos_lims=(5, 10, 15)), **kwargs)

    # Test for correct clim values
    assert_raises(ValueError, stc.plot,
                  clim=dict(kind='value', pos_lims=[0, 1, 0]), **kwargs)
    assert_raises(ValueError, stc.plot, colormap='mne',
                  clim=dict(pos_lims=(5, 10, 15, 20)), **kwargs)
    assert_raises(ValueError, stc.plot,
                  clim=dict(pos_lims=(5, 10, 15), kind='foo'), **kwargs)
    assert_raises(ValueError, stc.plot, colormap='mne', clim='foo', **kwargs)
    assert_raises(ValueError, stc.plot, clim=(5, 10, 15), **kwargs)
    assert_raises(ValueError, plot_source_estimates, 'foo', clim='auto',
                  **kwargs)
    assert_raises(ValueError, stc.plot, hemi='foo', clim='auto', **kwargs)

    # Test handling of degenerate data
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        # thresholded maps
        stc._data.fill(0.)
        plot_source_estimates(stc, **kwargs)
        assert any('All data were zero' in str(ww.message) for ww in w)
    mlab.close(all=True)
Example #8
def test_limits_to_control_points():
    """Test functionality for determining control points."""
    sample_src = read_source_spaces(src_fname)
    kwargs = dict(subjects_dir=subjects_dir, smoothing_steps=1)

    vertices = [s['vertno'] for s in sample_src]
    n_time = 5
    n_verts = sum(len(v) for v in vertices)
    stc_data = np.random.RandomState(0).rand((n_verts * n_time))
    stc_data.shape = (n_verts, n_time)
    stc = SourceEstimate(stc_data, vertices, 1, 1, 'sample')

    # Test for simple use cases
    mlab = _import_mlab()
    stc.plot(**kwargs)
    stc.plot(clim=dict(pos_lims=(10, 50, 90)), **kwargs)
    stc.plot(colormap='hot', clim='auto', **kwargs)
    stc.plot(colormap='mne', clim='auto', **kwargs)
    figs = [mlab.figure(), mlab.figure()]
    stc.plot(clim=dict(kind='value', lims=(10, 50, 90)), figure=99, **kwargs)
    pytest.raises(ValueError, stc.plot, clim='auto', figure=figs, **kwargs)

    # Test for correct clim values
    with pytest.raises(ValueError, match='monotonically'):
        stc.plot(clim=dict(kind='value', pos_lims=[0, 1, 0]), **kwargs)
    with pytest.raises(ValueError, match=r'.*must be \(3,\)'):
        stc.plot(colormap='mne', clim=dict(pos_lims=(5, 10, 15, 20)), **kwargs)
    with pytest.raises(ValueError, match="'value', 'values' and 'percent'"):
        stc.plot(clim=dict(pos_lims=(5, 10, 15), kind='foo'), **kwargs)
    with pytest.raises(ValueError, match='must be "auto" or dict'):
        stc.plot(colormap='mne', clim='foo', **kwargs)
    with pytest.raises(TypeError, match='must be an instance of'):
        plot_source_estimates('foo', clim='auto', **kwargs)
    with pytest.raises(ValueError, match='hemi'):
        stc.plot(hemi='foo', clim='auto', **kwargs)
    with pytest.raises(ValueError, match='Exactly one'):
        stc.plot(clim=dict(lims=[0, 1, 2], pos_lims=[0, 1, 2], kind='value'),
                 **kwargs)

    # Test handling of degenerate data: thresholded maps
    stc._data.fill(0.)
    with pytest.warns(RuntimeWarning, match='All data were zero'):
        plot_source_estimates(stc, **kwargs)
    mlab.close(all=True)
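# Sketch of the clim dict forms exercised above (values are illustrative):
# exactly one of 'lims' (sequential colormap) or 'pos_lims' (diverging
# colormap, mirrored around zero) may be given, and 'kind' is 'value' or
# 'percent'.
clim_seq = dict(kind='value', lims=[2., 4., 8.])
clim_div = dict(kind='value', pos_lims=[2., 4., 8.])
clim_pct = dict(kind='percent', lims=[96, 97.5, 99.95])
# e.g. stc.plot(clim=clim_div, colormap='mne', **kwargs)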
Example #9
tstep = stc.tstep
for ii, cluster_ind in enumerate(good_cluster_inds):
    data.fill(0)
    v_inds = clusters[cluster_ind][1]
    t_inds = clusters[cluster_ind][0]
    data[v_inds, t_inds] = T_obs[t_inds, v_inds]
    # Store a nice visualization of the cluster by summing across time (in ms)
    data = np.sign(data) * np.logical_not(data == 0) * tstep
    data_summary[:, ii + 1] = 1e3 * np.sum(data, axis=1)

#    Make the first "time point" a sum across all clusters for easy
#    visualization
data_summary[:, 0] = np.sum(data_summary, axis=1)
fsave_vertices = [np.arange(10242), np.arange(10242)]
stc_all_cluster_vis = SourceEstimate(data_summary, fsave_vertices, tmin=0,
                                     tstep=1e-3, subject='fsaverage')

#    Let's actually plot the first "time point" in the SourceEstimate, which
#    shows all the clusters, weighted by duration
subjects_dir = op.join(data_path, 'subjects')
# blue blobs are for condition A != condition B
brains = stc_all_cluster_vis.plot('fsaverage', 'inflated', 'both',
                                  subjects_dir=subjects_dir,
                                  time_label='Duration significant (ms)',
                                  fmin=0, fmid=25, fmax=50)
for idx, brain in enumerate(brains):
    brain.set_data_time_index(0)
    brain.scale_data_colormap(fmin=0, fmid=25, fmax=50, transparent=True)
    brain.show_view('lateral')
    brain.save_image('clusters-%s.png' % ('lh' if idx == 0 else 'rh'))
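# Context for the snippet above, not shown in it: T_obs, clusters and
# good_cluster_inds typically come from a spatio-temporal permutation cluster
# test. In this sketch, X (shape (n_subjects, n_times, n_vertices)), adjacency
# and t_threshold are placeholders assumed to be defined elsewhere; older MNE
# versions call the adjacency argument `connectivity`.
from mne.stats import spatio_temporal_cluster_1samp_test

T_obs, clusters, cluster_p_values, H0 = spatio_temporal_cluster_1samp_test(
    X, adjacency=adjacency, threshold=t_threshold)
good_cluster_inds = np.where(cluster_p_values < 0.05)[0]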
Example #10
brain.add_foci(coords=simulated_stc.lh_vertno,
               coords_as_verts=True,
               hemi='lh',
               map_surface=surf,
               color='red')
brain.add_foci(coords=simulated_stc.rh_vertno,
               coords_as_verts=True,
               hemi='rh',
               map_surface=surf,
               color='red')

# TODO: this outputs some vtk error, not sure why. It seems to work anyway

input('press enter to continue')
"""Visualize the inverse modeled data."""
vertices = [fwd_fixed['src'][0]['vertno'], fwd_fixed['src'][1]['vertno']]

stc = SourceEstimate(np.abs(source_data), vertices, tmin=0.0,
                     tstep=0.001)  # weighted
stc_orig = apply_inverse(evoked, inv, 1 / 9., inversion_method)  # original

stc.plot(subject=subject,
         subjects_dir=subjects_dir,
         hemi='both',
         time_viewer=True,
         colormap='mne',
         alpha=0.5,
         transparent=True)
"""Compute the parcel time-series."""
parcel_series = apply_weighting_evoked(evoked, fwd_fixed, inv, labels,
                                       inversion_method)

input('press enter to exit')
Example #11
n_sources = np.shape(source_data)[0]

print('Source level visualization. Close visualization windows to continue.')
"""Visualize the inverse modeled data."""
vertices = [fwd_fixed['src'][0]['vertno'], fwd_fixed['src'][1]['vertno']]

stc = SourceEstimate(np.abs(source_data), vertices, tmin=0.0,
                     tstep=0.001)  # weighted
stc_orig = apply_inverse(evoked, inv, 1 / 9., method)  # original

stc.plot(subject=subject,
         subjects_dir=subjects_dir,
         hemi='both',
         time_viewer=True,
         colormap='mne',
         alpha=0.5,
         transparent=True,
         views=['fro'],
         initial_time=0.150)

### Testing code for parcellation visualization.
# Assumes there are 150 time samples; take the sample at 150 ms. The parcel
# data is interleaved (lh, rh, lh, rh, ...), so sort it to all lh followed by
# all rh and drop the medial wall ('unknown-lh' and 'unknown-rh').
p_data_sorted = np.ravel(parcel_series[:, 150])
p_data_sorted = np.concatenate((p_data_sorted[0:-2:2], p_data_sorted[1:-2:2]))

# Parcel data visualization function.
import nibabel as nib


def plot_4_view(data1,
Example #12
def test_limits_to_control_points():
    """Test functionality for determing control points
    """
    sample_src = read_source_spaces(src_fname)

    vertices = [s['vertno'] for s in sample_src]
    n_time = 5
    n_verts = sum(len(v) for v in vertices)
    stc_data = np.random.RandomState(0).rand((n_verts * n_time))
    stc_data.shape = (n_verts, n_time)
    stc = SourceEstimate(stc_data, vertices, 1, 1, 'sample')

    # Test for simple use cases
    from mayavi import mlab
    stc.plot(subjects_dir=subjects_dir)
    stc.plot(clim=dict(pos_lims=(10, 50, 90)), subjects_dir=subjects_dir)
    stc.plot(clim=dict(kind='value', lims=(10, 50, 90)), figure=99,
             subjects_dir=subjects_dir)
    stc.plot(colormap='hot', clim='auto', subjects_dir=subjects_dir)
    stc.plot(colormap='mne', clim='auto', subjects_dir=subjects_dir)
    figs = [mlab.figure(), mlab.figure()]
    assert_raises(RuntimeError, stc.plot, clim='auto', figure=figs,
                  subjects_dir=subjects_dir)

    # Test both types of incorrect limits key (lims/pos_lims)
    assert_raises(KeyError, plot_source_estimates, stc, colormap='mne',
                  clim=dict(kind='value', lims=(5, 10, 15)),
                  subjects_dir=subjects_dir)
    assert_raises(KeyError, plot_source_estimates, stc, colormap='hot',
                  clim=dict(kind='value', pos_lims=(5, 10, 15)),
                  subjects_dir=subjects_dir)

    # Test for correct clim values
    assert_raises(ValueError, stc.plot,
                  clim=dict(kind='value', pos_lims=[0, 1, 0]),
                  subjects_dir=subjects_dir)
    assert_raises(ValueError, stc.plot, colormap='mne',
                  clim=dict(pos_lims=(5, 10, 15, 20)),
                  subjects_dir=subjects_dir)
    assert_raises(ValueError, stc.plot,
                  clim=dict(pos_lims=(5, 10, 15), kind='foo'),
                  subjects_dir=subjects_dir)
    assert_raises(ValueError, stc.plot, colormap='mne', clim='foo',
                  subjects_dir=subjects_dir)
    assert_raises(ValueError, stc.plot, clim=(5, 10, 15),
                  subjects_dir=subjects_dir)
    assert_raises(ValueError, plot_source_estimates, 'foo', clim='auto',
                  subjects_dir=subjects_dir)
    assert_raises(ValueError, stc.plot, hemi='foo', clim='auto',
                  subjects_dir=subjects_dir)

    # Test handling of degenerate data
    stc.plot(clim=dict(kind='value', lims=[0, 0, 1]),
             subjects_dir=subjects_dir)  # ok
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        # thresholded maps
        stc._data.fill(1.)
        plot_source_estimates(stc, subjects_dir=subjects_dir)
        assert_equal(len(w), 0)
        stc._data[0].fill(0.)
        plot_source_estimates(stc, subjects_dir=subjects_dir)
        assert_equal(len(w), 0)
        stc._data.fill(0.)
        plot_source_estimates(stc, subjects_dir=subjects_dir)
        assert_equal(len(w), 1)
    mlab.close()
Example #13
data = np.zeros((n_vertices_fsave, n_times))
data_summary = np.zeros((n_vertices_fsave, len(good_cluster_inds) + 1))
for ii, cluster_ind in enumerate(good_cluster_inds):
    data.fill(0)
    v_inds = clusters[cluster_ind][1]
    t_inds = clusters[cluster_ind][0]
    data[v_inds, t_inds] = T_obs[t_inds, v_inds]
    # Store a nice visualization of the cluster by summing across time (in ms)
    data = np.sign(data) * np.logical_not(data == 0) * tstep
    data_summary[:, ii + 1] = 1e3 * np.sum(data, axis=1)

#    Make the first "time point" a sum across all clusters for easy
#    visualization
data_summary[:, 0] = np.sum(data_summary, axis=1)
stc_all_cluster_vis = SourceEstimate(data_summary, fsave_vertices, tmin=0,
                                     tstep=1e-3)

#    Let's actually plot the first "time point" in the SourceEstimate, which
#    shows all the clusters, weighted by duration
colormap = mne_analyze_colormap(limits=[0, 10, 50])
subjects_dir = op.join(data_path, 'subjects')
# blue blobs are for condition A < condition B, red for A > B
brain = stc_all_cluster_vis.plot('fsaverage', 'inflated', 'rh', colormap,
                                 subjects_dir=subjects_dir,
                                 time_label='Duration significant (ms)')
brain.set_data_time_index(0)
# The colormap requires brain data to be scaled -fmax -> fmax
brain.scale_data_colormap(fmin=-50, fmid=0, fmax=50, transparent=False)
brain.show_view('lateral')
brain.save_image('clusters.png')
Example #14
def plot_visualize_mft_sources(fwdmag, stcdata, tmin, tstep,
                               subject, subjects_dir):
    '''
    Plot the MFT sources at time point of peak.
    '''
    print "##### Attempting to plot:"
    # cf. decoding/plot_decoding_spatio_temporal_source.py
    vertices = [s['vertno'] for s in fwdmag['src']]
    if len(vertices) == 1:
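        # Only one source space: split its vertices into pseudo left/right
        # hemispheres by the sign of the x coordinate of each source location.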
        vertices = [fwdmag['src'][0]['vertno'][fwdmag['src'][0]['rr'][fwdmag['src'][0]['vertno']][:, 0] <= -0.],
                    fwdmag['src'][0]['vertno'][fwdmag['src'][0]['rr'][fwdmag['src'][0]['vertno']][:, 0] > -0.]]

    stc_feat = SourceEstimate(stcdata, vertices=vertices,
                              tmin=-0.2, tstep=tstep, subject=subject)
    for hemi in ['lh', 'rh']:
        brain = stc_feat.plot(surface='white', hemi=hemi, subjects_dir=subjects_dir,
                              transparent=True, clim='auto')
        brain.show_view('lateral')
        # use peak getter to move visualization to the time point of the peak
        tmin = 0.095
        tmax = 0.10
        print "Restricting peak search to [%fs, %fs]" % (tmin, tmax)
        if hemi == 'both':
            vertno_max, time_idx = stc_feat.get_peak(hemi='rh', time_as_index=True,
                                                     tmin=tmin, tmax=tmax)
        else:
            vertno_max, time_idx = stc_feat.get_peak(hemi=hemi, time_as_index=True,
                                                     tmin=tmin, tmax=tmax)
        if hemi == 'lh':
            comax = fwdmag['src'][0]['rr'][vertno_max]
            print("hemi=%s: vertno_max=%d, time_idx=%d "
                  "fwdmag['src'][0]['rr'][vertno_max] = " %
                  (hemi, vertno_max, time_idx), comax)
        elif len(fwdmag['src']) > 1:
            comax = fwdmag['src'][1]['rr'][vertno_max]
            print("hemi=%s: vertno_max=%d, time_idx=%d "
                  "fwdmag['src'][1]['rr'][vertno_max] = " %
                  (hemi, vertno_max, time_idx), comax)

        print("hemi=%s: setting time_idx=%d" % (hemi, time_idx))
        brain.set_data_time_index(time_idx)
        # draw marker at maximum peaking vertex
        brain.add_foci(vertno_max, coords_as_verts=True, hemi=hemi, color='blue',
                       scale_factor=0.6)
        offsets = np.append([0], [s['nuse'] for s in fwdmag['src']])
        if hemi == 'lh':
            ifoci = [np.nonzero([stcdata[0:offsets[1], time_idx] >=
                                 0.25 * np.max(stcdata[:, time_idx])][0])]
            vfoci = fwdmag['src'][0]['vertno'][ifoci[0][0]]
            cfoci = fwdmag['src'][0]['rr'][vfoci]
            print("Coords of %d sel. vfoci: " % cfoci.shape[0])
            print(cfoci)
            print("vfoci: ")
            print(vfoci)
            print("brain.geo['lh'].coords[vfoci] : ")
            print(brain.geo['lh'].coords[vfoci])
        elif len(fwdmag['src']) > 1:
            ifoci = [np.nonzero([stcdata[offsets[1]:, time_idx] >=
                                 0.25 * np.max(stcdata[:, time_idx])][0])]
            vfoci = fwdmag['src'][1]['vertno'][ifoci[0][0]]
            cfoci = fwdmag['src'][1]['rr'][vfoci]
            print("Coords of %d sel. vfoci: " % cfoci.shape[0])
            print(cfoci)
            print("vfoci: ")
            print(vfoci)
            print("brain.geo['rh'].coords[vfoci] : ")
            print(brain.geo['rh'].coords[vfoci])

        mrfoci = np.zeros(cfoci.shape)
        invmri_head_t = invert_transform(fwdmag['info']['mri_head_t'])
        mrfoci = apply_trans(invmri_head_t['trans'], cfoci, move=True)
        print("mrfoci: ")
        print(mrfoci)

        # Just some blobs:
        bloblist = np.zeros((300, 3))
        for i in range(100):
            bloblist[i, 0] = float(i)
            bloblist[i + 100, 1] = float(i)
            bloblist[i + 200, 2] = float(i)
        mrblobs = apply_trans(invmri_head_t['trans'], bloblist, move=True)
        brain.save_image('testfig_map_%s.png' % hemi)
        brain.close()
Example #15
def plot_visualize_mft_sources(fwdmag, stcdata, tmin, tstep, subject,
                               subjects_dir):
    '''
    Plot the MFT sources at time point of peak.
    '''
    print "##### Attempting to plot:"
    # cf. decoding/plot_decoding_spatio_temporal_source.py
    vertices = [s['vertno'] for s in fwdmag['src']]
    if len(vertices) == 1:
        vertices = [
            fwdmag['src'][0]['vertno']
            [fwdmag['src'][0]['rr'][fwdmag['src'][0]['vertno']][:, 0] <= -0.],
            fwdmag['src'][0]['vertno'][
                fwdmag['src'][0]['rr'][fwdmag['src'][0]['vertno']][:, 0] > -0.]
        ]

    stc_feat = SourceEstimate(stcdata,
                              vertices=vertices,
                              tmin=-0.2,
                              tstep=tstep,
                              subject=subject)
    for hemi in ['lh', 'rh']:
        brain = stc_feat.plot(surface='white',
                              hemi=hemi,
                              subjects_dir=subjects_dir,
                              transparent=True,
                              clim='auto')
        brain.show_view('lateral')
        # use peak getter to move visualization to the time point of the peak
        tmin = 0.095
        tmax = 0.10
        print "Restricting peak search to [%fs, %fs]" % (tmin, tmax)
        if hemi == 'both':
            vertno_max, time_idx = stc_feat.get_peak(hemi='rh',
                                                     time_as_index=True,
                                                     tmin=tmin,
                                                     tmax=tmax)
        else:
            vertno_max, time_idx = stc_feat.get_peak(hemi=hemi,
                                                     time_as_index=True,
                                                     tmin=tmin,
                                                     tmax=tmax)
        if hemi == 'lh':
            comax = fwdmag['src'][0]['rr'][vertno_max]
            print("hemi=%s: vertno_max=%d, time_idx=%d "
                  "fwdmag['src'][0]['rr'][vertno_max] = " %
                  (hemi, vertno_max, time_idx), comax)
        elif len(fwdmag['src']) > 1:
            comax = fwdmag['src'][1]['rr'][vertno_max]
            print("hemi=%s: vertno_max=%d, time_idx=%d "
                  "fwdmag['src'][1]['rr'][vertno_max] = " %
                  (hemi, vertno_max, time_idx), comax)

        print("hemi=%s: setting time_idx=%d" % (hemi, time_idx))
        brain.set_data_time_index(time_idx)
        # draw marker at maximum peaking vertex
        brain.add_foci(vertno_max,
                       coords_as_verts=True,
                       hemi=hemi,
                       color='blue',
                       scale_factor=0.6)
        offsets = np.append([0], [s['nuse'] for s in fwdmag['src']])
        if hemi == 'lh':
            ifoci = [
                np.nonzero([
                    stcdata[0:offsets[1], time_idx] >=
                    0.25 * np.max(stcdata[:, time_idx])
                ][0])
            ]
            vfoci = fwdmag['src'][0]['vertno'][ifoci[0][0]]
            cfoci = fwdmag['src'][0]['rr'][vfoci]
            print "Coords  of %d sel. vfoci: " % cfoci.shape[0]
            print cfoci
            print "vfoci: "
            print vfoci
            print "brain.geo['lh'].coords[vfoci] : "
            print brain.geo['lh'].coords[vfoci]
        elif len(fwdmag['src']) > 1:
            ifoci = [
                np.nonzero([
                    stcdata[offsets[1]:, time_idx] >=
                    0.25 * np.max(stcdata[:, time_idx])
                ][0])
            ]
            vfoci = fwdmag['src'][1]['vertno'][ifoci[0][0]]
            cfoci = fwdmag['src'][1]['rr'][vfoci]
            print "Coords  of %d sel. vfoci: " % cfoci.shape[0]
            print cfoci
            print "vfoci: "
            print vfoci
            print "brain.geo['rh'].coords[vfoci] : "
            print brain.geo['rh'].coords[vfoci]

        mrfoci = np.zeros(cfoci.shape)
        invmri_head_t = invert_transform(fwdmag['info']['mri_head_t'])
        mrfoci = apply_trans(invmri_head_t['trans'], cfoci, move=True)
        print "mrfoci: "
        print mrfoci

        # Just some blobs:
        bloblist = np.zeros((300, 3))
        for i in range(100):
            bloblist[i, 0] = float(i)
            bloblist[i + 100, 1] = float(i)
            bloblist[i + 200, 2] = float(i)
        mrblobs = apply_trans(invmri_head_t['trans'], bloblist, move=True)
        brain.save_image('testfig_map_%s.png' % hemi)
        brain.close()
Example #16
def test_limits_to_control_points():
    """Test functionality for determining control points."""
    sample_src = read_source_spaces(src_fname)
    kwargs = dict(subjects_dir=subjects_dir, smoothing_steps=1)

    vertices = [s['vertno'] for s in sample_src]
    n_time = 5
    n_verts = sum(len(v) for v in vertices)
    stc_data = np.random.RandomState(0).rand((n_verts * n_time))
    stc_data.shape = (n_verts, n_time)
    stc = SourceEstimate(stc_data, vertices, 1, 1, 'sample')

    # Test for simple use cases
    mlab = _import_mlab()
    stc.plot(**kwargs)
    stc.plot(clim=dict(pos_lims=(10, 50, 90)), **kwargs)
    stc.plot(colormap='hot', clim='auto', **kwargs)
    stc.plot(colormap='mne', clim='auto', **kwargs)
    figs = [mlab.figure(), mlab.figure()]
    stc.plot(clim=dict(kind='value', lims=(10, 50, 90)), figure=99, **kwargs)
    pytest.raises(ValueError, stc.plot, clim='auto', figure=figs, **kwargs)

    # Test for correct clim values
    with pytest.raises(ValueError, match='monotonically'):
        stc.plot(clim=dict(kind='value', pos_lims=[0, 1, 0]), **kwargs)
    with pytest.raises(ValueError, match=r'.*must be \(3,\)'):
        stc.plot(colormap='mne', clim=dict(pos_lims=(5, 10, 15, 20)), **kwargs)
    with pytest.raises(ValueError, match='must be "value" or "percent"'):
        stc.plot(clim=dict(pos_lims=(5, 10, 15), kind='foo'), **kwargs)
    with pytest.raises(ValueError, match='must be "auto" or dict'):
        stc.plot(colormap='mne', clim='foo', **kwargs)
    with pytest.raises(TypeError, match='must be an instance of'):
        plot_source_estimates('foo', clim='auto', **kwargs)
    with pytest.raises(ValueError, match='hemi'):
        stc.plot(hemi='foo', clim='auto', **kwargs)
    with pytest.raises(ValueError, match='Exactly one'):
        stc.plot(clim=dict(lims=[0, 1, 2], pos_lims=[0, 1, 2], kind='value'),
                 **kwargs)

    # Test handling of degenerate data: thresholded maps
    stc._data.fill(0.)
    with pytest.warns(RuntimeWarning, match='All data were zero'):
        plot_source_estimates(stc, **kwargs)
    mlab.close(all=True)
Example #17
def test_process_clim_plot(renderer_interactive, brain_gc):
    """Test functionality for determining control points with stc.plot."""
    sample_src = read_source_spaces(src_fname)
    kwargs = dict(subjects_dir=subjects_dir,
                  smoothing_steps=1,
                  time_viewer=False,
                  show_traces=False)

    vertices = [s['vertno'] for s in sample_src]
    n_time = 5
    n_verts = sum(len(v) for v in vertices)
    stc_data = np.random.RandomState(0).rand((n_verts * n_time))
    stc_data.shape = (n_verts, n_time)
    stc = SourceEstimate(stc_data, vertices, 1, 1, 'sample')

    # Test for simple use cases
    brain = stc.plot(**kwargs)
    assert brain.data['center'] is None
    brain.close()
    brain = stc.plot(clim=dict(pos_lims=(10, 50, 90)), **kwargs)
    assert brain.data['center'] == 0.
    brain.close()
    brain = stc.plot(colormap='hot', clim='auto', **kwargs)
    brain.close()
    brain = stc.plot(colormap='mne', clim='auto', **kwargs)
    brain.close()
    brain = stc.plot(clim=dict(kind='value', lims=(10, 50, 90)),
                     figure=99,
                     **kwargs)
    brain.close()
    with pytest.raises(TypeError, match='must be a'):
        stc.plot(clim='auto', figure=[0], **kwargs)

    # Test for correct clim values
    with pytest.raises(ValueError, match='monotonically'):
        stc.plot(clim=dict(kind='value', pos_lims=[0, 1, 0]), **kwargs)
    with pytest.raises(ValueError, match=r'.*must be \(3,\)'):
        stc.plot(colormap='mne', clim=dict(pos_lims=(5, 10, 15, 20)), **kwargs)
    with pytest.raises(ValueError, match="'value', 'values', and 'percent'"):
        stc.plot(clim=dict(pos_lims=(5, 10, 15), kind='foo'), **kwargs)
    with pytest.raises(ValueError, match='must be "auto" or dict'):
        stc.plot(colormap='mne', clim='foo', **kwargs)
    with pytest.raises(TypeError, match='must be an instance of'):
        plot_source_estimates('foo', clim='auto', **kwargs)
    with pytest.raises(ValueError, match='hemi'):
        stc.plot(hemi='foo', clim='auto', **kwargs)
    with pytest.raises(ValueError, match='Exactly one'):
        stc.plot(clim=dict(lims=[0, 1, 2], pos_lims=[0, 1, 2], kind='value'),
                 **kwargs)

    # Test handling of degenerate data: thresholded maps
    stc._data.fill(0.)
    with pytest.warns(RuntimeWarning, match='All data were zero'):
        brain = plot_source_estimates(stc, **kwargs)
    brain.close()
Example #18
def plot_visualize_mft_sources(fwdmag, stcdata, tmin, tstep, subject,
                               subjects_dir):
    """
    Plot the MFT sources at time point of peak.
    Parameters
    ----------
    fwdmag:  forward solution
    stcdata: stc with ||cdv|| (point sequence as in fwdmag['source_rr'])
    tmin, tstep, subject: passed to mne.SourceEstimate()
    """
    print("##### Attempting to plot:")
    # cf. decoding/plot_decoding_spatio_temporal_source.py
    vertices = [s['vertno'] for s in fwdmag['src']]
    if len(vertices) == 1:
        vertices = [
            fwdmag['src'][0]['vertno']
            [fwdmag['src'][0]['rr'][fwdmag['src'][0]['vertno']][:, 0] <= -0.],
            fwdmag['src'][0]['vertno'][
                fwdmag['src'][0]['rr'][fwdmag['src'][0]['vertno']][:, 0] > -0.]
        ]
    elif len(vertices) > 2:
        warnings.warn(
            'plot_visualize_mft_sources(): Cannot handle more than two source spaces'
        )
        return

    stc_feat = SourceEstimate(stcdata,
                              vertices=vertices,
                              tmin=tmin,
                              tstep=tstep,
                              subject=subject)
    itmaxsum = np.argmax(np.sum(stcdata, axis=0))
    twmin = tmin + tstep * float(itmaxsum - stcdata.shape[1] / 20)
    twmax = tmin + tstep * float(itmaxsum + stcdata.shape[1] / 20)
    for ihemi, hemi in enumerate(['lh', 'rh', 'both']):
        brain = stc_feat.plot(surface='white',
                              hemi=hemi,
                              subjects_dir=subjects_dir,
                              transparent=True,
                              clim='auto')
        # use peak getter to move visualization to the time point of the peak
        print("Restricting peak search to [%fs, %fs]" % (twmin, twmax))
        if hemi == 'both':
            brain.show_view('parietal')
            vertno_max, time_idx = stc_feat.get_peak(hemi=None,
                                                     time_as_index=True,
                                                     tmin=twmin,
                                                     tmax=twmax)
        else:
            brain.show_view('lateral')
            vertno_max, time_idx = stc_feat.get_peak(hemi=hemi,
                                                     time_as_index=True,
                                                     tmin=twmin,
                                                     tmax=twmax)
        print("hemi=%s: setting time_idx=%d" % (hemi, time_idx))
        brain.set_data_time_index(time_idx)
        if hemi == 'lh' or hemi == 'rh':
            # draw marker at maximum peaking vertex
            brain.add_foci(vertno_max,
                           coords_as_verts=True,
                           hemi=hemi,
                           color='blue',
                           scale_factor=0.6)

        if len(fwdmag['src']) > ihemi:
            fwds = fwdmag['src'][ihemi]
            comax = fwds['rr'][vertno_max]
            print("hemi=%s: vertno_max=%d, time_idx=%d fwdmag['src'][%d]['rr'][vertno_max] = " % \
                  (hemi, vertno_max, time_idx, ihemi), comax)

            offsets = np.append([0], [s['nuse'] for s in fwdmag['src']])
            if hemi == 'lh':
                ifoci = [
                    np.nonzero([
                        stcdata[0:offsets[1], time_idx] >=
                        0.25 * np.max(stcdata[:, time_idx])
                    ][0])
                ]
            elif len(fwdmag['src']) > 1:
                ifoci = [
                    np.nonzero([
                        stcdata[offsets[1]:, time_idx] >=
                        0.25 * np.max(stcdata[:, time_idx])
                    ][0])
                ]
            vfoci = fwds['vertno'][ifoci[0][0]]
            cfoci = fwds['rr'][vfoci]
            print("Coords  of %d sel. vfoci: " % cfoci.shape[0])
            print(cfoci)
            print("vfoci: ")
            print(vfoci)
            print("brain.geo[%s].coords[vfoci] : " % hemi)
            print(brain.geo[hemi].coords[vfoci])

            mrfoci = np.zeros(cfoci.shape)
            invmri_head_t = invert_transform(fwdmag['info']['mri_head_t'])
            mrfoci = apply_trans(invmri_head_t['trans'], cfoci, move=True)
            print("mrfoci: ")
            print(mrfoci)

            # Just some blops along the coordinate axis:
            # This will not yield reasonable results with an inflated brain.
            # bloblist = np.zeros((300,3))
            # for i in xrange(100):
            #    bloblist[i,0] = float(i)
            #    bloblist[i+100,1] = float(i)
            #    bloblist[i+200,2] = float(i)
            # mrblobs = apply_trans(invmri_head_t['trans'], bloblist, move=True)
            # brain.add_foci(mrblobs, coords_as_verts=False, hemi=hemi, color='yellow', scale_factor=0.3)
        brain.save_image('testfig_map_%s.png' % hemi)
        brain.close()