def plot_overlays_Fgroup(condition, modality, hemi, azimuth):
    """Plot a group-level F-map on the fsaverage pial surface and save a snapshot.

    Parameters
    ----------
    condition : sequence of str
        Condition names; joined with "_vs_" to build the stc and image paths.
    modality : str
        Modality tag used in the stc file name.
    hemi : str
        Hemisphere to plot, 'lh' or 'rh'.
    azimuth : float
        Camera azimuth (degrees) for the saved view.
    """
    brain = Brain(subject_id='fsaverage', hemi=hemi, surf='pial',
                  cortex='low_contrast', size=(600, 600))
    stc_fname = ('/neurospin/meg/meg_tmp/MTT_MEG_Baptiste/MEG/GROUP/mne_python/Plot_STATS/'
                 + "_vs_".join(condition) + '/fmap' + modality + '_'
                 + "_vs_".join(condition) + '-' + hemi + '.stc')
    stc = read_stc(stc_fname)
    data = stc['data']
    vertices = stc['vertices']
    # thresh=3.259 hides sub-threshold F values
    # (presumably the cluster-forming threshold — TODO confirm)
    brain.add_data(data, thresh=3.259, colormap='hot', alpha=1,
                   vertices=vertices, smoothing_steps=3, hemi=hemi)
    brain.set_data_time_index(0)
    brain.scale_data_colormap(fmin=3.26, fmid=5.84, fmax=8.42,
                              transparent=False)
    brain.show_view(dict(azimuth=azimuth, elevation=None, distance=None))

    # fix: removed dead 'PlotDir = []' assignment that was immediately
    # overwritten, and stale commented-out mayavi code
    plot_dir = ('/neurospin/meg/meg_tmp/MTT_MEG_Baptiste/MEG/GROUP/mne_python/Plot_STATS/'
                + "_vs_".join(condition))
    if not os.path.exists(plot_dir):
        os.makedirs(plot_dir)
    brain.save_image(plot_dir + '/Fmap_IcaCorr_' + modality + '_' + 'dSPM'
                     + '_' + '_' + "_vs_".join(condition) + '_' + hemi + '_'
                     + str(azimuth) + '_ico-5-fwd-fsaverage-' + '.png')
def test_meg_inverse():
    """Test plotting of MEG inverse solution."""
    _set_backend()
    brain = Brain(*std_args)
    fname = os.path.join(data_dir, 'meg_source_estimate-lh.stc')
    stc = io.read_stc(fname)
    verts = stc['vertices']
    act = stc['data']
    # vector-valued variant: vertex normals scaled by the activation
    act_full = (brain.geo['lh'].nn[verts][..., np.newaxis] *
                act[:, np.newaxis])
    n_times = act.shape[1]
    times = np.linspace(stc['tmin'], stc['tmin'] + n_times * stc['tstep'],
                        n_times, endpoint=False)

    def label_fmt(t):
        return 'time=%0.2f ms' % (1e3 * t)

    for layer in (act, act_full):
        brain.add_data(layer, colormap='hot', vertices=verts,
                       smoothing_steps=1, time=times, time_label=label_fmt)
    brain.scale_data_colormap(fmin=13, fmid=18, fmax=22, transparent=True)
    assert brain.data_dict['lh']['time_idx'] == 0
    brain.set_time(.1)
    assert brain.data_dict['lh']['time_idx'] == 2
    # viewer = TimeViewer(brain)

    # a mismatched time axis must be rejected
    pytest.raises(ValueError, brain.add_data, act, vertices=verts,
                  time=times[:-1])
    # third layer; initial_time lands on the sample nearest 90 ms
    brain.add_data(act, colormap='hot', vertices=verts, smoothing_steps=1,
                   time=times, time_label=label_fmt, initial_time=.09)
    assert brain.data_dict['lh']['time_idx'] == 1
    layers = brain._data_dicts['lh']
    assert len(layers) == 3
    assert layers[0]['time_idx'] == 1
    assert layers[1]['time_idx'] == 1

    # shifting the time index moves every layer
    brain.set_data_time_index(0)
    assert layers[0]['time_idx'] == 0
    assert layers[1]['time_idx'] == 0
    brain.set_data_smoothing_steps(2)

    # a layer without a time axis
    brain.add_data(act[:, 1], colormap='hot', vertices=verts,
                   smoothing_steps=2)
    brain.set_data_time_index(2)
    assert len(layers) == 4

    # change surface, then drop every layer
    brain.set_surf('white')
    brain.remove_data()
    assert brain._data_dicts['lh'] == []
    brain.close()
def test_movie(tmpdir):
    """Test saving a movie of an MEG inverse solution."""
    import imageio
    if sys.version_info < (3, ):
        raise SkipTest('imageio ffmpeg requires Python 3')
    # build a Brain with one time-resolved data layer
    _set_backend()
    brain = Brain(*std_args)
    fname = os.path.join(data_dir, 'meg_source_estimate-lh.stc')
    stc = io.read_stc(fname)
    act = stc['data']
    times = stc['tmin'] + stc['tstep'] * np.arange(act.shape[1])
    brain.add_data(act, colormap='hot', vertices=stc['vertices'],
                   smoothing_steps=10, time=times,
                   time_label='time=%0.2f ms')
    brain.scale_data_colormap(fmin=13, fmid=18, fmax=22, transparent=True)

    out = str(tmpdir.join('test.mov'))
    # default settings
    brain.save_movie(out)
    assert len(imageio.mimread(out)) == 2
    # slowing down time produces more frames
    brain.save_movie(out, time_dilation=10)
    assert len(imageio.mimread(out)) == 7
    # restricting the time range trims the movie
    brain.save_movie(out, tmin=0.081, tmax=0.102)
    assert len(imageio.mimread(out)) == 2
    brain.close()
def test_movie():
    """Test saving a movie of an MEG inverse solution."""
    import imageio
    # create and setup the Brain instance
    _set_backend()
    brain = Brain(*std_args)
    stc = io.read_stc(os.path.join(data_dir, 'meg_source_estimate-lh.stc'))
    layer = stc['data']
    samp_times = np.arange(layer.shape[1]) * stc['tstep'] + stc['tmin']
    brain.add_data(layer, colormap='hot', vertices=stc['vertices'],
                   smoothing_steps=10, time=samp_times,
                   time_label='time=%0.2f ms')
    brain.scale_data_colormap(fmin=13, fmid=18, fmax=22, transparent=True)

    # save movies with different options, checking frame counts each time
    work_dir = mkdtemp()
    try:
        target = os.path.join(work_dir, 'test.mov')
        brain.save_movie(target)
        assert_equal(len(imageio.mimread(target)), 2)
        brain.save_movie(target, time_dilation=10)
        assert_equal(len(imageio.mimread(target)), 7)
        brain.save_movie(target, tmin=0.081, tmax=0.102)
        assert_equal(len(imageio.mimread(target)), 2)
    finally:
        # cleanup is known to fail on AppVeyor/Windows, so skip it there
        if not (sys.platform == 'win32' and
                os.getenv('APPVEYOR', 'False') == 'True'):
            shutil.rmtree(work_dir)
    brain.close()
def test_meg_inverse():
    """Test plotting of MEG inverse solution """
    mlab.options.backend = 'test'
    brain = Brain(*std_args)
    stc = io.read_stc(os.path.join(data_dir, 'meg_source_estimate-lh.stc'))
    courses = stc['data']
    verts = stc['vertices']
    n_samp = courses.shape[1]
    samp_t = np.linspace(stc['tmin'], stc['tmin'] + n_samp * stc['tstep'],
                         n_samp, endpoint=False)
    fmt = lambda t: 'time=%0.2f ms' % (1e3 * t)
    brain.add_data(courses, colormap='hot', vertices=verts,
                   smoothing_steps=10, time=samp_t, time_label=fmt)
    brain.scale_data_colormap(fmin=13, fmid=18, fmax=22, transparent=True)
    assert_equal(brain.data_dict['lh']['time_idx'], 0)
    brain.set_time(.1)
    assert_equal(brain.data_dict['lh']['time_idx'], 2)
    # viewer = TimeViewer(brain)

    # replace the layer; initial_time lands on the sample nearest 90 ms
    brain.add_data(courses, colormap='hot', vertices=verts,
                   smoothing_steps=10, time=samp_t, time_label=fmt,
                   initial_time=.09, remove_existing=True)
    assert_equal(brain.data_dict['lh']['time_idx'], 1)
    brain.close()
def test_movie(tmpdir):
    """Test saving a movie of an MEG inverse solution."""
    import imageio
    if sys.version_info < (3,):
        raise SkipTest('imageio ffmpeg requires Python 3')
    # create and setup the Brain instance
    _set_backend()
    brain = Brain(*std_args)
    stc_path = os.path.join(data_dir, 'meg_source_estimate-lh.stc')
    est = io.read_stc(stc_path)
    values = est['data']
    t_axis = np.arange(values.shape[1]) * est['tstep'] + est['tmin']
    brain.add_data(values, colormap='hot', vertices=est['vertices'],
                   smoothing_steps=10, time=t_axis,
                   time_label='time=%0.2f ms')
    brain.scale_data_colormap(fmin=13, fmid=18, fmax=22, transparent=True)

    dst = str(tmpdir.join('test.mov'))
    # each call overwrites dst; check the resulting frame count each time
    for opts, n_frames in ((dict(), 2),
                           (dict(time_dilation=10), 7),
                           (dict(tmin=0.081, tmax=0.102), 2)):
        brain.save_movie(dst, **opts)
        assert len(imageio.mimread(dst)) == n_frames
    brain.close()
def plot_overlays_diff_group_window(condition, method, modality, hemi, window,
                                    azimuth, elevation):
    """Plot the mean group contrast within a time window on fsaverage and save it.

    The color limits are symmetric, taken from both hemispheres' window means
    (80% of the absolute extremum), so left/right plots share one scale.

    Parameters
    ----------
    condition : sequence of two str
        Condition names of the contrast (condition[0] - condition[1]).
    method : str
        Inverse method tag in the stc file name (e.g. 'dSPM').
    modality : str
        Modality tag in the stc file name.
    hemi : str
        Hemisphere to plot, 'lh' or 'rh'.
    window : sequence of two float
        (tmin, tmax) of the averaging window, in seconds.
    azimuth, elevation : float
        Camera angles (degrees) for the saved view.
    """
    subject_id, surface = 'fsaverage', 'inflated'
    brain = Brain(subject_id, hemi, surface, size=(600, 600))
    # fix: the three stc paths shared one prefix repeated verbatim; build it once
    base = ('/neurospin/meg/meg_tmp/MTT_MEG_Baptiste/MEG/GROUP/mne_python/BrainMaps/IcaCorr_Normalized'
            + modality + '_' + condition[0] + '-' + condition[1]
            + '_pick_oriNone_' + method + '_ico-5-fwd-fsaverage.stc-')
    stcl = read_stc(base + 'lh.stc')
    stcr = read_stc(base + 'rh.stc')
    datal = stcl['data']
    datar = stcr['data']
    stc = read_stc(base + hemi + '.stc')
    data = stc['data']
    vertices = stc['vertices']
    time = np.linspace(stc['tmin'],
                       stc['tmin'] + data.shape[1] * stc['tstep'],
                       data.shape[1])
    # sample indices bracketing the requested window
    winstart = np.where(time < window[0])[0][-1]
    winend = np.where(time >= window[1])[0][0]
    meanval = np.mean(data[:, winstart:winend], 1)
    meanvalr = np.mean(datar[:, winstart:winend], 1)
    meanvall = np.mean(datal[:, winstart:winend], 1)
    # symmetric color limits from both hemispheres
    maxval = np.max([np.max(meanvalr), np.max(meanvall)])
    minval = np.min([np.min(meanvalr), np.min(meanvall)])
    fmin = -np.max(np.abs([maxval, minval])) * 0.8
    fmax = np.max(np.abs([maxval, minval])) * 0.8
    colormap = mne.viz.mne_analyze_colormap(
        limits=[fmin, fmin / 3, fmin / 3.1, fmax / 3.1, fmax / 3, fmax],
        format='mayavi')
    # static plot: the time label is constant
    time_label = lambda t: 'time=%0.2f ms' % (0)
    brain.add_data(meanval, colormap=colormap, vertices=vertices,
                   smoothing_steps=15, time=time, time_label=time_label,
                   hemi=hemi)
    brain.scale_data_colormap(fmin=fmin, fmid=0, fmax=fmax, transparent=False)
    brain.show_view(dict(azimuth=azimuth, elevation=elevation, distance=None))

    # fix: removed dead 'PlotDir = []' assignment and commented-out mayavi code
    plot_dir = ('/neurospin/meg/meg_tmp/MTT_MEG_Baptiste/MEG/GROUP/mne_python/Plots/IcaCorr_Window_'
                + condition[0] + '-' + condition[1]
                + str(window[0]) + '-' + str(window[1]))
    if not os.path.exists(plot_dir):
        os.makedirs(plot_dir)
    brain.save_image(plot_dir + '/IcaCorr_' + modality + '_' + method + '_'
                     + '_Normalized' + condition[0] + '-' + condition[1] + '_'
                     + str(window[0]) + '-' + str(window[1]) + hemi + '_'
                     + str(azimuth) + '_ico-5-fwd-fsaverage-' + '.png')
def test_movie():
    """Test saving a movie of an MEG inverse solution.

    Saves twice into one temporary directory, then counts the frames of the
    resulting movie with ffmpeg.
    """
    # create and setup the Brain instance
    mlab.options.backend = 'auto'
    brain = Brain(*std_args)
    stc_fname = os.path.join(data_dir, 'meg_source_estimate-lh.stc')
    stc = io.read_stc(stc_fname)
    data = stc['data']
    time = np.arange(data.shape[1]) * stc['tstep'] + stc['tmin']
    brain.add_data(data, colormap='hot', vertices=stc['vertices'],
                   smoothing_steps=10, time=time, time_label='time=%0.2f ms')
    brain.scale_data_colormap(fmin=13, fmid=18, fmax=22, transparent=True)

    # save movies with different options
    tempdir = mkdtemp()
    try:
        dst = os.path.join(tempdir, 'test.mov')
        brain.save_movie(dst)
        brain.save_movie(dst, tmin=0.081, tmax=0.102)
        # test the number of frames in the movie
        sp = subprocess.Popen(('ffmpeg', '-i', 'test.mov', '-vcodec', 'copy',
                               '-f', 'null', '/dev/null'), cwd=tempdir,
                              stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        stdout, stderr = sp.communicate()
        # fix: Popen pipes yield bytes on Python 3; decode before regex search
        if isinstance(stderr, bytes):
            stderr = stderr.decode('utf-8', 'replace')
        # fix: raw string — '\s'/'\d' are invalid escapes in a plain literal
        m = re.search(r'frame=\s*(\d+)\s', stderr)
        if not m:
            raise RuntimeError(stderr)
        n_frames = int(m.group(1))
        assert_equal(n_frames, 3)
    finally:
        # clean up
        shutil.rmtree(tempdir)
    brain.close()
def test_meg_inverse():
    """Test plotting of MEG inverse solution."""
    _set_backend()
    brain = Brain(*std_args)
    fpath = os.path.join(data_dir, 'meg_source_estimate-lh.stc')
    est = io.read_stc(fpath)
    vert_ids = est['vertices']
    scalars = est['data']
    # vector-valued variant built from the vertex normals
    vectors = (brain.geo['lh'].nn[vert_ids][..., np.newaxis] *
               scalars[:, np.newaxis])
    n_t = scalars.shape[1]
    t_axis = np.linspace(est['tmin'], est['tmin'] + n_t * est['tstep'],
                         n_t, endpoint=False)
    label_cb = lambda t: 'time=%0.2f ms' % (1e3 * t)
    for arr in (scalars, vectors):
        brain.add_data(arr, colormap='hot', vertices=vert_ids,
                       smoothing_steps=1, time=t_axis, time_label=label_cb)
    brain.scale_data_colormap(fmin=13, fmid=18, fmax=22, transparent=True)
    assert_equal(brain.data_dict['lh']['time_idx'], 0)
    brain.set_time(.1)
    assert_equal(brain.data_dict['lh']['time_idx'], 2)
    # viewer = TimeViewer(brain)

    # a mismatched time axis must be rejected
    assert_raises(ValueError, brain.add_data, scalars, vertices=vert_ids,
                  time=t_axis[:-1])
    # third layer; initial_time lands on the sample nearest 90 ms
    brain.add_data(scalars, colormap='hot', vertices=vert_ids,
                   smoothing_steps=1, time=t_axis, time_label=label_cb,
                   initial_time=.09)
    assert_equal(brain.data_dict['lh']['time_idx'], 1)
    layers = brain._data_dicts['lh']
    assert_equal(len(layers), 3)
    assert_equal(layers[0]['time_idx'], 1)
    assert_equal(layers[1]['time_idx'], 1)

    # shifting the time index moves every layer
    brain.set_data_time_index(0)
    assert_equal(layers[0]['time_idx'], 0)
    assert_equal(layers[1]['time_idx'], 0)
    brain.set_data_smoothing_steps(2)

    # a layer without any time axis
    brain.add_data(scalars[:, 1], colormap='hot', vertices=vert_ids,
                   smoothing_steps=2)
    brain.set_data_time_index(2)
    assert_equal(len(layers), 4)

    # change surface, then drop every layer
    brain.set_surf('white')
    brain.remove_data()
    assert_equal(brain._data_dicts['lh'], [])
    brain.close()
def test_meg_inverse():
    """Test plotting of MEG inverse solution."""
    mlab.options.backend = 'test'
    brain = Brain(*std_args)
    src = io.read_stc(os.path.join(data_dir, 'meg_source_estimate-lh.stc'))
    amp = src['data']
    vno = src['vertices']
    n_t = amp.shape[1]
    t = np.linspace(src['tmin'], src['tmin'] + n_t * src['tstep'], n_t,
                    endpoint=False)

    def fmt_time(x):
        return 'time=%0.2f ms' % (1e3 * x)

    brain.add_data(amp, colormap='hot', vertices=vno, smoothing_steps=10,
                   time=t, time_label=fmt_time)
    brain.scale_data_colormap(fmin=13, fmid=18, fmax=22, transparent=True)
    assert_equal(brain.data_dict['lh']['time_idx'], 0)
    brain.set_time(.1)
    assert_equal(brain.data_dict['lh']['time_idx'], 2)
    # viewer = TimeViewer(brain)

    # a time axis of the wrong length is rejected
    assert_raises(ValueError, brain.add_data, amp, vertices=vno,
                  time=t[:-1])
    # second layer; initial_time lands on the sample nearest 90 ms
    brain.add_data(amp, colormap='hot', vertices=vno, smoothing_steps=10,
                   time=t, time_label=fmt_time, initial_time=.09)
    assert_equal(brain.data_dict['lh']['time_idx'], 1)
    layers = brain._data_dicts['lh']
    assert_equal(len(layers), 2)
    assert_equal(layers[0]['time_idx'], 1)
    assert_equal(layers[1]['time_idx'], 1)

    # shifting the time index moves both layers
    brain.set_data_time_index(0)
    assert_equal(layers[0]['time_idx'], 0)
    assert_equal(layers[1]['time_idx'], 0)

    # removing data clears every layer
    brain.remove_data()
    assert_equal(brain._data_dicts['lh'], [])
    brain.close()
def test_meg_inverse():
    """Test plotting of MEG inverse solution """
    mlab.options.backend = 'test'
    brain = Brain(*std_args)
    stc = io.read_stc(os.path.join(data_dir, 'meg_source_estimate-lh.stc'))
    values = stc['data']
    # time axis converted to milliseconds up front
    ms_time = 1e3 * np.linspace(stc['tmin'],
                                stc['tmin'] + values.shape[1] * stc['tstep'],
                                values.shape[1])
    brain.add_data(values, colormap='hot', vertices=stc['vertices'],
                   smoothing_steps=10, time=ms_time,
                   time_label='time=%0.2f ms')
    brain.set_data_time_index(2)
    brain.scale_data_colormap(fmin=13, fmid=18, fmax=22, transparent=True)
def plot_overlays_diff_singlesubj(subject, condition, method, modality, hemi,
                                  indextime, azimuth):
    """Plot a single-subject contrast at one time index on fsaverage and save it.

    Parameters
    ----------
    subject : str
        Subject directory name under the MEG root.
    condition : sequence of two str
        Condition names of the contrast (condition[0] - condition[1]).
    method : str
        Inverse method tag in the stc file name (e.g. 'dSPM').
    modality : str
        Modality tag in the stc file name.
    hemi : str
        Hemisphere to plot, 'lh' or 'rh'.
    indextime : int
        Index of the time sample to display.
    azimuth : float
        Camera azimuth (degrees) for the saved view.
    """
    subject_id, surface = 'fsaverage', 'inflated'
    brain = Brain(subject_id, hemi, surface, size=(600, 600))
    stc_fname = ('/neurospin/meg/meg_tmp/MTT_MEG_Baptiste/MEG/' + subject
                 + '/mne_python/STCS_diff/IcaCorr_' + condition[0] + '-'
                 + condition[1] + '/' + modality + '_' + method + '_'
                 + subject + '_' + condition[0] + '-' + condition[1] + '_'
                 + '_ico-5-fwd-fsaverage-.stc-' + hemi + '.stc')
    stc = read_stc(stc_fname)
    data = stc['data']
    vertices = stc['vertices']
    time = np.linspace(stc['tmin'],
                       stc['tmin'] + data.shape[1] * stc['tstep'],
                       data.shape[1])
    time_label = lambda t: 'time=%0.2f ms' % (t * 1e3)
    brain.add_data(data, colormap='hot', vertices=vertices,
                   smoothing_steps=4, time=time, time_label=time_label,
                   hemi=hemi)
    brain.set_data_time_index(indextime)
    brain.scale_data_colormap(fmin=0, fmid=2.5, fmax=5, transparent=True)
    brain.show_view(dict(azimuth=azimuth, elevation=None, distance=None))

    realtime = stc['tmin'] + stc['tstep'] * indextime
    # bug fix: the original path had a stray unary '+'
    # ("... 'IcaCorr_' + + condition[0]") which raised
    # "TypeError: bad operand type for unary +: 'str'" at runtime
    plot_dir = ('/neurospin/meg/meg_tmp/MTT_MEG_Baptiste/MEG/' + subject
                + '/mne_python/BrainMaps/IcaCorr_' + condition[0] + '-'
                + condition[1])
    if not os.path.exists(plot_dir):
        os.makedirs(plot_dir)
    brain.save_image(plot_dir + '/IcaCorr_' + modality + '_' + method + '_'
                     + subject + '_' + condition[0] + '-' + condition[1]
                     + '_' + str(realtime) + hemi + '_' + str(azimuth)
                     + '_ico-5-fwd-fsaverage-' + '.png')
def test_meg_inverse():
    """Test plotting of MEG inverse solution."""
    mlab.options.backend = 'test'
    brain = Brain(*std_args)
    path = os.path.join(data_dir, 'meg_source_estimate-lh.stc')
    est = io.read_stc(path)
    mat = est['data']
    vert_ids = est['vertices']
    grid = np.linspace(est['tmin'],
                       est['tmin'] + mat.shape[1] * est['tstep'],
                       mat.shape[1], endpoint=False)
    label_cb = lambda sec: 'time=%0.2f ms' % (1e3 * sec)
    brain.add_data(mat, colormap='hot', vertices=vert_ids,
                   smoothing_steps=10, time=grid, time_label=label_cb)
    brain.scale_data_colormap(fmin=13, fmid=18, fmax=22, transparent=True)
    assert_equal(brain.data_dict['lh']['time_idx'], 0)
    brain.set_time(.1)
    assert_equal(brain.data_dict['lh']['time_idx'], 2)
    # viewer = TimeViewer(brain)

    # replace the existing layer; initial_time picks the nearest sample
    brain.add_data(mat, colormap='hot', vertices=vert_ids,
                   smoothing_steps=10, time=grid, time_label=label_cb,
                   initial_time=.09, remove_existing=True)
    assert_equal(brain.data_dict['lh']['time_idx'], 1)
    brain.close()
def test_meg_inverse():
    """Test plotting of MEG inverse solution """
    mlab.options.backend = 'test'
    brain = Brain(*std_args)
    src_file = os.path.join(data_dir, 'meg_source_estimate-lh.stc')
    src = io.read_stc(src_file)
    arr = src['data']
    # time axis in milliseconds
    t_ms = 1e3 * np.linspace(src['tmin'],
                             src['tmin'] + arr.shape[1] * src['tstep'],
                             arr.shape[1])
    brain.add_data(arr, colormap='hot', vertices=src['vertices'],
                   smoothing_steps=10, time=t_ms,
                   time_label='time=%0.2f ms')
    brain.set_data_time_index(2)
    brain.scale_data_colormap(fmin=13, fmid=18, fmax=22, transparent=True)
    # viewer = TimeViewer(brain)
    brain.close()
def plot_overlays_diff_group(condition, method, modality, hemi, indextime,
                             azimuth):
    """Plot the group contrast at one time index on the fsaverage pial surface
    and save a snapshot.

    Parameters
    ----------
    condition : sequence of two str
        Condition names of the contrast (condition[0] - condition[1]).
    method : str
        Inverse method tag in the stc file name.
    modality : str
        Modality tag in the stc file name.
    hemi : str
        Hemisphere to plot, 'lh' or 'rh'.
    indextime : int
        Index of the time sample to display.
    azimuth : float
        Camera azimuth (degrees) for the saved view.
    """
    brain = Brain(subject_id='fsaverage', hemi=hemi, surface='pial',
                  size=(600, 600))
    stc_fname = ('/neurospin/meg/meg_tmp/MTT_MEG_Baptiste/MEG/GROUP/mne_python/BrainMaps/IcaCorr_'
                 + modality + '_' + condition[0] + '-' + condition[1]
                 + '_pick_oriNone_' + method + '_ico-5-fwd-fsaverage.stc-'
                 + hemi + '.stc')
    stc = read_stc(stc_fname)
    data = stc['data']
    vertices = stc['vertices']
    time = np.linspace(stc['tmin'],
                       stc['tmin'] + data.shape[1] * stc['tstep'],
                       data.shape[1])
    # diverging mne-analyze map, flat between -1.80 and 1.80
    colormap = mne.viz.mne_analyze_colormap(
        limits=[-3, -1.81, -1.80, 1.80, 1.81, 3], format='mayavi')
    time_label = lambda t: 'time=%0.2f ms' % (t * 1e3)
    brain.add_data(data, colormap=colormap, vertices=vertices,
                   smoothing_steps=20, time=time, time_label=time_label,
                   hemi=hemi)
    brain.set_data_time_index(indextime)
    brain.scale_data_colormap(fmin=-1.82, fmid=0, fmax=1.82,
                              transparent=False)
    brain.show_view(dict(azimuth=azimuth, elevation=None, distance=None))

    # fix: removed no-op 'hemi = hemi', dead 'PlotDir = []' assignment and
    # stale commented-out mayavi code
    realtime = stc['tmin'] + stc['tstep'] * indextime
    plot_dir = ('/neurospin/meg/meg_tmp/MTT_MEG_Baptiste/MEG/GROUP/mne_python/Plots/IcaCorr_'
                + condition[0] + '-' + condition[1])
    if not os.path.exists(plot_dir):
        os.makedirs(plot_dir)
    brain.save_image(plot_dir + '/IcaCorr_' + modality + '_' + method + '_'
                     + '_' + condition[0] + '-' + condition[1] + '_'
                     + str(realtime) + hemi + '_' + str(azimuth)
                     + '_ico-5-fwd-fsaverage-' + '.png')
time_array = np.linspace(stc.tmin, stc.tmin + stc.data.shape[1] * stc.tstep, stc.data.shape[1], endpoint=False) # Time array for hemi in ['lh', 'rh']: # Plot the blanck hemisphere brain = Brain(nip, hemi, 'inflated', size=(800, 400), subjects_dir=data_mri_directory) # Add source time course brain.add_data(stc_data[hemi], colormap=colormap, vertices=stc_vertices[hemi], smoothing_steps=10, time=time_array, time_label=time_label, hemi=hemi, initial_time=0.) # Scale the F-map brain.scale_data_colormap(fmin=fmin, fmid=fmid, fmax=fmax, transparent=True) # Add the image array to all images for time in times: brain.set_time(time) fig_temp[hemi].append(brain.save_montage(filename=None, orientation='h')) fig_temp[hemi] = np.concatenate(fig_temp[hemi], axis=0) brain.close() # Correct for different lengths min_len = min(len(fig_temp['lh']), len(fig_temp['rh'])) fig_temp['lh'] = fig_temp['lh'][0:min_len, :, :] fig_temp['rh'] = fig_temp['rh'][0:min_len, :, :]
def plot_4_view(data1, parcel_names, parcellation, style='linear', alpha=0.95,
                zmin=None, zmax=None, zmid=None, cmap='auto', show=True,
                filename=None, surface='inflated', null_val=0,
                transparent=True, subj='fsaverage',
                sub_dir='K:\\palva\\resting_state\\_fsaverage\\'):
    '''
    Plots 1d array of data. Plotted views are lateral and medial on both HS.
    Used brain is fsaverage.

    INPUT:
        data1: 1-dimensional data array, len = # parcels.
            1st half must be left HS, 2nd half right.
        parcel_names: Parcel names, in the same order as the data.
        parcellation: Abbreviation, e.g. 'parc2018yeo7_100' or 'parc2009'
        style: 'linear': pos. values only, 'divergent': both pos & neg
        alpha: Transparency value; transparency might look weird.
        zmin: The minimum value of a linear z-axis, or center of a
            divergent axis (thus should be 0)
        zmax: Maximum value of linear z-axis, or max/-min of div.
        zmid: Midpoint of z-axis.
        cmap: Colormap by name. Default is 'rocket' for linear, and
            'icefire' for divergent; other recommended options: 'YlOrRd'
            for linear, or 'bwr' for divergent.
        show: If False, plot is closed after creation.
        filename: File to save plot as, e.g. 'plot_13.png'
        surface: Surface type.
        null_val: Value for unassigned vertices
        transparent: Whether parcels with minimum value should be transparent.
    OUTPUT:
        instance of surfer.Brain, if show==True
    '''
    N_parc = len(data1)  # the number of actually used parcels
    if len(parcel_names) != N_parc:
        raise ValueError('Number of parcels != len(data1) ')
    # fix: work on a copy so the caller's list is not mutated in place
    parcel_names = list(parcel_names)
    if parcel_names[0][-3:] != '-lh':
        parcel_names[:N_parc // 2] = [p + '-lh'
                                      for p in parcel_names[:N_parc // 2]]
        parcel_names[N_parc // 2:] = [p + '-rh'
                                      for p in parcel_names[N_parc // 2:]]

    #### load parcels
    if parcellation == 'parc2009':
        aparc_lh_file = sub_dir + '\\' + subj + '\\label\\lh.aparc.a2009s.annot'
        aparc_rh_file = sub_dir + '\\' + subj + '\\label\\rh.aparc.a2009s.annot'
    else:
        aparc_lh_file = (sub_dir + '\\' + subj + '\\label\\lh.'
                         + parcellation + '.annot')
        aparc_rh_file = (sub_dir + '\\' + subj + '\\label\\rh.'
                         + parcellation + '.annot')
    labels_lh, ctab, names_lh = nib.freesurfer.read_annot(aparc_lh_file)
    labels_rh, ctab, names_rh = nib.freesurfer.read_annot(aparc_rh_file)
    # annot names are bytes; str(n)[2:-1] strips the b'...' wrapper
    names_lh = [str(n)[2:-1] + '-lh' for n in names_lh]
    names_rh = [str(n)[2:-1] + '-rh' for n in names_rh]
    # number of labels/parcels with unknown and med. wall included
    N_label_lh = len(names_lh)
    N_label_rh = len(names_rh)

    #### map parcels in data to loaded parcels
    indicesL = np.full(N_label_lh, -1)
    indicesR = np.full(N_label_rh, -1)
    for i in range(N_parc):
        for j in range(N_label_lh):
            if names_lh[j] == parcel_names[i]:
                indicesL[j] = i
        for j in range(N_label_rh):
            if names_rh[j] == parcel_names[i]:
                indicesR[j] = i - N_parc // 2
    # shift by one so that unmatched labels (-1) point at the null slot 0
    indicesL += 1
    indicesR += 1

    ## assign values to loaded parcels
    data1L = np.concatenate(([null_val], data1[:N_parc // 2]))
    data1R = np.concatenate(([null_val], data1[N_parc // 2:]))
    data_left = data1L[indicesL]
    data_right = data1R[indicesR]

    ## map parcel values to vertices
    vtx_data_left = data_left[labels_lh]
    vtx_data_left[labels_lh == -1] = null_val
    vtx_data_right = data_right[labels_rh]
    vtx_data_right[labels_rh == -1] = null_val

    # fix: identity comparison with None (was '== None')
    if zmin is None:
        zmin = 0
    if zmax is None:
        zmax = np.nanmax(abs(data1))
    if zmid is None:
        zmid = zmax / 2
    if style == 'linear':  # shows only positive values
        center = None
    elif style == 'divergent':  # shows positive and negative values
        center = 0
    else:
        # fix: an unknown style previously fell through to a NameError below
        raise ValueError("style must be 'linear' or 'divergent'")

    #### plot to 4-view Brain
    hemi = 'split'
    brain = Brain(subj, hemi, background='white', surf=surface,
                  size=[900, 800], cortex='classic', subjects_dir=sub_dir,
                  views=['lat', 'med'])
    brain.add_data(vtx_data_left, zmin, zmax, colormap=cmap, center=center,
                   alpha=alpha, hemi='lh')
    brain.add_data(vtx_data_right, zmin, zmax, colormap=cmap, center=center,
                   alpha=alpha, hemi='rh')
    # adjust colorbar
    brain.scale_data_colormap(zmin, zmid, zmax, transparent=transparent,
                              center=center, alpha=alpha, verbose=None)
    if filename is not None:
        brain.save_image(filename)
    if show:
        return brain
We want to use an appropriate color map for these data: a divergent map that
is centered on 0, which is a meaningful transition-point as it marks the
change from negative correlations to positive correlations. By providing the
'center' argument the add_data function automatically chooses a divergent
colormap.
"""
brain.add_data(surf_data_lh, 0, .7, center=0, hemi='lh')
brain.add_data(surf_data_rh, 0, .7, center=0, hemi='rh')

"""
You can tune the data display by shifting the colormap around interesting
regions. For example, you can ignore small correlation up to a magnitude of
0.2 and let colors become gradually less transparent from 0.2 to 0.5 by
re-scaling the colormap as follows. For more information see the help string
of this function.
"""
brain.scale_data_colormap(.2, .5, .7, transparent=True, center=0)

"""
You can also set the overall opacity of the displayed data while maintaining
the transparency of the small values.
"""
brain.scale_data_colormap(0, .35, .7, transparent=True, center=0, alpha=0.75)

"""
This overlay represents resting-state correlations with a seed in left
angular gyrus. Let's plot that seed.
"""
seed_coords = (-45, -67, 36)
# projected onto the white surface before display
brain.add_foci(seed_coords, map_surface="white", hemi='lh')
def plot_source_estimates(stc, subject=None, surface='inflated', hemi='lh',
                          colormap='auto', time_label='auto',
                          smoothing_steps=10, transparent=None, alpha=1.0,
                          time_viewer=False, config_opts=None,
                          subjects_dir=None, figure=None, views='lat',
                          colorbar=True, clim='auto', cortex="classic",
                          size=800, background="black", foreground="white",
                          initial_time=None, time_unit=None):
    """Plot SourceEstimates with PySurfer

    Note: PySurfer currently needs the SUBJECTS_DIR environment variable,
    which will automatically be set by this function. Plotting multiple
    SourceEstimates with different values for subjects_dir will cause
    PySurfer to use the wrong FreeSurfer surfaces when using methods of
    the returned Brain object. It is therefore recommended to set the
    SUBJECTS_DIR environment variable or always use the same value for
    subjects_dir (within the same Python session).

    Parameters
    ----------
    stc : SourceEstimates
        The source estimates to plot.
    subject : str | None
        The subject name corresponding to FreeSurfer environment
        variable SUBJECT. If None stc.subject will be used. If that
        is None, the environment will be used.
    surface : str
        The type of surface (inflated, white etc.).
    hemi : str, 'lh' | 'rh' | 'split' | 'both'
        The hemisphere to display.
    colormap : str | np.ndarray of float, shape(n_colors, 3 | 4)
        Name of colormap to use or a custom look up table. If array, must
        be (n x 3) or (n x 4) array for with RGB or RGBA values between
        0 and 255. If 'auto', either 'hot' or 'mne' will be chosen
        based on whether 'lims' or 'pos_lims' are specified in `clim`.
    time_label : str | callable | None
        Format of the time label (a format string, a function that maps
        floating point time values to strings, or None for no label). The
        default is ``time=%0.2f ms``.
    smoothing_steps : int
        The amount of smoothing
    transparent : bool | None
        If True, use a linear transparency between fmin and fmid. None will
        choose automatically based on colormap type.
    alpha : float
        Alpha value to apply globally to the overlay.
    time_viewer : bool
        Display time viewer GUI.
    config_opts : dict
        Deprecated parameter.
    subjects_dir : str
        The path to the freesurfer subjects reconstructions. It corresponds
        to Freesurfer environment variable SUBJECTS_DIR.
    figure : instance of mayavi.core.scene.Scene | list | int | None
        If None, a new figure will be created. If multiple views or a
        split view is requested, this must be a list of the appropriate
        length. If int is provided it will be used to identify the Mayavi
        figure by it's id or create a new figure with the given id.
    views : str | list
        View to use. See surfer.Brain().
    colorbar : bool
        If True, display colorbar on scene.
    clim : str | dict
        Colorbar properties specification. If 'auto', set clim
        automatically based on data percentiles. If dict, should contain:

            ``kind`` : str
                Flag to specify type of limits. 'value' or 'percent'.
            ``lims`` : list | np.ndarray | tuple of float, 3 elements
                Note: Only use this if 'colormap' is not 'mne'.
                Left, middle, and right bound for colormap.
            ``pos_lims`` : list | np.ndarray | tuple of float, 3 elements
                Note: Only use this if 'colormap' is 'mne'.
                Left, middle, and right bound for colormap. Positive values
                will be mirrored directly across zero during colormap
                construction to obtain negative control points.

    cortex : str or tuple
        specifies how binarized curvature values are rendered.
        either the name of a preset PySurfer cortex colorscheme (one of
        'classic', 'bone', 'low_contrast', or 'high_contrast'), or the
        name of mayavi colormap, or a tuple with values (colormap, min,
        max, reverse) to fully specify the curvature colors.
    size : float or pair of floats
        The size of the window, in pixels. can be one number to specify
        a square window, or the (width, height) of a rectangular window.
    background : matplotlib color
        Color of the background of the display window.
    foreground : matplotlib color
        Color of the foreground of the display window.
    initial_time : float | None
        The time to display on the plot initially. ``None`` to display the
        first time sample (default).
    time_unit : 's' | 'ms'
        Whether time is represented in seconds (expected by PySurfer) or
        milliseconds. The current default is 'ms', but will change to 's'
        in MNE 0.14. To avoid a deprecation warning specify ``time_unit``
        explicitly.

    Returns
    -------
    brain : Brain
        A instance of surfer.viz.Brain from PySurfer.
    """
    import surfer
    from surfer import Brain, TimeViewer
    import mayavi

    # import here to avoid circular import problem
    from ..source_estimate import SourceEstimate

    # this function relies on PySurfer >= 0.6 features (initial_time, etc.)
    surfer_version = LooseVersion(surfer.__version__)
    v06 = LooseVersion('0.6')
    if surfer_version < v06:
        raise ImportError("This function requires PySurfer 0.6 (you are "
                          "running version %s). You can update PySurfer "
                          "using:\n\n $ pip install -U pysurfer"
                          % surfer.__version__)
    if initial_time is not None and surfer_version > v06:
        # newer PySurfer accepts initial_time directly in add_data
        kwargs = {'initial_time': initial_time}
        initial_time = None  # don't set it twice
    else:
        kwargs = {}

    if time_unit is None:
        warn("The time_unit parameter default will change from 'ms' to 's' "
             "in MNE 0.14. To avoid this warning specify the parameter "
             "explicitly.", DeprecationWarning)
        time_unit = 'ms'
    elif time_unit not in ('s', 'ms'):
        raise ValueError("time_unit needs to be 's' or 'ms', got %r"
                         % (time_unit,))

    if time_label == 'auto':
        if time_unit == 'ms':
            time_label = 'time=%0.2f ms'
        else:
            # times are in seconds here; convert for display
            def time_label(t):
                return 'time=%0.2f ms' % (t * 1e3)

    if not isinstance(stc, SourceEstimate):
        raise ValueError('stc has to be a surface source estimate')
    if hemi not in ['lh', 'rh', 'split', 'both']:
        raise ValueError('hemi has to be either "lh", "rh", "split", '
                         'or "both"')

    # check `figure` parameter (This will be performed by PySurfer > 0.6)
    if figure is not None:
        if isinstance(figure, int):
            # use figure with specified id
            size_ = size if isinstance(size, (tuple, list)) else (size, size)
            figure = [mayavi.mlab.figure(figure, size=size_)]
        elif not isinstance(figure, (list, tuple)):
            figure = [figure]
        if not all(isinstance(f, mayavi.core.scene.Scene) for f in figure):
            raise TypeError('figure must be a mayavi scene or list of scenes')

    # convert control points to locations in colormap
    ctrl_pts, colormap = _limits_to_control_points(clim, stc.data, colormap)

    # Construct cmap manually if 'mne' and get cmap bounds
    # and triage transparent argument
    if colormap in ('mne', 'mne_analyze'):
        colormap = mne_analyze_colormap(ctrl_pts)
        scale_pts = [-1 * ctrl_pts[-1], 0, ctrl_pts[-1]]
        transparent = False if transparent is None else transparent
    else:
        scale_pts = ctrl_pts
        transparent = True if transparent is None else transparent

    subjects_dir = get_subjects_dir(subjects_dir=subjects_dir,
                                    raise_error=True)
    subject = _check_subject(stc.subject, subject, True)
    if hemi in ['both', 'split']:
        hemis = ['lh', 'rh']
    else:
        hemis = [hemi]

    title = subject if len(hemis) > 1 else '%s - %s' % (subject, hemis[0])
    with warnings.catch_warnings(record=True):  # traits warnings
        brain = Brain(subject, hemi, surface, True, title, cortex, size,
                      background, foreground, figure, subjects_dir, views,
                      config_opts=config_opts)

    if time_unit == 's':
        times = stc.times
    else:  # time_unit == 'ms'
        times = 1e3 * stc.times

    # stc.data stacks lh vertices first, then rh; split per hemisphere
    for hemi in hemis:
        hemi_idx = 0 if hemi == 'lh' else 1
        if hemi_idx == 0:
            data = stc.data[:len(stc.vertices[0])]
        else:
            data = stc.data[len(stc.vertices[0]):]
        vertices = stc.vertices[hemi_idx]
        with warnings.catch_warnings(record=True):  # traits warnings
            brain.add_data(data, colormap=colormap, vertices=vertices,
                           smoothing_steps=smoothing_steps, time=times,
                           time_label=time_label, alpha=alpha, hemi=hemi,
                           colorbar=colorbar, **kwargs)

        # scale colormap and set time (index) to display
        brain.scale_data_colormap(fmin=scale_pts[0], fmid=scale_pts[1],
                                  fmax=scale_pts[2], transparent=transparent)

    if initial_time is not None:
        brain.set_time(initial_time)
    if time_viewer:
        TimeViewer(brain)
    return brain
# Display the loaded estimate on the brain built earlier in the script.
# NOTE(review): 'stc', 'data' and 'brain' are defined above this fragment —
# confirm against the surrounding code.
vertices = stc['vertices']
""" time points in milliseconds """
time = 1e3 * np.linspace(stc['tmin'],
                         stc['tmin'] + data.shape[1] * stc['tstep'],
                         data.shape[1])
""" colormap to use """
colormap = 'hot'
""" label for time annotation """
time_label = 'time=%0.2f ms'
brain.add_data(data, colormap=colormap, vertices=vertices,
               smoothing_steps=10, time=time, time_label=time_label)
""" scale colormap and set time (index) to display """
brain.set_data_time_index(2)
brain.scale_data_colormap(fmin=13, fmid=18, fmax=22, transparent=True)
""" uncomment this line to use the interactive TimeViewer GUI """
#viewer = TimeViewer(brain)
def plot_source_estimates(stc, subject=None, surface='inflated', hemi='lh',
                          colormap='hot', time_label='time=%0.2f ms',
                          smoothing_steps=10, fmin=5., fmid=10., fmax=15.,
                          transparent=True, alpha=1.0, time_viewer=False,
                          config_opts=None, subjects_dir=None, figure=None,
                          views='lat', colorbar=True):
    """Plot SourceEstimates with PySurfer

    Note: PySurfer currently needs the SUBJECTS_DIR environment variable,
    which will automatically be set by this function. Plotting multiple
    SourceEstimates with different values for subjects_dir will cause
    PySurfer to use the wrong FreeSurfer surfaces when using methods of
    the returned Brain object. It is therefore recommended to set the
    SUBJECTS_DIR environment variable or always use the same value for
    subjects_dir (within the same Python session).

    Parameters
    ----------
    stc : SourceEstimates
        The source estimates to plot.
    subject : str | None
        The subject name corresponding to FreeSurfer environment
        variable SUBJECT. If None stc.subject will be used. If that
        is None, the environment will be used.
    surface : str
        The type of surface (inflated, white etc.).
    hemi : str, 'lh' | 'rh' | 'split' | 'both'
        The hemisphere to display. Using 'both' or 'split' requires
        PySurfer version 0.4 or above.
    colormap : str
        The type of colormap to use.
    time_label : str
        How to print info about the time instant visualized.
    smoothing_steps : int
        The amount of smoothing.
    fmin : float
        The minimum value to display.
    fmid : float
        The middle value on the colormap.
    fmax : float
        The maximum value for the colormap.
    transparent : bool
        If True, use a linear transparency between fmin and fmid.
    alpha : float
        Alpha value to apply globally to the overlay.
    time_viewer : bool
        Display time viewer GUI.
    config_opts : dict | None
        Keyword arguments for Brain initialization.
        See pysurfer.viz.Brain. If None, an empty dict is used.
    subjects_dir : str
        The path to the freesurfer subjects reconstructions.
        It corresponds to Freesurfer environment variable SUBJECTS_DIR.
    figure : instance of mayavi.core.scene.Scene | list | int | None
        If None, a new figure will be created. If multiple views or a
        split view is requested, this must be a list of the appropriate
        length. If int is provided it will be used to identify the Mayavi
        figure by it's id or create a new figure with the given id.
    views : str | list
        View to use. See surfer.Brain().
    colorbar : bool
        If True, display colorbar on scene.

    Returns
    -------
    brain : Brain
        An instance of surfer.viz.Brain from PySurfer.
    """
    import surfer
    from surfer import Brain, TimeViewer

    # avoid the mutable-default-argument pitfall: normalize here instead of
    # using ``config_opts={}`` in the signature
    if config_opts is None:
        config_opts = {}

    if hemi in ['split', 'both'] and LooseVersion(surfer.__version__) < '0.4':
        raise NotImplementedError('hemi type "%s" not supported with your '
                                  'version of pysurfer. Please upgrade to '
                                  'version 0.4 or higher.' % hemi)

    try:
        import mayavi
        from mayavi import mlab
    except ImportError:
        # very old Enthought layouts shipped mayavi under ``enthought``
        from enthought import mayavi
        from enthought.mayavi import mlab

    # import here to avoid circular import problem
    from ..source_estimate import SourceEstimate

    if not isinstance(stc, SourceEstimate):
        raise ValueError('stc has to be a surface source estimate')

    if hemi not in ['lh', 'rh', 'split', 'both']:
        raise ValueError('hemi has to be either "lh", "rh", "split", '
                         'or "both"')

    n_split = 2 if hemi == 'split' else 1
    n_views = 1 if isinstance(views, string_types) else len(views)
    if figure is not None:
        # use figure with specified id or create new figure
        if isinstance(figure, int):
            figure = mlab.figure(figure, size=(600, 600))
        # make sure it is of the correct type
        if not isinstance(figure, list):
            figure = [figure]
        if not all(isinstance(f, mayavi.core.scene.Scene) for f in figure):
            raise TypeError('figure must be a mayavi scene or list of scenes')
        # make sure we have the right number of figures
        n_fig = len(figure)
        if not n_fig == n_split * n_views:
            # BUGFIX: ``% n_split * n_views`` formatted with n_split and then
            # string-multiplied by n_views; parenthesize the product instead
            raise RuntimeError('`figure` must be a list with the same '
                               'number of elements as PySurfer plots that '
                               'will be created (%s)' % (n_split * n_views))

    subjects_dir = get_subjects_dir(subjects_dir=subjects_dir)
    subject = _check_subject(stc.subject, subject, False)
    if subject is None:
        if 'SUBJECT' in os.environ:
            subject = os.environ['SUBJECT']
        else:
            raise ValueError('SUBJECT environment variable not set')

    if hemi in ['both', 'split']:
        hemis = ['lh', 'rh']
    else:
        hemis = [hemi]

    title = subject if len(hemis) > 1 else '%s - %s' % (subject, hemis[0])
    # older PySurfer versions do not accept ``views``; probe the signature
    args = inspect.getargspec(Brain.__init__)[0]
    kwargs = dict(title=title, figure=figure, config_opts=config_opts,
                  subjects_dir=subjects_dir)
    if 'views' in args:
        kwargs['views'] = views
    else:
        logger.info('PySurfer does not support "views" argument, please '
                    'consider updating to a newer version (0.4 or later)')
    with warnings.catch_warnings(record=True):  # traits warnings
        brain = Brain(subject, hemi, surface, **kwargs)

    for hemi in hemis:
        hemi_idx = 0 if hemi == 'lh' else 1
        # NOTE(review): ``vertno`` is the pre-0.9 MNE attribute name for
        # ``vertices`` on SourceEstimate -- this version targets old MNE
        if hemi_idx == 0:
            data = stc.data[:len(stc.vertno[0])]
        else:
            data = stc.data[len(stc.vertno[0]):]
        vertices = stc.vertno[hemi_idx]
        time = 1e3 * stc.times  # PySurfer displays time in ms
        with warnings.catch_warnings(record=True):  # traits warnings
            brain.add_data(data, colormap=colormap, vertices=vertices,
                           smoothing_steps=smoothing_steps, time=time,
                           time_label=time_label, alpha=alpha, hemi=hemi,
                           colorbar=colorbar)

        # scale colormap and set time (index) to display
        brain.scale_data_colormap(fmin=fmin, fmid=fmid, fmax=fmax,
                                  transparent=transparent)

    if time_viewer:
        TimeViewer(brain)

    return brain
stc['tmin'] + data.shape[1] * stc['tstep'], data.shape[1], endpoint=False) # MNE will soon add the option for a "full" inverse to be computed and stored. # In the meantime, we can get the equivalent for our data based on the # surface normals: data_full = brain.geo['lh'].nn[vertices][..., np.newaxis] * data[:, np.newaxis] # Now we add the data and set the initial time displayed to 100 ms: brain.add_data(data_full, colormap='hot', vertices=vertices, alpha=0.5, smoothing_steps=5, time=time, hemi=hemi, initial_time=0.1, vector_alpha=0.5, verbose=False) # scale colormap brain.scale_data_colormap(fmin=7, fmid=14, fmax=21, transparent=True, verbose=False) # viewer = TimeViewer(brain)
lambda2, method='dSPM') stc_evoked.save(stc_fname) brain = Brain(subject, 'split', 'partially_inflated', size=(800, 400)) for hemi in ['lh', 'rh']: stc = read_stc(stc_fname + '-%s.stc' % hemi) data = stc['data'] times = np.arange(data.shape[1]) * stc['tstep'] + stc['tmin'] brain.add_data(data, colormap='RdBu', vertices=stc['vertices'], smoothing_steps=10, time=times, time_label=time_label, initial_time=-0.1, hemi=hemi) abs_max = (np.abs(data)).max() brain.scale_data_colormap(fmin=0, fmid=abs_max / 3, fmax=abs_max, center=0, transparent=True) brain.save_movie(meg_dir + subject + '_stc_evoked.mov', tmin=-0.1, tmax=0.5, time_dilation=10) brain.close()
#%% load specific regressor r_name = 'dot_x' show_measure = 'mu_mean' src_df_masked = ss.load_src_df(basefile, r_name, clusters) brain = Brain('fsaverage', 'both', 'inflated', cortex='low_contrast', subjects_dir=sv.subjects_dir, background='w', foreground='k') labels = sv.show_labels_as_data(src_df_masked, show_measure, brain, transparent=True) #brain.scale_data_colormap(src_df_masked[show_measure].min(), # src_df_masked[show_measure].median(), # src_df_masked[show_measure].max(), True) brain.scale_data_colormap(0.01, 0.025, 0.06, True) #labels = sv.show_labels_as_data(src_df_masked, 'mu_mean', brain, # transparent=True, # region_aggfun=lambda a: np.max(a, axis=0)) #tv = TimeViewer(brain)
def plot_source_estimates(stc, subject=None, surface='inflated', hemi='lh',
                          colormap='auto', time_label='auto',
                          smoothing_steps=10, transparent=None, alpha=1.0,
                          time_viewer=False, config_opts=None,
                          subjects_dir=None, figure=None, views='lat',
                          colorbar=True, clim='auto', cortex="classic",
                          size=800, background="black", foreground="white",
                          initial_time=None, time_unit=None):
    """Plot SourceEstimates with PySurfer.

    Note: PySurfer currently needs the SUBJECTS_DIR environment variable,
    which will automatically be set by this function. Plotting multiple
    SourceEstimates with different values for subjects_dir will cause
    PySurfer to use the wrong FreeSurfer surfaces when using methods of
    the returned Brain object. It is therefore recommended to set the
    SUBJECTS_DIR environment variable or always use the same value for
    subjects_dir (within the same Python session).

    Parameters
    ----------
    stc : SourceEstimates
        The source estimates to plot.
    subject : str | None
        The subject name corresponding to FreeSurfer environment
        variable SUBJECT. If None stc.subject will be used. If that
        is None, the environment will be used.
    surface : str
        The type of surface (inflated, white etc.).
    hemi : str, 'lh' | 'rh' | 'split' | 'both'
        The hemisphere to display.
    colormap : str | np.ndarray of float, shape(n_colors, 3 | 4)
        Name of colormap to use or a custom look up table. If array, must
        be (n x 3) or (n x 4) array for with RGB or RGBA values between
        0 and 255. If 'auto', either 'hot' or 'mne' will be chosen
        based on whether 'lims' or 'pos_lims' are specified in `clim`.
    time_label : str | callable | None
        Format of the time label (a format string, a function that maps
        floating point time values to strings, or None for no label).
        The default is ``time=%0.2f ms``.
    smoothing_steps : int
        The amount of smoothing.
    transparent : bool | None
        If True, use a linear transparency between fmin and fmid.
        None will choose automatically based on colormap type.
    alpha : float
        Alpha value to apply globally to the overlay.
    time_viewer : bool
        Display time viewer GUI.
    config_opts : dict
        Deprecated parameter.
    subjects_dir : str
        The path to the freesurfer subjects reconstructions.
        It corresponds to Freesurfer environment variable SUBJECTS_DIR.
    figure : instance of mayavi.core.scene.Scene | list | int | None
        If None, a new figure will be created. If multiple views or a
        split view is requested, this must be a list of the appropriate
        length. If int is provided it will be used to identify the Mayavi
        figure by it's id or create a new figure with the given id.
    views : str | list
        View to use. See surfer.Brain().
    colorbar : bool
        If True, display colorbar on scene.
    clim : str | dict
        Colorbar properties specification. If 'auto', set clim
        automatically based on data percentiles. If dict, should contain:

            ``kind`` : str
                Flag to specify type of limits. 'value' or 'percent'.
            ``lims`` : list | np.ndarray | tuple of float, 3 elements
                Note: Only use this if 'colormap' is not 'mne'.
                Left, middle, and right bound for colormap.
            ``pos_lims`` : list | np.ndarray | tuple of float, 3 elements
                Note: Only use this if 'colormap' is 'mne'.
                Left, middle, and right bound for colormap. Positive values
                will be mirrored directly across zero during colormap
                construction to obtain negative control points.

    cortex : str or tuple
        Specifies how binarized curvature values are rendered.
        Either the name of a preset PySurfer cortex colorscheme (one of
        'classic', 'bone', 'low_contrast', or 'high_contrast'), or the
        name of mayavi colormap, or a tuple with values (colormap, min,
        max, reverse) to fully specify the curvature colors.
    size : float or pair of floats
        The size of the window, in pixels. Can be one number to specify
        a square window, or the (width, height) of a rectangular window.
    background : matplotlib color
        Color of the background of the display window.
    foreground : matplotlib color
        Color of the foreground of the display window.
    initial_time : float | None
        The time to display on the plot initially. ``None`` to display
        the first time sample (default).
    time_unit : 's' | 'ms'
        Whether time is represented in seconds (expected by PySurfer) or
        milliseconds. The current default is 'ms', but will change to 's'
        in MNE 0.14. To avoid a deprecation warning specify ``time_unit``
        explicitly.

    Returns
    -------
    brain : Brain
        An instance of surfer.viz.Brain from PySurfer.
    """
    import surfer
    from surfer import Brain, TimeViewer
    import mayavi

    # import here to avoid circular import problem
    from ..source_estimate import SourceEstimate

    # this code path relies on PySurfer 0.6+ keyword arguments to Brain
    surfer_version = LooseVersion(surfer.__version__)
    v06 = LooseVersion('0.6')
    if surfer_version < v06:
        raise ImportError("This function requires PySurfer 0.6 (you are "
                          "running version %s). You can update PySurfer "
                          "using:\n\n $ pip install -U pysurfer"
                          % surfer.__version__)

    # triage the time_unit deprecation: warn only if the user relies on the
    # old 'ms' default while also asking for an initial_time
    if time_unit is None:
        if initial_time is not None:
            warn(
                "The time_unit parameter default will change from 'ms' to "
                "'s' in MNE 0.14 and be removed in 0.15. To avoid this "
                "warning specify the parameter explicitly.",
                DeprecationWarning)
        time_unit = 'ms'
    elif time_unit not in ('s', 'ms'):
        raise ValueError("time_unit needs to be 's' or 'ms', got %r"
                         % (time_unit,))

    if initial_time is not None and surfer_version > v06:
        # PySurfer > 0.6 can take initial_time in add_data directly
        kwargs = {'initial_time': initial_time}
        initial_time = None  # don't set it twice
    else:
        kwargs = {}

    if time_label == 'auto':
        if time_unit == 'ms':
            time_label = 'time=%0.2f ms'
        else:
            # times passed to PySurfer are in seconds; convert for display
            def time_label(t):
                return 'time=%0.2f ms' % (t * 1e3)

    if not isinstance(stc, SourceEstimate):
        raise ValueError('stc has to be a surface source estimate')

    if hemi not in ['lh', 'rh', 'split', 'both']:
        raise ValueError('hemi has to be either "lh", "rh", "split", '
                         'or "both"')

    # check `figure` parameter (This will be performed by PySurfer > 0.6)
    if figure is not None:
        if isinstance(figure, int):
            # use figure with specified id
            size_ = size if isinstance(size, (tuple, list)) else (size, size)
            figure = [mayavi.mlab.figure(figure, size=size_)]
        elif not isinstance(figure, (list, tuple)):
            figure = [figure]
        if not all(isinstance(f, mayavi.core.scene.Scene) for f in figure):
            raise TypeError('figure must be a mayavi scene or list of scenes')

    # convert control points to locations in colormap
    ctrl_pts, colormap = _limits_to_control_points(clim, stc.data, colormap)

    # Construct cmap manually if 'mne' and get cmap bounds
    # and triage transparent argument
    if colormap in ('mne', 'mne_analyze'):
        colormap = mne_analyze_colormap(ctrl_pts)
        scale_pts = [-1 * ctrl_pts[-1], 0, ctrl_pts[-1]]
        transparent = False if transparent is None else transparent
    else:
        scale_pts = ctrl_pts
        transparent = True if transparent is None else transparent

    subjects_dir = get_subjects_dir(subjects_dir=subjects_dir,
                                    raise_error=True)
    subject = _check_subject(stc.subject, subject, True)
    if hemi in ['both', 'split']:
        hemis = ['lh', 'rh']
    else:
        hemis = [hemi]

    title = subject if len(hemis) > 1 else '%s - %s' % (subject, hemis[0])
    with warnings.catch_warnings(record=True):  # traits warnings
        brain = Brain(subject, hemi=hemi, surf=surface, curv=True,
                      title=title, cortex=cortex, size=size,
                      background=background, foreground=foreground,
                      figure=figure, subjects_dir=subjects_dir,
                      views=views, config_opts=config_opts)

    if time_unit == 's':
        times = stc.times
    else:  # time_unit == 'ms'
        times = 1e3 * stc.times

    for hemi in hemis:
        hemi_idx = 0 if hemi == 'lh' else 1
        # the stc data array stacks lh vertices first, then rh
        if hemi_idx == 0:
            data = stc.data[:len(stc.vertices[0])]
        else:
            data = stc.data[len(stc.vertices[0]):]
        vertices = stc.vertices[hemi_idx]
        with warnings.catch_warnings(record=True):  # traits warnings
            brain.add_data(data, colormap=colormap, vertices=vertices,
                           smoothing_steps=smoothing_steps, time=times,
                           time_label=time_label, alpha=alpha, hemi=hemi,
                           colorbar=colorbar, **kwargs)

        # scale colormap and set time (index) to display
        brain.scale_data_colormap(fmin=scale_pts[0], fmid=scale_pts[1],
                                  fmax=scale_pts[2], transparent=transparent)

    # only reached when PySurfer == 0.6 (see kwargs triage above)
    if initial_time is not None:
        brain.set_time(initial_time)
    if time_viewer:
        TimeViewer(brain)
    return brain
# Read the MNE dSPM inverse solution hemi = 'lh' stc_fname = os.path.join('example_data', 'meg_source_estimate-' + hemi + '.stc') stc = read_stc(stc_fname) # data and vertices for which the data is defined data = stc['data'] vertices = stc['vertices'] time = np.linspace(stc['tmin'], stc['tmin'] + data.shape[1] * stc['tstep'], data.shape[1], endpoint=False) ############################################################################### # MNE will soon add the option for a "full" inverse to be computed and stored. # In the meantime, we can get the equivalent for our data based on the # surface normals: data_full = brain.geo['lh'].nn[vertices][..., np.newaxis] * data[:, np.newaxis] ############################################################################### # Now we add the data and set the initial time displayed to 100 ms: brain.add_data(data_full, colormap='hot', vertices=vertices, alpha=0.5, smoothing_steps=5, time=time, hemi=hemi, initial_time=0.1, vector_alpha=0.5) # scale colormap brain.scale_data_colormap(fmin=7, fmid=14, fmax=21, transparent=True) # viewer = TimeViewer(brain)
perm=perm) print('cluster counts:') print(clusters.label.groupby(level='regressor').count()) #%% load specific regressor r_name = 'response' show_measure = 'mlog10p' src_df_masked = ss.load_src_df(basefile, r_name, clusters, use_basefile=True) brain = Brain('fsaverage', 'both', 'inflated', cortex='low_contrast', subjects_dir=sv.subjects_dir, background='w', foreground='k') labels = sv.show_labels_as_data(src_df_masked, show_measure, brain, transparent=True) brain.scale_data_colormap(src_df_masked[show_measure].min(), src_df_masked[show_measure].median(), src_df_masked[show_measure].max(), True) #brain.scale_data_colormap(0.01, 0.025, 0.06, True) #tv = TimeViewer(brain)
def plot_source_estimates(stc, subject=None, surface='inflated', hemi='lh',
                          colormap='auto', time_label='time=%0.2f ms',
                          smoothing_steps=10, transparent=None, alpha=1.0,
                          time_viewer=False, config_opts=None,
                          subjects_dir=None, figure=None, views='lat',
                          colorbar=True, clim='auto'):
    """Plot SourceEstimates with PySurfer

    Note: PySurfer currently needs the SUBJECTS_DIR environment variable,
    which will automatically be set by this function. Plotting multiple
    SourceEstimates with different values for subjects_dir will cause
    PySurfer to use the wrong FreeSurfer surfaces when using methods of
    the returned Brain object. It is therefore recommended to set the
    SUBJECTS_DIR environment variable or always use the same value for
    subjects_dir (within the same Python session).

    Parameters
    ----------
    stc : SourceEstimates
        The source estimates to plot.
    subject : str | None
        The subject name corresponding to FreeSurfer environment
        variable SUBJECT. If None stc.subject will be used. If that
        is None, the environment will be used.
    surface : str
        The type of surface (inflated, white etc.).
    hemi : str, 'lh' | 'rh' | 'split' | 'both'
        The hemisphere to display.
    colormap : str | np.ndarray of float, shape(n_colors, 3 | 4)
        Name of colormap to use or a custom look up table. If array, must
        be (n x 3) or (n x 4) array for with RGB or RGBA values between
        0 and 255. If 'auto', either 'hot' or 'mne' will be chosen
        based on whether 'lims' or 'pos_lims' are specified in `clim`.
    time_label : str
        How to print info about the time instant visualized.
    smoothing_steps : int
        The amount of smoothing.
    transparent : bool | None
        If True, use a linear transparency between fmin and fmid.
        None will choose automatically based on colormap type.
    alpha : float
        Alpha value to apply globally to the overlay.
    time_viewer : bool
        Display time viewer GUI.
    config_opts : dict
        Keyword arguments for Brain initialization.
        See pysurfer.viz.Brain.
    subjects_dir : str
        The path to the freesurfer subjects reconstructions.
        It corresponds to Freesurfer environment variable SUBJECTS_DIR.
    figure : instance of mayavi.core.scene.Scene | list | int | None
        If None, a new figure will be created. If multiple views or a
        split view is requested, this must be a list of the appropriate
        length. If int is provided it will be used to identify the Mayavi
        figure by it's id or create a new figure with the given id.
    views : str | list
        View to use. See surfer.Brain().
    colorbar : bool
        If True, display colorbar on scene.
    clim : str | dict
        Colorbar properties specification. If 'auto', set clim
        automatically based on data percentiles. If dict, should contain:

            ``kind`` : str
                Flag to specify type of limits. 'value' or 'percent'.
            ``lims`` : list | np.ndarray | tuple of float, 3 elements
                Note: Only use this if 'colormap' is not 'mne'.
                Left, middle, and right bound for colormap.
            ``pos_lims`` : list | np.ndarray | tuple of float, 3 elements
                Note: Only use this if 'colormap' is 'mne'.
                Left, middle, and right bound for colormap. Positive values
                will be mirrored directly across zero during colormap
                construction to obtain negative control points.

    Returns
    -------
    brain : Brain
        An instance of surfer.viz.Brain from PySurfer.
    """
    from surfer import Brain, TimeViewer
    config_opts = _handle_default('config_opts', config_opts)

    import mayavi
    from mayavi import mlab

    # import here to avoid circular import problem
    from ..source_estimate import SourceEstimate

    if not isinstance(stc, SourceEstimate):
        raise ValueError('stc has to be a surface source estimate')

    if hemi not in ['lh', 'rh', 'split', 'both']:
        raise ValueError('hemi has to be either "lh", "rh", "split", '
                         'or "both"')

    n_split = 2 if hemi == 'split' else 1
    n_views = 1 if isinstance(views, string_types) else len(views)
    if figure is not None:
        # use figure with specified id or create new figure
        if isinstance(figure, int):
            figure = mlab.figure(figure, size=(600, 600))
        # make sure it is of the correct type
        if not isinstance(figure, list):
            figure = [figure]
        if not all(isinstance(f, mayavi.core.scene.Scene) for f in figure):
            raise TypeError('figure must be a mayavi scene or list of scenes')
        # make sure we have the right number of figures
        n_fig = len(figure)
        if not n_fig == n_split * n_views:
            # BUGFIX: the original ``% n_split * n_views`` formatted the
            # string with n_split and then string-multiplied the result by
            # n_views; the product must be parenthesized
            raise RuntimeError('`figure` must be a list with the same '
                               'number of elements as PySurfer plots that '
                               'will be created (%s)' % (n_split * n_views))

    # convert control points to locations in colormap
    ctrl_pts, colormap = _limits_to_control_points(clim, stc.data, colormap)

    # Construct cmap manually if 'mne' and get cmap bounds
    # and triage transparent argument
    if colormap in ('mne', 'mne_analyze'):
        colormap = mne_analyze_colormap(ctrl_pts)
        scale_pts = [-1 * ctrl_pts[-1], 0, ctrl_pts[-1]]
        transparent = False if transparent is None else transparent
    else:
        scale_pts = ctrl_pts
        transparent = True if transparent is None else transparent

    subjects_dir = get_subjects_dir(subjects_dir=subjects_dir,
                                    raise_error=True)
    subject = _check_subject(stc.subject, subject, True)
    if hemi in ['both', 'split']:
        hemis = ['lh', 'rh']
    else:
        hemis = [hemi]

    title = subject if len(hemis) > 1 else '%s - %s' % (subject, hemis[0])
    # older PySurfer versions do not accept ``views``; probe the signature
    args = inspect.getargspec(Brain.__init__)[0]
    kwargs = dict(title=title, figure=figure, config_opts=config_opts,
                  subjects_dir=subjects_dir)
    if 'views' in args:
        kwargs['views'] = views
    with warnings.catch_warnings(record=True):  # traits warnings
        brain = Brain(subject, hemi, surface, **kwargs)

    for hemi in hemis:
        hemi_idx = 0 if hemi == 'lh' else 1
        # the stc data array stacks lh vertices first, then rh
        if hemi_idx == 0:
            data = stc.data[:len(stc.vertices[0])]
        else:
            data = stc.data[len(stc.vertices[0]):]
        vertices = stc.vertices[hemi_idx]
        time = 1e3 * stc.times  # PySurfer displays time in ms
        with warnings.catch_warnings(record=True):  # traits warnings
            brain.add_data(data, colormap=colormap, vertices=vertices,
                           smoothing_steps=smoothing_steps, time=time,
                           time_label=time_label, alpha=alpha, hemi=hemi,
                           colorbar=colorbar)

        # scale colormap and set time (index) to display
        brain.scale_data_colormap(fmin=scale_pts[0], fmid=scale_pts[1],
                                  fmax=scale_pts[2], transparent=transparent)

    if time_viewer:
        TimeViewer(brain)

    return brain
# data and vertices for which the data is defined data = stc['data'] vertices = stc['vertices'] time = np.linspace(stc['tmin'], stc['tmin'] + data.shape[1] * stc['tstep'], data.shape[1], endpoint=False) # MNE will soon add the option for a "full" inverse to be computed and stored. # In the meantime, we can get the equivalent for our data based on the # surface normals: data_full = brain.geo['lh'].nn[vertices][..., np.newaxis] * data[:, np.newaxis] # Now we add the data and set the initial time displayed to 100 ms: brain.add_data(data_full, colormap='hot', vertices=vertices, alpha=0.5, smoothing_steps=5, time=time, hemi=hemi, initial_time=0.1, vector_alpha=0.5) # scale colormap brain.scale_data_colormap(fmin=7, fmid=14, fmax=21, transparent=True) viewer = TimeViewer(brain)
def plot_source_estimates(stc, subject=None, surface='inflated', hemi='lh',
                          colormap='hot', time_label='time=%0.2f ms',
                          smoothing_steps=10, fmin=5., fmid=10., fmax=15.,
                          transparent=True, alpha=1.0, time_viewer=False,
                          config_opts=None, subjects_dir=None, figure=None,
                          views='lat', colorbar=True):
    """Plot SourceEstimates with PySurfer

    Note: PySurfer currently needs the SUBJECTS_DIR environment variable,
    which will automatically be set by this function. Plotting multiple
    SourceEstimates with different values for subjects_dir will cause
    PySurfer to use the wrong FreeSurfer surfaces when using methods of
    the returned Brain object. It is therefore recommended to set the
    SUBJECTS_DIR environment variable or always use the same value for
    subjects_dir (within the same Python session).

    Parameters
    ----------
    stc : SourceEstimates
        The source estimates to plot.
    subject : str | None
        The subject name corresponding to FreeSurfer environment
        variable SUBJECT. If None stc.subject will be used. If that
        is None, the environment will be used.
    surface : str
        The type of surface (inflated, white etc.).
    hemi : str, 'lh' | 'rh' | 'split' | 'both'
        The hemisphere to display. Using 'both' or 'split' requires
        PySurfer version 0.4 or above.
    colormap : str
        The type of colormap to use.
    time_label : str
        How to print info about the time instant visualized.
    smoothing_steps : int
        The amount of smoothing.
    fmin : float
        The minimum value to display.
    fmid : float
        The middle value on the colormap.
    fmax : float
        The maximum value for the colormap.
    transparent : bool
        If True, use a linear transparency between fmin and fmid.
    alpha : float
        Alpha value to apply globally to the overlay.
    time_viewer : bool
        Display time viewer GUI.
    config_opts : dict | None
        Keyword arguments for Brain initialization.
        See pysurfer.viz.Brain. If None, an empty dict is used.
    subjects_dir : str
        The path to the freesurfer subjects reconstructions.
        It corresponds to Freesurfer environment variable SUBJECTS_DIR.
    figure : instance of mayavi.core.scene.Scene | list | int | None
        If None, a new figure will be created. If multiple views or a
        split view is requested, this must be a list of the appropriate
        length. If int is provided it will be used to identify the Mayavi
        figure by it's id or create a new figure with the given id.
    views : str | list
        View to use. See surfer.Brain().
    colorbar : bool
        If True, display colorbar on scene.

    Returns
    -------
    brain : Brain
        An instance of surfer.viz.Brain from PySurfer.
    """
    import surfer
    from surfer import Brain, TimeViewer

    # avoid the mutable-default-argument pitfall: normalize here instead of
    # using ``config_opts={}`` in the signature
    if config_opts is None:
        config_opts = {}

    if hemi in ['split', 'both'] and LooseVersion(surfer.__version__) < '0.4':
        raise NotImplementedError('hemi type "%s" not supported with your '
                                  'version of pysurfer. Please upgrade to '
                                  'version 0.4 or higher.' % hemi)

    try:
        import mayavi
        from mayavi import mlab
    except ImportError:
        # very old Enthought layouts shipped mayavi under ``enthought``
        from enthought import mayavi
        from enthought.mayavi import mlab

    # import here to avoid circular import problem
    from ..source_estimate import SourceEstimate

    if not isinstance(stc, SourceEstimate):
        raise ValueError('stc has to be a surface source estimate')

    if hemi not in ['lh', 'rh', 'split', 'both']:
        raise ValueError('hemi has to be either "lh", "rh", "split", '
                         'or "both"')

    n_split = 2 if hemi == 'split' else 1
    n_views = 1 if isinstance(views, string_types) else len(views)
    if figure is not None:
        # use figure with specified id or create new figure
        if isinstance(figure, int):
            figure = mlab.figure(figure, size=(600, 600))
        # make sure it is of the correct type
        if not isinstance(figure, list):
            figure = [figure]
        if not all(isinstance(f, mayavi.core.scene.Scene) for f in figure):
            raise TypeError('figure must be a mayavi scene or list of scenes')
        # make sure we have the right number of figures
        n_fig = len(figure)
        if not n_fig == n_split * n_views:
            # BUGFIX: ``% n_split * n_views`` formatted with n_split and then
            # string-multiplied by n_views; parenthesize the product instead
            raise RuntimeError('`figure` must be a list with the same '
                               'number of elements as PySurfer plots that '
                               'will be created (%s)' % (n_split * n_views))

    subjects_dir = get_subjects_dir(subjects_dir=subjects_dir)
    subject = _check_subject(stc.subject, subject, False)
    if subject is None:
        if 'SUBJECT' in os.environ:
            subject = os.environ['SUBJECT']
        else:
            raise ValueError('SUBJECT environment variable not set')

    if hemi in ['both', 'split']:
        hemis = ['lh', 'rh']
    else:
        hemis = [hemi]

    title = subject if len(hemis) > 1 else '%s - %s' % (subject, hemis[0])
    # older PySurfer versions do not accept ``views``; probe the signature
    args = inspect.getargspec(Brain.__init__)[0]
    kwargs = dict(title=title, figure=figure, config_opts=config_opts,
                  subjects_dir=subjects_dir)
    if 'views' in args:
        kwargs['views'] = views
    else:
        logger.info('PySurfer does not support "views" argument, please '
                    'consider updating to a newer version (0.4 or later)')
    with warnings.catch_warnings(record=True):  # traits warnings
        brain = Brain(subject, hemi, surface, **kwargs)

    for hemi in hemis:
        hemi_idx = 0 if hemi == 'lh' else 1
        # the stc data array stacks lh vertices first, then rh
        if hemi_idx == 0:
            data = stc.data[:len(stc.vertices[0])]
        else:
            data = stc.data[len(stc.vertices[0]):]
        vertices = stc.vertices[hemi_idx]
        time = 1e3 * stc.times  # PySurfer displays time in ms
        with warnings.catch_warnings(record=True):  # traits warnings
            brain.add_data(data, colormap=colormap, vertices=vertices,
                           smoothing_steps=smoothing_steps, time=time,
                           time_label=time_label, alpha=alpha, hemi=hemi,
                           colorbar=colorbar)

        # scale colormap and set time (index) to display
        brain.scale_data_colormap(fmin=fmin, fmid=fmid, fmax=fmax,
                                  transparent=transparent)

    if time_viewer:
        TimeViewer(brain)

    return brain
# Read the MNE dSPM inverse solution hemi = 'lh' stc_fname = os.path.join('example_data', 'meg_source_estimate-' + hemi + '.stc') stc = read_stc(stc_fname) # data and vertices for which the data is defined data = stc['data'] vertices = stc['vertices'] time = np.linspace(stc['tmin'], stc['tmin'] + data.shape[1] * stc['tstep'], data.shape[1], endpoint=False) # MNE will soon add the option for a "full" inverse to be computed and stored. # In the meantime, we can get the equivalent for our data based on the # surface normals: data_full = brain.geo['lh'].nn[vertices][..., np.newaxis] * data[:, np.newaxis] # Now we add the data and set the initial time displayed to 100 ms: brain.add_data(data_full, colormap='hot', vertices=vertices, alpha=0.5, smoothing_steps=5, time=time, hemi=hemi, initial_time=0.1, vector_alpha=0.5, verbose=False) # scale colormap brain.scale_data_colormap(fmin=7, fmid=14, fmax=21, transparent=True, verbose=False) # viewer = TimeViewer(brain)
# Show the subject's parcellation on the original surface, then plot the
# source estimate and prepare keyword arguments for later stc.plot calls.
# NOTE(review): `mri_partic`, `shared_dir`, `parc` and `stc` are defined
# earlier in the script.
brain = Brain(subject_id=mri_partic, subjects_dir=shared_dir, surf='orig',
              hemi='both', background='white', size=(800, 600))
brain.add_annotation(parc)

# plot the source estimate itself (rebinds `brain` to the stc figure)
brain = stc.plot(surface='inflated', hemi='lh', subjects_dir=shared_dir)
brain.set_data_time_index(300)  # 221 for S2
# limits are in Am (dipole moment); presumably chosen for this dataset --
# TODO confirm against the recording
brain.scale_data_colormap(fmin=-1e-12, fmid=1e-12, fmax=50e-12,
                          transparent=True)
brain.show_view('lateral')

# find the vertex/time of the peak response in the right hemisphere
vertno_max, time_max = stc.get_peak(hemi='rh')

# shared plotting options for subsequent stc.plot calls
surfer_kwargs = dict(
    subjects_dir=shared_dir, clim=dict(kind='value', lims=[8, 12, 15]),
    views='lateral', initial_time=time_max, time_unit='s', size=(800, 800),
    smoothing_steps=5)
time points in milliseconds """ time = 1e3 * np.linspace(stc['tmin'], stc['tmin'] + data.shape[1] * stc['tstep'], data.shape[1]) """ colormap to use """ colormap = 'hot' """ label for time annotation """ time_label = 'time=%0.2f ms' brain.add_data(data, colormap=colormap, vertices=vertices, smoothing_steps=10, time=time, time_label=time_label, hemi=hemi) """ scale colormap and set time (index) to display """ brain.set_data_time_index(2) brain.scale_data_colormap(fmin=13, fmid=18, fmax=22, transparent=True) """ uncomment these lines to use the interactive TimeViewer GUI """ #from surfer import TimeViewer #viewer = TimeViewer(brain)
def plot_source_estimates(stc, subject=None, surface='inflated', hemi='lh',
                          colormap='auto', time_label='time=%0.2f ms',
                          smoothing_steps=10, fmin=None, fmid=None, fmax=None,
                          transparent=None, alpha=1.0, time_viewer=False,
                          config_opts={}, subjects_dir=None, figure=None,
                          views='lat', colorbar=True, clim=None):
    """Plot SourceEstimates with PySurfer

    Note: PySurfer currently needs the SUBJECTS_DIR environment variable,
    which will automatically be set by this function. Plotting multiple
    SourceEstimates with different values for subjects_dir will cause
    PySurfer to use the wrong FreeSurfer surfaces when using methods of
    the returned Brain object. It is therefore recommended to set the
    SUBJECTS_DIR environment variable or always use the same value for
    subjects_dir (within the same Python session).

    Parameters
    ----------
    stc : SourceEstimates
        The source estimates to plot.
    subject : str | None
        The subject name corresponding to FreeSurfer environment
        variable SUBJECT. If None stc.subject will be used. If that
        is None, the environment will be used.
    surface : str
        The type of surface (inflated, white etc.).
    hemi : str, 'lh' | 'rh' | 'split' | 'both'
        The hemisphere to display.
    colormap : str | np.ndarray of float, shape(n_colors, 3 | 4)
        Name of colormap to use or a custom look up table. If array, must
        be (n x 3) or (n x 4) array for with RGB or RGBA values between
        0 and 255. If 'auto', either 'hot' or 'mne' will be chosen
        based on whether 'lims' or 'pos_lims' are specified in `clim`.
    time_label : str
        How to print info about the time instant visualized.
    smoothing_steps : int
        The amount of smoothing.
    fmin, fmid, fmax : float | None
        Deprecated; set color limits through ``clim`` instead.
    transparent : bool | None
        If True, use a linear transparency between fmin and fmid.
        None will choose automatically based on colormap type.
    alpha : float
        Alpha value to apply globally to the overlay.
    time_viewer : bool
        Display time viewer GUI.
    config_opts : dict
        Keyword arguments for Brain initialization.
        See pysurfer.viz.Brain. Passed through unmodified.
    subjects_dir : str
        The path to the freesurfer subjects reconstructions.
        It corresponds to Freesurfer environment variable SUBJECTS_DIR.
    figure : instance of mayavi.core.scene.Scene | list | int | None
        If None, a new figure will be created. If multiple views or a
        split view is requested, this must be a list of the appropriate
        length. If int is provided it will be used to identify the Mayavi
        figure by it's id or create a new figure with the given id.
    views : str | list
        View to use. See surfer.Brain().
    colorbar : bool
        If True, display colorbar on scene.
    clim : str | dict
        Colorbar properties specification. If 'auto', set clim
        automatically based on data percentiles. If dict, should contain:

            ``kind`` : str
                Flag to specify type of limits. 'value' or 'percent'.
            ``lims`` : list | np.ndarray | tuple of float, 3 elements
                Note: Only use this if 'colormap' is not 'mne'.
                Left, middle, and right bound for colormap.
            ``pos_lims`` : list | np.ndarray | tuple of float, 3 elements
                Note: Only use this if 'colormap' is 'mne'.
                Left, middle, and right bound for colormap. Positive values
                will be mirrored directly across zero during colormap
                construction to obtain negative control points.

    Returns
    -------
    brain : Brain
        A instance of surfer.viz.Brain from PySurfer.
    """
    from surfer import Brain, TimeViewer
    import mayavi
    from mayavi import mlab

    # import here to avoid circular import problem
    from ..source_estimate import SourceEstimate

    if not isinstance(stc, SourceEstimate):
        raise ValueError('stc has to be a surface source estimate')

    if hemi not in ['lh', 'rh', 'split', 'both']:
        raise ValueError('hemi has to be either "lh", "rh", "split", '
                         'or "both"')

    n_split = 2 if hemi == 'split' else 1
    n_views = 1 if isinstance(views, string_types) else len(views)
    if figure is not None:
        # use figure with specified id or create new figure
        if isinstance(figure, int):
            figure = mlab.figure(figure, size=(600, 600))
        # make sure it is of the correct type
        if not isinstance(figure, list):
            figure = [figure]
        if not all(isinstance(f, mayavi.core.scene.Scene) for f in figure):
            raise TypeError('figure must be a mayavi scene or list of scenes')
        # make sure we have the right number of figures
        n_fig = len(figure)
        if not n_fig == n_split * n_views:
            # BUGFIX: '%' and '*' have equal precedence and associate
            # left-to-right, so without the parentheses the formatted
            # *message string* was repeated n_views times instead of
            # reporting the expected figure count n_split * n_views.
            raise RuntimeError('`figure` must be a list with the same '
                               'number of elements as PySurfer plots that '
                               'will be created (%s)' % (n_split * n_views))

    # Check if using old fmin/fmid/fmax cmap behavior
    if clim is None:
        # Throw deprecation warning and indicate future behavior
        warnings.warn('Using fmin, fmid, fmax (either manually or by default)'
                      ' is deprecated and will be removed in v0.10. Set'
                      ' "clim" to define color limits. In v0.10, "clim" will'
                      ' be set to "auto" by default.', DeprecationWarning)
        # Fill in any missing flim values from deprecated defaults
        # NOTE: `v or c` means an explicit 0 also falls back to the default.
        dep_lims = [v or c for v, c in zip([fmin, fmid, fmax],
                                           [5., 10., 15.])]
        clim = dict(kind='value', lims=dep_lims)
    else:
        if any(f is not None for f in [fmin, fmid, fmax]):
            raise ValueError('"clim" overrides fmin, fmid, fmax')

    # convert control points to locations in colormap
    ctrl_pts, colormap = _limits_to_control_points(clim, stc.data, colormap)

    # Construct cmap manually if 'mne' and get cmap bounds
    # and triage transparent argument
    if colormap in ('mne', 'mne_analyze'):
        colormap = mne_analyze_colormap(ctrl_pts)
        # 'mne' colormaps are symmetric about zero
        scale_pts = [-1 * ctrl_pts[-1], 0, ctrl_pts[-1]]
        transparent = False if transparent is None else transparent
    else:
        scale_pts = ctrl_pts
        transparent = True if transparent is None else transparent

    subjects_dir = get_subjects_dir(subjects_dir=subjects_dir)
    subject = _check_subject(stc.subject, subject, False)
    if subject is None:
        if 'SUBJECT' in os.environ:
            subject = os.environ['SUBJECT']
        else:
            raise ValueError('SUBJECT environment variable not set')

    if hemi in ['both', 'split']:
        hemis = ['lh', 'rh']
    else:
        hemis = [hemi]

    title = subject if len(hemis) > 1 else '%s - %s' % (subject, hemis[0])
    # Older PySurfer versions lack the 'views' keyword; only pass it when
    # Brain.__init__ accepts it.
    args = inspect.getargspec(Brain.__init__)[0]
    kwargs = dict(title=title, figure=figure, config_opts=config_opts,
                  subjects_dir=subjects_dir)
    if 'views' in args:
        kwargs['views'] = views
    with warnings.catch_warnings(record=True):  # traits warnings
        brain = Brain(subject, hemi, surface, **kwargs)
    for hemi in hemis:
        # stc.data stacks lh vertices first, then rh; slice accordingly.
        hemi_idx = 0 if hemi == 'lh' else 1
        if hemi_idx == 0:
            data = stc.data[:len(stc.vertices[0])]
        else:
            data = stc.data[len(stc.vertices[0]):]
        vertices = stc.vertices[hemi_idx]
        time = 1e3 * stc.times  # seconds -> milliseconds for the time label
        with warnings.catch_warnings(record=True):  # traits warnings
            brain.add_data(data, colormap=colormap, vertices=vertices,
                           smoothing_steps=smoothing_steps, time=time,
                           time_label=time_label, alpha=alpha, hemi=hemi,
                           colorbar=colorbar)

        # scale colormap and set time (index) to display
        brain.scale_data_colormap(fmin=scale_pts[0], fmid=scale_pts[1],
                                  fmax=scale_pts[2], transparent=transparent)

    if time_viewer:
        TimeViewer(brain)

    return brain
def visualize_dle_colormaps(data, src, colormap='hot', smoothing_steps=50):
    """Average per-vertex DLE/SD/OA metrics and render them as surface heatmaps.

    Parameters
    ----------
    data : np.ndarray
        Result table; column 0 holds ground-truth vertex ids, and metric
        columns start at ``offset`` (see below). NOTE(review): exact column
        layout is assumed from the indexing here — confirm against the
        code that writes this table.
    src : source space (list-like, indexable by hemisphere)
        Used to map vertex ids through ``src[hemi]['inuse']``.
    colormap : str
        PySurfer colormap name for the overlays.
    smoothing_steps : int
        Spatial smoothing passed to ``Brain.add_data``.

    Notes
    -----
    CHANNELS, GROUND_TRUTH, HEMISPHERE, SUBJECT_NAME and SUBJECTS_DIR are
    module-level globals (not visible in this block). Figures are saved
    under the "visualization" directory; nothing is returned.
    """
    # Per-method result vectors, filled in the loops below.
    mne_dle, sloreta_dle, dspm_dle, nn_dle = np.array([]), np.array(
        []), np.array([]), np.array([])
    mne_sd, sloreta_sd, dspm_sd, nn_sd = np.array([]), np.array([]), np.array(
        []), np.array([])
    mne_oa, sloreta_oa, dspm_oa, nn_oa = np.array([]), np.array([]), np.array(
        []), np.array([])
    # Encode hemisphere as the index into src.
    if HEMISPHERE == 'rh':
        hemi = 1
    else:
        hemi = 0
    # First metric column and the stride between metric groups.
    offset = len(CHANNELS) + len(GROUND_TRUTH) + 1  # 5
    diff = len(CHANNELS) + len(GROUND_TRUTH)
    # NOTE(review): on Python 3 this is an iterator; it is consumed by the
    # set() below and rebound later — presumably Python-2-era code.
    ground_truth_verts = map(int, list(data[:, 0]))
    unique_verts = sorted(set(ground_truth_verts))
    print(unique_verts)
    # ---- Accumulate per-vertex sums for each inverse method (channel). ----
    for c in range(0, len(CHANNELS)):
        avgs_dle = np.zeros(max(unique_verts) + 1)
        avgs_sd = np.zeros(max(unique_verts) + 1)
        avgs_oa = np.zeros(max(unique_verts) + 1)
        counts = np.zeros(max(unique_verts) + 1)
        for d in range(0, data.shape[0]):
            vert = int(data[d, 0])
            avgs_dle[vert] += data[d, offset + c]
            avgs_sd[vert] += data[d, offset + c + diff]
            avgs_oa[vert] += data[d, offset + c + 2 * diff]
            counts[vert] += 1
        # Drop vertices that never occurred; scale DLE/SD to percent.
        avgs_dle = avgs_dle[counts != 0] * 100
        avgs_sd = avgs_sd[counts != 0] * 100
        avgs_oa = avgs_oa[counts != 0]
        counts = counts[counts != 0]
        # Convert sums to means and route to the matching output vectors.
        if CHANNELS[c] == 'mne':
            mne_dle = np.true_divide(avgs_dle, counts)
            mne_sd = np.true_divide(avgs_sd, counts)
            mne_oa = np.true_divide(avgs_oa, counts)
        elif CHANNELS[c] == 'sloreta':
            sloreta_dle = np.true_divide(avgs_dle, counts)
            sloreta_sd = np.true_divide(avgs_sd, counts)
            sloreta_oa = np.true_divide(avgs_oa, counts)
        elif CHANNELS[c] == 'dspm':
            dspm_dle = np.true_divide(avgs_dle, counts)
            dspm_sd = np.true_divide(avgs_sd, counts)
            dspm_oa = np.true_divide(avgs_oa, counts)
        else:
            raise ValueError("Channel %s not understood" % CHANNELS[c])
    # ---- Same accumulation for the ground-truth-based columns. ----
    for g in range(0, len(GROUND_TRUTH)):
        avgs_dle = np.zeros(max(unique_verts) + 1)
        avgs_sd = np.zeros(max(unique_verts) + 1)
        avgs_oa = np.zeros(max(unique_verts) + 1)
        counts = np.zeros(max(unique_verts) + 1)
        for d in range(0, data.shape[0]):
            vert = int(data[d, 0])
            avgs_dle[vert] += data[d, offset + g +
                                   len(CHANNELS)]
            avgs_sd[vert] += data[d, offset + g + len(CHANNELS) + diff]
            avgs_oa[vert] += data[d, offset + g + len(CHANNELS) + 2 * diff]
            counts[vert] += 1
        avgs_dle = avgs_dle[counts != 0] * 100
        avgs_sd = avgs_sd[counts != 0] * 100
        avgs_oa = avgs_oa[counts != 0]
        counts = counts[counts != 0]
        if GROUND_TRUTH[g] == 'stc':
            nn_dle = np.true_divide(avgs_dle, counts)
            nn_sd = np.true_divide(avgs_sd, counts)
            nn_oa = np.true_divide(avgs_oa, counts)
    # Map table vertex ids to source-space vertex numbers for plotting.
    ground_truth_verts = np.where(src[hemi]['inuse'])[0][unique_verts]
    print(ground_truth_verts)
    # Shared colormap bounds across all methods, per metric.
    maxv_dle = math.ceil(
        max(mne_dle.max(), sloreta_dle.max(), dspm_dle.max(), nn_dle.max()))
    maxv_sd = math.ceil(
        max(mne_sd.max(), sloreta_sd.max(), dspm_sd.max(), nn_sd.max()))
    maxv_oa = math.ceil(
        max(mne_oa.max(), sloreta_oa.max(), dspm_oa.max(), nn_oa.max()))
    minv_dle = 0
    minv_sd = math.floor(
        min(mne_sd.min(), sloreta_sd.min(), dspm_sd.min(), nn_sd.min()))
    minv_oa = math.floor(
        min(mne_oa.min(), sloreta_oa.min(), dspm_oa.min(), nn_oa.min()))
    midv_dle = (maxv_dle + minv_dle) // 2
    midv_sd = (maxv_sd + minv_sd) // 2
    midv_oa = (maxv_oa + minv_oa) // 2
    # Diagnostic dump of the per-vertex DLE vectors and their means.
    print(mne_dle)
    print(mne_dle.sum() / len(mne_dle))
    print(sloreta_dle)
    print(sloreta_dle.sum() / len(sloreta_dle))
    print(dspm_dle)
    print(dspm_dle.sum() / len(dspm_dle))
    print(nn_dle)
    print(nn_dle.sum() / len(nn_dle))
    print(len(mne_dle))
    # ---- Render and save one heatmap per method per metric. ----
    for c in CHANNELS:
        brain = Brain(SUBJECT_NAME, HEMISPHERE, 'inflated',
                      subjects_dir=SUBJECTS_DIR)
        if c == 'mne':
            brain.add_data(mne_dle, colormap=colormap,
                           vertices=ground_truth_verts,
                           smoothing_steps=smoothing_steps, hemi=HEMISPHERE)
            brain.scale_data_colormap(fmin=minv_dle, fmid=midv_dle,
                                      fmax=maxv_dle, transparent=True)
            mlab.savefig(join("visualization", "mne_dle_heatmap.png"))
            #mlab.show()
            brain = Brain(SUBJECT_NAME, HEMISPHERE, 'inflated',
                          subjects_dir=SUBJECTS_DIR)
            brain.add_data(mne_sd, colormap=colormap,
                           vertices=ground_truth_verts,
                           smoothing_steps=smoothing_steps, hemi=HEMISPHERE)
            brain.scale_data_colormap(fmin=minv_sd, fmid=midv_sd,
                                      fmax=maxv_sd, transparent=True)
            mlab.savefig(join("visualization", "mne_sd_heatmap.png"))
            #mlab.show()
            brain = Brain(SUBJECT_NAME, HEMISPHERE, 'inflated',
                          subjects_dir=SUBJECTS_DIR)
            brain.add_data(mne_oa, colormap=colormap,
                           vertices=ground_truth_verts,
                           smoothing_steps=smoothing_steps, hemi=HEMISPHERE)
            brain.scale_data_colormap(fmin=minv_oa, fmid=midv_oa,
                                      fmax=maxv_oa, transparent=True)
            mlab.savefig(join("visualization", "mne_oa_heatmap.png"))
            #mlab.show()
        elif c == 'sloreta':
            brain.add_data(sloreta_dle, colormap=colormap,
                           vertices=ground_truth_verts,
                           smoothing_steps=smoothing_steps, hemi=HEMISPHERE)
            brain.scale_data_colormap(fmin=minv_dle, fmid=midv_dle,
                                      fmax=maxv_dle, transparent=True)
            mlab.savefig(join("visualization", "sloreta_dle_heatmap.png"))
            #mlab.show()
            brain = Brain(SUBJECT_NAME, HEMISPHERE, 'inflated',
                          subjects_dir=SUBJECTS_DIR)
            brain.add_data(sloreta_sd, colormap=colormap,
                           vertices=ground_truth_verts,
                           smoothing_steps=smoothing_steps, hemi=HEMISPHERE)
            brain.scale_data_colormap(fmin=minv_sd, fmid=midv_sd,
                                      fmax=maxv_sd, transparent=True)
            mlab.savefig(join("visualization", "sloreta_sd_heatmap.png"))
            #mlab.show()
            brain = Brain(SUBJECT_NAME, HEMISPHERE, 'inflated',
                          subjects_dir=SUBJECTS_DIR)
            brain.add_data(sloreta_oa, colormap=colormap,
                           vertices=ground_truth_verts,
                           smoothing_steps=smoothing_steps, hemi=HEMISPHERE)
            brain.scale_data_colormap(fmin=minv_oa, fmid=midv_oa,
                                      fmax=maxv_oa, transparent=True)
            mlab.savefig(join("visualization", "sloreta_oa_heatmap.png"))
            #mlab.show()
        elif c == 'dspm':
            brain.add_data(dspm_dle, colormap=colormap,
                           vertices=ground_truth_verts,
                           smoothing_steps=smoothing_steps, hemi=HEMISPHERE)
            brain.scale_data_colormap(fmin=minv_dle, fmid=midv_dle,
                                      fmax=maxv_dle, transparent=True)
            mlab.savefig(join("visualization", "dspm_dle_heatmap.png"))
            #mlab.show()
            brain = Brain(SUBJECT_NAME, HEMISPHERE, 'inflated',
                          subjects_dir=SUBJECTS_DIR)
            brain.add_data(dspm_sd, colormap=colormap,
                           vertices=ground_truth_verts,
                           smoothing_steps=smoothing_steps, hemi=HEMISPHERE)
            brain.scale_data_colormap(fmin=minv_sd, fmid=midv_sd,
                                      fmax=maxv_sd, transparent=True)
            mlab.savefig(join("visualization", "dspm_sd_heatmap.png"))
            #mlab.show()
            brain = Brain(SUBJECT_NAME, HEMISPHERE, 'inflated',
                          subjects_dir=SUBJECTS_DIR)
            brain.add_data(dspm_oa, colormap=colormap,
                           vertices=ground_truth_verts,
                           smoothing_steps=smoothing_steps, hemi=HEMISPHERE)
            brain.scale_data_colormap(fmin=minv_oa, fmid=midv_oa,
                                      fmax=maxv_oa, transparent=True)
            mlab.savefig(join("visualization", "dspm_oa_heatmap.png"))
            #mlab.show()
    # ---- Same rendering for the ground-truth ('stc') metrics. ----
    for g in GROUND_TRUTH:
        brain = Brain(SUBJECT_NAME, HEMISPHERE, 'inflated',
                      subjects_dir=SUBJECTS_DIR)
        if g == 'stc':
            brain.add_data(nn_dle, colormap=colormap,
                           vertices=ground_truth_verts,
                           smoothing_steps=smoothing_steps, hemi=HEMISPHERE)
            brain.scale_data_colormap(fmin=minv_dle, fmid=midv_dle,
                                      fmax=maxv_dle, transparent=True)
            mlab.savefig(join("visualization", "nn_dle_heatmap.png"))
            #mlab.show()
            brain = Brain(SUBJECT_NAME, HEMISPHERE, 'inflated',
                          subjects_dir=SUBJECTS_DIR)
            brain.add_data(nn_sd, colormap=colormap,
                           vertices=ground_truth_verts,
                           smoothing_steps=smoothing_steps, hemi=HEMISPHERE)
            brain.scale_data_colormap(fmin=minv_sd, fmid=midv_sd,
                                      fmax=maxv_sd, transparent=True)
            mlab.savefig(join("visualization", "nn_sd_heatmap.png"))
            #mlab.show()
            brain = Brain(SUBJECT_NAME, HEMISPHERE, 'inflated',
                          subjects_dir=SUBJECTS_DIR)
            brain.add_data(nn_oa, colormap=colormap,
                           vertices=ground_truth_verts,
                           smoothing_steps=smoothing_steps, hemi=HEMISPHERE)
            brain.scale_data_colormap(fmin=minv_oa, fmid=midv_oa,
                                      fmax=maxv_oa, transparent=True)
            mlab.savefig(join("visualization", "nn_oa_heatmap.png"))