def plot_single_roi(roi): fsaverage = "fsaverage" hemi = "lh" surf = "inflated" t, p, str_names, labels = get_data() df = pd.DataFrame({'labs': str_names, 't': 0}) df.loc[df.labs == roi, 't'] = -4 data = df.t.values data = data[labels] brain = Brain(fsaverage, hemi, surf, background="white", views=['lateral', 'ventral', 'medial', 'frontal']) brain.add_data(data, -10, 11, thresh=None, colormap="RdBu_r", alpha=1) f = brain.save_montage(None, [['lateral', 'parietal'], ['medial', 'frontal']], border_size=0, colorbar=None) fig, a = plt.subplots() im = plt.imshow(f, cmap='RdBu_r') a.set(xticks=[], yticks=[]) sns.despine(bottom=True, left=True) cbar = fig.colorbar(im, ticks=[f.min(), (f.min() + 255) / 2, 255], orientation='horizontal', drawedges=False) cbar.ax.set_xticklabels(['-10', '0', '10']) a.set_title(roi) return f, data
def plot_V1V2(measure, hemi, clim): data, vert = extract_hemi_data(src_df, measure, time, hemi) brain = Brain('fsaverage', hemi, 'inflated', cortex='low_contrast', subjects_dir='mne_subjects', background='w', foreground='k') brain.add_data(data, vertices=vert, min=-clim[2], max=clim[2], time=[time], time_label=lambda t: '%d ms' % (t * 1000), colormap=morph_divergent_cmap(cmap, clim), hemi=hemi, smoothing_steps=5) labels = mne.read_labels_from_annot('fsaverage', parc='HCPMMP1', hemi=hemi, regexp='[LR]_((V1)|(V2)).*') for label in labels: brain.add_label(label, borders=True) mlab.view(*views[hemi]) return brain
def plot_overlays_Fgroup(condition,modality,hemi,azimuth): brain = Brain(subject_id='fsaverage', hemi=hemi,surf='pial',cortex = 'low_contrast', size=(600, 600)) stc_fname = ('/neurospin/meg/meg_tmp/MTT_MEG_Baptiste/MEG/GROUP/mne_python/Plot_STATS/' +"_vs_".join(condition) +'/fmap'+ modality+ '_' +"_vs_".join(condition)+ '-' + hemi+'.stc') stc = read_stc(stc_fname) data = stc['data'] vertices = stc['vertices'] brain.add_data(data, thresh = 3.259,colormap='hot',alpha=1, vertices=vertices, smoothing_steps=3,hemi=hemi) brain.set_data_time_index(0) brain.scale_data_colormap(fmin=3.26, fmid=5.84, fmax= 8.42, transparent=False) brain.show_view(dict(azimuth=azimuth,elevation=None, distance=None)) # mayavi.mlab.view(azimuth=0, elevation=None, distance=None, focalpoint=None, # roll=None, reset_roll=True, figure=None) PlotDir = [] PlotDir = ('/neurospin/meg/meg_tmp/MTT_MEG_Baptiste/MEG/GROUP/mne_python/Plot_STATS/' + "_vs_".join(condition)) if not os.path.exists(PlotDir): os.makedirs(PlotDir) brain.save_image(PlotDir + '/Fmap_IcaCorr_' + modality + '_' + 'dSPM' + '_' + '_' + "_vs_".join(condition) + '_' + hemi + '_'+ str(azimuth)+ '_ico-5-fwd-fsaverage-'+'.png')
def test_movie(): """Test saving a movie of an MEG inverse solution """ # create and setup the Brain instance mlab.options.backend = 'auto' brain = Brain(*std_args) stc_fname = os.path.join(data_dir, 'meg_source_estimate-lh.stc') stc = io.read_stc(stc_fname) data = stc['data'] time = np.arange(data.shape[1]) * stc['tstep'] + stc['tmin'] brain.add_data(data, colormap='hot', vertices=stc['vertices'], smoothing_steps=10, time=time, time_label='time=%0.2f ms') brain.scale_data_colormap(fmin=13, fmid=18, fmax=22, transparent=True) # save movies with different options tempdir = mkdtemp() try: dst = os.path.join(tempdir, 'test.mov') brain.save_movie(dst) brain.save_movie(dst, tmin=0.081, tmax=0.102) # test the number of frames in the movie sp = subprocess.Popen(('ffmpeg', '-i', 'test.mov', '-vcodec', 'copy', '-f', 'null', '/dev/null'), cwd=tempdir, stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout, stderr = sp.communicate() m = re.search('frame=\s*(\d+)\s', stderr) if not m: raise RuntimeError(stderr) n_frames = int(m.group(1)) assert_equal(n_frames, 3) finally: # clean up shutil.rmtree(tempdir) brain.close()
def plot_surface(vtx_data, subject_id, subjects_dir, hemi, surface, output_dir, prefix, l, u, cmap, center, thresh): # Open up a brain in pysurfer brain = Brain( subject_id, hemi, surface, subjects_dir=subjects_dir, config_opts=dict(background="white", height=665, width=800), ) if center: # Make sure the colorbar is centered if l ** 2 < u ** 2: l = u * -1 else: u = l * -1 # Create an empty brain if the values are all below threshold if np.max(vtx_data) < thresh: # Add your data to the brain brain.add_data(vtx_data * 0, l, u, thresh=thresh, colormap=cmap, alpha=0.0) # Otherwise, add the data appropriately! else: # Add your data to the brain brain.add_data(vtx_data, l, u, thresh=thresh, colormap=cmap, alpha=0.8) # Save the images for medial and lateral # putting a color bar on all of them brain.save_imageset(prefix=os.path.join(output_dir, prefix), views=views_list, colorbar=range(len(views_list)))
def test_probabilistic_labels(): """Test plotting of probabilistic labels.""" _set_backend() brain = Brain("fsaverage", "lh", "inflated", cortex="low_contrast") extra, subj_dir = _get_extra() brain.add_label("BA1" + extra, color="darkblue") brain.add_label("BA1" + extra, color="dodgerblue", scalar_thresh=.5) brain.add_label("BA45" + extra, color="firebrick", borders=True) brain.add_label("BA45" + extra, color="salmon", borders=True, scalar_thresh=.5) label_file = pjoin(subj_dir, "fsaverage", "label", "lh.BA6%s.label" % (extra, )) prob_field = np.zeros_like(brain.geo['lh'].x) ids, probs = nib.freesurfer.read_label(label_file, read_scalars=True) prob_field[ids] = probs brain.add_data(prob_field, thresh=1e-5) with warnings.catch_warnings(record=True): brain.data["colorbar"].number_of_colors = 10 brain.data["colorbar"].number_of_labels = 11 brain.close()
def test_movie(tmpdir): """Test saving a movie of an MEG inverse solution.""" import imageio if sys.version_info < (3, ): raise SkipTest('imageio ffmpeg requires Python 3') # create and setup the Brain instance _set_backend() brain = Brain(*std_args) stc_fname = os.path.join(data_dir, 'meg_source_estimate-lh.stc') stc = io.read_stc(stc_fname) data = stc['data'] time = np.arange(data.shape[1]) * stc['tstep'] + stc['tmin'] brain.add_data(data, colormap='hot', vertices=stc['vertices'], smoothing_steps=10, time=time, time_label='time=%0.2f ms') brain.scale_data_colormap(fmin=13, fmid=18, fmax=22, transparent=True) # save movies with different options dst = str(tmpdir.join('test.mov')) # test the number of frames in the movie brain.save_movie(dst) frames = imageio.mimread(dst) assert len(frames) == 2 brain.save_movie(dst, time_dilation=10) frames = imageio.mimread(dst) assert len(frames) == 7 brain.save_movie(dst, tmin=0.081, tmax=0.102) frames = imageio.mimread(dst) assert len(frames) == 2 brain.close()
def plot_stc(stc): ''' Plot average stc within freq range. ''' brain = Brain(subj_id, 'split', 'inflated', size=(800, 400), views=['lat','med']) freqs = stc_av.times f_mask = [(freqs > low) & (freqs < high)] f_idx = np.nonzero(f_mask)[1] freqs_mask = freqs[f_mask] l_data = stc_av.lh_data l_data_mask = l_data[:,f_idx] l_data_mean = np.mean(l_data_mask, axis = 1) l_vertices = stc_av.lh_vertno r_data = stc_av.rh_data r_data_mask = r_data[:,f_idx] r_data_mean = np.mean(r_data_mask, axis = 1) r_vertices = stc_av.rh_vertno cmap = 'nipy_spectral' smooth = 10 time = np.mean(freqs_mask) brain.add_data(l_data_mean, colormap = cmap, vertices = l_vertices, smoothing_steps = smooth, time = time, colorbar = True, hemi = 'lh') brain.add_data(r_data_mean, colormap = cmap, vertices = r_vertices, smoothing_steps = smooth, time = time, colorbar = True, hemi = 'rh') return(brain)
def test_movie(): """Test saving a movie of an MEG inverse solution.""" import imageio # create and setup the Brain instance _set_backend() brain = Brain(*std_args) stc_fname = os.path.join(data_dir, 'meg_source_estimate-lh.stc') stc = io.read_stc(stc_fname) data = stc['data'] time = np.arange(data.shape[1]) * stc['tstep'] + stc['tmin'] brain.add_data(data, colormap='hot', vertices=stc['vertices'], smoothing_steps=10, time=time, time_label='time=%0.2f ms') brain.scale_data_colormap(fmin=13, fmid=18, fmax=22, transparent=True) # save movies with different options tempdir = mkdtemp() try: dst = os.path.join(tempdir, 'test.mov') # test the number of frames in the movie brain.save_movie(dst) frames = imageio.mimread(dst) assert_equal(len(frames), 2) brain.save_movie(dst, time_dilation=10) frames = imageio.mimread(dst) assert_equal(len(frames), 7) brain.save_movie(dst, tmin=0.081, tmax=0.102) frames = imageio.mimread(dst) assert_equal(len(frames), 2) finally: # clean up if not (sys.platform == 'win32' and os.getenv('APPVEYOR', 'False') == 'True'): # cleanup problems shutil.rmtree(tempdir) brain.close()
def test_probabilistic_labels(): """Test plotting of probabilistic labels """ mlab.options.backend = 'test' brain = Brain("fsaverage", "lh", "inflated", config_opts=dict(cortex="low_contrast")) brain.add_label("BA1", color="darkblue") brain.add_label("BA1", color="dodgerblue", scalar_thresh=.5) brain.add_label("BA45", color="firebrick", borders=True) brain.add_label("BA45", color="salmon", borders=True, scalar_thresh=.5) label_file = pjoin(subj_dir, "fsaverage", "label", "lh.BA6.label") prob_field = np.zeros_like(brain._geo.x) ids, probs = nib.freesurfer.read_label(label_file, read_scalars=True) prob_field[ids] = probs brain.add_data(prob_field, thresh=1e-5) brain.data["colorbar"].number_of_colors = 10 brain.data["colorbar"].number_of_labels = 11 brain.close()
def test_meg_inverse(): """Test plotting of MEG inverse solution """ mlab.options.backend = 'test' brain = Brain(*std_args) stc_fname = os.path.join(data_dir, 'meg_source_estimate-lh.stc') stc = io.read_stc(stc_fname) data = stc['data'] vertices = stc['vertices'] time = np.linspace(stc['tmin'], stc['tmin'] + data.shape[1] * stc['tstep'], data.shape[1], endpoint=False) colormap = 'hot' def time_label(t): return 'time=%0.2f ms' % (1e3 * t) brain.add_data(data, colormap=colormap, vertices=vertices, smoothing_steps=10, time=time, time_label=time_label) brain.scale_data_colormap(fmin=13, fmid=18, fmax=22, transparent=True) assert_equal(brain.data_dict['lh']['time_idx'], 0) brain.set_time(.1) assert_equal(brain.data_dict['lh']['time_idx'], 2) # viewer = TimeViewer(brain) brain.add_data(data, colormap=colormap, vertices=vertices, smoothing_steps=10, time=time, time_label=time_label, initial_time=.09, remove_existing=True) assert_equal(brain.data_dict['lh']['time_idx'], 1) brain.close()
def montage_plot(parameter, in_dir, task, fdr_correct=True, hemi='lh', input_hemisphere='', annot=None): ''' Make plots for parameter on the cortical surface using pysurf module - Arguments: a) parameter b) output directory c) task (inference or instructed) d) FDR correction (boolean) ''' out_dir = join(in_dir, 'pysurf_plots') slu.mkdir_p(out_dir) fsaverage = "fsaverage" surf = "inflated" t_data, p_data, str_names, labels = get_data(task, in_dir, input_hemisphere, hemi) if fdr_correct is True: data = fdr_filter(t_data, p_data, parameter) else: data = t_data[parameter].values data = data[labels] brain = Brain(fsaverage, hemi, surf, background="white", title=parameter + task) brain.add_data(data, -10, 10, thresh=None, colormap="RdBu_r", alpha=.8) if annot is not None: brain.add_annotation(annot, color='white', alpha=1) brain.save_imageset(join(out_dir, parameter + '_' + task + input_hemisphere), ['lateral', 'medial', 'par'], colorbar=None)
def plot_overlays_diff_group_window(condition,method,modality,hemi,window,azimuth,elevation): subject_id, surface = 'fsaverage', 'inflated' hemi = hemi brain = Brain(subject_id, hemi, surface, size=(600, 600)) stc_fname = ('/neurospin/meg/meg_tmp/MTT_MEG_Baptiste/MEG/GROUP/mne_python/BrainMaps/IcaCorr_Normalized' + modality + '_' + condition[0] + '-' + condition[1] + '_pick_oriNone_' + method + '_ico-5-fwd-fsaverage.stc-'+ hemi +'.stc') stcl_fname = ('/neurospin/meg/meg_tmp/MTT_MEG_Baptiste/MEG/GROUP/mne_python/BrainMaps/IcaCorr_Normalized' + modality + '_' + condition[0] + '-' + condition[1] + '_pick_oriNone_' + method + '_ico-5-fwd-fsaverage.stc-lh.stc') stcr_fname = ('/neurospin/meg/meg_tmp/MTT_MEG_Baptiste/MEG/GROUP/mne_python/BrainMaps/IcaCorr_Normalized' + modality + '_' + condition[0] + '-' + condition[1] + '_pick_oriNone_' + method + '_ico-5-fwd-fsaverage.stc-rh.stc') stcl = read_stc(stcl_fname) stcr = read_stc(stcr_fname) datal = stcl['data'] datar = stcr['data'] stc = read_stc(stc_fname) data = stc['data'] vertices = stc['vertices'] time = np.linspace(stc['tmin'], stc['tmin'] + data.shape[1] * stc['tstep'], data.shape[1]) winstart = np.where(time < window[0])[0][-1] winend = np.where(time >= window[1])[0][0] meanval = np.mean(data[:,winstart:winend],1) meanvalr = np.mean(datar[:,winstart:winend],1) meanvall = np.mean(datal[:,winstart:winend],1) maxval = np.max([np.max(meanvalr),np.max(meanvall)]) minval = np.min([np.min(meanvalr),np.min(meanvall)]) fmin = -np.max(np.abs([maxval,minval]))*0.8 fmax = np.max(np.abs([maxval,minval]))*0.8 colormap = mne.viz.mne_analyze_colormap(limits=[fmin, fmin/3, fmin/3.1, fmax/3.1, fmax/3, fmax], format='mayavi') #colormap = 'jet' time_label = lambda t: 'time=%0.2f ms' % (0) brain.add_data(meanval, colormap=colormap, vertices=vertices, smoothing_steps=15, time=time, time_label=time_label, hemi=hemi) brain.scale_data_colormap(fmin=fmin, fmid=0, fmax=fmax, transparent=False) brain.show_view(dict(azimuth=azimuth,elevation=elevation, distance=None)) # mayavi.mlab.view(azimuth=0, elevation=None, distance=None, focalpoint=None, # roll=None, reset_roll=True, figure=None) PlotDir = [] PlotDir = ('/neurospin/meg/meg_tmp/MTT_MEG_Baptiste/MEG/GROUP/mne_python/Plots/IcaCorr_Window_' + condition[0] + '-' + condition[1] + str(window[0]) + '-' + str(window[1])) if not os.path.exists(PlotDir): os.makedirs(PlotDir) brain.save_image(PlotDir + '/IcaCorr_' + modality + '_' + method + '_' + '_Normalized' + condition[0] + '-' + condition[1] + '_' + str(window[0]) + '-' + str(window[1]) + hemi + '_'+ str(azimuth)+ '_ico-5-fwd-fsaverage-'+'.png')
def test_movie(tmpdir): """Test saving a movie of an MEG inverse solution.""" import imageio if sys.version_info < (3,): raise SkipTest('imageio ffmpeg requires Python 3') # create and setup the Brain instance _set_backend() brain = Brain(*std_args) stc_fname = os.path.join(data_dir, 'meg_source_estimate-lh.stc') stc = io.read_stc(stc_fname) data = stc['data'] time = np.arange(data.shape[1]) * stc['tstep'] + stc['tmin'] brain.add_data(data, colormap='hot', vertices=stc['vertices'], smoothing_steps=10, time=time, time_label='time=%0.2f ms') brain.scale_data_colormap(fmin=13, fmid=18, fmax=22, transparent=True) # save movies with different options dst = str(tmpdir.join('test.mov')) # test the number of frames in the movie brain.save_movie(dst) frames = imageio.mimread(dst) assert len(frames) == 2 brain.save_movie(dst, time_dilation=10) frames = imageio.mimread(dst) assert len(frames) == 7 brain.save_movie(dst, tmin=0.081, tmax=0.102) frames = imageio.mimread(dst) assert len(frames) == 2 brain.close()
def plot_parcel(num_nodes=600,numbers=[1],hemi='lh'): from surfer import Brain, io for n in numbers: brain = Brain("fsaverage", "%s" %(hemi), "pial",config_opts=dict(background="white")) image = io.project_volume_data('/home/despo/mb3152/random_nodes/%s/parcel_%s.nii'%(num_nodes,n),hemi, subject_id="fsaverage", projsum = 'max', smooth_fwhm = 0) brain.add_data(image,thresh=1,colormap = "spectral") brain.save_imageset('/home/despo/mb3152/random_nodes/%s/parcel_%s' %(num_nodes,n),['med','lat'],'jpg',colorbar= None) brain.close()
def test_data_limits(): """Test handling of data limits.""" _set_backend() brain = Brain(*std_args) surf_data = np.zeros(163842) pytest.raises(ValueError, brain.add_data, surf_data, 0, 0) brain.add_data(surf_data, 0, 1) brain.close()
def test_data(): """Test plotting of data """ mlab.options.backend = 'test' brain = Brain(*std_args) mri_file = pjoin(data_dir, 'resting_corr.nii.gz') reg_file = pjoin(data_dir, 'register.dat') surf_data = io.project_volume_data(mri_file, "lh", reg_file) brain.add_data(surf_data, -.7, .7, colormap="jet", alpha=.7)
def curvature_normalization(data_dir, subj): """Normalize the curvature map and plot contour over fsaverage.""" surf_dir = op.join(data_dir, subj, "surf") snap_dir = op.join(data_dir, subj, "snapshots") panels = [] for hemi in ["lh", "rh"]: # Load the curv values and apply registration to fsaverage curv_fname = op.join(surf_dir, "{}.curv".format(hemi)) curv_vals = nib.freesurfer.read_morph_data(curv_fname) subj_curv_vals = apply_surface_warp(data_dir, subj, hemi, curv_vals) subj_curv_binary = (subj_curv_vals > 0) # Load the template curvature norm_fname = op.join(data_dir, "fsaverage", "surf", "{}.curv".format(hemi)) norm_curv_vals = nib.freesurfer.read_morph_data(norm_fname) norm_curv_binary = (norm_curv_vals > 0) # Compute the curvature overlap image curv_overlap = np.zeros_like(norm_curv_binary, np.int) curv_overlap[norm_curv_binary & subj_curv_binary] = 1 curv_overlap[norm_curv_binary ^ subj_curv_binary] = 2 # Mask out the medial wall cortex_fname = op.join(data_dir, "fsaverage", "label", "{}.cortex.label".format(hemi)) cortex = nib.freesurfer.read_label(cortex_fname) medial_wall = ~np.in1d(np.arange(curv_overlap.size), cortex) curv_overlap[medial_wall] = 1 # Plot the curvature overlap image try: b = Brain("fsaverage", hemi, "inflated", background="white") except TypeError: # PySurfer <= 0.5 b = Brain("fsaverage", hemi, "inflated", config_opts=dict(background="white")) b.add_data(curv_overlap, min=0, max=2, colormap=[".9", ".45", "indianred"], colorbar=False) for view in ["lat", "med", "ven"]: b.show_view(view, distance="auto") panels.append(crop(b.screenshot())) b.close() # Make and save a figure f = multi_panel_brain_figure(panels) fname = op.join(snap_dir, "surface_registration.png") f.savefig(fname, bbox_inches="tight") plt.close(f)
def img2disc(data, foci_all=False, foci_dmn=False, labelfile=False, hemi='lh', filename='temp.png'): brain = Brain('fsaverage5', hemi, 'inflated', curv=False) brain.add_data(data, data.min(), data.max(), colormap="spectral", alpha=0.6) if labelfile: brain.add_label(labelfile, borders=True, color='grey') if foci_all: brain.add_foci(foci_all, coords_as_verts=True, scale_factor=.5, color='black') if foci_dmn: brain.add_foci(foci_dmn, coords_as_verts=True, scale_factor=.7, color='blue') brain.save_montage(filename, order=['lat', 'med'], orientation='h', border_size=10)
def add_cluster(clustermap, hemi, fsaverage): brain = Brain(fsaverage, hemi, surface, config_opts=dict(background="lightslategray", cortex="high_contrast")) brain.add_data(clustermap, colormap='spectral', alpha=.8) brain.data['colorbar'].number_of_colors = int(clustermap.max()) + 1 brain.data['colorbar'].number_of_labels = int(clustermap.max( )) + 1 ##because -1 denotes masked regions, cluster labels start at 1
def test_data(): """Test plotting of data """ mlab.options.backend = 'test' brain = Brain(*std_args) mri_file = pjoin(data_dir, 'resting_corr.nii.gz') reg_file = pjoin(data_dir, 'register.dat') surf_data = io.project_volume_data(mri_file, "lh", reg_file) brain.add_data(surf_data, -.7, .7, colormap="jet", alpha=.7) brain.close()
def pysurfer_plot_perm_ttest_results(vertices, vertives_values, max_vals, fol): T = max(vertices.keys()) for t in range(T+1): print(t) brain = Brain('fsaverage', 'split', 'pial', curv=False, offscreen=False, views=['lat', 'med'], title='{} ms'.format(t)) for hemi in ['rh', 'lh']: if t in vertices: brain.add_data(np.array(vertives_values[t][hemi]), hemi=hemi, min=1, max=max_vals, remove_existing=True, colormap="YlOrRd", alpha=1, vertices=np.array(vertices[t][hemi])) brain.save_image(os.path.join(fol, '{}.jpg'.format(t))) brain.close()
def test_data(): """Test plotting of data.""" _set_backend() brain = Brain(*std_args) mri_file = pjoin(data_dir, 'resting_corr.nii.gz') reg_file = pjoin(data_dir, 'register.dat') surf_data = io.project_volume_data(mri_file, "lh", reg_file) brain.add_data(surf_data, -.7, .7, colormap="jet", alpha=.7) brain.set_surf('white') brain.add_data([], vertices=np.array([], int)) brain.close()
def visualise_surface(zstat, hemi, surface='inflated', min_val=None, max_val=None, thr_val=0, cmap='autumn'): ''' VERY helpfully taken from the Pysurfer example gallery http://pysurfer.github.io/examples/plot_fmri_activation_volume.html Parameters ---------- zstat : pysurfer surface Surface projection of result_file to fsaverage brain prefix : save hemi : {'lh', 'rh'} string indicating left or right hemisphere surface : {'inflated', 'pial', 'white'} string indicating surface view min_val : minimum value to be shown (default is to calculate it from the data) max_val : maximum value to be shown (default is to calculate it from the data) thr_val : threshold value zero everything below this number (default is 0) cmap : matplotlib colormap (default is autumn) Returns ---------- brain : current pysurfer visualization window ''' """ Bring up the visualization window. """ brain = Brain("fsaverage", hemi, surface, config_opts={'background': 'white'}) """ Add zstat as an overlay """ brain.add_data(zstat, min=min_val, max=max_val, thresh=thr_val, colormap=cmap, colorbar=True) return brain
def vizBrain(data, subject_id='fsaverage5', hemi='lh', surface='pial', filename='brain.png'): brain = Brain(subject_id, hemi, surface) dmin = data.min() #+(data.std()/2) dmax = data.max() #-(data.std()/2) brain.add_data(data, dmin, dmax, colormap="hot", alpha=0.7) brain.save_montage(filename, order=['lat', 'med'], orientation='h', border_size=10)
def plot_group(hub,num_nodes,hemi='lh'): from surfer import Brain, io brain = Brain("fsaverage", "%s" %(hemi), "pial",config_opts=dict(background="white")) if hub == 'pc' or hub =='wmd': image = io.project_volume_data('/home/despo/mb3152/random_nodes/%s/group_%s.nii'%(num_nodes,hub),hemi, subject_id="fsaverage", projsum = 'max', smooth_fwhm = 20) brain.add_data(image,colormap = "Reds", colorbar= True) else: pc_image = io.project_volume_data('/home/despo/mb3152/random_nodes/%s/group_pc.nii'%(num_nodes),hemi, subject_id="fsaverage", projsum = 'max', smooth_fwhm = 20) wmd_image = io.project_volume_data('/home/despo/mb3152/random_nodes/%s/group_wmd.nii'%(num_nodes),hemi, subject_id="fsaverage", projsum = 'max', smooth_fwhm = 20) wmd_thresh = np.nanmean(wmd_image[wmd_image>0]) pc_thresh = np.nanmean(pc_image[pc_image >0]) #find connetor hub activity connector_hub_image = pc_image.copy() connector_hub_image[pc_image < pc_thresh] = 0. connector_hub_image[wmd_image < wmd_thresh] = 0. #find sattelite connector activty satellite_image = pc_image.copy() satellite_image[pc_image < pc_thresh] = 0. satellite_image[wmd_image > wmd_thresh] = 0. # find provincial hub activity provincial_hub_image = wmd_image.copy() provincial_hub_image[pc_image > pc_thresh] = 0. provincial_hub_image[wmd_image < wmd_thresh] = 0. node_image = pc_image.copy() node_image[provincial_hub_image > 0] = 0 node_image[connector_hub_image > 0] = 0 node_image[satellite_image > 0] = 0 node_image[node_image > 0] = 1 # brain.add_data(node_image,thresh= 0, max = 2, colormap = 'gray',hemi=hemi,smoothing_steps = 0) brain.add_data(connector_hub_image,thresh= np.nanmin(pc_image),max=pc_thresh + np.std(pc_image), colormap = 'Reds',hemi=hemi,smoothing_steps = 0) brain.add_data(satellite_image,thresh= np.nanmin(pc_image),max=pc_thresh + np.std(pc_image),colormap = 'autumn',hemi=hemi,smoothing_steps = 0) brain.add_data(provincial_hub_image,thresh=np.nanmin(wmd_image),max=wmd_thresh +np.std(wmd_image),colormap = 'Blues',hemi=hemi,smoothing_steps = 0)
def montage_plot(parameter, fdr_correct=True): fsaverage = "fsaverage" hemi = "lh" surf = "inflated" t_data, p_data, str_names, labels = get_data() if fdr_correct is True: data = fdr_filter(t_data, p_data, parameter) else: data = t_data[parameter].values data = data[labels] brain = Brain(fsaverage, hemi, surf, background="white") brain.add_data(data, -10, 10, thresh=None, colormap="RdBu_r", alpha=.8) montage = brain.save_montage( None, [['lateral', 'parietal'], ['medial', 'frontal']], border_size=0, colorbar=None) fig, a = plt.subplots(figsize=(24, 24)) im = plt.imshow(montage, cmap='RdBu_r') a.set(xticks=[], yticks=[]) sns.despine(bottom=True, left=True) cbar = fig.colorbar(im, ticks=[montage.min(), (montage.min() + 255) / 2, 255], orientation='horizontal', drawedges=False) cbar.ax.set_xticklabels(['-10', '0', '10']) plt.rcParams['pdf.fonttype'] = 3 plt.rcParams['ps.fonttype'] = 3 sns.set(style='ticks', font_scale=1, rc={ 'axes.labelsize': 6, 'axes.titlesize': 40, 'xtick.labelsize': 40, 'ytick.labelsize': 5, 'legend.fontsize': 250, 'axes.linewidth': 0.25, 'xtick.major.width': 0.25, 'ytick.major.width': 0.25, 'ytick.major.width': 0.25, 'ytick.major.width': 0.25, 'ytick.major.pad': 2.0, 'ytick.minor.pad': 2.0, 'xtick.major.pad': 2.0, 'xtick.minor.pad': 2.0, 'axes.labelpad': 4.0, }) a.set_title(parameter) return fig
def plot_data_surf_bh(in_file, colormap='jet', thr_list=[(None, None, None)],roi_coords=(), fwhm=0): ''' allows more flexible visualization than plot_rs_surf_bh thr_list = [(min, max, thresh)] colormap: matplotlib colormap (http://matplotlib.org/examples/color/colormaps_reference.html) ''' # in_file .nii to be projected on surface import os from surfer import Brain, io out_file_list = [] in_file_name = os.path.basename(in_file) reg_file = os.path.join(os.environ["FREESURFER_HOME"],"average/mni152.register.dat") for thr in thr_list: min_thr = thr[0] max_thr = thr[1] thr_thr = thr[2] brain = Brain("fsaverage", "split", "inflated", views=['lat', 'med'], config_opts=dict(background="white")) surf_data_lh = io.project_volume_data(in_file, "lh", reg_file, smooth_fwhm=fwhm) surf_data_rh = io.project_volume_data(in_file, "rh", reg_file, smooth_fwhm=fwhm) brain.add_data(surf_data_lh, min=min_thr, max=max_thr, thresh=thr_thr, colormap=colormap, hemi='lh') brain.add_data(surf_data_rh, min=min_thr, max=max_thr, thresh=thr_thr, colormap=colormap, hemi='rh') roi_str = '' if not(roi_coords == ()): if roi_coords[0] <0: #lh hemi_str = 'lh' else: hemi_str = 'rh' roi_str = '_roi_%s.%s.%s' % roi_coords brain.add_foci(roi_coords, map_surface="white", hemi=hemi_str, color='red', scale_factor=2) out_filename = os.path.join(os.getcwd(), in_file_name + roi_str + '_thr_%s' % min_thr + '.png') out_file_list += [out_filename] brain.save_image(out_filename) brain.close() return out_file_list
def test_meg_inverse(): """Test plotting of MEG inverse solution.""" mlab.options.backend = 'test' brain = Brain(*std_args) stc_fname = os.path.join(data_dir, 'meg_source_estimate-lh.stc') stc = io.read_stc(stc_fname) data = stc['data'] vertices = stc['vertices'] time = np.linspace(stc['tmin'], stc['tmin'] + data.shape[1] * stc['tstep'], data.shape[1], endpoint=False) colormap = 'hot' def time_label(t): return 'time=%0.2f ms' % (1e3 * t) brain.add_data(data, colormap=colormap, vertices=vertices, smoothing_steps=10, time=time, time_label=time_label) brain.scale_data_colormap(fmin=13, fmid=18, fmax=22, transparent=True) assert_equal(brain.data_dict['lh']['time_idx'], 0) brain.set_time(.1) assert_equal(brain.data_dict['lh']['time_idx'], 2) # viewer = TimeViewer(brain) # multiple data layers assert_raises(ValueError, brain.add_data, data, vertices=vertices, time=time[:-1]) brain.add_data(data, colormap=colormap, vertices=vertices, smoothing_steps=10, time=time, time_label=time_label, initial_time=.09) assert_equal(brain.data_dict['lh']['time_idx'], 1) data_dicts = brain._data_dicts['lh'] assert_equal(len(data_dicts), 2) assert_equal(data_dicts[0]['time_idx'], 1) assert_equal(data_dicts[1]['time_idx'], 1) # shift time in both layers brain.set_data_time_index(0) assert_equal(data_dicts[0]['time_idx'], 0) assert_equal(data_dicts[1]['time_idx'], 0) # remove all layers brain.remove_data() assert_equal(brain._data_dicts['lh'], []) brain.close()
def plot_surface_vertices(common_space, morph_data, vertex_idx, aug_data, hemi, surf, view, cmap, save_path): b = Brain(common_space, hemi, surf, background="white", views=view) x, y, z = b.geo[hemi].coords.T coords = np.array([x, y, z]).T print('Number of vertices to be plotted {}'.format(len(vertex_idx))) if aug_data: aug_vertex_idx = get_nbrs(coords, vertex_idx) print('Number of vertices to be plotted after augmentation {}'.format( len(aug_vertex_idx))) morph_data[aug_vertex_idx] = 1 else: morph_data[vertex_idx] = 1 print(np.sum(morph_data)) b.add_data(morph_data, colormap=cmap, alpha=.9, colorbar=True) print(save_path) b.save_image(save_path)
def vis_ers_comp(group=None,phase=None,surf=None,cmap=None,split='lh'): mri_file = f'/mnt/c/Users/ACH/Desktop/ers_comps/{group}_{phase}/{group}_{phase}_ClusterEffEst.nii.gz' #find the file containing stats surf_data_lh = project_volume_data(mri_file, "lh", reg_file, projarg=[0, 1, .01], smooth_fwhm=1) #project to lh _max = .55 if phase == 'acquisition' else .3 for view in ['med','lat']: #lateral and medial views brain = Brain('MNI2009c', split, surf, cortex='low_contrast',size=1000, views=view, background='white', foreground=None) #initialize the brain object brain.add_data(surf_data_lh, 0, _max, center=None, hemi='lh', thresh=None, colorbar=False, colormap=cmap, transparent=True) #add lh data for vert, color in zip([115262,135014],['white','black']): #add focal ROIs brain.add_foci(vert,coords_as_verts=True,color=color,alpha=1) fname = f'/mnt/c/Users/ACH/Documents/gPPI/paper/pysurfer/{group}_{phase}_{view}.png' os.system(f'rm {fname}') brain.save_image(fname,antialiased=True)
def test_meg_inverse(): """Test plotting of MEG inverse solution """ mlab.options.backend = 'test' brain = Brain(*std_args) stc_fname = os.path.join(data_dir, 'meg_source_estimate-lh.stc') stc = io.read_stc(stc_fname) data = stc['data'] vertices = stc['vertices'] time = 1e3 * np.linspace(stc['tmin'], stc['tmin'] + data.shape[1] * stc['tstep'], data.shape[1]) colormap = 'hot' time_label = 'time=%0.2f ms' brain.add_data(data, colormap=colormap, vertices=vertices, smoothing_steps=10, time=time, time_label=time_label) brain.set_data_time_index(2) brain.scale_data_colormap(fmin=13, fmid=18, fmax=22, transparent=True)
def visualise_surface(zstat, hemi, surface='inflated', min_val=None, max_val=None, thr_val=0, cmap='autumn'): ''' VERY helpfully taken from the Pysurfer example gallery http://pysurfer.github.io/examples/plot_fmri_activation_volume.html Parameters ---------- zstat : pysurfer surface Surface projection of result_file to fsaverage brain prefix : save hemi : {'lh', 'rh'} string indicating left or right hemisphere surface : {'inflated', 'pial', 'white'} string indicating surface view min_val : minimum value to be shown (default is to calculate it from the data) max_val : maximum value to be shown (default is to calculate it from the data) thr_val : threshold value zero everything below this number (default is 0) cmap : matplotlib colormap (default is autumn) Returns ---------- brain : current pysurfer visualization window ''' """ Bring up the visualization window. """ brain = Brain("fsaverage", hemi, surface, config_opts={'background':'white'}) """ Add zstat as an overlay """ brain.add_data(zstat, min=min_val, max=max_val, thresh=thr_val, colormap=cmap, colorbar=True) return brain
def plot_surface(vtx_data, subject_id, subjects_dir, hemi, surface, output_dir, prefix, l, u, cmap, center, thresh): # Open up a brain in pysurfer # brain = Brain(subject_id, hemi, surface, # subjects_dir = subjects_dir, # config_opts=dict(background="white", # height=665, # width=800)) brain = Brain(subject_id, hemi, surface, subjects_dir=subjects_dir, background="white") if center: # Make sure the colorbar is centered if l**2 < u**2: l = u * -1 else: u = l * -1 # Create an empty brain if the values are all below threshold if np.max(vtx_data) < thresh: # Add your data to the brain brain.add_data(vtx_data * 0, l, u, thresh=thresh, colormap=cmap, alpha=0.0) # Otherwise, add the data appropriately! else: # Add your data to the brain brain.add_data(vtx_data, l, u, thresh=thresh, colormap=cmap, alpha=.8) # Save the images for medial and lateral # putting a color bar on all of them brain.save_imageset(prefix=os.path.join(output_dir, prefix), views=views_list, colorbar=range(len(views_list)))
def plot_roi_mask(filename, threshold, hemi='lh'): """Create a colour mask for ROI""" roi = make_surface_mask(filename, threshold, hemi) brain = Brain('fsaverage', hemi, "pial", background="white", cortex='low_contrast', size=(1000, 600)) # replace nans so that they can be thresholded off when adding data roi[np.isnan(roi)] = -11 brain.add_data(roi, thresh=-10, min=0, max=1, colormap='tab10', colorbar=False, alpha=.8) return brain
def plot_single_roi(rois, views=['medial']): ''' Function to plot location of an array of Glasser labels in a specified view ''' in_dir = '/Users/kenohagena/flexrule/fmri/analyses/Sublevel_GLM_Climag_2020-01-20/GroupLevel/' # need random data fsaverage = "fsaverage" hemi = "lh" surf = "inflated" t, p, str_names, labels = get_data('inference', in_dir=in_dir, input_hemisphere='_avg', hemi='lh') df = pd.DataFrame({'labs': str_names[0], 't': 0}) df = df.set_index('labs') print(df) df.loc[rois, 't'] = 10 data = df.t.values data = data[labels] brain = Brain(fsaverage, hemi, surf, background="white", views=views) brain.add_data(data, -10, 11, thresh=None, colormap="RdBu_r", alpha=.9) return brain
def plot_overlays_diff_singlesubj(subject,condition,method,modality,hemi,indextime, azimuth): subject_id, surface = 'fsaverage', 'inflated' hemi = hemi brain = Brain(subject_id, hemi, surface, size=(600, 600)) stc_fname = ('/neurospin/meg/meg_tmp/MTT_MEG_Baptiste/MEG/' + subject + '/mne_python/STCS_diff/IcaCorr_' + condition[0] + '-' + condition[1] + '/' + modality + '_' + method + '_' + subject + '_' + condition[0] + '-' + condition[1] + '_' + '_ico-5-fwd-fsaverage-.stc-'+hemi+'.stc') stc = read_stc(stc_fname) data = stc['data'] vertices = stc['vertices'] time = np.linspace(stc['tmin'], stc['tmin'] + data.shape[1] * stc['tstep'], data.shape[1]) colormap = 'hot' time_label = lambda t: 'time=%0.2f ms' % (t * 1e3) brain.add_data(data, colormap=colormap, vertices=vertices, smoothing_steps=4, time=time, time_label=time_label, hemi=hemi) brain.set_data_time_index(indextime) brain.scale_data_colormap(fmin=0, fmid=2.5, fmax=5, transparent=True) brain.show_view(dict(azimuth=azimuth,elevation=None, distance=None)) # mayavi.mlab.view(azimuth=0, elevation=None, distance=None, focalpoint=None, # roll=None, reset_roll=True, figure=None) realtime = stc['tmin'] + stc['tstep']*indextime PlotDir = [] PlotDir = ('/neurospin/meg/meg_tmp/MTT_MEG_Baptiste/MEG/' + subject + '/mne_python/BrainMaps/IcaCorr_' + + condition[0] + '-' + condition[1]) if not os.path.exists(PlotDir): os.makedirs(PlotDir) brain.save_image(PlotDir + '/IcaCorr_' + modality + '_' + method + '_' + subject + '_' + condition[0] + '-' + condition[1] + '_' + str(realtime) + hemi + '_'+ str(azimuth)+ '_ico-5-fwd-fsaverage-' +'.png')
def test_meg_inverse(): """Test plotting of MEG inverse solution.""" mlab.options.backend = 'test' brain = Brain(*std_args) stc_fname = os.path.join(data_dir, 'meg_source_estimate-lh.stc') stc = io.read_stc(stc_fname) data = stc['data'] vertices = stc['vertices'] time = np.linspace(stc['tmin'], stc['tmin'] + data.shape[1] * stc['tstep'], data.shape[1], endpoint=False) colormap = 'hot' def time_label(t): return 'time=%0.2f ms' % (1e3 * t) brain.add_data(data, colormap=colormap, vertices=vertices, smoothing_steps=10, time=time, time_label=time_label) brain.scale_data_colormap(fmin=13, fmid=18, fmax=22, transparent=True) assert_equal(brain.data_dict['lh']['time_idx'], 0) brain.set_time(.1) assert_equal(brain.data_dict['lh']['time_idx'], 2) # viewer = TimeViewer(brain) brain.add_data(data, colormap=colormap, vertices=vertices, smoothing_steps=10, time=time, time_label=time_label, initial_time=.09, remove_existing=True) assert_equal(brain.data_dict['lh']['time_idx'], 1) brain.close()
def test_meg_inverse(): """Test plotting of MEG inverse solution.""" _set_backend() brain = Brain(*std_args) stc_fname = os.path.join(data_dir, 'meg_source_estimate-lh.stc') stc = io.read_stc(stc_fname) vertices = stc['vertices'] colormap = 'hot' data = stc['data'] data_full = (brain.geo['lh'].nn[vertices][..., np.newaxis] * data[:, np.newaxis]) time = np.linspace(stc['tmin'], stc['tmin'] + data.shape[1] * stc['tstep'], data.shape[1], endpoint=False) def time_label(t): return 'time=%0.2f ms' % (1e3 * t) for use_data in (data, data_full): brain.add_data(use_data, colormap=colormap, vertices=vertices, smoothing_steps=1, time=time, time_label=time_label) brain.scale_data_colormap(fmin=13, fmid=18, fmax=22, transparent=True) assert_equal(brain.data_dict['lh']['time_idx'], 0) brain.set_time(.1) assert_equal(brain.data_dict['lh']['time_idx'], 2) # viewer = TimeViewer(brain) # multiple data layers assert_raises(ValueError, brain.add_data, data, vertices=vertices, time=time[:-1]) brain.add_data(data, colormap=colormap, vertices=vertices, smoothing_steps=1, time=time, time_label=time_label, initial_time=.09) assert_equal(brain.data_dict['lh']['time_idx'], 1) data_dicts = brain._data_dicts['lh'] assert_equal(len(data_dicts), 3) assert_equal(data_dicts[0]['time_idx'], 1) assert_equal(data_dicts[1]['time_idx'], 1) # shift time in both layers brain.set_data_time_index(0) assert_equal(data_dicts[0]['time_idx'], 0) assert_equal(data_dicts[1]['time_idx'], 0) brain.set_data_smoothing_steps(2) # add second data-layer without time axis brain.add_data(data[:, 1], colormap=colormap, vertices=vertices, smoothing_steps=2) brain.set_data_time_index(2) assert_equal(len(data_dicts), 4) # change surface brain.set_surf('white') # remove all layers brain.remove_data() assert_equal(brain._data_dicts['lh'], []) brain.close()
def test_meg_inverse(): """Test plotting of MEG inverse solution.""" _set_backend() brain = Brain(*std_args) stc_fname = os.path.join(data_dir, 'meg_source_estimate-lh.stc') stc = io.read_stc(stc_fname) vertices = stc['vertices'] colormap = 'hot' data = stc['data'] data_full = (brain.geo['lh'].nn[vertices][..., np.newaxis] * data[:, np.newaxis]) time = np.linspace(stc['tmin'], stc['tmin'] + data.shape[1] * stc['tstep'], data.shape[1], endpoint=False) def time_label(t): return 'time=%0.2f ms' % (1e3 * t) for use_data in (data, data_full): brain.add_data(use_data, colormap=colormap, vertices=vertices, smoothing_steps=1, time=time, time_label=time_label) brain.scale_data_colormap(fmin=13, fmid=18, fmax=22, transparent=True) assert brain.data_dict['lh']['time_idx'] == 0 brain.set_time(.1) assert brain.data_dict['lh']['time_idx'] == 2 # viewer = TimeViewer(brain) # multiple data layers pytest.raises(ValueError, brain.add_data, data, vertices=vertices, time=time[:-1]) brain.add_data(data, colormap=colormap, vertices=vertices, smoothing_steps=1, time=time, time_label=time_label, initial_time=.09) assert brain.data_dict['lh']['time_idx'] == 1 data_dicts = brain._data_dicts['lh'] assert len(data_dicts) == 3 assert data_dicts[0]['time_idx'] == 1 assert data_dicts[1]['time_idx'] == 1 # shift time in both layers brain.set_data_time_index(0) assert data_dicts[0]['time_idx'] == 0 assert data_dicts[1]['time_idx'] == 0 brain.set_data_smoothing_steps(2) # add second data-layer without time axis brain.add_data(data[:, 1], colormap=colormap, vertices=vertices, smoothing_steps=2) brain.set_data_time_index(2) assert len(data_dicts) == 4 # change surface brain.set_surf('white') # remove all layers brain.remove_data() assert brain._data_dicts['lh'] == [] brain.close()
def plot_roi_weights(filename, threshold, hemi='lh'): roi = make_surface_mask(filename, threshold, hemi) # transform values within mask into random numbers roi = roi * np.random.uniform(-1, 1, size=roi.shape) brain = Brain('fsaverage', hemi, "inflated", background="white", cortex='low_contrast', size=(1000, 600)) # replace nans so that they can be thresholded off when adding data roi[np.isnan(roi)] = -11 brain.add_data(roi, thresh=-10, min=-1, max=1, colormap='bwr', colorbar=False) return brain
def test_probabilistic_labels(): """Test plotting of probabilistic labels.""" mlab.options.backend = 'test' brain = Brain("fsaverage", "lh", "inflated", cortex="low_contrast") brain.add_label("BA1", color="darkblue") brain.add_label("BA1", color="dodgerblue", scalar_thresh=.5) brain.add_label("BA45", color="firebrick", borders=True) brain.add_label("BA45", color="salmon", borders=True, scalar_thresh=.5) label_file = pjoin(subj_dir, "fsaverage", "label", "lh.BA6.label") prob_field = np.zeros_like(brain._geo.x) ids, probs = nib.freesurfer.read_label(label_file, read_scalars=True) prob_field[ids] = probs brain.add_data(prob_field, thresh=1e-5) brain.data["colorbar"].number_of_colors = 10 brain.data["colorbar"].number_of_labels = 11 brain.close()
def plot_brains(subjects, axes): for subj, subj_axes in zip(subjects, axes): exp = dict(pc="dots", ti="sticks")[subj[:2]] data_fname = "roi_cache/{}_{}_ifs.npz".format(subj, exp) with np.load(data_fname) as dobj: vox_ijk = dobj["vox_ijk"] res_fname = "decoding_analysis/{}_{}_ifs.pkz".format(subj, exp) res = moss.load_pkl(res_fname) prefs = res.prefs surf_vals = roi_to_surf(exp, subj, prefs, vox_ijk) lut = get_colormap(exp, False) for hemi, ax in zip(["lh", "rh"], subj_axes): b = Brain(subj, hemi, "inflated", background="white", cortex=("binary", -4, 8, False), size=(1000, 600)) b.add_data(surf_vals.ix[hemi].fillna(-11).values, colormap=lut, colorbar=False, thresh=-10, min=-1.75, max=1.75) mlab.view(*get_ifs_view(subj, hemi)) img = crop(b.screenshot()) ax.imshow(img, rasterized=True) ax.set(xticks=[], yticks=[]) b.close()
def test_probabilistic_labels(): """Test plotting of probabilistic labels.""" _set_backend() brain = Brain("fsaverage", "lh", "inflated", cortex="low_contrast") brain.add_label("BA1", color="darkblue") brain.add_label("BA1", color="dodgerblue", scalar_thresh=.5) brain.add_label("BA45", color="firebrick", borders=True) brain.add_label("BA45", color="salmon", borders=True, scalar_thresh=.5) subj_dir = utils._get_subjects_dir() label_file = pjoin(subj_dir, "fsaverage", "label", "lh.BA6.label") prob_field = np.zeros_like(brain.geo['lh'].x) ids, probs = nib.freesurfer.read_label(label_file, read_scalars=True) prob_field[ids] = probs brain.add_data(prob_field, thresh=1e-5) with warnings.catch_warnings(record=True): brain.data["colorbar"].number_of_colors = 10 brain.data["colorbar"].number_of_labels = 11 brain.close()
surf_data = surf_data.squeeze() surf_f = '%s/fsaverage5/surf/%s.orig' % (fsDir, hemi) surf_faces = nib.freesurfer.io.read_geometry(surf_f)[1] mask = np.zeros((10242)) while True in np.isnan(surf_data): nans = np.unique(np.where(np.isnan(surf_data))[0]) mask[nans] = 1 bad = [] good = {} for node in nans: neighbors = np.unique(surf_faces[np.where(np.in1d(surf_faces.ravel(), [node]).reshape(surf_faces.shape))[0]]) bad_neighbors = neighbors[np.unique(np.where(np.isnan(surf_data[neighbors]))[0])] good_neighbors = np.setdiff1d(neighbors, bad_neighbors) bad.append((node, len(bad_neighbors))) good[node] = good_neighbors bad = np.array(bad).transpose() nodes_with_least_bad_neighbors = bad[0][bad[1] == np.min(bad[1])] for node in nodes_with_least_bad_neighbors: surf_data[node] = np.mean(surf_data[list(good[node])], axis=0) surf_img._data = np.expand_dims(np.expand_dims(surf_data, axis=1), axis=1) brain = Brain('fsaverage5', hemi, 'pial', curv=False) brain.add_data(mask, mask.min(), mask.max(), colormap="spectral", alpha=0.6) brain.save_montage(mask_img_f, order=['lat', 'med'], orientation='h', border_size=10) np.save(mask_f, mask) else: surf_img._data = surf_data surf_img.to_filename(rest_interp_f)
config_opts=dict(background="lightslategray", cortex="high_contrast")) """ Read in the aparc annotation file """ aparc_file = op.join(os.environ["SUBJECTS_DIR"], subject_id, "label", hemi + ".aparc.a2009s.annot") labels, ctab, names = io.read_annot(aparc_file) """ Make a random vector of scalar data corresponding to a value for each region in the parcellation. """ roi_data = np.random.random(len(names)) """ Make a vector containing the data point at each vertex. """ vtx_data = np.zeros(len(labels)) for i, data in enumerate(roi_data): vtx_data[labels == i] = data """ Display these values on the brain. Use the hot colormap and add an alpha channel so the underlying anatomy is visible. """ brain.add_data(vtx_data, 0, 1, "hot", alpha=.7)
"""Bring up the visualization""" brain = Brain("fsaverage", "lh", "inflated", config_opts=dict(background="white")) """Project the volume file and return as an array""" mri_file = "auto_examples/data/resting_corr.nii.gz" reg_file = "auto_examples/data/register.dat" surf_data = io.project_volume_data(mri_file, "lh", reg_file) """ You can pass this array to the add_overlay method for a typical activation overlay (with thresholding, etc.) """ brain.add_overlay(surf_data, min=.3, max=.7, name="ang_corr") """ You can also pass it to add_data for more control over the visualzation. Here we'll plot the whole range of correlations """ brain.overlays["ang_corr"].remove() brain.add_data(surf_data, -.7, .7, colormap="jet", alpha=.7) """ This overlay represents resting-state correlations with a seed in left angular gyrus. Let's plot that seed. """ seed_coords = (-45, -67, 36) brain.add_foci(seed_coords, map_surface="white")
left_label_file = 'lh.aparc.a2009s.annot' right_label_file = 'rh.aparc.a2009s.annot' lh_aparc_file = os.path.join(label_dir, left_label_file) rh_aparc_file = os.path.join(label_dir, right_label_file) lh_labels, lh_ctab, lh_names = nb.freesurfer.read_annot(lh_aparc_file) rh_labels, rh_ctab, rh_names = nb.freesurfer.read_annot(rh_aparc_file) left_df = left_df.set_index('col').loc[lh_names].reset_index().fillna(0) right_df = right_df.set_index('col').loc[rh_names].reset_index().fillna(0) vtx_lh = left_df.val.values[lh_labels] vtx_lh[lh_labels == -1] = 0 vtx_rh = right_df.val.values[rh_labels] vtx_rh[rh_labels == -1] = 0 brain.add_data(vtx_lh, 0, 400, colormap="Reds", alpha=.8, hemi='lh') brain.add_annotation(lh_aparc_file, hemi='lh') brain.add_data(vtx_rh, 0, 400, colormap="Reds", alpha=.8, hemi='rh') brain.add_annotation(rh_aparc_file, hemi='rh', remove_existing=False) save_name = "../images/{}_brain.png".format(save_name) brain.save_image(save_name)
You can also threshold based on the probability of that region being at each vertex. """ brain.add_label("BA1", color="#2B8CBE", scalar_thresh=.5) """ It's also possible to plot just the label boundary, in case you wanted to overlay the label on an activation plot to asses whether it falls within that region. """ brain.add_label("BA45", color="#F0F8FF", borders=True, scalar_thresh=.5) brain.add_label("BA45", color="#F0F8FF", alpha=.3, scalar_thresh=.5) """ Finally, with a few tricks, you can display the whole probabilistic map. """ subjects_dir = environ["SUBJECTS_DIR"] label_file = join(subjects_dir, "fsaverage", "label", "lh.BA6.label") prob_field = np.zeros_like(brain._geo.x) ids, probs = io.read_label(label_file, read_scalars=True) prob_field[ids] = probs brain.add_data(prob_field, thresh=1e-5, colormap="RdPu") """ Adjust the colorbar to represent the coarseness of the probability estimates more closely. """ brain.data["colorbar"].number_of_colors = 10 brain.data["colorbar"].number_of_labels = 11
brain = Brain(subject_id, hemi, surface, background="white") """ Read in the Buckner resting state network annotation. (This requires a relatively recent version of Freesurfer, or it can be downloaded separately). """ aparc_file = os.path.join(os.environ["SUBJECTS_DIR"], subject_id, "label", hemi + ".Yeo2011_17Networks_N1000.annot") labels, ctab, names = nib.freesurfer.read_annot(aparc_file) """ Make a random vector of scalar data corresponding to a value for each region in the parcellation. """ rs = np.random.RandomState(4) roi_data = rs.uniform(.5, .75, size=len(names)) """ Make a vector containing the data point at each vertex. """ vtx_data = roi_data[labels] """ Display these values on the brain. Use a sequential colormap (assuming these data move from low to high values), and add an alpha channel so the underlying anatomy is visible. """ brain.add_data(vtx_data, .5, .75, colormap="GnBu", alpha=.8)
brain = Brain(subject_id, hemi, surface, config_opts=dict(background="white")) """ Read in the annot file """ aparc_file = op.abspath("%s.Lausanne1015_fsavg.annot" % hemi) labels, ctab, names = nb.freesurfer.read_annot(aparc_file) print(names) print(len(names)) """ Make a random vector of scalar data corresponding to a value for each region in the parcellation. """ rs = np.random.randint(0,2,size=len(names)) roi_data = rs """ Make a vector containing the data point at each vertex. """ vtx_data = roi_data[labels] """ Display these values on the brain. Use a sequential colormap (assuming these data move from low to high values), and add an alpha channel so the underlying anatomy is visible. """ brain.add_data(vtx_data, 0, 2, colormap="GnBu", alpha=.8)
def plot_source_estimates(stc, subject=None, surface='inflated', hemi='lh', colormap='auto', time_label='auto', smoothing_steps=10, transparent=None, alpha=1.0, time_viewer=False, config_opts=None, subjects_dir=None, figure=None, views='lat', colorbar=True, clim='auto', cortex="classic", size=800, background="black", foreground="white", initial_time=None, time_unit=None): """Plot SourceEstimates with PySurfer Note: PySurfer currently needs the SUBJECTS_DIR environment variable, which will automatically be set by this function. Plotting multiple SourceEstimates with different values for subjects_dir will cause PySurfer to use the wrong FreeSurfer surfaces when using methods of the returned Brain object. It is therefore recommended to set the SUBJECTS_DIR environment variable or always use the same value for subjects_dir (within the same Python session). Parameters ---------- stc : SourceEstimates The source estimates to plot. subject : str | None The subject name corresponding to FreeSurfer environment variable SUBJECT. If None stc.subject will be used. If that is None, the environment will be used. surface : str The type of surface (inflated, white etc.). hemi : str, 'lh' | 'rh' | 'split' | 'both' The hemisphere to display. colormap : str | np.ndarray of float, shape(n_colors, 3 | 4) Name of colormap to use or a custom look up table. If array, must be (n x 3) or (n x 4) array for with RGB or RGBA values between 0 and 255. If 'auto', either 'hot' or 'mne' will be chosen based on whether 'lims' or 'pos_lims' are specified in `clim`. time_label : str | callable | None Format of the time label (a format string, a function that maps floating point time values to strings, or None for no label). The default is ``time=%0.2f ms``. smoothing_steps : int The amount of smoothing transparent : bool | None If True, use a linear transparency between fmin and fmid. None will choose automatically based on colormap type. alpha : float Alpha value to apply globally to the overlay. time_viewer : bool Display time viewer GUI. config_opts : dict Deprecated parameter. subjects_dir : str The path to the freesurfer subjects reconstructions. It corresponds to Freesurfer environment variable SUBJECTS_DIR. figure : instance of mayavi.core.scene.Scene | list | int | None If None, a new figure will be created. If multiple views or a split view is requested, this must be a list of the appropriate length. If int is provided it will be used to identify the Mayavi figure by it's id or create a new figure with the given id. views : str | list View to use. See surfer.Brain(). colorbar : bool If True, display colorbar on scene. clim : str | dict Colorbar properties specification. If 'auto', set clim automatically based on data percentiles. If dict, should contain: ``kind`` : str Flag to specify type of limits. 'value' or 'percent'. ``lims`` : list | np.ndarray | tuple of float, 3 elements Note: Only use this if 'colormap' is not 'mne'. Left, middle, and right bound for colormap. ``pos_lims`` : list | np.ndarray | tuple of float, 3 elements Note: Only use this if 'colormap' is 'mne'. Left, middle, and right bound for colormap. Positive values will be mirrored directly across zero during colormap construction to obtain negative control points. cortex : str or tuple specifies how binarized curvature values are rendered. 
either the name of a preset PySurfer cortex colorscheme (one of 'classic', 'bone', 'low_contrast', or 'high_contrast'), or the name of mayavi colormap, or a tuple with values (colormap, min, max, reverse) to fully specify the curvature colors. size : float or pair of floats The size of the window, in pixels. can be one number to specify a square window, or the (width, height) of a rectangular window. background : matplotlib color Color of the background of the display window. foreground : matplotlib color Color of the foreground of the display window. initial_time : float | None The time to display on the plot initially. ``None`` to display the first time sample (default). time_unit : 's' | 'ms' Whether time is represented in seconds (expected by PySurfer) or milliseconds. The current default is 'ms', but will change to 's' in MNE 0.14. To avoid a deprecation warning specify ``time_unit`` explicitly. Returns ------- brain : Brain A instance of surfer.viz.Brain from PySurfer. """ import surfer from surfer import Brain, TimeViewer import mayavi # import here to avoid circular import problem from ..source_estimate import SourceEstimate surfer_version = LooseVersion(surfer.__version__) v06 = LooseVersion('0.6') if surfer_version < v06: raise ImportError("This function requires PySurfer 0.6 (you are " "running version %s). You can update PySurfer " "using:\n\n $ pip install -U pysurfer" % surfer.__version__) if initial_time is not None and surfer_version > v06: kwargs = {'initial_time': initial_time} initial_time = None # don't set it twice else: kwargs = {} if time_unit is None: warn("The time_unit parameter default will change from 'ms' to 's' " "in MNE 0.14. To avoid this warning specify the parameter " "explicitly.", DeprecationWarning) time_unit = 'ms' elif time_unit not in ('s', 'ms'): raise ValueError("time_unit needs to be 's' or 'ms', got %r" % (time_unit,)) if time_label == 'auto': if time_unit == 'ms': time_label = 'time=%0.2f ms' else: def time_label(t): return 'time=%0.2f ms' % (t * 1e3) if not isinstance(stc, SourceEstimate): raise ValueError('stc has to be a surface source estimate') if hemi not in ['lh', 'rh', 'split', 'both']: raise ValueError('hemi has to be either "lh", "rh", "split", ' 'or "both"') # check `figure` parameter (This will be performed by PySurfer > 0.6) if figure is not None: if isinstance(figure, int): # use figure with specified id size_ = size if isinstance(size, (tuple, list)) else (size, size) figure = [mayavi.mlab.figure(figure, size=size_)] elif not isinstance(figure, (list, tuple)): figure = [figure] if not all(isinstance(f, mayavi.core.scene.Scene) for f in figure): raise TypeError('figure must be a mayavi scene or list of scenes') # convert control points to locations in colormap ctrl_pts, colormap = _limits_to_control_points(clim, stc.data, colormap) # Construct cmap manually if 'mne' and get cmap bounds # and triage transparent argument if colormap in ('mne', 'mne_analyze'): colormap = mne_analyze_colormap(ctrl_pts) scale_pts = [-1 * ctrl_pts[-1], 0, ctrl_pts[-1]] transparent = False if transparent is None else transparent else: scale_pts = ctrl_pts transparent = True if transparent is None else transparent subjects_dir = get_subjects_dir(subjects_dir=subjects_dir, raise_error=True) subject = _check_subject(stc.subject, subject, True) if hemi in ['both', 'split']: hemis = ['lh', 'rh'] else: hemis = [hemi] title = subject if len(hemis) > 1 else '%s - %s' % (subject, hemis[0]) with warnings.catch_warnings(record=True): # traits warnings brain = 
Brain(subject, hemi, surface, True, title, cortex, size, background, foreground, figure, subjects_dir, views, config_opts=config_opts) if time_unit == 's': times = stc.times else: # time_unit == 'ms' times = 1e3 * stc.times for hemi in hemis: hemi_idx = 0 if hemi == 'lh' else 1 if hemi_idx == 0: data = stc.data[:len(stc.vertices[0])] else: data = stc.data[len(stc.vertices[0]):] vertices = stc.vertices[hemi_idx] with warnings.catch_warnings(record=True): # traits warnings brain.add_data(data, colormap=colormap, vertices=vertices, smoothing_steps=smoothing_steps, time=times, time_label=time_label, alpha=alpha, hemi=hemi, colorbar=colorbar, **kwargs) # scale colormap and set time (index) to display brain.scale_data_colormap(fmin=scale_pts[0], fmid=scale_pts[1], fmax=scale_pts[2], transparent=transparent) if initial_time is not None: brain.set_time(initial_time) if time_viewer: TimeViewer(brain) return brain
def vizBrain(data, subject_id='fsaverage5', hemi='lh', surface='pial', filename='brain.png'): brain = Brain(subject_id, hemi, surface) dmin = data.min()#+(data.std()/2) dmax = data.max()#-(data.std()/2) brain.add_data(data, dmin, dmax, colormap="hot", alpha=0.7) brain.save_montage(filename, order=['lat', 'med'], orientation='h', border_size=10)
def plot_source_estimates(stc, subject=None, surface='inflated', hemi='lh',
                          colormap='auto', time_label='time=%0.2f ms',
                          smoothing_steps=10, fmin=None, fmid=None, fmax=None,
                          transparent=None, alpha=1.0, time_viewer=False,
                          config_opts={}, subjects_dir=None, figure=None,
                          views='lat', colorbar=True, clim=None):
    """Plot SourceEstimates with PySurfer

    Note: PySurfer currently needs the SUBJECTS_DIR environment variable,
    which will automatically be set by this function. Plotting multiple
    SourceEstimates with different values for subjects_dir will cause
    PySurfer to use the wrong FreeSurfer surfaces when using methods of the
    returned Brain object. It is therefore recommended to set the
    SUBJECTS_DIR environment variable or always use the same value for
    subjects_dir (within the same Python session).

    Parameters
    ----------
    stc : SourceEstimate
        The source estimates to plot.
    subject : str | None
        The subject name corresponding to FreeSurfer environment variable
        SUBJECT. If None stc.subject will be used. If that is None, the
        environment will be used.
    surface : str
        The type of surface (inflated, white etc.).
    hemi : str, 'lh' | 'rh' | 'split' | 'both'
        The hemisphere to display.
    colormap : str | np.ndarray of float, shape(n_colors, 3 | 4)
        Name of colormap to use or a custom look up table. If array, must
        be (n x 3) or (n x 4) array with RGB or RGBA values between 0 and
        255. If 'auto', either 'hot' or 'mne' will be chosen based on
        whether 'lims' or 'pos_lims' are specified in `clim`.
    time_label : str
        How to print info about the time instant visualized.
    smoothing_steps : int
        The amount of smoothing.
    transparent : bool | None
        If True, use a linear transparency between fmin and fmid. None will
        choose automatically based on colormap type.
    alpha : float
        Alpha value to apply globally to the overlay.
    time_viewer : bool
        Display time viewer GUI.
    config_opts : dict
        Keyword arguments for Brain initialization.
        See pysurfer.viz.Brain.
    subjects_dir : str
        The path to the FreeSurfer subjects reconstructions. It corresponds
        to the FreeSurfer environment variable SUBJECTS_DIR.
    figure : instance of mayavi.core.scene.Scene | list | int | None
        If None, a new figure will be created. If multiple views or a split
        view is requested, this must be a list of the appropriate length.
        If int is provided it will be used to identify the Mayavi figure by
        its id or create a new figure with the given id.
    views : str | list
        View to use. See surfer.Brain().
    colorbar : bool
        If True, display colorbar on scene.
    clim : str | dict
        Colorbar properties specification. If 'auto', set clim automatically
        based on data percentiles. If dict, should contain:

            ``kind`` : str
                Flag to specify type of limits. 'value' or 'percent'.
            ``lims`` : list | np.ndarray | tuple of float, 3 elements
                Note: Only use this if 'colormap' is not 'mne'.
                Left, middle, and right bound for colormap.
            ``pos_lims`` : list | np.ndarray | tuple of float, 3 elements
                Note: Only use this if 'colormap' is 'mne'.
                Left, middle, and right bound for colormap. Positive values
                will be mirrored directly across zero during colormap
                construction to obtain negative control points.

    Returns
    -------
    brain : Brain
        An instance of surfer.viz.Brain from PySurfer.
    """
    from surfer import Brain, TimeViewer
    import mayavi
    from mayavi import mlab

    # import here to avoid circular import problem
    from ..source_estimate import SourceEstimate

    if not isinstance(stc, SourceEstimate):
        raise ValueError('stc has to be a surface source estimate')

    if hemi not in ['lh', 'rh', 'split', 'both']:
        raise ValueError('hemi has to be either "lh", "rh", "split", '
                         'or "both"')

    n_split = 2 if hemi == 'split' else 1
    n_views = 1 if isinstance(views, string_types) else len(views)
    if figure is not None:
        # use figure with specified id or create new figure
        if isinstance(figure, int):
            figure = mlab.figure(figure, size=(600, 600))
        # make sure it is of the correct type
        if not isinstance(figure, list):
            figure = [figure]
        if not all(isinstance(f, mayavi.core.scene.Scene) for f in figure):
            raise TypeError('figure must be a mayavi scene or list of scenes')
        # make sure we have the right number of figures
        n_fig = len(figure)
        if not n_fig == n_split * n_views:
            raise RuntimeError('`figure` must be a list with the same '
                               'number of elements as PySurfer plots that '
                               'will be created (%s)' % (n_split * n_views))

    # Check if using old fmin/fmid/fmax cmap behavior
    if clim is None:
        # Throw deprecation warning and indicate future behavior
        warnings.warn('Using fmin, fmid, fmax (either manually or by default)'
                      ' is deprecated and will be removed in v0.10. Set'
                      ' "clim" to define color limits. In v0.10, "clim" will'
                      ' be set to "auto" by default.', DeprecationWarning)
        # Fill in any missing flim values from deprecated defaults
        dep_lims = [v or c for v, c in zip([fmin, fmid, fmax],
                                           [5., 10., 15.])]
        clim = dict(kind='value', lims=dep_lims)
    else:
        if any(f is not None for f in [fmin, fmid, fmax]):
            raise ValueError('"clim" overrides fmin, fmid, fmax')

    # convert control points to locations in colormap
    ctrl_pts, colormap = _limits_to_control_points(clim, stc.data, colormap)

    # Construct cmap manually if 'mne' and get cmap bounds
    # and triage transparent argument
    if colormap in ('mne', 'mne_analyze'):
        colormap = mne_analyze_colormap(ctrl_pts)
        scale_pts = [-1 * ctrl_pts[-1], 0, ctrl_pts[-1]]
        transparent = False if transparent is None else transparent
    else:
        scale_pts = ctrl_pts
        transparent = True if transparent is None else transparent

    subjects_dir = get_subjects_dir(subjects_dir=subjects_dir)
    subject = _check_subject(stc.subject, subject, False)
    if subject is None:
        if 'SUBJECT' in os.environ:
            subject = os.environ['SUBJECT']
        else:
            raise ValueError('SUBJECT environment variable not set')

    if hemi in ['both', 'split']:
        hemis = ['lh', 'rh']
    else:
        hemis = [hemi]

    title = subject if len(hemis) > 1 else '%s - %s' % (subject, hemis[0])
    args = inspect.getargspec(Brain.__init__)[0]
    kwargs = dict(title=title, figure=figure, config_opts=config_opts,
                  subjects_dir=subjects_dir)
    if 'views' in args:
        kwargs['views'] = views

    with warnings.catch_warnings(record=True):  # traits warnings
        brain = Brain(subject, hemi, surface, **kwargs)
    for hemi in hemis:
        hemi_idx = 0 if hemi == 'lh' else 1
        if hemi_idx == 0:
            data = stc.data[:len(stc.vertices[0])]
        else:
            data = stc.data[len(stc.vertices[0]):]
        vertices = stc.vertices[hemi_idx]
        time = 1e3 * stc.times
        with warnings.catch_warnings(record=True):  # traits warnings
            brain.add_data(data, colormap=colormap, vertices=vertices,
                           smoothing_steps=smoothing_steps, time=time,
                           time_label=time_label, alpha=alpha, hemi=hemi,
                           colorbar=colorbar)

        # scale colormap and set time (index) to display
        brain.scale_data_colormap(fmin=scale_pts[0], fmid=scale_pts[1],
                                  fmax=scale_pts[2], transparent=transparent)

    if time_viewer:
        TimeViewer(brain)
    return brain
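# A minimal usage sketch of the function above (limits illustrative; assumes
# `stc` is a SourceEstimate, e.g. from mne.read_source_estimate). Passing an
# explicit `clim` dict is the preferred path; omitting it falls back to the
# deprecated fmin/fmid/fmax defaults (5., 10., 15.) and emits the
# DeprecationWarning shown above.
brain = plot_source_estimates(stc, subject='fsaverage', hemi='both',
                              clim=dict(kind='value', lims=[5., 10., 15.]),
                              views=['lat', 'med'], time_viewer=False)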
import os
import numpy as np
from surfer.io import read_stc

# Read the MNE dSPM inverse solution
hemi = 'lh'
stc_fname = os.path.join('example_data',
                         'meg_source_estimate-' + hemi + '.stc')
stc = read_stc(stc_fname)

# data and vertices for which the data is defined
data = stc['data']
vertices = stc['vertices']
time = np.linspace(stc['tmin'], stc['tmin'] + data.shape[1] * stc['tstep'],
                   data.shape[1], endpoint=False)

# MNE will soon add the option for a "full" inverse to be computed and stored.
# In the meantime, we can get the equivalent for our data based on the
# surface normals (`brain` is assumed to be a surfer.Brain instance created
# earlier in the example):
data_full = brain.geo['lh'].nn[vertices][..., np.newaxis] * data[:, np.newaxis]

# Now we add the data and set the initial time displayed to 100 ms:
brain.add_data(data_full, colormap='hot', vertices=vertices, alpha=0.5,
               smoothing_steps=5, time=time, hemi=hemi, initial_time=0.1,
               vector_alpha=0.5, verbose=False)

# scale colormap
brain.scale_data_colormap(fmin=7, fmid=14, fmax=21, transparent=True,
                          verbose=False)

# viewer = TimeViewer(brain)
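# Shape check for the broadcasting that builds data_full above (a sketch,
# assuming `data` has shape (n_vertices, n_times)):
#   brain.geo['lh'].nn[vertices]  -> (n_vertices, 3)        per-vertex normals
#   ...[..., np.newaxis]          -> (n_vertices, 3, 1)
#   data[:, np.newaxis]           -> (n_vertices, 1, n_times)
#   elementwise product           -> (n_vertices, 3, n_times)
assert data_full.shape == (len(vertices), 3, data.shape[1])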