def test_grow_labels(): """Test generation of circular source labels""" seeds = [0, 50000] # these were chosen manually in mne_analyze should_be_in = [[49, 227], [51207, 48794]] hemis = [0, 1] names = ['aneurism', 'tumor'] labels = grow_labels('sample', seeds, 3, hemis, subjects_dir, n_jobs=2, names=names) tgt_names = ['aneurism-lh', 'tumor-rh'] tgt_hemis = ['lh', 'rh'] for label, seed, hemi, sh, name in zip(labels, seeds, tgt_hemis, should_be_in, tgt_names): assert_true(np.any(label.vertices == seed)) assert_true(np.all(in1d(sh, label.vertices))) assert_equal(label.hemi, hemi) assert_equal(label.name, name) # grow labels with and without overlap seeds = [57532, [58887, 6304]] l01, l02 = grow_labels('fsaverage', seeds, 20, [0, 0], subjects_dir) seeds = [57532, [58887, 6304]] l11, l12 = grow_labels('fsaverage', seeds, 20, [0, 0], subjects_dir, overlap=False) # test label naming assert_equal(l01.name, 'Label_0-lh') assert_equal(l02.name, 'Label_1-lh') assert_equal(l11.name, 'Label_0-lh') assert_equal(l12.name, 'Label_1-lh') # make sure set 1 does not overlap overlap = np.intersect1d(l11.vertices, l12.vertices, True) assert_array_equal(overlap, []) # make sure both sets cover the same vertices l0 = l01 + l02 l1 = l11 + l12 assert_array_equal(l1.vertices, l0.vertices)
def apply_stand(fn_stc, radius=5.0, min_subject='fsaverage', tmin=0.1,
                tmax=0.5):
    """Standardize the size of the selected ROIs.

    Parameters
    ----------
    fn_stc : string or list
        The path of the common STCs.
    radius : float
        The radius of every ROI.
    tmin, tmax : float
        The time range of interest, in seconds.
    """
    fnlist = get_files_from_list(fn_stc)
    # loop across all filenames
    for fn_stc in fnlist:
        stc_path = fn_stc[:fn_stc.rfind('-')]
        stc = mne.read_source_estimate(fn_stc, subject=min_subject)
        stc = stc.crop(tmin, tmax)
        # min_path = subjects_dir + '/%s' % min_subject
        # extract the subject information from the file name
        source_path = stc_path + '/ROIs/'
        stan_path = stc_path + '/standard/'
        reset_directory(stan_path)
        list_dirs = os.walk(source_path)
        for root, dirs, files in list_dirs:
            for f in files:
                label_fname = os.path.join(root, f)
                label = mne.read_label(label_fname)
                stc_label = stc.in_label(label)
                src_pow = np.sum(stc_label.data ** 2, axis=1)
                if label.hemi == 'lh':
                    # Get the max MNE value within each ROI
                    seed_vertno = stc_label.vertices[0][np.argmax(src_pow)]
                    func_label = mne.grow_labels(min_subject, seed_vertno,
                                                 extents=radius, hemis=0,
                                                 subjects_dir=subjects_dir,
                                                 n_jobs=1)
                    func_label = func_label[0]
                    func_label.save(stan_path + '%s' % f)
                elif label.hemi == 'rh':
                    seed_vertno = stc_label.vertices[1][np.argmax(src_pow)]
                    func_label = mne.grow_labels(min_subject, seed_vertno,
                                                 extents=radius, hemis=1,
                                                 subjects_dir=subjects_dir,
                                                 n_jobs=1)
                    func_label = func_label[0]
                    func_label.save(stan_path + '%s' % f)
def stan_rois(fname=None, stan_path=None, size=8.0, min_subject='fsaverage'):
    """Standardize the size of the ROIs before merging them.

    Keeps every ROI at the same size.

    Parameters
    ----------
    fname : string or list
        Averaged STC of the trials.
    stan_path : str
        Path to store all subjects' standardized labels.
    size : float
        The radius of every ROI.
    min_subject : str
        The subject for the common brain space.
    """
    fnlist = get_files_from_list(fname)
    subjects_dir = os.environ['SUBJECTS_DIR']
    # loop across all filenames
    for fn_stc in fnlist:
        stc_path = os.path.split(fn_stc)[0]
        stc_morph = mne.read_source_estimate(fn_stc, subject=min_subject)
        # min_path = subjects_dir + '/%s' % min_subject
        # extract the subject information from the file name
        name = os.path.basename(fn_stc)
        subject = name.split('_')[0]
        mer_path = stc_path + '/ROIs/'
        # stan_path = min_path + '/Group_ROIs/standard/'
        # set_directory(stan_path)
        list_dirs = os.walk(mer_path)
        for root, dirs, files in list_dirs:
            for f in files:
                label_fname = os.path.join(root, f)
                label = mne.read_label(label_fname)
                stc_label = stc_morph.in_label(label)
                src_pow = np.sum(stc_label.data ** 2, axis=1)
                if label.hemi == 'lh':
                    # Get the max MNE value within each ROI
                    seed_vertno = stc_label.vertices[0][np.argmax(src_pow)]
                    func_label = mne.grow_labels(min_subject, seed_vertno,
                                                 extents=size, hemis=0,
                                                 subjects_dir=subjects_dir,
                                                 n_jobs=1)
                    func_label = func_label[0]
                    func_label.save(stan_path + '%s_%s' % (subject, f))
                elif label.hemi == 'rh':
                    seed_vertno = stc_label.vertices[1][np.argmax(src_pow)]
                    func_label = mne.grow_labels(min_subject, seed_vertno,
                                                 extents=size, hemis=1,
                                                 subjects_dir=subjects_dir,
                                                 n_jobs=1)
                    func_label = func_label[0]
                    func_label.save(stan_path + '%s_%s' % (subject, f))
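A minimal usage sketch for stan_rois above; the paths are hypothetical placeholders, and it assumes SUBJECTS_DIR is set in the environment and that per-subject labels already exist under <stc_path>/ROIs/.

# Hypothetical file names; the STC basename must start with the subject ID
# followed by an underscore, because the function parses it that way.
stan_rois(fname='/data/stc/subj01_trial_avg_morphed-lh.stc',
          stan_path='/data/group_labels/standard/',
          size=8.0, min_subject='fsaverage')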
def ROIs_standardlization(fname_stc, size=8.0):
    import mne
    import os
    import numpy as np
    fnlist = get_files_from_list(fname_stc)
    # loop across all filenames
    for fn_stc in fnlist:
        stc_morph = mne.read_source_estimate(fn_stc, subject=subject_id)
        # extract the subject information from the file name
        name = os.path.basename(fn_stc)
        subject = name.split('_')[0]
        subject_path = subjects_dir + subject
        sta_path = MNI_dir + 'func_labels/standard/'
        if not os.path.exists(sta_path):
            os.makedirs(sta_path)
        list_dirs = os.walk(subject_path + '/func_labels/merged/')
        for root, dirs, files in list_dirs:
            for f in files:
                label_fname = os.path.join(root, f)
                label = mne.read_label(label_fname)
                stc_label = stc_morph.in_label(label)
                src_pow = np.sum(stc_label.data ** 2, axis=1)
                if label.hemi == 'lh':
                    # Get the max MNE value within each ROI
                    seed_vertno = stc_label.vertices[0][np.argmax(src_pow)]
                    func_label = mne.grow_labels(subject_id, seed_vertno,
                                                 extents=size, hemis=0,
                                                 subjects_dir=subjects_dir,
                                                 n_jobs=1)
                    func_label = func_label[0]
                    func_label.save(sta_path + '%s_%s' % (subject, f))
                elif label.hemi == 'rh':
                    seed_vertno = stc_label.vertices[1][np.argmax(src_pow)]
                    func_label = mne.grow_labels(subject_id, seed_vertno,
                                                 extents=size, hemis=1,
                                                 subjects_dir=subjects_dir,
                                                 n_jobs=1)
                    func_label = func_label[0]
                    func_label.save(sta_path + '%s_%s' % (subject, f))
def apply_stand(fn_stc, event, radius=5.0, min_subject='fsaverage'):
    """Standardize the size of the selected ROIs.

    Parameters
    ----------
    fn_stc : string or list
        Averaged STC of the trials.
    event : str
        Event name used to build the label subdirectories.
    radius : float
        The radius of every ROI.
    """
    fnlist = get_files_from_list(fn_stc)
    # loop across all filenames
    for fn_stc in fnlist:
        import glob
        import shutil
        labels_path = os.path.split(fn_stc)[0]
        stan_path = labels_path + '/%s/standard/' % event
        reset_directory(stan_path)
        source_path = labels_path + '/%s/ini/' % event
        source = glob.glob(os.path.join(source_path, '*.*'))
        for filename in source:
            shutil.copy(filename, stan_path)
        stc = mne.read_source_estimate(fn_stc, subject=min_subject)
        list_dirs = os.walk(stan_path)
        for root, dirs, files in list_dirs:
            for f in files:
                label_fname = os.path.join(root, f)
                label = mne.read_label(label_fname)
                stc_label = stc.in_label(label)
                src_pow = np.sum(stc_label.data ** 2, axis=1)
                if label.hemi == 'lh':
                    # Get the max MNE value within each ROI
                    seed_vertno = stc_label.vertices[0][np.argmax(src_pow)]
                    func_label = mne.grow_labels(min_subject, seed_vertno,
                                                 extents=radius, hemis=0,
                                                 subjects_dir=subjects_dir,
                                                 n_jobs=1)
                    func_label = func_label[0]
                    func_label.save(stan_path + '%s' % f)
                elif label.hemi == 'rh':
                    seed_vertno = stc_label.vertices[1][np.argmax(src_pow)]
                    func_label = mne.grow_labels(min_subject, seed_vertno,
                                                 extents=radius, hemis=1,
                                                 subjects_dir=subjects_dir,
                                                 n_jobs=1)
                    func_label = func_label[0]
                    func_label.save(stan_path + '%s' % f)
def grow_labels(subject, seeds, extents, surface, subjects_dir):
    names = [str(seed) for seed in seeds]
    labels = mne.grow_labels(subject, seeds, extents, 0, subjects_dir,
                             names=names, surface=surface)
    seeds_pos = []
    for label, seed in zip(labels, seeds):
        seed_pos = label.pos[np.where(label.vertices == seed)[0][0]]
        seeds_pos.append(seed_pos)
        label.pos /= 1000.0
        label.save(op.join(subjects_dir, subject, 'label',
                           '{}.label'.format(label.name)))
    return np.array(seeds_pos)
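A usage sketch for the wrapper above, with placeholder vertex numbers and paths; it assumes numpy (np), os.path (op), and mne are imported at module level, as the wrapper itself does, and that the label output directory is writable.

# Grow 5 mm labels around two hypothetical left-hemisphere seed vertices on
# the white surface; the labels are written to <subjects_dir>/sample/label/
# and the returned array holds one (x, y, z) position per seed.
seed_positions = grow_labels('sample', seeds=[1000, 2000], extents=5.,
                             surface='white',
                             subjects_dir='/data/subjects')
print(seed_positions.shape)  # expected: (2, 3)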
def test_grow_labels():
    """Test generation of circular source labels"""
    seeds = [0, 50000]
    hemis = [0, 1]
    labels = grow_labels('sample', seeds, 3, hemis)
    for label, seed, hemi in zip(labels, seeds, hemis):
        assert np.any(label.vertices == seed)
        if hemi == 0:
            assert label.hemi == 'lh'
        else:
            assert label.hemi == 'rh'
def test_grow_labels():
    """Test generation of circular source labels"""
    seeds = [0, 50000]
    # these were chosen manually in mne_analyze
    should_be_in = [[49, 227], [51207, 48794]]
    hemis = [0, 1]
    labels = grow_labels('sample', seeds, 3, hemis, subjects_dir, n_jobs=2)
    for label, seed, hemi, sh in zip(labels, seeds, hemis, should_be_in):
        assert_true(np.any(label.vertices == seed))
        assert_true(np.all(in1d(sh, label.vertices)))
        if hemi == 0:
            assert_equal(label.hemi, 'lh')
        else:
            assert_equal(label.hemi, 'rh')
def labels_from_mni_coords(seeds, extent=30., subject='fsaverage',
                           surface='white', mask=None, subjects_dir=None,
                           parc=None):
    """Create a parcellation from seed coordinates in MNI space

    Parameters
    ----------
    seeds : dict
        Seed coordinates. Keys are label names, including -hemi tags.
        Values are seeds (array_like of shape (3,) or (n_seeds, 3)).
    extent : scalar
        Extent of the label in millimeters (maximum distance from the seed).
    subject : str
        MRI-subject to use (default 'fsaverage').
    surface : str
        Surface to use (default 'white').
    mask : None | str
        A parcellation used to mask the parcellation under construction.
    subjects_dir : str
        SUBJECTS_DIR.
    parc : None | str
        Name of the parcellation under construction (only used for error
        messages).
    """
    name_re = re.compile(r"\w+-(lh|rh)$")
    if not all(name.endswith(('lh', 'rh')) for name in seeds):
        err = ("Names need to end in 'lh' or 'rh' so that the proper "
               "hemisphere can be selected")
        raise ValueError(err)

    # load surfaces
    subjects_dir = get_subjects_dir(subjects_dir)
    fpath = os.path.join(subjects_dir, subject, 'surf',
                         '.'.join(('%s', surface)))
    surfs = {hemi: mne.read_surface(fpath % hemi) for hemi in ('lh', 'rh')}

    # prepare seed properties for mne.grow_labels
    vertices = []
    names = []
    hemis = []
    for name, coords_ in seeds.items():
        m = name_re.match(name)
        if not m:
            raise ValueError("Invalid seed name in %r parc: %r. Names must "
                             "conform to the 'xxx-lh' or 'xxx-rh' scheme."
                             % (parc, name))
        coords = np.atleast_2d(coords_)
        if coords.ndim != 2 or coords.shape[1] != 3:
            raise ValueError("Invalid coordinate specification for seed %r "
                             "in parc %r: %r. Seeds need to be specified as "
                             "arrays with shape (3,) or (n_seeds, 3)."
                             % (name, parc, coords_))
        hemi = m.group(1)
        seed_verts = []
        for coord in coords:
            dist = np.sqrt(np.sum((surfs[hemi][0] - coord) ** 2, axis=1))
            seed_verts.append(np.argmin(dist))
        vertices.append(seed_verts)
        names.append(name)
        hemis.append(hemi == 'rh')

    # grow labels
    labels = mne.grow_labels(subject, vertices, extent, hemis, subjects_dir,
                             1, False, names, surface)

    # apply mask
    if mask is not None:
        mlabels = mne.read_labels_from_annot(subject, mask,
                                             subjects_dir=subjects_dir)
        unknown = {l.hemi: l for l in mlabels
                   if l.name.startswith('unknown-')}
        for label in labels:
            rm = unknown[label.hemi]
            if np.any(np.in1d(label.vertices, rm.vertices)):
                label.vertices = np.setdiff1d(label.vertices, rm.vertices,
                                              True)

    return labels
def labels_from_mni_coords(seeds, extent=30., subject='fsaverage',
                           surface='white', mask=None, subjects_dir=None,
                           parc=None):
    """Create a parcellation from seed coordinates in MNI space

    Parameters
    ----------
    seeds : dict
        Seed coordinates. Keys are label names, including -hemi tags.
        Values are seeds (array_like of shape (3,) or (n_seeds, 3)).
    extent : scalar
        Extent of the label in millimeters (maximum distance from the seed).
    subject : str
        MRI-subject to use (default 'fsaverage').
    surface : str
        Surface to use (default 'white').
    mask : None | str
        A parcellation used to mask the parcellation under construction.
    subjects_dir : str
        SUBJECTS_DIR.
    parc : None | str
        Name of the parcellation under construction (only used for error
        messages).
    """
    name_re = re.compile(r"^\w+-(lh|rh)$")
    matches = {name: name_re.match(name) for name in seeds}
    invalid = sorted(name for name, m in matches.items() if m is None)
    if invalid:
        raise ValueError(
            "Invalid seed names in parc %r: %s; seed names need to conform "
            "to the 'xxx-lh' or 'xxx-rh' scheme so that the proper "
            "hemisphere can be selected"
            % (parc, ', '.join(map(repr, invalid))))

    # load surfaces
    subjects_dir = get_subjects_dir(subjects_dir)
    fpath = os.path.join(subjects_dir, subject, 'surf',
                         '.'.join(('%s', surface)))
    surfs = {hemi: mne.read_surface(fpath % hemi) for hemi in ('lh', 'rh')}

    # prepare seed properties for mne.grow_labels
    vertices = []
    names = []
    hemis = []
    for name, coords_ in seeds.items():
        coords = np.atleast_2d(coords_)
        if coords.ndim != 2 or coords.shape[1] != 3:
            raise ValueError("Invalid coordinate specification for seed %r "
                             "in parc %r: %r. Seeds need to be specified as "
                             "arrays with shape (3,) or (n_seeds, 3)."
                             % (name, parc, coords_))
        hemi = matches[name].group(1)
        seed_verts = []
        for coord in coords:
            dist = np.sqrt(np.sum((surfs[hemi][0] - coord) ** 2, axis=1))
            seed_verts.append(np.argmin(dist))
        vertices.append(seed_verts)
        names.append(name)
        hemis.append(hemi == 'rh')

    # grow labels
    labels = mne.grow_labels(subject, vertices, extent, hemis, subjects_dir,
                             1, False, names, surface)

    # apply mask
    if mask is not None:
        mlabels = mne.read_labels_from_annot(subject, mask,
                                             subjects_dir=subjects_dir)
        unknown = {l.hemi: l for l in mlabels
                   if l.name.startswith('unknown-')}
        for label in labels:
            rm = unknown[label.hemi]
            if np.any(np.in1d(label.vertices, rm.vertices)):
                label.vertices = np.setdiff1d(label.vertices, rm.vertices,
                                              True)

    return labels
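A hedged usage sketch for labels_from_mni_coords; the seed coordinates below are made-up examples and assume the fsaverage white surfaces are available under SUBJECTS_DIR.

# Hypothetical seeds in MNI space (mm); keys must end in '-lh' or '-rh'.
seeds = {'visual-lh': [-41, -77, 3],
         'visual-rh': [41, -77, 3]}
# Grow non-overlapping 30 mm labels around the nearest white-surface vertices.
labels = labels_from_mni_coords(seeds, extent=30., subject='fsaverage',
                                surface='white', subjects_dir=None,
                                parc='demo_parc')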
def identify_roi_from_atlas(pos, approx=4, atlas=None, subjects_dir=None,
                            subject=None):
    '''Find the surface labels contacted by an electrode at this position
    in RAS space.

    Parameters
    ----------
    pos : np.ndarray
        1x3 matrix holding the position of the electrode to identify.
    approx : int
        Error radius in millimeters.
    atlas : str or None
        The name of the surface parcellation; does not apply to subcortical
        structures. If None, aparc is used.
    '''
    if subjects_dir is None or subjects_dir == '':
        subjects_dir = os.environ['SUBJECTS_DIR']
    if subject is None or subject == '':
        subject = os.environ['SUBJECT']

    if atlas is None or atlas in ('', 'aparc'):
        return identify_roi_from_aparc(pos, approx=approx,
                                       subjects_dir=subjects_dir,
                                       subject=subject)

    from scipy.spatial.distance import cdist

    # conceptually, we should grow the closest vertex around this electrode
    # probably following snapping but the code for this function is not
    # altered either way

    # load the surfaces and annotation
    # uses the pial surface, this change is pushed to MNE python
    lh_pia, _ = nib.freesurfer.read_geometry(
        os.path.join(subjects_dir, subject, 'surf', 'lh.pial'))
    rh_pia, _ = nib.freesurfer.read_geometry(
        os.path.join(subjects_dir, subject, 'surf', 'rh.pial'))
    pia = np.vstack((lh_pia, rh_pia))

    # find closest vertex
    closest_vert = np.argmin(cdist(pia, [pos]))

    # grow the area of surface surrounding the vertex
    import mne

    # we force the label to only contact one hemisphere even if it is
    # beyond the extent of the medial surface
    hemi_str = 'lh' if closest_vert < len(lh_pia) else 'rh'
    hemi_code = 0 if hemi_str == 'lh' else 1

    if hemi_str == 'rh':
        closest_vert -= len(lh_pia)

    radius_label, = mne.grow_labels(subject, closest_vert, approx, hemi_code,
                                    subjects_dir=subjects_dir,
                                    surface='pial')

    parcels = mne.read_labels_from_annot(subject, parc=atlas, hemi=hemi_str,
                                         subjects_dir=subjects_dir,
                                         surf_name='pial')

    regions = []
    for parcel in parcels:
        if len(np.intersect1d(parcel.vertices, radius_label.vertices)) > 0:
            # force convert from unicode
            # regions.append(str(parcel.name))
            regions.append(parcel)

    # subcortical_regions = identify_roi_from_aparc(
    #     pos, approx=approx, subjects_dir=subjects_dir, subject=subject,
    #     subcortical_only=True)
    # if regions is not None and subcortical_regions is not None:
    #     regions.extend(subcortical_regions)

    return regions
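A usage sketch for identify_roi_from_atlas; the electrode position, subject, and paths are placeholders, and it assumes the subject's pial surfaces and the chosen annotation exist on disk.

import numpy as np

# Hypothetical electrode position in surface RAS (mm) and hypothetical paths.
pos = np.array([-42.0, -20.0, 55.0])
regions = identify_roi_from_atlas(pos, approx=4, atlas='aparc.a2009s',
                                  subjects_dir='/data/subjects',
                                  subject='sample')
print([parcel.name for parcel in regions])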
def apply_sigSTC(fn_list_v, vevent, mevent, method='dSPM', vtmin=0.,
                 vtmax=0.35, mtmin=-0.3, mtmax=0.05, radius=10.0):
    from mne import spatial_tris_connectivity, grade_to_tris
    from mne.stats import spatio_temporal_cluster_test
    from scipy import stats as stats
    X1, X2 = [], []
    stcs_trial = []
    for fn_v in fn_list_v:
        fn_m = fn_v[:fn_v.rfind('evtW')] + 'evtW_%s_bc_norm_1-lh.stc' % mevent
        stc_v = mne.read_source_estimate(fn_v)
        stcs_trial.append(stc_v.copy())
        stc_m = mne.read_source_estimate(fn_m)
        stc_v.resample(200)
        stc_m.resample(200)
        X1.append(stc_v.copy().crop(vtmin, vtmax).data)
        X2.append(stc_m.copy().crop(mtmin, mtmax).data)
    stcs_path = subjects_dir + '/fsaverage/%s_ROIs/conditions/' % method
    reset_directory(stcs_path)
    fn_avg = stcs_path + '%s' % vevent
    stcs = np.array(stcs_trial)
    stc_avg = np.sum(stcs, axis=0) / stcs.shape[0]
    stc_avg.save(fn_avg, ftype='stc')
    X1 = np.array(X1).transpose(0, 2, 1)
    X2 = np.array(X2).transpose(0, 2, 1)

    ###########################################################################
    # Compute statistic
    # To use an algorithm optimized for spatio-temporal clustering, we
    # just pass the spatial connectivity matrix (instead of spatio-temporal)
    print('Computing connectivity.')
    connectivity = spatial_tris_connectivity(grade_to_tris(5))

    # Note that X needs to be a list of multi-dimensional arrays of shape
    # samples (subjects_k) x time x space, so we permute dimensions
    X = [X1, X2]

    # Now let's actually do the clustering. This can take a long time...
    # Here we set the threshold quite high to reduce computation.
    p_threshold = 0.0001
    f_threshold = stats.distributions.f.ppf(1. - p_threshold / 2.,
                                            X1.shape[0] - 1,
                                            X1.shape[0] - 1)
    print('Clustering.')
    clu = spatio_temporal_cluster_test(X, connectivity=connectivity,
                                       n_jobs=2, threshold=f_threshold)
    # Now select the clusters that are sig. at p < 0.05 (note that this
    # value is multiple-comparisons corrected).
    # fsave_vertices = [np.arange(10242), np.arange(10242)]
    # stc_all_cluster_vis = summarize_clusters_stc(clu, tstep=tstep,
    #                                              vertices=fsave_vertices,
    #                                              subject='fsaverage')
    # stc_sig = stc_all_cluster_vis.mean()
    # fn_sig = subjects_dir + '/fsaverage/%s_ROIs/%s' % (method, vevent)
    # stc_sig.save(fn_sig)
    tstep = stc_v.tstep
    T_obs, clusters, clu_pvals, _ = clu
    n_times, n_vertices = T_obs.shape
    good_cluster_inds = np.where(clu_pvals < 0.05)[0]
    seeds = []
    # Build a convenient representation of each cluster, where each
    # cluster becomes a "time point" in the SourceEstimate
    T_obs = abs(T_obs)
    if len(good_cluster_inds) > 0:
        data = np.zeros((n_vertices, n_times))
        for cluster_ind in good_cluster_inds:
            data.fill(0)
            v_inds = clusters[cluster_ind][1]
            t_inds = clusters[cluster_ind][0]
            data[v_inds, t_inds] = T_obs[t_inds, v_inds]
            # Store a nice visualization of the cluster by summing across
            # time
            data = np.sign(data) * np.logical_not(data == 0) * tstep
            seed = np.argmax(data.sum(axis=-1))
            seeds.append(seed)
    min_subject = 'fsaverage'
    labels_path = subjects_dir + '/fsaverage/dSPM_ROIs/%s/ini' % vevent
    reset_directory(labels_path)
    seeds = np.array(seeds)
    non_index_lh = seeds[seeds < 10242]
    if non_index_lh.size > 0:
        func_labels_lh = mne.grow_labels(min_subject, non_index_lh,
                                         extents=radius, hemis=0,
                                         subjects_dir=subjects_dir,
                                         n_jobs=1)
        i = 0
        while i < len(func_labels_lh):
            func_label = func_labels_lh[i]
            func_label.save(labels_path + '/%s_%d' % (vevent, i))
            i = i + 1
    seeds_rh = seeds - 10242
    non_index_rh = seeds_rh[seeds_rh > 0]
    if non_index_rh.size > 0:
        func_labels_rh = mne.grow_labels(min_subject, non_index_rh,
                                         extents=radius, hemis=1,
                                         subjects_dir=subjects_dir,
                                         n_jobs=1)
        # right hemisphere definition
        j = 0
        while j < len(func_labels_rh):
            func_label = func_labels_rh[j]
            func_label.save(labels_path + '/%s_%d' % (vevent, j))
            j = j + 1
dists = []
for orig_label, label_pos, knob_pos in zip(primaryMotor, primaryMotorMNI_pos,
                                           hand_knob_pos):
    # Find dist between MNI point and all vertices
    dists.append(np.squeeze(cdist(knob_pos, label_pos, 'euclidean')))

    # Find min dist and index
    min_dist, ind = min((min_dist, ind)
                        for (ind, min_dist) in enumerate(dists[-1]))
    print('Min dist: %02.02f Ind: %i' % (min_dist, ind))

    # Set seed vertex (closest vert to MNI point)
    seed = [orig_label.vertices[ind]]
    seed = seed * len(labelRadii)

    # Generate circular labels
    hemi_i = ['lh', 'rh'].index(orig_label.hemi)
    handMotorLabels = mne.grow_labels(modelSubj, seed, labelRadii, hemi_i,
                                      n_jobs=6)

    # Find intersection of circular labels with the pre-central sulcus label
    # for each radius
    overlapInds = [np.in1d(np.array(synthetic_label.vertices),
                           np.array(orig_label.vertices))
                   for synthetic_label in handMotorLabels]

    # Restrict vertices in synthetic labels
    for i, label in enumerate(handMotorLabels):
        label.vertices = label.vertices[overlapInds[i]]
        label.pos = label.pos[overlapInds[i]]
        label.values = label.values[overlapInds[i]]
        label.subject = modelSubj
        label.name = ('G_precentral_handMotor_radius_' +
                      str(labelRadii[i]) + 'mm')
def make_annot_from_csv(subject, subjects_dir, csv_fname, lab_size=10,
                        parc_fname='standard_garces_2016', n_jobs=4,
                        make_annot=False, return_label_coords=False):
    """Make annotations from a given csv file.

    For a given subject and a csv file with a set of MNI coordinates, make
    an annotation of labels grown from these ROIs. Mainly used to generate a
    standard resting-state network annotation from MNI coordinates provided
    in the literature.

    Parameters
    ----------
    subject : str
        The name of the subject.
    subjects_dir : str
        The SUBJECTS_DIR where the surfaces are available.
    csv_fname : str
        Comma separated file with seed MNI coordinates, e.g.:
        Network,Node,x,y,z,BA,hemi
        Visual,Left visual cortex,-41,-77,3,19,lh
    lab_size : int
        The size of the label (radius in mm) to be grown around the ROI
        coordinates.
    parc_fname : str
        Name used to save the parcellation if make_annot is True.
    n_jobs : int
        Number of parallel jobs to run.
    make_annot : bool
        If True, an annotation file is created and written to disk.
    return_label_coords : bool
        If True, the function returns the labels and the MNI seed
        coordinates used.
    """
    from mne import grow_labels
    import pandas as pd
    import matplotlib.cm as cmx
    import matplotlib.colors as colors
    from surfer import (Surface, utils)

    surf = 'white'
    hemi = 'both'
    rsns = pd.read_csv(csv_fname, comment='#')

    all_coords, all_labels = [], []
    all_foci = []
    for netw in rsns.Network.unique():
        print(netw, end=' ')
        nodes = rsns[rsns.Network == netw]['Node'].values
        for node in nodes:
            mni_coords = rsns[(rsns.Network == netw) &
                              (rsns.Node == node)].loc[:, ('x', 'y', 'z')].values[0]
            all_coords.append(mni_coords)
            hemi = rsns[(rsns.Network == netw) &
                        (rsns.Node == node)].hemi.values[0]
            print(node, ':', mni_coords, hemi, end=' ')

            # but we are interested in getting the vertices and
            # growing our own labels
            foci_surf = Surface(subject, hemi=hemi, surf=surf,
                                subjects_dir=subjects_dir)
            foci_surf.load_geometry()
            foci_vtxs = utils.find_closest_vertices(foci_surf.coords,
                                                    mni_coords)
            print('Closest vertex on surface chosen:', foci_vtxs)
            all_foci.append(foci_vtxs)

            if hemi == 'lh':
                hemis = 0
            else:
                hemis = 1  # rh

            lab_name = netw + '_' + node
            mylabel = grow_labels(subject, foci_vtxs, extents=lab_size,
                                  hemis=hemis, subjects_dir=subjects_dir,
                                  n_jobs=n_jobs, overlap=True,
                                  names=lab_name, surface=surf)[0]
            all_labels.append(mylabel)

    # assign colours to the labels
    # labels within the same network get the same color
    n_nodes = len(rsns.Node.unique())
    # n_networks = len(rsns.Network.unique())  # total number of networks
    color_norm = colors.Normalize(vmin=0, vmax=n_nodes - 1)
    scalar_map = cmx.ScalarMappable(norm=color_norm, cmap='hsv')
    for n, lab in enumerate(all_labels):
        lab.color = scalar_map.to_rgba(n)

    if make_annot:
        mne.label.write_labels_to_annot(all_labels, subject=subject,
                                        parc=parc_fname,
                                        subjects_dir=subjects_dir)

    if return_label_coords:
        # returns the list of labels grown and the MNI coords used as seeds
        return all_labels, all_coords, all_foci
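A usage sketch for make_annot_from_csv; the csv path and subjects directory are placeholders, and the file is assumed to follow the header shown in the docstring (Network,Node,x,y,z,BA,hemi).

# Hypothetical csv path; returns the grown labels, the MNI seed coordinates,
# and the surface vertices chosen as seeds.
labels, coords, foci = make_annot_from_csv(
    subject='fsaverage', subjects_dir='/data/subjects',
    csv_fname='rsn_seeds.csv', lab_size=10, make_annot=False,
    return_label_coords=True)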
def test_grow_labels():
    """Test generation of circular source labels."""
    seeds = [0, 50000]
    # these were chosen manually in mne_analyze
    should_be_in = [[49, 227], [51207, 48794]]
    hemis = [0, 1]
    names = ['aneurism', 'tumor']
    labels = grow_labels('sample', seeds, 3, hemis, subjects_dir, names=names)

    tgt_names = ['aneurism-lh', 'tumor-rh']
    tgt_hemis = ['lh', 'rh']
    for label, seed, hemi, sh, name in zip(labels, seeds, tgt_hemis,
                                           should_be_in, tgt_names):
        assert np.any(label.vertices == seed)
        assert np.all(np.in1d(sh, label.vertices))
        assert_equal(label.hemi, hemi)
        assert_equal(label.name, name)

    # grow labels with and without overlap
    seeds = [57532, [58887, 6304]]
    l01, l02 = grow_labels('fsaverage', seeds, 20, [0, 0], subjects_dir)
    seeds = [57532, [58887, 6304]]
    l11, l12 = grow_labels('fsaverage', seeds, 20, [0, 0], subjects_dir,
                           overlap=False)

    # test label naming
    assert_equal(l01.name, 'Label_0-lh')
    assert_equal(l02.name, 'Label_1-lh')
    assert_equal(l11.name, 'Label_0-lh')
    assert_equal(l12.name, 'Label_1-lh')

    # test color assignment
    l11_c, l12_c = grow_labels('fsaverage', seeds, 20, [0, 0], subjects_dir,
                               overlap=False, colors=None)
    assert_equal(l11_c.color, _n_colors(2)[0])
    assert_equal(l12_c.color, _n_colors(2)[1])

    lab_colors = np.array([[0, 0, 1, 1], [1, 0, 0, 1]])
    l11_c, l12_c = grow_labels('fsaverage', seeds, 20, [0, 0], subjects_dir,
                               overlap=False, colors=lab_colors)
    assert_array_equal(l11_c.color, lab_colors[0, :])
    assert_array_equal(l12_c.color, lab_colors[1, :])

    lab_colors = np.array([.1, .2, .3, .9])
    l11_c, l12_c = grow_labels('fsaverage', seeds, 20, [0, 0], subjects_dir,
                               overlap=False, colors=lab_colors)
    assert_array_equal(l11_c.color, lab_colors)
    assert_array_equal(l12_c.color, lab_colors)

    # make sure set 1 does not overlap
    overlap = np.intersect1d(l11.vertices, l12.vertices, True)
    assert_array_equal(overlap, [])

    # make sure both sets cover the same vertices
    l0 = l01 + l02
    l1 = l11 + l12
    assert_array_equal(l1.vertices, l0.vertices)

    # non-overlapping (gh-8848)
    for overlap in (False, True):
        grow_labels('fsaverage', [0], 1, 1, subjects_dir, overlap=overlap)
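For context, a minimal direct call to mne.grow_labels exercising the same arguments the tests above cover (seeds, extents, hemis, names, overlap); the seed vertex numbers are arbitrary placeholders and fsaverage is assumed to be available in SUBJECTS_DIR.

import mne

# Grow two 10 mm labels on fsaverage, one per hemisphere, without overlap;
# the seed vertex numbers below are placeholders, not meaningful locations.
labels = mne.grow_labels('fsaverage', seeds=[1000, 2000], extents=10.,
                         hemis=[0, 1], subjects_dir=None,
                         names=['roi_a', 'roi_b'], overlap=False)
for label in labels:
    print(label.name, len(label.vertices))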