def euclDist_infl(subject):
    import numpy as np
    import nibabel.freesurfer.io as fs
    from scipy.spatial import distance_matrix

    fsDir = '/afs/cbs.mpg.de/projects/mar004_lsd-lemon-preproc/freesurfer'
    surfDir = '/afs/cbs.mpg.de/projects/mar005_lsd-lemon-surf/probands'

    for hemi in ['lh', 'rh']:

        # fsaverage5 coords on sphere
        fsa5_sphere_coords = fs.read_geometry('%s/fsaverage5/surf/%s.sphere' % (fsDir, hemi))[0]
        cort = fs.read_label('%s/fsaverage5/label/%s.cortex.label' % (fsDir, hemi))

        # get corresponding nodes on subject sphere (find coords of high-dim subject surface
        # closest to fsa5 nodes in sphere space)
        subj_sphere_coords = fs.read_geometry('%s/%s/surf/%s.sphere' % (fsDir, subject, hemi))[0]
        subj_indices = []
        for node in cort:
            dist2all = np.squeeze(distance_matrix(np.expand_dims(fsa5_sphere_coords[node], axis=0),
                                                  subj_sphere_coords))
            subj_indices.append(list(dist2all).index(min(dist2all)))

        # pair-wise euclidean distance between included nodes on subject surface (midline)
        subj_surf_coords = fs.read_geometry('%s/%s/surf/%s.inflated' % (fsDir, subject, hemi))[0]
        euclDist = np.zeros((10242, 10242))
        euclDist[np.ix_(cort, cort)] = distance_matrix(subj_surf_coords[subj_indices, :],
                                                       subj_surf_coords[subj_indices, :])

        np.save('%s/%s/distance_maps/%s_%s_euclDist_inflated_fsa5' % (surfDir, subject, subject, hemi),
                euclDist)
def euclDist(subject):
    import numpy as np
    import nibabel.freesurfer.io as fs
    from scipy.spatial import distance_matrix

    fsDir = '/afs/cbs.mpg.de/projects/mar004_lsd-lemon-preproc/freesurfer'
    surfDir = '/afs/cbs.mpg.de/projects/mar005_lsd-lemon-surf/probands'

    for hemi in ['lh', 'rh']:

        # fsaverage5 coords on sphere
        fsa5_sphere_coords = fs.read_geometry('%s/fsaverage5/surf/%s.sphere' % (fsDir, hemi))[0]
        cort = fs.read_label('%s/fsaverage5/label/%s.cortex.label' % (fsDir, hemi))

        # get corresponding nodes on subject sphere (find coords of high-dim subject surface
        # closest to fsa5 nodes in sphere space)
        subj_sphere_coords = fs.read_geometry('%s/%s/surf/%s.sphere' % (fsDir, subject, hemi))[0]
        subj_indices = []
        for node in cort:
            dist2all = np.squeeze(
                distance_matrix(
                    np.expand_dims(fsa5_sphere_coords[node], axis=0),
                    subj_sphere_coords))
            subj_indices.append(list(dist2all).index(min(dist2all)))

        # pair-wise euclidean distance between included nodes on subject surface (midline)
        subj_surf_coords_pial = fs.read_geometry('%s/%s/surf/%s.pial' % (fsDir, subject, hemi))[0]
        subj_surf_coords_wm = fs.read_geometry('%s/%s/surf/%s.smoothwm' % (fsDir, subject, hemi))[0]
        subj_surf_coords = (subj_surf_coords_pial + subj_surf_coords_wm) / 2.

        euclDist = np.zeros((10242, 10242))
        euclDist[np.ix_(cort, cort)] = distance_matrix(
            subj_surf_coords[subj_indices, :],
            subj_surf_coords[subj_indices, :])

        np.save(
            '%s/%s/distance_maps/%s_%s_euclDist_fsa5' % (surfDir, subject, subject, hemi),
            euclDist)
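# Minimal usage sketch for the two distance helpers above (not part of the original scripts;
# the subject id is a hypothetical example and must exist under fsDir and surfDir):
euclDist('sub-01')       # distances on the mid surface (average of pial and smoothwm)
euclDist_infl('sub-01')  # distances on the inflated surface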
def read_label_md(label_file, hemisphere_label, meta_data=None):
    """
    Read label file and record meta data for it.

    A label file is a FreeSurfer text file like 'subject/label/lh.cortex.label' that contains a
    list of vertex ids (with RAS coordinates) that are part of the label. It may optionally
    contain a scalar value for each vertex, but that is currently ignored by this function.

    Parameters
    ----------
    label_file: string
        A string representing a path to a FreeSurfer vertex annotation file (e.g., the path to
        'lh.cortex.label').

    hemisphere_label: {'lh' or 'rh'}
        A string representing the hemisphere this file belongs to. This is used to write the
        correct meta data.

    meta_data: dictionary | None, optional
        Meta data to merge into the output `meta_data`. Defaults to the empty dictionary.

    Returns
    -------
    verts_in_label: ndarray, shape (num_labeled_verts,)
        Contains an array of vertex ids, one id for each vertex that is part of the label.

    meta_data: dictionary
        Contains detailed information on the data that was loaded. The following keys are
        available (replace `?h` with the value of the argument `hemisphere_label`, which must be
        'lh' or 'rh').
            - `?h.label_file` : the file that was loaded
    """
    if hemisphere_label not in ('lh', 'rh'):
        raise ValueError("ERROR: hemisphere_label must be one of {'lh', 'rh'} but is '%s'." % hemisphere_label)

    if meta_data is None:
        meta_data = {}

    verts_in_label = fsio.read_label(label_file, read_scalars=False)

    key_for_label_file = hemisphere_label + '.label_file'
    meta_data[key_for_label_file] = label_file

    return verts_in_label, meta_data
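# Minimal usage sketch for read_label_md (the label path is a hypothetical example; the import
# provides the module-level `fsio` name that the function relies on):
import nibabel.freesurfer.io as fsio

verts_in_label, md = read_label_md('/data/subject1/label/lh.cortex.label', 'lh')
print(verts_in_label.shape)   # vertex ids that belong to the label
print(md['lh.label_file'])    # meta data key written by the function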
def smooth_aparc(insurfname, inaparcname, incortexname, outaparcname):
    """
    (string) -> None
    Smooths the aparc annotation.
    """
    # read input files
    print("Reading in surface: {} ...".format(insurfname))
    surf = read_geometry(insurfname, read_metadata=True)
    print("Reading in annotation: {} ...".format(inaparcname))
    aparc = fs.read_annot(inaparcname)
    print("Reading in cortex label: {} ...".format(incortexname))
    cortex = fs.read_label(incortexname)

    # set labels (n) and triangles (n x 3)
    labels = aparc[0]
    faces = surf[1]
    nvert = labels.size
    if labels.size != surf[0].shape[0]:
        sys.exit("ERROR smooth_aparc: vertex count " + format(surf[0].shape[0]) + " does not match label length " + format(labels.size))

    # Compute Cortex Mask
    mask = np.zeros(labels.shape, dtype=bool)
    mask[cortex] = True

    # check if we have places where non-cortex has some labels
    noncortnum = np.where(~mask & (labels != -1))
    print("Non-cortex vertices with labels: " + str(noncortnum[0].size))
    # number of places where non-cortex has some real labels
    # here we need to decide how to deal with them:
    # either we set everything outside cortex to -1 (the FS way),
    # or we keep these real labels and allow them to vote, maybe even shrink the cortex label? Probably not.

    # get non-cortex ids (here we could subtract the ids that have a real label)
    # for now we remove everything outside cortex
    noncortids = np.where(~mask)

    # remove triangles where one vertex is non-cortex, to prevent these edges from voting on neighbors later
    rr = np.in1d(faces, noncortids)
    rr = np.reshape(rr, faces.shape)
    rr = np.amax(rr, 1)
    faces = faces[~rr, :]

    # get Edge matrix (adjacency)
    adjM = get_adjM(faces, nvert)

    # add identity so that each vertex votes in the mode filter below
    adjM = adjM + sparse.eye(adjM.shape[0])

    #print("adj shape: {}".format(adjM.shape))
    #print("v shape: {}".format(surf[0].shape))
    #print("labels shape: {}".format(labels.size))
    #print("labels: {}".format(labels))
    #print("minlab: "+str(np.min(labels))+" maxlab: "+str(np.max(labels)))

    # set all labels inside cortex that are -1 or 0 to the fill label
    fillonlylabel = np.max(labels) + 1
    labels[mask & (labels == -1)] = fillonlylabel
    labels[mask & (labels == 0)] = fillonlylabel
    # now we do not have any -1 or 0 (except 0 outside of cortex)

    # FILL HOLES
    ids = np.where(labels == fillonlylabel)[0]
    counter = 1
    idssize = ids.size
    while idssize != 0:
        print("Fill Round: " + str(counter))
        labels_new = mode_filter(adjM, labels, fillonlylabel, np.array([fillonlylabel]))
        labels = labels_new
        ids = np.where(labels == fillonlylabel)[0]
        if ids.size == idssize:
            # no further improvement; could be an island in the cortex label that cannot be filled
            print("Warning: Cannot improve but still have holes. Maybe there is an island in the cortex label that cannot be filled with real labels.")
            fillids = np.where(labels == fillonlylabel)[0]
            labels[fillids] = 0
            rr = np.in1d(faces, fillids)
            rr = np.reshape(rr, faces.shape)
            rr = np.amax(rr, 1)
            faces = faces[~rr, :]
            # get Edge matrix (adjacency)
            adjM = get_adjM(faces, nvert)
            # add identity so that each vertex votes in the mode filter below
            adjM = adjM + sparse.eye(adjM.shape[0])
            break
        idssize = ids.size
        counter += 1

    # SMOOTH other labels (first with a wider kernel, then fine-tune):
    labels = mode_filter(adjM * adjM, labels)
    labels = mode_filter(adjM, labels)

    # set labels outside cortex to -1
    labels[~mask] = -1

    print("Outputting fixed annot: {}".format(outaparcname))
    fs.write_annot(outaparcname, labels, aparc[1], aparc[2])
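# Minimal usage sketch for smooth_aparc. The function relies on module-level helpers
# (get_adjM, mode_filter) and imports (np, sys, sparse, fs, read_geometry) from its
# surrounding module, which are not shown here; the file paths below are hypothetical examples.
smooth_aparc('subject1/surf/lh.white',
             'subject1/label/lh.aparc.annot',
             'subject1/label/lh.cortex.label',
             'subject1/label/lh.aparc.smoothed.annot')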
outfile = 'BCI-DNI_Perirhinal' + '.' + hemi + '.mid.cortex.dfs'

''' BCI to FS processed BCI '''
bci_bsti = readdfs(
    '/home/ajoshi/BrainSuite19b/svreg/BCI-DNI_brain_atlas/BCI-DNI_brain.' + hemi + '.inner.cortex.dfs')
bci_bst_mid = readdfs(
    '/home/ajoshi/BrainSuite19b/svreg/BCI-DNI_brain_atlas/BCI-DNI_brain.' + hemi + '.mid.cortex.dfs')
bci_bsti.vertices[:, 0] -= 96 * 0.8
bci_bsti.vertices[:, 1] -= 192 * 0.546875
bci_bsti.vertices[:, 2] -= 192 * 0.546875

bci.vertices, bci.faces = fsio.read_geometry(
    '/big_disk/ajoshi/data/BCI_DNI_Atlas/surf/' + fshemi + '.white')
bci.labels = np.zeros(bci.vertices.shape[0])
for i in range(len(broadmann)):
    labind = fsio.read_label(
        '/big_disk/ajoshi/data/BCI_DNI_Atlas/label/' + fshemi + '.' + broadmann[i] + '.label')
    bci.labels[labind] = i + 1

bci = patch_color_labels(bci)
view_patch_vtk(bci)

bci_bsti = interpolate_labels(bci, bci_bsti)
bci_bst_mid.labels = bci_bsti.labels
bci_bst_mid = smooth_patch(bci_bst_mid, iterations=3000, relaxation=.5)
bci_bst_labels = patch_color_labels(bci_bst_mid)
view_patch_vtk(bci_bst_labels)
writedfs(outfile, bci_bst_labels)
def analyze_alff_between_conditions(input_label, input_contrast1, input_contrast2, input_rest,
                                    min_contrast, nvert):
    """
    Comparison of resting-state data between different stripe populations. Mask stripes from an
    input contrast within a label ROI and randomly select <nvert> vertices within the final mask.
    An independent samples t-test is computed (or Welch's test if Levene's test is significant).
    Inputs:
        *input_label: input label file to define the region of interest.
        *input_contrast1: first input contrast data for masking.
        *input_contrast2: second input contrast data for masking.
        *input_rest: input resting-state data (alff or falff).
        *min_contrast: minimum contrast for masking (t-score).
        *nvert: number of selected vertices.
    Outputs:
        *rest1: resting-state data within condition1.
        *rest2: resting-state data within condition2.
        *t: t-score from independent samples t-test.
        *p: p-value from independent samples t-test.
        *p_levene: p-value from Levene's test.

    created by Daniel Haenelt
    Date created: 11-03-2019
    Last modified: 24-07-2020
    """
    import random
    import numpy as np
    import nibabel as nb
    from nibabel.freesurfer.io import read_label
    from scipy.stats import ttest_ind, levene

    # load data
    label = read_label(input_label)
    contrast1 = np.squeeze(nb.load(input_contrast1).get_fdata())
    contrast2 = np.squeeze(nb.load(input_contrast2).get_fdata())
    rest = np.squeeze(nb.load(input_rest).get_fdata())

    # binary ROI mask (read_label returns vertex indices, not a boolean array)
    label_mask = np.zeros(len(contrast1), dtype=bool)
    label_mask[label] = True

    # mask data
    contrast1[contrast1 < min_contrast] = np.NaN
    contrast2[contrast2 < min_contrast] = np.NaN
    contrast1[~label_mask] = np.NaN
    contrast2[~label_mask] = np.NaN
    contrast1[~np.isnan(contrast1)] = 1
    contrast2[~np.isnan(contrast2)] = 1

    # get resting-state data in mask
    rest1 = rest * contrast1
    rest1 = rest1[~np.isnan(rest1)]
    rest1 = rest1[rest1 != np.min(rest1)]

    rest2 = rest * contrast2
    rest2 = rest2[~np.isnan(rest2)]
    rest2 = rest2[rest2 != np.min(rest2)]

    # select random number of vertices
    label_shuffled1 = random.sample(range(0, len(rest1)), nvert)
    label_shuffled2 = random.sample(range(0, len(rest2)), nvert)

    rest1 = rest1[label_shuffled1]
    rest2 = rest2[label_shuffled2]

    # independent samples t-test
    # Levene's test is run to check for equal variances. If variances are not equal, Welch's
    # t-test is performed.
    _, p_levene = levene(rest1, rest2)
    if p_levene < 0.05:
        t, p = ttest_ind(rest1, rest2, equal_var=False)
    else:
        t, p = ttest_ind(rest1, rest2, equal_var=True)

    return rest1, rest2, t, p, p_levene
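# Minimal usage sketch for analyze_alff_between_conditions (file names, threshold and vertex
# count below are hypothetical examples, not from the original analysis):
rest1, rest2, t, p, p_levene = analyze_alff_between_conditions(
    '/data/lh.v2.label',        # ROI label
    '/data/lh.contrast1.mgh',   # first contrast (t-scores)
    '/data/lh.contrast2.mgh',   # second contrast (t-scores)
    '/data/lh.alff.mgh',        # resting-state ALFF
    min_contrast=2.7,
    nvert=1000)
print(t, p, p_levene)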
def load_freesurfer_label(filename, read_scalars=False):
    '''
    load_freesurfer_label(filename) is equivalent to nibabel.freesurfer.io.read_label(filename).
    '''
    return fsio.read_label(filename, read_scalars=read_scalars)
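# Minimal usage sketch for load_freesurfer_label (the label path is a hypothetical example;
# with read_scalars=True, nibabel returns the vertex ids plus the per-vertex scalar column):
verts = load_freesurfer_label('/data/subject1/label/rh.cortex.label')
verts, scalars = load_freesurfer_label('/data/subject1/label/rh.cortex.label', read_scalars=True)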
class rh:
    pass

class lh32k:
    pass

class rh32k:
    pass

# Process left hemisphere
# NOTE: the Yeo parcellation is a per-vertex annotation; read_annot returns (labels, ctab, names),
# which matches the three-way unpacking here (read_label would return a single index array).
yeomapL, _, _ = fsio.read_annot(inputfile_L)
vert, faces = fsio.read_geometry(fsAve_sph_L)

gL = nib.load(fsAve_sph_32k_L)
vert32k = gL.darrays[0].data
faces32k = gL.darrays[1].data

lh_sph.vertices = vert
lh_sph.faces = faces
lh_sph.labels = yeomapL

lh32k.vertices = vert32k
lh32k.faces = faces32k
lh32k = interpolate_labels(lh_sph, lh32k)

gL = nib.load(fsAve_32k_L)
lh32k.vertices = gL.darrays[0].data
lh32k.faces = gL.darrays[1].data
lh32k = patch_color_labels(lh32k)
view_patch_vtk(lh32k)
def analyze_alff_between_stripes(input_label, input_contrast, input_rest, min_contrast, nvert):
    """
    Comparison of resting-state data within and between V2 stripes. Mask stripes within a ROI
    from a label with a thresholded contrast and randomly select <nvert> vertices either within
    or between stripes. An independent samples t-test is computed (or Welch's test if Levene's
    test is significant).
    Inputs:
        *input_label: input label file to define the region of interest.
        *input_contrast: input contrast data for masking.
        *input_rest: input resting-state data (alff or falff).
        *min_contrast: minimum contrast for masking (t-score).
        *nvert: number of selected vertices.
    Outputs:
        *rest_pos: resting-state data within stripes.
        *rest_neg: resting-state data between stripes.
        *t: t-score from independent samples t-test.
        *p: p-value from independent samples t-test.
        *p_levene: p-value from Levene's test.

    created by Daniel Haenelt
    Date created: 11-03-2019
    Last modified: 24-07-2020
    """
    import random
    import numpy as np
    import nibabel as nb
    from nibabel.freesurfer.io import read_label
    from scipy.stats import ttest_ind, levene

    # load data
    label = read_label(input_label)
    contrast = np.squeeze(nb.load(input_contrast).get_fdata())
    rest = np.squeeze(nb.load(input_rest).get_fdata())

    # binary ROI mask (read_label returns vertex indices, not a boolean array)
    label_mask = np.zeros(len(contrast), dtype=bool)
    label_mask[label] = True

    # mask data
    contrast_pos = contrast.copy()
    contrast_neg = contrast.copy()
    contrast_pos[contrast < min_contrast] = np.NaN
    contrast_neg[contrast > -min_contrast] = np.NaN
    contrast_pos[~label_mask] = np.NaN
    contrast_neg[~label_mask] = np.NaN
    contrast_pos[~np.isnan(contrast_pos)] = 1
    contrast_neg[~np.isnan(contrast_neg)] = 1

    # get resting-state data in mask
    rest_pos = rest.copy()
    rest_neg = rest.copy()

    rest_pos = rest_pos * contrast_pos
    rest_pos = rest_pos[~np.isnan(rest_pos)]
    rest_pos = rest_pos[rest_pos != np.min(rest_pos)]

    rest_neg = rest_neg * contrast_neg
    rest_neg = rest_neg[~np.isnan(rest_neg)]
    rest_neg = rest_neg[rest_neg != np.min(rest_neg)]

    # select random number of vertices
    label_shuffled_pos = random.sample(range(0, len(rest_pos)), nvert)
    label_shuffled_neg = random.sample(range(0, len(rest_neg)), nvert)

    rest_pos = rest_pos[label_shuffled_pos]
    rest_neg = rest_neg[label_shuffled_neg]

    # independent samples t-test
    # Levene's test is run to check for equal variances. If variances are not equal, Welch's
    # t-test is performed.
    _, p_levene = levene(rest_pos, rest_neg)
    if p_levene < 0.05:
        t, p = ttest_ind(rest_pos, rest_neg, equal_var=False)
    else:
        t, p = ttest_ind(rest_pos, rest_neg, equal_var=True)

    return rest_pos, rest_neg, t, p, p_levene
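# Minimal usage sketch for analyze_alff_between_stripes (file names, threshold and vertex count
# are hypothetical examples, not from the original analysis):
rest_pos, rest_neg, t, p, p_levene = analyze_alff_between_stripes(
    '/data/lh.v2.label',       # ROI label
    '/data/lh.contrast.mgh',   # contrast separating stripe types (positive vs. negative t)
    '/data/lh.falff.mgh',      # resting-state fALFF
    min_contrast=2.7,
    nvert=1000)
print(t, p, p_levene)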
path_output = '/data/pt_01880'
basename_output = "se_epi1_se_epi2"

# correlation plot labels
corr_title = ""
corr_x_label = "t-score (session 3)"
corr_y_label = "t-score (session 4)"

# parameters
frac = 0.25
niter = 1000

""" do not edit below """

# load data
label = read_label(input_label).tolist()

# if the input file extension is *.mgh, load as an mgh overlay; otherwise interpret the file as
# a FreeSurfer morphological (curv) file
if os.path.splitext(os.path.basename(input_sess1))[1] == ".mgh":
    sess1 = np.squeeze(nb.load(input_sess1).get_fdata())
else:
    sess1 = np.squeeze(read_morph_data(input_sess1))

if os.path.splitext(os.path.basename(input_sess2))[1] == ".mgh":
    sess2 = np.squeeze(nb.load(input_sess2).get_fdata())
else:
    sess2 = np.squeeze(read_morph_data(input_sess2))

# get the number of data points per resample
ndata = np.round(frac * len(label)).astype(int)
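# Hedged continuation sketch: the rest of the original script is not shown. One plausible use of
# ndata and niter, consistent with the correlation plot labels above, is a resampling loop that
# correlates a random fraction of label vertices between the two sessions. This is an assumption,
# not the original code.
r_sample = np.zeros(niter)
for i in range(niter):
    idx = np.random.choice(label, size=ndata, replace=False)   # random subset of label vertices
    r_sample[i] = np.corrcoef(sess1[idx], sess2[idx])[0, 1]    # session-to-session correlation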