def propagate_fundus_lines_from_file(surf_file, fundus_lines_file,
                                     thickness_file):
    """Propagate fundus lines to tile the surface.

    Parameters
    ----------
    surf_file : string
        file containing the surface geometry in vtk format
    fundus_lines_file : string
        file containing scalars representing fundus lines
    thickness_file : string
        file containing cortical thickness scalar data
        (for masking out the medial wall only)

    Returns
    -------
    scalars indicating whether each vertex is part of the closed
    fundus lines or not
    """
    from mindboggle.mio.vtks import read_vtk, read_scalars

    points, indices, lines, faces, fundus_lines, scalar_names, num_points, \
        input_vtk = read_vtk(surf_file, return_first=True, return_array=True)

    fundus_lines, _ = read_scalars(fundus_lines_file)
    fundus_line_indices = [i for i, x in enumerate(fundus_lines) if x > 0.5]

    thickness, _ = read_scalars(thickness_file,
                                return_first=True, return_array=True)

    # propagate_fundus_lines(points, faces, fundus_line_indices, thickness)
    # is the core worker, assumed to be defined elsewhere in this module;
    # the wrapper has a distinct name so it does not shadow (and recursively
    # call) that function:
    return propagate_fundus_lines(points, faces, fundus_line_indices,
                                  thickness)
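# A minimal usage sketch for the wrapper above; the file names are
# hypothetical placeholders, and the core propagate_fundus_lines() worker
# must be available in the same module:
#
#     closed_fundus_lines = propagate_fundus_lines_from_file(
#         'lh.pial.vtk', 'lh.fundus_lines.vtk', 'lh.thickness.vtk')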
def histogram_of_vtk_scalars(vtk_file, nbins=100):
    """
    Plot histogram of VTK surface mesh scalar values.

    Parameters
    ----------
    vtk_file : string
        name of VTK file with scalar values to plot
    nbins : integer
        number of histogram bins

    Examples
    --------
    >>> import os
    >>> from mindboggle.mio.plots import histogram_of_vtk_scalars
    >>> from mindboggle.mio.fetch_data import prep_tests
    >>> urls, fetch_data = prep_tests()
    >>> vtk_file = fetch_data(urls['left_mean_curvature'], '', '.vtk')
    >>> histogram_of_vtk_scalars(vtk_file, nbins=500) # doctest: +SKIP

    """
    import matplotlib.pyplot as plt
    from mindboggle.mio.vtks import read_scalars

    # Load values:
    values, name = read_scalars(vtk_file)

    # Histogram:
    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1)
    ax.hist(values, bins=nbins, density=False, facecolor='gray', alpha=0.5)
    plt.show()
def spectrum_from_file(vtk_file, spectrum_size=10, exclude_labels=[-1],
                       normalization=None, area_file='', verbose=False):
    """
    Compute Laplace-Beltrami spectrum of a 3D shape in a VTK file.

    Parameters
    ----------
    vtk_file : string
        the input vtk file
    spectrum_size : integer
        number of eigenvalues to be computed (the length of the spectrum)
    exclude_labels : list of integers
        labels to be excluded
    normalization : string
        the method used to normalize eigenvalues ('area' or None)
        if "area", use area of the 2D structure as in Reuter et al. 2006
    area_file : string
        name of VTK file with surface area scalar values
    verbose : bool
        print statements?

    Returns
    -------
    spectrum : list of floats
        first spectrum_size of Laplace-Beltrami spectrum

    Examples
    --------
    >>> # Spectrum for entire left hemisphere of Twins-2-1:
    >>> import numpy as np
    >>> from mindboggle.shapes.laplace_beltrami import spectrum_from_file
    >>> from mindboggle.shapes.laplace_beltrami import spectrum_per_label
    >>> from mindboggle.mio.fetch_data import prep_tests
    >>> urls, fetch_data = prep_tests()
    >>> vtk_file = fetch_data(urls['left_freesurfer_labels'], '', '.vtk')
    >>> spectrum = spectrum_from_file(vtk_file, spectrum_size=6)
    >>> print(np.array_str(np.array(spectrum[1::]),
    ...       precision=5, suppress_small=True))
    [ 0.00013  0.00027  0.00032  0.00047  0.00058]

    """
    from mindboggle.mio.vtks import read_vtk, read_scalars
    from mindboggle.shapes.laplace_beltrami import spectrum_of_largest

    points, indices, lines, faces, scalars, scalar_names, npoints, \
        input_vtk = read_vtk(vtk_file)

    # Area file:
    if area_file:
        areas, u1 = read_scalars(area_file)
    else:
        areas = None

    spectrum = spectrum_of_largest(points, faces, spectrum_size,
                                   exclude_labels, normalization, areas,
                                   verbose)

    return spectrum
def get_freesurfer_curvatures(mean_curv_file, max_curv_file, min_curv_file,
                              curvatures_file):
    """
    Get FreeSurfer curvatures from VTK files and write them to a CSV file.

    Args:
        mean_curv_file (str): input mean curvature file path
        max_curv_file (str): input maximum curvature file path
        min_curv_file (str): input minimum curvature file path
        curvatures_file (str): output CSV file with the curvature values

    Returns:
        None
    """
    import numpy as np
    import pandas as pd
    from mindboggle.mio.vtks import read_points, read_scalars

    # Get the curvatures from VTK files
    # [[x1, y1, z1], [x2, y2, z2], ...]
    points = np.array(read_points(mean_curv_file))
    xyz = points.T  # transposed: [[x1, x2, ...], [y1, y2, ...], [z1, z2, ...]]
    mean_curv, scalar_name = read_scalars(mean_curv_file,
                                          return_first=True,
                                          return_array=True)
    try:
        assert (xyz.shape[1] == mean_curv.size)
    except AssertionError:
        print("number of points={} is not equal to "
              "number of scalars={}".format(xyz.shape[1], mean_curv.size))
    max_curv, _ = read_scalars(max_curv_file, return_first=True,
                               return_array=True)
    min_curv, _ = read_scalars(min_curv_file, return_first=True,
                               return_array=True)

    # Write the curvatures to a CSV file
    df = pd.DataFrame()
    df['x'] = xyz[0]
    df['y'] = xyz[1]
    df['z'] = xyz[2]
    df['mean_curvature'] = mean_curv
    df['kappa1'] = max_curv
    df['kappa2'] = min_curv
    df.to_csv(curvatures_file, sep=';')
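# A minimal usage sketch (hypothetical file paths); note that the output
# CSV is semicolon-delimited, so read it back with
# pandas.read_csv(curvatures_file, sep=';'):
#
#     get_freesurfer_curvatures('lh.pial.mean_curvature.vtk',
#                               'lh.pial.max_curvature.vtk',
#                               'lh.pial.min_curvature.vtk',
#                               'lh_curvatures.csv')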
def spectrum_from_file(vtk_file, spectrum_size=10, exclude_labels=[-1],
                       normalization="areaindex", area_file='',
                       verbose=False):
    """
    Compute Laplace-Beltrami spectrum of a 3D shape in a VTK file.

    Parameters
    ----------
    vtk_file : string
        the input vtk file
    spectrum_size : integer
        number of eigenvalues to be computed (the length of the spectrum)
    exclude_labels : list of integers
        labels to be excluded
    normalization : string
        the method used to normalize eigenvalues
        if None, no normalization is used
        if "area", use area of the 2D structure as in Reuter et al. 2006
        if "index", divide eigenvalue by index to account for linear trend
        if "areaindex", do both (default)
    area_file : string
        name of VTK file with surface area scalar values
    verbose : bool
        print statements?

    Returns
    -------
    spectrum : list of floats
        first spectrum_size of Laplace-Beltrami spectrum

    Examples
    --------
    >>> # Spectrum for entire left hemisphere of Twins-2-1:
    >>> import numpy as np
    >>> from mindboggle.shapes.laplace_beltrami import spectrum_from_file
    >>> from mindboggle.shapes.laplace_beltrami import spectrum_per_label
    >>> from mindboggle.mio.fetch_data import prep_tests
    >>> urls, fetch_data = prep_tests()
    >>> vtk_file = fetch_data(urls['left_freesurfer_labels'], '', '.vtk')
    >>> spectrum = spectrum_from_file(vtk_file, spectrum_size=6,
    ...     exclude_labels=[-1], normalization=None, area_file="",
    ...     verbose=False)
    >>> [float("{0:.{1}f}".format(x, 5)) for x in spectrum[1::]]
    [0.00013, 0.00027, 0.00032, 0.00047, 0.00058]
    >>> spectrum = spectrum_from_file(vtk_file, spectrum_size=6,
    ...     exclude_labels=[-1], normalization="areaindex", area_file="",
    ...     verbose=False)
    >>> [float("{0:.{1}f}".format(x, 5)) for x in spectrum[1::]]
    [14.12801, 14.93573, 11.75397, 12.93141, 12.69348]

    """
    from mindboggle.mio.vtks import read_vtk, read_scalars
    from mindboggle.shapes.laplace_beltrami import spectrum_of_largest

    points, indices, lines, faces, scalars, scalar_names, npoints, \
        input_vtk = read_vtk(vtk_file)

    # Area file:
    if area_file:
        areas, u1 = read_scalars(area_file)
    else:
        areas = None

    spectrum = spectrum_of_largest(points, faces, spectrum_size,
                                   exclude_labels, normalization, areas,
                                   verbose)

    return spectrum
def rescale_by_neighborhood(input_vtk, indices=[], nedges=10, p=99,
                            set_max_to_1=True, save_file=False,
                            output_filestring='rescaled_scalars',
                            background_value=-1):
    """
    Rescale the scalar values of a VTK file by a percentile value
    in each vertex's surface mesh neighborhood.

    Parameters
    ----------
    input_vtk : string
        name of VTK file with a scalar value for each vertex
    indices : list of integers (optional)
        indices of scalars to normalize
    nedges : integer
        number of edges from vertex, defining the size of its neighborhood
    p : float in range of [0,100]
        percentile used to normalize each scalar
    set_max_to_1 : Boolean
        set all rescaled values greater than 1 to 1.0?
    save_file : Boolean
        save output VTK file?
    output_filestring : string (if save_file)
        name of output file
    background_value : integer
        background value

    Returns
    -------
    rescaled_scalars : list of floats
        rescaled scalar values
    rescaled_scalars_file : string (if save_file)
        name of output VTK file with rescaled scalar values

    Examples
    --------
    >>> import os
    >>> from mindboggle.guts.mesh import rescale_by_neighborhood
    >>> from mindboggle.mio.vtks import read_scalars, rewrite_scalars
    >>> from mindboggle.mio.plots import plot_surfaces
    >>> path = os.environ['MINDBOGGLE_DATA']
    >>> input_vtk = os.path.join(path, 'arno', 'shapes',
    ...                          'lh.pial.travel_depth.vtk')
    >>> indices = []
    >>> nedges = 10
    >>> p = 99
    >>> set_max_to_1 = True
    >>> save_file = True
    >>> output_filestring = 'rescaled_scalars'
    >>> background_value = -1
    >>> rescaled_scalars, rescaled_scalars_file = rescale_by_neighborhood(
    ...     input_vtk, indices, nedges, p, set_max_to_1, save_file,
    ...     output_filestring, background_value)
    >>> # View rescaled scalar values per fold:
    >>> folds_file = os.path.join(path, 'arno', 'features', 'folds.vtk')
    >>> folds, name = read_scalars(folds_file)
    >>> rewrite_scalars(rescaled_scalars_file, rescaled_scalars_file,
    ...                 rescaled_scalars, 'rescaled_depths', folds)
    >>> plot_surfaces(rescaled_scalars_file)

    """
    import os
    import numpy as np
    from mindboggle.mio.vtks import read_scalars, rewrite_scalars
    from mindboggle.guts.mesh import find_neighbors_from_file, \
        find_neighborhood

    # Load scalars and vertex neighbor lists:
    scalars, name = read_scalars(input_vtk, True, True)
    if not indices:
        indices = [i for i, x in enumerate(scalars) if x != background_value]
    print("  Rescaling {0} scalar values by neighborhood...".format(
        len(indices)))
    neighbor_lists = find_neighbors_from_file(input_vtk)

    # Loop through vertices:
    rescaled_scalars = scalars.copy()
    for index in indices:

        # Determine the scalars in the vertex's neighborhood:
        neighborhood = find_neighborhood(neighbor_lists, [index], nedges)

        # Compute a high neighborhood percentile to normalize vertex's value:
        normalization_factor = np.percentile(scalars[neighborhood], p)
        rescaled_scalar = scalars[index] / normalization_factor
        rescaled_scalars[index] = rescaled_scalar

    # Make any rescaled value greater than 1 equal to 1:
    if set_max_to_1:
        rescaled_scalars[[x for x in indices
                          if rescaled_scalars[x] > 1.0]] = 1

    rescaled_scalars = rescaled_scalars.tolist()

    # -------------------------------------------------------------------------
    # Return rescaled scalars and file name
    # -------------------------------------------------------------------------
    if save_file:
        rescaled_scalars_file = os.path.join(os.getcwd(),
                                             output_filestring + '.vtk')
        rewrite_scalars(input_vtk, rescaled_scalars_file,
                        rescaled_scalars, 'rescaled_scalars')
        if not os.path.exists(rescaled_scalars_file):
            raise IOError(rescaled_scalars_file + " not found")
    else:
        rescaled_scalars_file = None

    return rescaled_scalars, rescaled_scalars_file
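# A minimal, self-contained sketch (not Mindboggle code) of the rescaling
# idea used in rescale_by_neighborhood above: divide each value by a high
# percentile of its neighborhood, then clip to 1. Here the "neighborhood"
# is simply the whole toy array.
def _demo_percentile_rescaling():
    import numpy as np
    scalars = np.array([0.2, 0.5, 1.0, 4.0, 8.0, 10.0])
    factor = np.percentile(scalars, 99)  # high-percentile normalizer
    rescaled = np.minimum(scalars / factor, 1.0)  # set_max_to_1 behavior
    return rescaled.tolist()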
def evaluate_deep_features(features_file, labels_file, sulci_file='',
                           hemi='', excludeIDs=[-1], output_vtk_name='',
                           verbose=True):
    """
    Evaluate deep surface features by computing the minimum distance from
    each label border vertex to all of the feature vertices in the same
    sulcus, and from each feature vertex to all of the label border
    vertices in the same sulcus. The label borders run along the deepest
    parts of sulci and correspond to fundi in the DKT cortical labeling
    protocol.

    Parameters
    ----------
    features_file : string
        VTK surface file with feature numbers for vertex scalars
    labels_file : string
        VTK surface file with label numbers for vertex scalars
    sulci_file : string
        VTK surface file with sulcus numbers for vertex scalars
    hemi : string
        hemisphere abbreviation in {'lh', 'rh'}
    excludeIDs : list of integers
        feature/sulcus/label IDs to exclude (background set to -1)
    output_vtk_name : string
        if not empty, output a VTK file beginning with output_vtk_name that
        contains a surface with mean distances as scalars
    verbose : Boolean
        print mean distances to standard output?

    Returns
    -------
    feature_to_border_mean_distances : numpy array [number of features x 1]
        mean distance from each feature to sulcus label border
    feature_to_border_sd_distances : numpy array [number of features x 1]
        standard deviations of feature-to-border distances
    feature_to_border_distances_vtk : string
        VTK surface file containing feature-to-border distances
    border_to_feature_mean_distances : numpy array [number of features x 1]
        mean distances from each sulcus label border to feature
    border_to_feature_sd_distances : numpy array [number of features x 1]
        standard deviations of border-to-feature distances
    border_to_feature_distances_vtk : string
        VTK surface file containing border-to-feature distances
    """
    import os
    import sys
    import numpy as np
    from mindboggle.mio.vtks import read_vtk, read_scalars, write_vtk
    from mindboggle.guts.mesh import find_neighbors, remove_faces
    from mindboggle.guts.segment import extract_borders
    from mindboggle.guts.compute import source_to_target_distances
    from mindboggle.mio.labels import DKTprotocol

    dkt = DKTprotocol()

    # -------------------------------------------------------------------------
    # Load labels, features, and sulci:
    # -------------------------------------------------------------------------
    faces, lines, indices, points, npoints, labels, scalar_names, \
        input_vtk = read_vtk(labels_file, True, True)
    features, name = read_scalars(features_file, True, True)
    if sulci_file:
        sulci, name = read_scalars(sulci_file, True, True)
        # List of indices to sulcus vertices:
        sulcus_indices = [i for i, x in enumerate(sulci) if x != -1]
        segmentIDs = sulci
        sulcus_faces = remove_faces(faces, sulcus_indices)
    else:
        sulcus_indices = range(len(labels))
        segmentIDs = []
        sulcus_faces = faces

    # -------------------------------------------------------------------------
    # Prepare neighbors, label pairs, border IDs, and outputs:
    # -------------------------------------------------------------------------
    # Calculate neighbor lists for all points:
    print('Find neighbors for all vertices...')
    neighbor_lists = find_neighbors(faces, npoints)

    # Find label border points in any of the sulci:
    print('Find label border points in any of the sulci...')
    border_indices, border_label_tuples, unique_border_label_tuples = \
        extract_borders(sulcus_indices, labels, neighbor_lists,
                        ignore_values=[], return_label_pairs=True)
    if not len(border_indices):
        sys.exit('There are no label border points!')

    # Initialize an array of label border IDs
    # (label border vertices that define sulci in the labeling protocol):
    print('Build an array of label border IDs...')
    label_borders = -1 * np.ones(npoints)

    if hemi == 'lh':
        nsulcus_lists = len(dkt.left_sulcus_label_pair_lists)
    else:
        nsulcus_lists = len(dkt.right_sulcus_label_pair_lists)
    feature_to_border_mean_distances = -1 * np.ones(nsulcus_lists)
    feature_to_border_sd_distances = -1 * np.ones(nsulcus_lists)
    border_to_feature_mean_distances = -1 * np.ones(nsulcus_lists)
    border_to_feature_sd_distances = -1 * np.ones(nsulcus_lists)
    feature_to_border_distances_vtk = ''
    border_to_feature_distances_vtk = ''

    # -------------------------------------------------------------------------
    # Loop through sulci:
    # -------------------------------------------------------------------------
    # For each list of sorted label pairs (corresponding to a sulcus):
    for isulcus, label_pairs in enumerate(dkt.sulcus_label_pair_lists):

        # Keep the border points with label pair labels:
        label_pair_border_indices = [
            x for i, x in enumerate(border_indices)
            if np.unique(border_label_tuples[i]).tolist() in label_pairs]

        # Store the points as sulcus IDs in the border IDs array:
        if label_pair_border_indices:
            label_borders[label_pair_border_indices] = isulcus

    if len(np.unique(label_borders)) > 1:

        # ---------------------------------------------------------------------
        # Construct a feature-to-border distance matrix and VTK file:
        # ---------------------------------------------------------------------
        # Construct a distance matrix:
        print('Construct a feature-to-border distance matrix...')
        sourceIDs = features
        targetIDs = label_borders
        distances, distance_matrix = source_to_target_distances(
            sourceIDs, targetIDs, points, segmentIDs, excludeIDs)

        # Compute mean distances for each feature:
        nfeatures = min(np.shape(distance_matrix)[1], nsulcus_lists)
        for ifeature in range(nfeatures):
            feature_distances = [x for x in distance_matrix[:, ifeature]
                                 if x != -1]
            feature_to_border_mean_distances[ifeature] = \
                np.mean(feature_distances)
            feature_to_border_sd_distances[ifeature] = \
                np.std(feature_distances)

        if verbose:
            print('Feature-to-border mean distances:')
            print(feature_to_border_mean_distances)
            print('Feature-to-border standard deviations of distances:')
            print(feature_to_border_sd_distances)

        # Write resulting feature-label border distances to VTK file:
        if output_vtk_name:
            feature_to_border_distances_vtk = os.path.join(
                os.getcwd(),
                output_vtk_name + '_feature_to_border_mean_distances.vtk')
            print('Write feature-to-border distances to {0}...'.format(
                feature_to_border_distances_vtk))
            write_vtk(feature_to_border_distances_vtk, points,
                      [], [], sulcus_faces, [distances],
                      ['feature-to-border_distances'], 'float')

        # ---------------------------------------------------------------------
        # Construct a border-to-feature distance matrix and VTK file:
        # ---------------------------------------------------------------------
        # Construct a distance matrix:
        print('Construct a border-to-feature distance matrix...')
        sourceIDs = label_borders
        targetIDs = features
        distances, distance_matrix = source_to_target_distances(
            sourceIDs, targetIDs, points, segmentIDs, excludeIDs)

        # Compute mean distances for each feature:
        nfeatures = min(np.shape(distance_matrix)[1], nsulcus_lists)
        for ifeature in range(nfeatures):
            border_distances = [x for x in distance_matrix[:, ifeature]
                                if x != -1]
            border_to_feature_mean_distances[ifeature] = \
                np.mean(border_distances)
            border_to_feature_sd_distances[ifeature] = \
                np.std(border_distances)

        if verbose:
            print('border-to-feature mean distances:')
            print(border_to_feature_mean_distances)
            print('border-to-feature standard deviations of distances:')
            print(border_to_feature_sd_distances)

        # Write resulting feature-label border distances to VTK file:
        if output_vtk_name:
            border_to_feature_distances_vtk = os.path.join(
                os.getcwd(),
                output_vtk_name + '_border_to_feature_mean_distances.vtk')
            print('Write border-to-feature distances to {0}...'.format(
                border_to_feature_distances_vtk))
            write_vtk(border_to_feature_distances_vtk, points,
                      [], [], sulcus_faces, [distances],
                      ['border-to-feature_distances'], 'float')

    # -------------------------------------------------------------------------
    # Return outputs:
    # -------------------------------------------------------------------------
    return feature_to_border_mean_distances, feature_to_border_sd_distances,\
           feature_to_border_distances_vtk,\
           border_to_feature_mean_distances, border_to_feature_sd_distances,\
           border_to_feature_distances_vtk
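# A minimal usage sketch for evaluate_deep_features; the file names below
# are hypothetical placeholders (in practice these are Mindboggle-derived
# VTK files with fundus, label, and sulcus scalars):
#
#     f2b_mean, f2b_sd, f2b_vtk, b2f_mean, b2f_sd, b2f_vtk = \
#         evaluate_deep_features('fundi.vtk', 'labels.vtk',
#                                sulci_file='sulci.vtk', hemi='lh',
#                                excludeIDs=[-1],
#                                output_vtk_name='subject1_lh',
#                                verbose=True)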
def write_face_vertex_averages(input_file, output_table='', area_file=''):
    """
    Make table of average vertex values per face
    (divided by face area if area_file provided).

    Parameters
    ----------
    input_file : string
        name of VTK file with scalars to average
    output_table : string
        output table filename
    area_file : string
        name of VTK file with surface area scalar values

    Returns
    -------
    output_table : string
        output table filename

    Examples
    --------
    >>> import os
    >>> from mindboggle.mio.tables import write_face_vertex_averages
    >>> path = '/homedir/mindboggled'
    >>> input_file = os.path.join(path, 'Twins-2-1', 'shapes',
    ...     'left_cortical_surface', 'freesurfer_thickness.vtk')
    >>> area_file = os.path.join(path, 'Twins-2-1', 'shapes',
    ...     'left_cortical_surface', 'area.vtk')
    >>> output_table = ''
    >>> write_face_vertex_averages(input_file, output_table, area_file)

    """
    import os
    import numpy as np
    import pandas as pd

    from mindboggle.mio.vtks import read_vtk, read_scalars

    faces, lines, indices, points, npoints, scalars, name, \
        input_vtk = read_vtk(input_file, True, True)
    if area_file:
        area_scalars, name = read_scalars(area_file, True, True)

    # ---------------------------------------------------------------------
    # For each face, average vertex values:
    # ---------------------------------------------------------------------
    columns = []
    for face in faces:
        values = []
        for index in face:
            if area_file:
                values.append(scalars[index] / area_scalars[index])
            else:
                values.append(scalars[index])
        columns.append(np.mean(values))

    # -----------------------------------------------------------------
    # Write to table:
    # -----------------------------------------------------------------
    if not output_table:
        output_table = os.path.join(os.getcwd(), 'average_face_values.csv')

    df = pd.DataFrame({'': columns})
    df.to_csv(output_table, index=False)

    if not os.path.exists(output_table):
        raise IOError(output_table + " not found")

    return output_table
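# A minimal, self-contained sketch (not Mindboggle code) of the per-face
# averaging performed in write_face_vertex_averages above: each triangle
# gets the mean of the scalar values at its three vertices.
def _demo_face_vertex_average():
    import numpy as np
    faces = [[0, 1, 2], [1, 2, 3]]
    scalars = np.array([1.0, 2.0, 3.0, 4.0])
    return [float(np.mean(scalars[list(face)])) for face in faces]  # [2.0, 3.0]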
def write_shape_stats(labels_or_file=[], sulci=[], fundi=[],
                      affine_transform_files=[], inverse_booleans=[],
                      transform_format='itk', area_file='',
                      normalize_by_area=False, mean_curvature_file='',
                      travel_depth_file='', geodesic_depth_file='',
                      freesurfer_thickness_file='',
                      freesurfer_curvature_file='',
                      freesurfer_sulc_file='',
                      labels_spectra=[], labels_spectra_IDs=[],
                      sulci_spectra=[], sulci_spectra_IDs=[],
                      labels_zernike=[], labels_zernike_IDs=[],
                      sulci_zernike=[], sulci_zernike_IDs=[],
                      exclude_labels=[-1], verbose=False):
    """
    Make tables of shape statistics per label, sulcus, and/or fundus.

    There can be thousands of vertices in a single feature such as a gyrus,
    sulcus, or fundus, and for per-vertex shape measures, it makes sense to
    characterize their collective shape as a distribution of shape values.
    Mindboggle's stats_per_label function generates tables of summary
    statistical measures for these distributions, and includes the shape
    measures computed on cortical features as well.

    Note ::
        This function is tailored for Mindboggle outputs.

    Parameters
    ----------
    labels_or_file : list or string
        label number for each vertex or name of VTK file with index scalars
    sulci : list of integers
        indices to sulci, one per vertex, with -1 indicating no sulcus
    fundi : list of integers
        indices to fundi, one per vertex, with -1 indicating no fundus
    affine_transform_files : list of strings
        affine transform files to standard space
    inverse_booleans : list of zeros and ones
        for each transform, 1 to take the inverse, else 0
    transform_format : string
        format for transform file
        Ex: 'txt' for text, 'itk' for ITK, and 'mat' for Matlab format
    area_file : string
        name of VTK file with surface area scalar values
    normalize_by_area : bool
        normalize all shape measures by area of label/feature? (UNTESTED)
    mean_curvature_file : string
        name of VTK file with mean curvature scalar values
    travel_depth_file : string
        name of VTK file with travel depth scalar values
    geodesic_depth_file : string
        name of VTK file with geodesic depth scalar values
    freesurfer_thickness_file : string
        name of VTK file with FreeSurfer thickness scalar values
    freesurfer_curvature_file : string
        name of VTK file with FreeSurfer curvature (curv) scalar values
    freesurfer_sulc_file : string
        name of VTK file with FreeSurfer convexity (sulc) scalar values
    labels_spectra : list of lists of floats
        Laplace-Beltrami spectra for each labeled region
    labels_spectra_IDs : list of integers
        unique labels for labels_spectra
    sulci_spectra : list of lists of floats
        Laplace-Beltrami spectra for each sulcus
    sulci_spectra_IDs : list of integers
        unique sulcus IDs for sulci_spectra
    labels_zernike : list of lists of floats
        Zernike moments for each labeled region
    labels_zernike_IDs : list of integers
        unique labels for labels_zernike
    sulci_zernike : list of lists of floats
        Zernike moments for each sulcus
    sulci_zernike_IDs : list of integers
        unique sulcus IDs for sulci_zernike
    exclude_labels : list of integers
        indices to be excluded (in addition to -1)
    verbose : bool
        print statements?

    Returns
    -------
    label_table : string
        output table filename for label shapes
    sulcus_table : string
        output table filename for sulcus shapes
    fundus_table : string
        output table filename for fundus shapes

    Examples
    --------
    >>> from mindboggle.mio.tables import write_shape_stats
    >>> from mindboggle.mio.vtks import read_scalars
    >>> from mindboggle.mio.fetch_data import prep_tests
    >>> urls, fetch_data = prep_tests()
    >>> label_file = fetch_data(urls['left_freesurfer_labels'], '', '.vtk')
    >>> sulci_file = fetch_data(urls['left_sulci'], '', '.vtk')
    >>> fundi_file = fetch_data(urls['left_fundus_per_sulcus'], '', '.vtk')
    >>> mean_curvature_file = fetch_data(urls['left_mean_curvature'], '', '.vtk')
    >>> travel_depth_file = fetch_data(urls['left_travel_depth'], '', '.vtk')
    >>> geodesic_depth_file = fetch_data(urls['left_geodesic_depth'], '', '.vtk')
    >>> area_file = fetch_data(urls['left_area'], '', '.vtk')
    >>> freesurfer_thickness_file = ''
    >>> freesurfer_curvature_file = ''
    >>> freesurfer_sulc_file = ''
    >>> sulci, name = read_scalars(sulci_file)
    >>> fundi, name = read_scalars(fundi_file)
    >>> affine_transform_files = []
    >>> inverse_booleans = []
    >>> transform_format = 'itk'
    >>> normalize_by_area = False
    >>> labels, name = read_scalars(label_file)
    >>> labels_spectra = []
    >>> labels_spectra_IDs = []
    >>> sulci_spectra = []
    >>> sulci_spectra_IDs = []
    >>> labels_zernike = []
    >>> labels_zernike_IDs = []
    >>> sulci_zernike = []
    >>> sulci_zernike_IDs = []
    >>> exclude_labels = [-1]
    >>> verbose = False
    >>> label_table, sulcus_table, fundus_table = write_shape_stats(label_file,
    ...     sulci, fundi, affine_transform_files, inverse_booleans,
    ...     transform_format, area_file, normalize_by_area,
    ...     mean_curvature_file, travel_depth_file, geodesic_depth_file,
    ...     freesurfer_thickness_file, freesurfer_curvature_file,
    ...     freesurfer_sulc_file, labels_spectra, labels_spectra_IDs,
    ...     sulci_spectra, sulci_spectra_IDs, labels_zernike,
    ...     labels_zernike_IDs, sulci_zernike, sulci_zernike_IDs,
    ...     exclude_labels, verbose)

    """
    import os
    import numpy as np
    import pandas as pd

    from mindboggle.guts.compute import stats_per_label
    from mindboggle.guts.compute import means_per_label
    from mindboggle.guts.compute import sum_per_label
    from mindboggle.mio.vtks import read_scalars, read_vtk
    from mindboggle.mio.vtks import apply_affine_transforms
    from mindboggle.mio.labels import DKTprotocol

    dkt = DKTprotocol()

    # Make sure inputs are lists:
    if isinstance(labels_or_file, np.ndarray):
        labels = [int(x) for x in labels_or_file]
    elif isinstance(labels_or_file, list):
        labels = labels_or_file
    elif isinstance(labels_or_file, str):
        labels, name = read_scalars(labels_or_file)
    if isinstance(sulci, np.ndarray):
        sulci = [int(x) for x in sulci]
    if isinstance(fundi, np.ndarray):
        fundi = [int(x) for x in fundi]

    if not labels and not sulci and not fundi:
        raise IOError('No feature data to tabulate in write_shape_stats().')

    spectrum_start = 1  # Store all columns of spectral components (0),
                        # or start from higher frequency components (>=1)

    # ------------------------------------------------------------------------
    # Feature lists, shape names, and shape files:
    # ------------------------------------------------------------------------
    # Feature lists:
    feature_lists = [labels, sulci, fundi]
    feature_names = ['label', 'sulcus', 'fundus']
    spectra_lists = [labels_spectra, sulci_spectra]
    spectra_ID_lists = [labels_spectra_IDs, sulci_spectra_IDs]
    zernike_lists = [labels_zernike, sulci_zernike]
    zernike_ID_lists = [labels_zernike_IDs, sulci_zernike_IDs]
    table_names = ['label_shapes.csv', 'sulcus_shapes.csv',
                   'fundus_shapes.csv']

    # Shape names corresponding to shape files below:
    shape_names = ['area', 'travel depth', 'geodesic depth',
                   'mean curvature', 'freesurfer curvature',
                   'freesurfer thickness', 'freesurfer convexity (sulc)']

    # Load shape files as a list of numpy arrays of per-vertex shape values:
    shape_files = [area_file, travel_depth_file, geodesic_depth_file,
                   mean_curvature_file, freesurfer_curvature_file,
                   freesurfer_thickness_file, freesurfer_sulc_file]
    shape_arrays = []
    first_pass = True
    area_array = []

    for ishape, shape_file in enumerate(shape_files):
        if os.path.exists(shape_file):
            if first_pass:
                points, indices, lines, faces, scalars_array, scalar_names, \
                    npoints, input_vtk = read_vtk(shape_file, True, True)
                points = np.array(points)
                first_pass = False
                if affine_transform_files and transform_format:
                    affine_points, \
                        foo1 = apply_affine_transforms(affine_transform_files,
                                                       inverse_booleans,
                                                       transform_format,
                                                       points,
                                                       vtk_file_stem='')
            else:
                scalars_array, name = read_scalars(shape_file, True, True)
            if scalars_array.size:
                shape_arrays.append(scalars_array)

                # Store area array:
                if ishape == 0:
                    area_array = scalars_array.copy()

    if normalize_by_area:
        use_area = area_array
    else:
        use_area = []

    # Initialize table file names:
    label_table = ''
    sulcus_table = ''
    fundus_table = ''

    # Loop through features / tables:
    for itable, feature_list in enumerate(feature_lists):
        column_names = []

        # ----------------------------------------------------------------
        # Label names:
        # ----------------------------------------------------------------
        label_title = 'name'
        if itable == 0:
            label_numbers = dkt.cerebrum_cortex_DKT31_numbers
            label_names = dkt.cerebrum_cortex_DKT31_names
        elif itable in [1, 2]:
            label_numbers = dkt.sulcus_numbers
            label_names = dkt.sulcus_names
        else:
            label_numbers = []
            label_names = []
        include_labels = label_numbers
        nlabels = len(label_numbers)

        # --------------------------------------------------------------------
        # For each feature, construct a table of average shape values:
        # --------------------------------------------------------------------
        if feature_list:
            feature_name = feature_names[itable]
            columns = []

            # ----------------------------------------------------------------
            # Loop through shape measures:
            # ----------------------------------------------------------------
            for ishape, shape_array in enumerate(shape_arrays):
                shape = shape_names[ishape]
                if verbose:
                    print('  Compute statistics on {0} {1}...'.
                          format(feature_name, shape))
                # ------------------------------------------------------------
                # Append feature areas to columns:
                # ------------------------------------------------------------
                if ishape == 0 and np.size(area_array):
                    sums, label_list = sum_per_label(shape_array,
                                                     feature_list,
                                                     include_labels,
                                                     exclude_labels)
                    column_names.append(shape)
                    columns.append(sums)
                # ------------------------------------------------------------
                # Append feature shape statistics to columns:
                # ------------------------------------------------------------
                else:
                    medians, mads, means, sdevs, skews, kurts, \
                        lower_quarts, upper_quarts, \
                        label_list = stats_per_label(shape_array,
                                                     feature_list,
                                                     include_labels,
                                                     exclude_labels,
                                                     area_array, precision=1)

                    column_names.append(shape + ': median')
                    column_names.append(shape + ': MAD')
                    column_names.append(shape + ': mean')
                    column_names.append(shape + ': SD')
                    column_names.append(shape + ': skew')
                    column_names.append(shape + ': kurtosis')
                    column_names.append(shape + ': 25%')
                    column_names.append(shape + ': 75%')
                    columns.append(medians)
                    columns.append(mads)
                    columns.append(means)
                    columns.append(sdevs)
                    columns.append(skews)
                    columns.append(kurts)
                    columns.append(lower_quarts)
                    columns.append(upper_quarts)

            # ----------------------------------------------------------------
            # Mean positions in the original space:
            # ----------------------------------------------------------------
            # Compute mean position per feature:
            positions, sdevs, label_list, foo = means_per_label(
                points, feature_list, include_labels, exclude_labels,
                use_area)

            # Append mean x,y,z position per feature to columns:
            xyz_positions = np.asarray(positions)
            for ixyz, xyz in enumerate(['x', 'y', 'z']):
                column_names.append('mean position: {0}'.format(xyz))
                columns.append(xyz_positions[:, ixyz].tolist())

            # ----------------------------------------------------------------
            # Mean positions in standard space:
            # ----------------------------------------------------------------
            if affine_transform_files and transform_format:
                # Compute standard space mean position per feature:
                standard_positions, sdevs, label_list, \
                    foo = means_per_label(affine_points, feature_list,
                                          include_labels, exclude_labels,
                                          use_area)

                # Append standard space x,y,z position per feature to columns:
                xyz_std_positions = np.asarray(standard_positions)
                for ixyz, xyz in enumerate(['x', 'y', 'z']):
                    column_names.append('mean position in standard space:'
                                        ' {0}'.format(xyz))
                    columns.append(xyz_std_positions[:, ixyz].tolist())

            # ----------------------------------------------------------------
            # Laplace-Beltrami spectra:
            # ----------------------------------------------------------------
            if itable in [0, 1]:
                spectra = spectra_lists[itable]
                if spectra:
                    spectra_IDs = spectra_ID_lists[itable]

                    # Construct a matrix of spectra:
                    len_spectrum = len(spectra[0])
                    spectrum_matrix = np.zeros((nlabels, len_spectrum))
                    for ilabel, label in enumerate(include_labels):
                        if label in spectra_IDs:
                            spectrum = spectra[spectra_IDs.index(label)]
                            spectrum_matrix[ilabel, 0:len_spectrum] = spectrum

                    # Append spectral shape name and values to columns:
                    for ispec in range(spectrum_start, len_spectrum):
                        columns.append(spectrum_matrix[:, ispec].tolist())
                        column_names.append('Laplace-Beltrami spectrum:'
                                            ' component {0}'.format(ispec + 1))

            # ----------------------------------------------------------------
            # Zernike moments:
            # ----------------------------------------------------------------
            if itable in [0, 1]:
                zernike = zernike_lists[itable]
                if zernike:
                    zernike_IDs = zernike_ID_lists[itable]

                    # Construct a matrix of Zernike moments:
                    len_moments = len(zernike[0])
                    moments_matrix = np.zeros((nlabels, len_moments))
                    for ilabel, label in enumerate(include_labels):
                        if label in zernike_IDs:
                            moments = zernike[zernike_IDs.index(label)]
                            moments_matrix[ilabel, 0:len_moments] = moments

                    # Append Zernike shape name and values to columns:
                    for imoment in range(0, len_moments):
                        columns.append(moments_matrix[:, imoment].tolist())
                        column_names.append('Zernike moments: component {0}'.
                                            format(imoment + 1))

            # ----------------------------------------------------------------
            # Write labels/IDs and values to table:
            # ----------------------------------------------------------------
            # Write labels/IDs to table:
            output_table = os.path.join(os.getcwd(), table_names[itable])

            if columns:
                df1 = pd.DataFrame({'ID': label_numbers})
                df2 = pd.DataFrame(np.transpose(columns),
                                   columns=column_names)
                df = pd.concat([df1, df2], axis=1)
                if label_names:
                    df0 = pd.DataFrame({'name': label_names})
                    df = pd.concat([df0, df], axis=1)
                df.to_csv(output_table, index=False, encoding='utf-8')

            if not os.path.exists(output_table):
                raise IOError(output_table + " not found")

            # ----------------------------------------------------------------
            # Return correct table file name:
            # ----------------------------------------------------------------
            if itable == 0:
                label_table = output_table
            elif itable == 1:
                sulcus_table = output_table
            elif itable == 2:
                fundus_table = output_table

    return label_table, sulcus_table, fundus_table
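# The tables written by write_shape_stats are plain CSV files; a minimal
# sketch of inspecting one (assumes write_shape_stats() has already been
# run in the current directory, and that area and travel depth shape files
# were provided so those columns exist):
#
#     import pandas as pd
#     df = pd.read_csv('label_shapes.csv')
#     print(df[['name', 'ID', 'area', 'travel depth: mean']].head())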
def compute_likelihood(trained_file, depth_file, curvature_file, folds,
                       save_file=False):
    """
    Compute likelihoods based on input values, folds, and estimated
    parameters.

    Compute likelihood values for a given VTK surface mesh file, after
    training on distributions of depth and curvature values from multiple
    files.

    Parameters
    ----------
    trained_file : pickle compressed file
        contains the dictionaries depth_border, curv_border,
        depth_nonborder, and curv_nonborder, each containing lists of
        floats (estimates of depth or curvature means, sigmas, and
        weights trained on fold vertices either on or off sulcus label
        borders)
    depth_file : string
        VTK surface mesh file with depth values in [0,1] for all vertices
    curvature_file : string
        VTK surface mesh file with curvature values in [-1,1] for all vertices
    folds : list of integers
        fold number for all vertices (-1 for non-fold vertices)
    save_file : Boolean
        save output VTK file?

    Returns
    -------
    likelihoods : list of floats
        likelihood values for all vertices (0 for non-fold vertices)
    likelihoods_file : string (if save_file)
        name of output VTK file with likelihood scalars
        (-1 for non-fold vertices)

    Examples
    --------
    >>> import os
    >>> from mindboggle.mio.vtks import read_scalars, rewrite_scalars
    >>> from mindboggle.shapes.likelihood import compute_likelihood
    >>> from mindboggle.mio.plots import plot_surfaces
    >>> path = os.environ['MINDBOGGLE_DATA']
    >>> trained_file = os.path.join(path, 'atlases',
    ...     'depth_curv_border_nonborder_parameters.pkl')
    >>> #depth_file = os.path.join(path, 'arno', 'shapes',
    >>> #    'travel_depth_rescaled.vtk')
    >>> depth_file = os.path.join(path, 'arno', 'shapes',
    ...     'lh.pial.travel_depth.vtk')
    >>> curvature_file = os.path.join(path, 'arno', 'shapes',
    ...     'lh.pial.mean_curvature.vtk')
    >>> folds_file = os.path.join(path, 'arno', 'features', 'folds.vtk')
    >>> folds, name = read_scalars(folds_file)
    >>> save_file = True
    >>> compute_likelihood(trained_file, depth_file, curvature_file, folds,
    ...                    save_file)
    >>> # View:
    >>> plot_surfaces('likelihoods.vtk', folds_file)

    """
    import os
    import pickle
    import numpy as np
    from math import pi
    from mindboggle.mio.vtks import read_scalars, rewrite_scalars

    # Initialize variables:
    tiny = 0.000000001
    L = np.zeros(len(folds))
    probs_border = np.zeros(len(folds))
    probs_nonborder = np.zeros(len(folds))

    # Load estimated depth and curvature distribution parameters
    # (pickle files must be opened in binary mode):
    depth_border, curv_border, depth_nonborder, curv_nonborder = pickle.load(
        open(trained_file, "rb"))

    # Load depths, curvatures:
    depths, name = read_scalars(depth_file, True, True)
    curvatures, name = read_scalars(curvature_file, True, True)

    # Prep for below:
    n = 2
    twopiexp = (2 * pi)**(n / 2)
    border_sigmas = depth_border['sigmas'] * curv_border['sigmas']
    nonborder_sigmas = depth_nonborder['sigmas'] * curv_nonborder['sigmas']
    norm_border = 1 / (twopiexp * border_sigmas + tiny)
    norm_nonborder = 1 / (twopiexp * nonborder_sigmas + tiny)
    I = [i for i, x in enumerate(folds) if x != -1]
    N = depth_border['sigmas'].shape[0]

    for j in range(N):

        # Border:
        expB = depth_border['weights'][j] * \
            ((depths[I] - depth_border['means'][j])**2) / \
            depth_border['sigmas'][j]**2
        expB += curv_border['weights'][j] * \
            ((curvatures[I] - curv_border['means'][j])**2) / \
            curv_border['sigmas'][j]**2
        expB = -expB / 2
        probs_border[I] = probs_border[I] + norm_border[j] * np.exp(expB)

        # Non-border:
        expNB = depth_nonborder['weights'][j] * \
            ((depths[I] - depth_nonborder['means'][j])**2) / \
            depth_nonborder['sigmas'][j]**2
        expNB += curv_nonborder['weights'][j] * \
            ((curvatures[I] - curv_nonborder['means'][j])**2) / \
            curv_nonborder['sigmas'][j]**2
        expNB = -expNB / 2
        probs_nonborder[I] = probs_nonborder[I] + \
            norm_nonborder[j] * np.exp(expNB)

    likelihoods = probs_border / (probs_nonborder + probs_border + tiny)
    likelihoods = likelihoods.tolist()

    # -------------------------------------------------------------------------
    # Return likelihoods and output file name
    # -------------------------------------------------------------------------
    if save_file:
        likelihoods_file = os.path.join(os.getcwd(), 'likelihoods.vtk')
        rewrite_scalars(depth_file, likelihoods_file, likelihoods,
                        'likelihoods', likelihoods)
        if not os.path.exists(likelihoods_file):
            raise IOError(likelihoods_file + " not found")
    else:
        likelihoods_file = None

    return likelihoods, likelihoods_file
def concatenate_sulcus_scalars(scalar_files, fold_files, label_files):
    """
    Prepare data for estimating scalar distributions along and outside fundi.

    Extract (e.g., depth, curvature) scalar values in folds, along sulcus
    label boundaries as well as outside the sulcus label boundaries.
    Concatenate these scalar values across multiple files.

    Parameters
    ----------
    scalar_files : list of strings
        names of surface mesh VTK files with scalar values to concatenate
    fold_files : list of strings (corr. to each list in scalar_files)
        VTK files with fold numbers as scalars (-1 for non-fold vertices)
    label_files : list of strings (corr. to fold_files)
        VTK files with label numbers (-1 for unlabeled vertices)

    Returns
    -------
    border_scalars : list of floats
        concatenated scalar values within folds along sulcus label boundaries
    nonborder_scalars : list of floats
        concatenated scalar values within folds outside sulcus label boundaries

    Examples
    --------
    >>> # Concatenate (duplicate) depth scalars:
    >>> import os
    >>> from mindboggle.shapes.likelihood import concatenate_sulcus_scalars
    >>> path = os.environ['MINDBOGGLE_DATA']
    >>> depth_file = os.path.join(path, 'arno', 'shapes', 'depth_rescaled.vtk')
    >>> folds_file = os.path.join(path, 'arno', 'features', 'folds.vtk')
    >>> labels_file = os.path.join(path, 'arno', 'labels',
    ...                            'lh.labels.DKT25.manual.vtk')
    >>> scalar_files = [depth_file, depth_file]
    >>> fold_files = [folds_file, folds_file]
    >>> label_files = [labels_file, labels_file]
    >>> S = concatenate_sulcus_scalars(scalar_files, fold_files, label_files)

    """
    import numpy as np

    from mindboggle.mio.vtks import read_scalars
    from mindboggle.guts.mesh import find_neighbors_from_file
    from mindboggle.guts.segment import extract_borders
    from mindboggle.mio.labels import DKTprotocol

    dkt = DKTprotocol()

    # Prepare (non-unique) list of sulcus label pairs:
    protocol_label_pairs = [x for lst in dkt.sulcus_label_pair_lists
                            for x in lst]

    border_scalars = []
    nonborder_scalars = []

    # Loop through files with the scalar values:
    for ifile, scalar_file in enumerate(scalar_files):
        print(scalar_file)

        # Load scalars, folds, and labels:
        folds_file = fold_files[ifile]
        labels_file = label_files[ifile]
        scalars, name = read_scalars(scalar_file, True, True)
        if scalars.shape:
            folds, name = read_scalars(folds_file)
            labels, name = read_scalars(labels_file)
            indices_folds = [i for i, x in enumerate(folds) if x != -1]
            neighbor_lists = find_neighbors_from_file(labels_file)

            # Find all label border pairs within the folds:
            indices_label_pairs, label_pairs, unique_pairs = extract_borders(
                indices_folds, labels, neighbor_lists, ignore_values=[-1],
                return_label_pairs=True)
            indices_label_pairs = np.array(indices_label_pairs)

            # Find vertices with label pairs in the sulcus labeling protocol:
            Ipairs_in_protocol = [i for i, x in enumerate(label_pairs)
                                  if x in protocol_label_pairs]
            indices_label_pairs = indices_label_pairs[Ipairs_in_protocol]
            indices_outside_pairs = list(frozenset(indices_folds).difference(
                indices_label_pairs))

            # Store scalar values in folds along label border pairs:
            border_scalars.extend(scalars[indices_label_pairs].tolist())

            # Store scalar values in folds outside label border pairs:
            nonborder_scalars.extend(scalars[indices_outside_pairs].tolist())

    return border_scalars, nonborder_scalars
def compute_likelihood(trained_file, depth_file, curvature_file, folds,
                       save_file=False, background_value=-1):
    """
    Compute likelihoods based on input values, folds, estimated parameters.

    Compute likelihood values for a given VTK surface mesh file, after
    training on distributions of depth and curvature values from multiple
    files.

    Parameters
    ----------
    trained_file : pickle compressed file
        depth_border, curv_border, depth_nonborder, and curv_nonborder are
        dictionaries containing lists of floats
        (estimates of depth or curvature means, sigmas, and weights
        trained on fold vertices either on or off sulcus label borders)
    depth_file : string
        VTK surface mesh file with depth values in [0,1] for all vertices
    curvature_file : string
        VTK surface mesh file with curvature values in [-1,1] for all vertices
    folds : list of integers
        fold number for all vertices (-1 for non-fold vertices)
    save_file : bool
        save output VTK file?
    background_value : integer or float
        background value

    Returns
    -------
    likelihoods : list of floats
        likelihood values for all vertices (0 for non-fold vertices)
    likelihoods_file : string (if save_file)
        name of output VTK file with likelihood scalars
        (-1 for non-fold vertices)

    Notes
    -----
    The depth_curv_border_nonborder_parameters.pkl file needs to be updated.

    Examples
    --------
    >>> from mindboggle.mio.vtks import read_scalars
    >>> from mindboggle.shapes.likelihood import compute_likelihood
    >>> from mindboggle.mio.fetch_data import prep_tests
    >>> urls, fetch_data = prep_tests()
    >>> depth_file = fetch_data(urls['left_travel_depth'], '', '.vtk')
    >>> curvature_file = fetch_data(urls['left_mean_curvature'], '', '.vtk')
    >>> folds_file = fetch_data(urls['left_folds'], '', '.vtk')
    >>> trained_file = fetch_data(urls[
    ...     'depth_curv_border_nonborder_parameters'], '', '.pkl') # doctest: +SKIP
    >>> folds, name = read_scalars(folds_file)
    >>> save_file = True
    >>> background_value = -1
    >>> likelihoods, likelihoods_file = compute_likelihood(trained_file,
    ...     depth_file, curvature_file, folds, save_file,
    ...     background_value) # doctest: +SKIP

    View result (skip test):

    >>> from mindboggle.mio.plots import plot_surfaces # doctest: +SKIP
    >>> plot_surfaces('likelihoods.vtk', folds_file) # doctest: +SKIP

    """
    import os
    import numpy as np
    from math import pi
    import pickle
    from io import open

    from mindboggle.mio.vtks import read_scalars, rewrite_scalars

    # Initialize variables:
    tiny = 0.000000001
    L = np.zeros(len(folds))
    probs_border = np.zeros(len(folds))
    probs_nonborder = np.zeros(len(folds))

    # Load estimated depth and curvature distribution parameters:
    depth_border, curv_border, depth_nonborder, \
        curv_nonborder = pickle.load(open(trained_file, "rb"))

    # Load depths, curvatures:
    depths, name = read_scalars(depth_file, True, True)
    curvatures, name = read_scalars(curvature_file, True, True)

    # Prep for below:
    n = 2
    twopiexp = (2 * pi)**(n / 2)
    border_sigmas = depth_border['sigmas'] * curv_border['sigmas']
    nonborder_sigmas = depth_nonborder['sigmas'] * curv_nonborder['sigmas']
    norm_border = 1 / (twopiexp * border_sigmas + tiny)
    norm_nonborder = 1 / (twopiexp * nonborder_sigmas + tiny)
    I = [i for i, x in enumerate(folds) if x != background_value]
    N = depth_border['sigmas'].shape[0]

    for j in range(N):

        # Border:
        expB = depth_border['weights'][j] * \
            ((depths[I] - depth_border['means'][j])**2) / \
            depth_border['sigmas'][j]**2
        expB += curv_border['weights'][j] * \
            ((curvatures[I] - curv_border['means'][j])**2) / \
            curv_border['sigmas'][j]**2
        expB = -expB / 2
        probs_border[I] = probs_border[I] + norm_border[j] * np.exp(expB)

        # Non-border:
        expNB = depth_nonborder['weights'][j] * \
            ((depths[I] - depth_nonborder['means'][j])**2) / \
            depth_nonborder['sigmas'][j]**2
        expNB += curv_nonborder['weights'][j] * \
            ((curvatures[I] - curv_nonborder['means'][j])**2) / \
            curv_nonborder['sigmas'][j]**2
        expNB = -expNB / 2
        probs_nonborder[I] = probs_nonborder[I] + \
            norm_nonborder[j] * np.exp(expNB)

    likelihoods = probs_border / (probs_nonborder + probs_border + tiny)
    likelihoods = likelihoods.tolist()

    # ------------------------------------------------------------------------
    # Return likelihoods and output file name
    # ------------------------------------------------------------------------
    if save_file:
        likelihoods_file = os.path.join(os.getcwd(), 'likelihoods.vtk')
        rewrite_scalars(depth_file, likelihoods_file, likelihoods,
                        'likelihoods', likelihoods, background_value)
        if not os.path.exists(likelihoods_file):
            raise IOError(likelihoods_file + " not found")
    else:
        likelihoods_file = None

    return likelihoods, likelihoods_file
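# A minimal, self-contained sketch (not Mindboggle code) of the likelihood
# ratio computed above: a single "border" Gaussian and a single "nonborder"
# Gaussian over depth, combined as P_border / (P_border + P_nonborder).
def _demo_border_likelihood():
    import numpy as np
    depths = np.array([0.1, 0.5, 0.9])
    tiny = 1e-9
    p_border = np.exp(-(depths - 0.8) ** 2 / (2 * 0.1 ** 2))     # deep class
    p_nonborder = np.exp(-(depths - 0.2) ** 2 / (2 * 0.1 ** 2))  # shallow class
    return (p_border / (p_border + p_nonborder + tiny)).tolist()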
# Script fragment: `os`, `np` (numpy), `pd` (pandas), `stem`, `command`,
# `method`, `arguments`, `surface_file`, and the output file names
# (`mean_curv_file`, `gauss_curv_file`, `max_curv_file`, `min_curv_file`,
# `curvedness_file`), as well as `curvature`, `read_points`, `read_scalars`,
# and `rewrite_scalars` (from mindboggle), are assumed to be defined or
# imported earlier in the script.
verbose = True
curvatures_file = '{}_curvatures.csv'.format(stem)

# Run the method
default_mean_curv_file, _, _, _, _ = curvature(
    command, method, arguments, surface_file, verbose)

# Rename mean curvature output file
os.rename(default_mean_curv_file, mean_curv_file)
# Remove unneeded output file
os.remove(os.path.join(os.getcwd(), 'output.nipype'))

# Get the curvatures from VTK files
# [[x1, y1, z1], [x2, y2, z2], ...]
points = np.array(read_points(mean_curv_file))
xyz = points.T  # transposed: [[x1, x2, ...], [y1, y2, ...], [z1, z2, ...]]
mean_curv, _ = read_scalars(
    mean_curv_file, return_first=True, return_array=True)
print('number of points: {}'.format(mean_curv.size))
if method == 0 or method == 1:
    gauss_curv, _ = read_scalars(
        gauss_curv_file, return_first=True, return_array=True)
if method == 0:
    max_curv, _ = read_scalars(
        max_curv_file, return_first=True, return_array=True)
    min_curv, _ = read_scalars(
        min_curv_file, return_first=True, return_array=True)
    curvedness = np.sqrt((max_curv ** 2 + min_curv ** 2) / 2)
    rewrite_scalars(surface_file, curvedness_file, curvedness, "curvedness")

# Write the curvatures to a CSV file
df = pd.DataFrame()
df['x'] = xyz[0]
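# A minimal, self-contained sketch (not Mindboggle code) of the curvedness
# measure used above (Koenderink & van Doorn):
# curvedness = sqrt((kappa1**2 + kappa2**2) / 2).
def _demo_curvedness():
    import numpy as np
    kappa1 = np.array([1.0, 2.0, 0.0])   # maximum principal curvatures
    kappa2 = np.array([-1.0, 0.0, 0.0])  # minimum principal curvatures
    return np.sqrt((kappa1 ** 2 + kappa2 ** 2) / 2).tolist()  # [1.0, ~1.414, 0.0]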
def extract_sulci( labels_file, folds_or_file, hemi, min_boundary=1, sulcus_names=[], save_file=False, output_file="", background_value=-1, verbose=False, ): """ Identify sulci from folds in a brain surface according to a labeling protocol that includes a list of label pairs defining each sulcus. Since folds are defined as deep, connected areas of a surface, and since folds may be connected to each other in ways that differ across brains, there usually does not exist a one-to-one mapping between folds of one brain and those of another. To address the correspondence problem then, we need to find just those portions of the folds that correspond across brains. To accomplish this, Mindboggle segments folds into sulci, which do have a one-to-one correspondence across non-pathological brains. Mindboggle defines a sulcus as a folded portion of cortex whose opposing banks are labeled with one or more sulcus label pairs in the DKT labeling protocol, where each label pair is unique to one sulcus and represents a boundary between two adjacent gyri, and each vertex has one gyrus label. This function assigns vertices in a fold to a sulcus in one of two cases. In the first case, vertices whose labels are in only one label pair in the fold are assigned to the label pair’s sulcus if they are connected through similarly labeled vertices to the boundary between the two labels. In the second case, the segment_regions function propagates labels from label borders to vertices whose labels are in multiple label pairs in the fold. Steps for each fold :: 1. Remove fold if it has fewer than two labels. 2. Remove fold if its labels do not contain a sulcus label pair. 3. Find vertices with labels that are in only one of the fold's label boundary pairs. Assign the vertices the sulcus with the label pair if they are connected to the label boundary for that pair. 4. If there are remaining vertices, segment into sets of vertices connected to label boundaries, and assign a unique ID to each set. Parameters ---------- labels_file : string file name for surface mesh VTK containing labels for all vertices folds_or_file : numpy array, list or string fold number for each vertex / name of VTK file containing fold scalars hemi : string hemisphere abbreviation in {'lh', 'rh'} for sulcus labels min_boundary : integer minimum number of vertices for a sulcus label boundary segment sulcus_names : list of strings names of sulci save_file : bool save output VTK file? output_file : string name of output file in VTK format background_value : integer or float background value verbose : bool print statements? 
Returns ------- sulci : list of integers sulcus numbers for all vertices (-1 for non-sulcus vertices) n_sulci : integers number of sulci sulci_file : string output VTK file with sulcus numbers (-1 for non-sulcus vertices) Examples -------- >>> # Example 1: Extract sulcus from a fold with one sulcus label pair: >>> import numpy as np >>> from mindboggle.features.sulci import extract_sulci >>> from mindboggle.mio.vtks import read_scalars >>> from mindboggle.mio.fetch_data import prep_tests >>> urls, fetch_data = prep_tests() >>> # Load labels, folds, neighbor lists, and sulcus names and label pairs >>> labels_file = fetch_data(urls['left_freesurfer_labels'], '', '.vtk') >>> folds_file = fetch_data(urls['left_folds'], '', '.vtk') >>> folds_or_file, name = read_scalars(folds_file, True, True) >>> save_file = True >>> output_file = 'extract_sulci_fold4_1sulcus.vtk' >>> background_value = -1 >>> # Limit number of folds to speed up the test: >>> limit_folds = True >>> if limit_folds: ... fold_numbers = [4] #[4, 6] ... i0 = [i for i,x in enumerate(folds_or_file) if x not in fold_numbers] ... folds_or_file[i0] = background_value >>> hemi = 'lh' >>> min_boundary = 10 >>> sulcus_names = [] >>> verbose = False >>> sulci, n_sulci, sulci_file = extract_sulci(labels_file, folds_or_file, ... hemi, min_boundary, sulcus_names, save_file, output_file, ... background_value, verbose) >>> n_sulci # 23 # (if not limit_folds) 1 >>> lens = [len([x for x in sulci if x==y]) ... for y in np.unique(sulci) if y != -1] >>> lens[0:10] # [6358, 3288, 7612, 5205, 4414, 6251, 3493, 2566, 4436, 739] # (if not limit_folds) [1151] View result without background (skip test): >>> from mindboggle.mio.plots import plot_surfaces # doctest: +SKIP >>> from mindboggle.mio.vtks import rewrite_scalars # doctest: +SKIP >>> output = 'extract_sulci_fold4_1sulcus_no_background.vtk' >>> rewrite_scalars(sulci_file, output, sulci, ... 'sulci', sulci) # doctest: +SKIP >>> plot_surfaces(output) # doctest: +SKIP Example 2: Extract sulcus from a fold with multiple sulcus label pairs: >>> folds_or_file, name = read_scalars(folds_file, True, True) >>> output_file = 'extract_sulci_fold7_2sulci.vtk' >>> # Limit number of folds to speed up the test: >>> limit_folds = True >>> if limit_folds: ... fold_numbers = [7] #[4, 6] ... i0 = [i for i,x in enumerate(folds_or_file) if x not in fold_numbers] ... folds_or_file[i0] = background_value >>> sulci, n_sulci, sulci_file = extract_sulci(labels_file, folds_or_file, ... hemi, min_boundary, sulcus_names, save_file, output_file, ... background_value, verbose) >>> n_sulci # 23 # (if not limit_folds) 2 >>> lens = [len([x for x in sulci if x==y]) ... for y in np.unique(sulci) if y != -1] >>> lens[0:10] # [6358, 3288, 7612, 5205, 4414, 6251, 3493, 2566, 4436, 739] # (if not limit_folds) [369, 93] View result without background (skip test): >>> from mindboggle.mio.plots import plot_surfaces # doctest: +SKIP >>> from mindboggle.mio.vtks import rewrite_scalars # doctest: +SKIP >>> output = 'extract_sulci_fold7_2sulci_no_background.vtk' >>> rewrite_scalars(sulci_file, output, sulci, ... 
'sulci', sulci) # doctest: +SKIP >>> plot_surfaces(output) # doctest: +SKIP """ import os from time import time import numpy as np from mindboggle.mio.vtks import read_scalars, read_vtk, rewrite_scalars from mindboggle.guts.mesh import find_neighbors from mindboggle.guts.segment import extract_borders, propagate, segment_regions from mindboggle.mio.labels import DKTprotocol # Load fold numbers if folds_or_file is a string: if isinstance(folds_or_file, str): folds, name = read_scalars(folds_or_file) elif isinstance(folds_or_file, list): folds = folds_or_file elif isinstance(folds_or_file, np.ndarray): folds = folds_or_file.tolist() dkt = DKTprotocol() if hemi == "lh": pair_lists = dkt.left_sulcus_label_pair_lists elif hemi == "rh": pair_lists = dkt.right_sulcus_label_pair_lists else: raise IOError("Warning: hemisphere not properly specified ('lh' or 'rh').") # Load points, faces, and neighbors: points, indices, lines, faces, labels, scalar_names, npoints, input_vtk = read_vtk(labels_file) neighbor_lists = find_neighbors(faces, npoints) # Array of sulcus IDs for fold vertices, initialized as -1. # Since we do not touch gyral vertices and vertices whose labels # are not in the label list, or vertices having only one label, # their sulcus IDs will remain -1: sulci = background_value * np.ones(npoints) # ------------------------------------------------------------------------ # Loop through folds # ------------------------------------------------------------------------ fold_numbers = [int(x) for x in np.unique(folds) if x != background_value] n_folds = len(fold_numbers) if verbose: print("Extract sulci from {0} folds...".format(n_folds)) t0 = time() for n_fold in fold_numbers: fold_indices = [i for i, x in enumerate(folds) if x == n_fold] len_fold = len(fold_indices) # List the labels in this fold: fold_labels = [labels[x] for x in fold_indices] unique_fold_labels = [int(x) for x in np.unique(fold_labels) if x != background_value] # -------------------------------------------------------------------- # NO MATCH -- fold has fewer than two labels # -------------------------------------------------------------------- if verbose and len(unique_fold_labels) < 2: # Ignore: sulci already initialized with -1 values: if not unique_fold_labels: print(" Fold {0} ({1} vertices): " "NO MATCH -- fold has no labels".format(n_fold, len_fold)) else: print( " Fold {0} ({1} vertices): " "NO MATCH -- fold has only one label ({2})".format(n_fold, len_fold, unique_fold_labels[0]) ) # Ignore: sulci already initialized with -1 values else: # Find all label boundary pairs within the fold: indices_fold_pairs, fold_pairs, unique_fold_pairs = extract_borders( fold_indices, labels, neighbor_lists, ignore_values=[], return_label_pairs=True ) # Find fold label pairs in the protocol (pairs are already sorted): fold_pairs_in_protocol = [x for x in unique_fold_pairs if x in dkt.unique_sulcus_label_pairs] if verbose and unique_fold_labels: print( " Fold {0} labels: {1} ({2} vertices)".format( n_fold, ", ".join([str(x) for x in unique_fold_labels]), len_fold ) ) # ---------------------------------------------------------------- # NO MATCH -- fold has no sulcus label pair # ---------------------------------------------------------------- if verbose and not fold_pairs_in_protocol: print(" Fold {0}: NO MATCH -- fold has no sulcus label pair".format(n_fold, len_fold)) # ---------------------------------------------------------------- # Possible matches # ---------------------------------------------------------------- else: if 
verbose: print( " Fold {0} label pairs in protocol: {1}".format( n_fold, ", ".join([str(x) for x in fold_pairs_in_protocol]) ) ) # Labels in the protocol (includes repeats across label pairs): labels_in_pairs = [x for lst in fold_pairs_in_protocol for x in lst] # Labels that appear in one or more sulcus label boundaries: unique_labels = [] nonunique_labels = [] for label in np.unique(labels_in_pairs): if len([x for x in labels_in_pairs if x == label]) == 1: unique_labels.append(label) else: nonunique_labels.append(label) # ------------------------------------------------------------ # Vertices whose labels are in only one sulcus label pair # ------------------------------------------------------------ # Find vertices with a label that is in only one of the fold's # label pairs (the other label in the pair can exist in other # pairs). Assign the vertices the sulcus with the label pair # if they are connected to the label boundary for that pair. # ------------------------------------------------------------ if unique_labels: for pair in fold_pairs_in_protocol: # If one or both labels in label pair is/are unique: unique_labels_in_pair = [x for x in pair if x in unique_labels] n_unique = len(unique_labels_in_pair) if n_unique: ID = None for i, pair_list in enumerate(pair_lists): if not isinstance(pair_list, list): pair_list = [pair_list] if pair in pair_list: ID = i break if ID is not None: # (ID may be 0, so test against None) # Seeds from label boundary vertices # (fold_pairs and pair already sorted): indices_pair = [x for i, x in enumerate(indices_fold_pairs) if fold_pairs[i] == pair] # Vertices with unique label(s) in pair: indices_unique_labels = [ fold_indices[i] for i, x in enumerate(fold_labels) if x in unique_labels_in_pair ] # Propagate sulcus ID from seeds to vertices # with "unique" labels (only exist in one # label pair in a fold); propagation ensures # that sulci consist of contiguous vertices # for each label boundary: sulci2 = segment_regions( indices_unique_labels, neighbor_lists, min_region_size=1, seed_lists=[indices_pair], keep_seeding=False, spread_within_labels=True, labels=labels, label_lists=[], values=[], max_steps="", background_value=background_value, verbose=False, ) sulci[sulci2 != background_value] = ID # Print statement: if verbose: if n_unique == 1: ps1 = "One label" else: ps1 = "Both labels" if len(sulcus_names): ps2 = sulcus_names[ID] else: ps2 = "" print( " {0} unique to one fold pair: " "{1} {2}".format(ps1, ps2, unique_labels_in_pair) ) # ------------------------------------------------------------ # Vertex labels shared by multiple label pairs # ------------------------------------------------------------ # Propagate labels from label borders to vertices with labels # that are shared by multiple label pairs in the fold.
# ------------------------------------------------------------ if len(nonunique_labels): # For each label shared by different label pairs: for label in nonunique_labels: # Print statement: if verbose: print(" Propagate sulcus borders with label {0}".format(int(label))) # Construct seeds from label boundary vertices: seeds = background_value * np.ones(npoints) for ID, pair_list in enumerate(pair_lists): if not isinstance(pair_list, list): pair_list = [pair_list] label_pairs = [x for x in pair_list if label in x] for label_pair in label_pairs: indices_pair = [ x for i, x in enumerate(indices_fold_pairs) if np.sort(fold_pairs[i]).tolist() == label_pair ] if indices_pair: # Do not include short boundary segments: if min_boundary > 1: indices_pair2 = [] seeds2 = segment_regions( indices_pair, neighbor_lists, 1, [], False, False, [], [], [], "", background_value, verbose, ) useeds2 = [x for x in np.unique(seeds2) if x != background_value] for seed2 in useeds2: iseed2 = [i for i, x in enumerate(seeds2) if x == seed2] if len(iseed2) >= min_boundary: indices_pair2.extend(iseed2) elif verbose: if len(iseed2) == 1: print( " Remove " "assignment " "of ID {0} from " "1 vertex".format(seed2) ) else: print( " Remove " "assignment " "of ID {0} from " "{1} vertices".format(seed2, len(iseed2)) ) indices_pair = indices_pair2 # Assign sulcus IDs to seeds: seeds[indices_pair] = ID # Identify vertices with the label: indices_label = [fold_indices[i] for i, x in enumerate(fold_labels) if x == label] if len(indices_label): # Propagate sulcus ID from seeds to vertices # with a given shared label: seg_vs_prop = False if seg_vs_prop: indices_seeds = [] for seed in [x for x in np.unique(seeds) if x != background_value]: indices_seeds.append([i for i, x in enumerate(seeds) if x == seed]) sulci2 = segment_regions( indices_label, neighbor_lists, 50, indices_seeds, False, True, labels, [], [], "", background_value, verbose, ) else: label_array = background_value * np.ones(npoints) label_array[indices_label] = 1 sulci2 = propagate( points, faces, label_array, seeds, sulci, max_iters=10000, tol=0.001, sigma=5, background_value=background_value, verbose=verbose, ) sulci[sulci2 != background_value] = sulci2[sulci2 != background_value] sulcus_numbers = [int(x) for x in np.unique(sulci) if x != background_value] n_sulci = len(sulcus_numbers) # ------------------------------------------------------------------------ # Print statements # ------------------------------------------------------------------------ if verbose: if n_sulci == 1: sulcus_str = "sulcus" else: sulcus_str = "sulci" if n_folds == 1: folds_str = "fold" else: folds_str = "folds" print("Extracted {0} {1} from {2} {3} ({4:.1f}s):".format(n_sulci, sulcus_str, n_folds, folds_str, time() - t0)) if sulcus_names: for sulcus_number in sulcus_numbers: print(" {0}: {1}".format(sulcus_number, sulcus_names[sulcus_number])) elif sulcus_numbers: print(" " + ", ".join([str(x) for x in sulcus_numbers])) unresolved = [i for i in range(len(pair_lists)) if i not in sulcus_numbers] if len(unresolved) == 1: print("The following sulcus is unaccounted for:") else: print("The following {0} sulci are unaccounted for:".format(len(unresolved))) if sulcus_names: for sulcus_number in unresolved: print(" {0}: {1}".format(sulcus_number, sulcus_names[sulcus_number])) else: print(" " + ", ".join([str(x) for x in unresolved])) # ------------------------------------------------------------------------ # Return sulci, number of sulci, and file name # 
------------------------------------------------------------------------ sulci = [int(x) for x in sulci] sulci_file = os.path.join(os.getcwd(), "sulci.vtk") rewrite_scalars(labels_file, sulci_file, sulci, "sulci", [], background_value) if not os.path.exists(sulci_file): raise IOError(sulci_file + " not found") return sulci, n_sulci, sulci_file
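# The label-pair matching above hinges on finding label-boundary vertices.
# Toy illustration (not mindboggle's extract_borders): a vertex lies on a
# label boundary if any of its mesh neighbors carries a different label.
# The 4-vertex mesh and labels below are made up for demonstration.
faces = [[0, 1, 2], [1, 2, 3]]
labels = [10, 10, 20, 20]
neighbors = {i: set() for i in range(4)}
for a, b, c in faces:
    neighbors[a].update([b, c])
    neighbors[b].update([a, c])
    neighbors[c].update([a, b])
border = [i for i in range(4)
          if any(labels[j] != labels[i] for j in neighbors[i])]
print(border)  # [0, 1, 2, 3] -- every vertex touches the 10/20 border here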
def realign_boundaries_to_fundus_lines( surf_file, init_label_file, fundus_lines_file, thickness_file, out_label_file=None): """ Fix label boundaries to fundus lines. Parameters ---------- surf_file : file containing the surface geometry in vtk format init_label_file : file containing scalars that represent the initial guess at labels fundus_lines_file : file containing scalars representing fundus lines. thickness_file: file containing cortical thickness scalar data (for masking out the medial wall only) out_label_file : if specified, the realigned labels will be written to this file Returns ------- numpy array representing the realigned label for each surface vertex. """ import numpy as np from mindboggle.guts.segment import extract_borders import mindboggle.guts.graph as go from mindboggle.mio.vtks import read_vtk, read_scalars, write_vtk from mindboggle.guts.mesh import find_neighbors import propagate_fundus_lines ## read files faces, _, indices, points, num_points, _, _, _ = read_vtk( surf_file, return_first=True, return_array=True) indices = range(num_points) init_labels, _ = read_scalars(init_label_file, return_first=True, return_array=True) fundus_lines, _ = read_scalars(fundus_lines_file, return_first=True, return_array=True) thickness, _ = read_scalars(thickness_file, return_first=True, return_array=True) # remove labels from vertices with zero thickness (get around # DKT40 annotations having the label '3' for all the Corpus # Callosum vertices). cc_inds = [x for x in indices if thickness[x] < 0.001] init_labels[cc_inds] = 0 ## setup seeds from initial label boundaries neighbor_lists = find_neighbors(faces, num_points) # extract all vertices that are on a boundary between labels boundary_indices, label_pairs, _ = extract_borders( indices, init_labels, neighbor_lists, return_label_pairs=True) # split boundary vertices into segments with common boundary pairs.
boundary_segments = {} for boundary_index, label_pair in zip(boundary_indices, label_pairs): key = ((label_pair[0], label_pair[1]) if label_pair[0] < label_pair[1] else (label_pair[1], label_pair[0])) if key not in boundary_segments: boundary_segments[key] = [] boundary_segments[key].append(boundary_index) boundary_matrix, boundary_matrix_keys = _build_boundary_matrix( boundary_segments, num_points) # build the affinity matrix affinity_matrix = go.weight_graph( np.array(points), indices, np.array(faces), sigma=10, add_to_graph=False) ## propagate boundaries to fundus line vertices learned_matrix = _propagate_labels( affinity_matrix, boundary_matrix, boundary_indices, 100, 1) # assign labels to fundus line vertices based on highest probability new_boundaries = -1 * np.ones(init_labels.shape) fundus_line_indices = [i for i, x in enumerate(fundus_lines) if x > 0.5] # tile the surface into connected components delimited by fundus lines closed_fundus_lines, _, _ = propagate_fundus_lines.propagate_fundus_lines( points, faces, fundus_line_indices, thickness) closed_fundus_line_indices = np.where(closed_fundus_lines > 0)[0] # split surface into connected components connected_component_faces = _remove_boundary_faces( points, faces, closed_fundus_line_indices) # label components based on most probable label assignment new_labels = _label_components( connected_component_faces, num_points, boundary_indices, learned_matrix, boundary_matrix_keys) # propagate new labels to fill holes label_matrix, label_map = _build_label_matrix(new_labels) new_learned_matrix = _propagate_labels( affinity_matrix, label_matrix, [i for i in range(num_points) if new_labels[i] >= 0], 100, 1) # assign most probable labels for idx in [i for i in range(num_points) if new_labels[i] == -1]: max_idx = np.argmax(new_learned_matrix[idx]) new_labels[idx] = label_map[max_idx] # save if out_label_file is not None: write_vtk(out_label_file, points, faces=faces, scalars=[int(x) for x in new_labels], scalar_type='int') return new_labels
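# The private helpers called above (_build_boundary_matrix, _propagate_labels,
# _remove_boundary_faces, _label_components, _build_label_matrix) are not
# shown in this excerpt. A hedged sketch of what _build_boundary_matrix might
# do, inferred only from how its result is used above: a one-hot matrix of
# shape (num_points, num_keys) marking each vertex's boundary-segment key.
# The signature and return order are assumptions.
import numpy as np

def _build_boundary_matrix_sketch(boundary_segments, num_points):
    keys = sorted(boundary_segments.keys())
    matrix = np.zeros((num_points, len(keys)))
    for col, key in enumerate(keys):
        # boundary_segments[key] is a list of vertex indices on this segment:
        matrix[boundary_segments[key], col] = 1
    return matrix, keys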
def spectrum_per_label(vtk_file, spectrum_size=10, exclude_labels=[-1], normalization='area', area_file='', largest_segment=True): """ Compute Laplace-Beltrami spectrum per labeled region in a file. Parameters ---------- vtk_file : string name of VTK surface mesh file containing index scalars (labels) spectrum_size : integer number of eigenvalues to be computed (the length of the spectrum) exclude_labels : list of integers labels to be excluded normalization : string the method used to normalize eigenvalues ('area' or None) if "area", use area of the 2D structure as in Reuter et al. 2006 area_file : string (optional) name of VTK file with surface area scalar values largest_segment : Boolean compute spectrum only for largest segment with a given label? Returns ------- spectrum_lists : list of lists first eigenvalues for each label's Laplace-Beltrami spectrum label_list : list of integers list of unique labels for which spectra are obtained Examples -------- >>> # Uncomment "if label==22:" below to run example: >>> # Spectrum for Twins-2-1 left postcentral (22) pial surface: >>> import os >>> from mindboggle.shapes.laplace_beltrami import spectrum_per_label >>> path = os.environ['MINDBOGGLE_DATA'] >>> vtk_file = os.path.join(path, 'arno', 'labels', 'lh.labels.DKT31.manual.vtk') >>> area_file = os.path.join(path, 'arno', 'shapes', 'lh.pial.area.vtk') >>> spectrum_size = 6 >>> exclude_labels = [0] #[-1] >>> largest_segment = True >>> spectrum_per_label(vtk_file, spectrum_size, exclude_labels, None, ... area_file, largest_segment) ([[6.3469513010430304e-18, 0.0005178862383467463, 0.0017434911095630772, 0.003667561767487686, 0.005429017880363784, 0.006309346984678924]], [22]) """ from mindboggle.mio.vtks import read_vtk, read_scalars from mindboggle.guts.mesh import remove_faces, reindex_faces_points from mindboggle.shapes.laplace_beltrami import fem_laplacian,\ spectrum_of_largest # Read VTK surface mesh file: faces, u1, u2, points, u4, labels, u5, u6 = read_vtk(vtk_file) # Area file: if area_file: areas, u1 = read_scalars(area_file) else: areas = None # Loop through labeled regions: ulabels = [] [ulabels.append(int(x)) for x in labels if x not in ulabels if x not in exclude_labels] label_list = [] spectrum_lists = [] for label in ulabels: #if label == 22: # print("DEBUG: COMPUTE FOR ONLY ONE LABEL") # Determine the indices per label: Ilabel = [i for i, x in enumerate(labels) if x == label] print('{0} vertices for label {1}'.format(len(Ilabel), label)) # Remove background faces: pick_faces = remove_faces(faces, Ilabel) pick_faces, pick_points, o1 = reindex_faces_points(pick_faces, points) # Compute Laplace-Beltrami spectrum for the label: if largest_segment: exclude_labels_inner = [-1] spectrum = spectrum_of_largest(pick_points, pick_faces, spectrum_size, exclude_labels_inner, normalization, areas) else: spectrum = fem_laplacian(pick_points, pick_faces, spectrum_size, normalization) # Append to a list of lists of spectra: spectrum_lists.append(spectrum) label_list.append(label) return spectrum_lists, label_list
def spectrum_per_label(vtk_file, spectrum_size=10, exclude_labels=[-1], normalization='area', area_file='', largest_segment=True, verbose=False): """ Compute Laplace-Beltrami spectrum per labeled region in a file. Parameters ---------- vtk_file : string name of VTK surface mesh file containing index scalars (labels) spectrum_size : integer number of eigenvalues to be computed (the length of the spectrum) exclude_labels : list of integers labels to be excluded normalization : string the method used to normalize eigenvalues ('area' or None) if "area", use area of the 2D structure as in Reuter et al. 2006 area_file : string (optional) name of VTK file with surface area scalar values largest_segment : bool compute spectrum only for largest segment with a given label? verbose : bool print statements? Returns ------- spectrum_lists : list of lists first eigenvalues for each label's Laplace-Beltrami spectrum label_list : list of integers list of unique labels for which spectra are obtained Examples -------- >>> # Uncomment "if label==22:" below to run example: >>> # Spectrum for Twins-2-1 left postcentral (22) pial surface: >>> import numpy as np >>> from mindboggle.shapes.laplace_beltrami import spectrum_per_label >>> from mindboggle.mio.fetch_data import prep_tests >>> urls, fetch_data = prep_tests() >>> vtk_file = fetch_data(urls['left_freesurfer_labels'], '', '.vtk') >>> area_file = fetch_data(urls['left_area'], '', '.vtk') >>> spectrum_size = 6 >>> exclude_labels = [0] #[-1] >>> largest_segment = True >>> verbose = False >>> spectrum_lists, label_list = spectrum_per_label(vtk_file, ... spectrum_size, exclude_labels, None, area_file, largest_segment, ... verbose) >>> print(np.array_str(np.array(spectrum_lists[0][1::]), ... precision=5, suppress_small=True)) [ 0.00054 0.00244 0.00291 0.00456 0.00575] >>> label_list[0:10] [1029, 1005, 1011, 1021, 1008, 1025, 999, 1013, 1007, 1022] """ from mindboggle.mio.vtks import read_vtk, read_scalars from mindboggle.guts.mesh import keep_faces, reindex_faces_points from mindboggle.shapes.laplace_beltrami import fem_laplacian,\ spectrum_of_largest # Read VTK surface mesh file: points, indices, lines, faces, labels, scalar_names, npoints, \ input_vtk = read_vtk(vtk_file) # Area file: if area_file: areas, u1 = read_scalars(area_file) else: areas = None # Loop through labeled regions: ulabels = [] [ulabels.append(int(x)) for x in labels if x not in ulabels if x not in exclude_labels] label_list = [] spectrum_lists = [] for label in ulabels: #if label == 22: # print("DEBUG: COMPUTE FOR ONLY ONE LABEL") # Determine the indices per label: Ilabel = [i for i,x in enumerate(labels) if x == label] if verbose: print('{0} vertices for label {1}'.format(len(Ilabel), label)) # Remove background faces: pick_faces = keep_faces(faces, Ilabel) pick_faces, pick_points, o1 = reindex_faces_points(pick_faces, points) # Compute Laplace-Beltrami spectrum for the label: if largest_segment: exclude_labels_inner = [-1] spectrum = spectrum_of_largest(pick_points, pick_faces, spectrum_size, exclude_labels_inner, normalization, areas, verbose) else: spectrum = fem_laplacian(pick_points, pick_faces, spectrum_size, normalization, verbose) # Append to a list of lists of spectra: spectrum_lists.append(spectrum) label_list.append(label) return spectrum_lists, label_list
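# The bracketed list comprehension above ([ulabels.append(int(x)) ...]) is an
# order-preserving deduplication idiom. A clearer, equivalent version of that
# one step, shown only for illustration:
def unique_in_order(labels, exclude=(-1,)):
    seen = set(exclude)
    out = []
    for x in labels:
        x = int(x)
        if x not in seen:
            seen.add(x)
            out.append(x)
    return out

print(unique_in_order([1029, 1029, -1, 1005, 1029]))  # [1029, 1005]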
def compute_likelihood(trained_file, depth_file, curvature_file, folds, save_file=False): """ Compute likelihoods based on input values, folds, and estimated parameters. Compute likelihood values for a given VTK surface mesh file, after training on distributions of depth and curvature values from multiple files. Parameters ---------- trained_file : pickle compressed file contains the following dictionaries containing lists of floats (estimates of depth or curvature means, sigmas, and weights trained on fold vertices either on or off sulcus label borders) depth_border, curv_border, depth_nonborder, curv_nonborder depth_file : string VTK surface mesh file with depth values in [0,1] for all vertices curvature_file : string VTK surface mesh file with curvature values in [-1,1] for all vertices folds : list of integers fold number for all vertices (-1 for non-fold vertices) save_file : Boolean save output VTK file? Returns ------- likelihoods : list of floats likelihood values for all vertices (0 for non-fold vertices) likelihoods_file : string (if save_file) name of output VTK file with likelihood scalars (-1 for non-fold vertices) Examples -------- >>> import os >>> from mindboggle.mio.vtks import read_scalars, rewrite_scalars >>> from mindboggle.shapes.likelihood import compute_likelihood >>> from mindboggle.mio.plots import plot_surfaces >>> path = os.environ['MINDBOGGLE_DATA'] >>> trained_file = os.path.join(path, 'atlases', 'depth_curv_border_nonborder_parameters.pkl') >>> #depth_file = os.path.join(path, 'arno', 'shapes', 'travel_depth_rescaled.vtk') >>> depth_file = os.path.join(path, 'arno', 'shapes', 'lh.pial.travel_depth.vtk') >>> curvature_file = os.path.join(path, 'arno', 'shapes', 'lh.pial.mean_curvature.vtk') >>> folds_file = os.path.join(path, 'arno', 'features', 'folds.vtk') >>> folds, name = read_scalars(folds_file) >>> save_file = True >>> # >>> likelihoods, likelihoods_file = compute_likelihood(trained_file, ... depth_file, curvature_file, folds, save_file) >>> # View: >>> plot_surfaces('likelihoods.vtk', folds_file) """ import os import numpy as np from math import pi import pickle from mindboggle.mio.vtks import read_scalars, rewrite_scalars # Initialize variables: tiny = 0.000000001 L = np.zeros(len(folds)) probs_border = np.zeros(len(folds)) probs_nonborder = np.zeros(len(folds)) # Load estimated depth and curvature distribution parameters # (pickle files must be opened in binary mode): depth_border, curv_border, depth_nonborder, curv_nonborder = pickle.load( open(trained_file, "rb")) # Load depths, curvatures: depths, name = read_scalars(depth_file, True, True) curvatures, name = read_scalars(curvature_file, True, True) # Prep for below: n = 2 twopiexp = (2*pi)**(n/2) border_sigmas = depth_border['sigmas'] * curv_border['sigmas'] nonborder_sigmas = depth_nonborder['sigmas'] * curv_nonborder['sigmas'] norm_border = 1 / (twopiexp * border_sigmas + tiny) norm_nonborder = 1 / (twopiexp * nonborder_sigmas + tiny) I = [i for i,x in enumerate(folds) if x != -1] N = depth_border['sigmas'].shape[0] for j in range(N): # Border: expB = depth_border['weights'][j] * \ ((depths[I]-depth_border['means'][j])**2) / \ depth_border['sigmas'][j]**2 expB += curv_border['weights'][j] * \ ((curvatures[I]-curv_border['means'][j])**2) / \ curv_border['sigmas'][j]**2 expB = -expB / 2 probs_border[I] = probs_border[I] + norm_border[j] * np.exp(expB) # Non-border: expNB = depth_nonborder['weights'][j] * \ ((depths[I]-depth_nonborder['means'][j])**2) / \ depth_nonborder['sigmas'][j]**2 expNB += curv_nonborder['weights'][j] * \
((curvatures[I]-curv_nonborder['means'][j])**2) / \ curv_nonborder['sigmas'][j]**2 expNB = -expNB / 2 probs_nonborder[I] = probs_nonborder[I] + norm_nonborder[j] * np.exp(expNB) likelihoods = probs_border / (probs_nonborder + probs_border + tiny) likelihoods = likelihoods.tolist() #------------------------------------------------------------------------- # Return likelihoods and output file name #------------------------------------------------------------------------- if save_file: likelihoods_file = os.path.join(os.getcwd(), 'likelihoods.vtk') rewrite_scalars(depth_file, likelihoods_file, likelihoods, 'likelihoods', likelihoods) if not os.path.exists(likelihoods_file): raise IOError(likelihoods_file + " not found") else: likelihoods_file = None return likelihoods, likelihoods_file
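# Minimal numeric sketch of compute_likelihood's final step above: the
# border mixture mass is normalized by the total (border + non-border)
# mass, with tiny guarding against division by zero. Values are made up.
import numpy as np

tiny = 1e-9
probs_border = np.array([0.0, 0.2, 0.9])
probs_nonborder = np.array([0.0, 0.6, 0.1])
likelihoods = probs_border / (probs_nonborder + probs_border + tiny)
print(likelihoods.round(2))  # [0.   0.25 0.9 ]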
def extract_fundi(folds, curv_file, depth_file, min_separation=10, erode_ratio=0.1, erode_min_size=1, save_file=False): """ Extract fundi from folds. A fundus is a branching curve that runs along the deepest and most highly curved portions of a fold. Steps :: 1. Find fundus endpoints (outer anchors) with find_outer_anchors(). 2. Include inner anchor points. 3. Connect anchor points using connect_points_erosion(); inner anchors are removed if they result in endpoints. Parameters ---------- folds : numpy array or list of integers fold number for each vertex curv_file : string surface mesh file in VTK format with mean curvature values depth_file : string surface mesh file in VTK format with rescaled depth values min_separation : integer minimum number of edges between inner/outer anchor points erode_ratio : float fraction of indices to test for removal at each iteration in connect_points_erosion() save_file : Boolean save output VTK file? Returns ------- fundus_per_fold : list of integers fundus numbers for all vertices, labeled by fold (-1 for non-fundus vertices) n_fundi_in_folds : integer number of fundi fundus_per_fold_file : string (if save_file) output VTK file with fundus numbers (-1 for non-fundus vertices) Examples -------- >>> # Extract fundus from one or more folds: >>> single_fold = True >>> import os >>> from mindboggle.mio.vtks import read_scalars >>> from mindboggle.features.fundi import extract_fundi >>> from mindboggle.mio.plots import plot_surfaces >>> path = os.environ['MINDBOGGLE_DATA'] >>> curv_file = os.path.join(path, 'arno', 'shapes', 'lh.pial.mean_curvature.vtk') >>> depth_file = os.path.join(path, 'arno', 'shapes', 'travel_depth_rescaled.vtk') >>> folds_file = os.path.join(path, 'arno', 'features', 'folds.vtk') >>> folds, name = read_scalars(folds_file, True, True) >>> if single_fold: ... fold_number = 2 #11 ... folds[folds != fold_number] = -1 >>> min_separation = 10 >>> erode_ratio = 0.10 >>> erode_min_size = 10 >>> save_file = True >>> o1, o2, fundus_per_fold_file = extract_fundi(folds, curv_file, ... depth_file, min_separation, erode_ratio, erode_min_size, save_file) >>> # >>> # View: >>> plot_surfaces(fundus_per_fold_file) """ # Extract a skeleton to connect endpoints in a fold: import os import numpy as np from time import time from mindboggle.mio.vtks import read_scalars, read_vtk, rewrite_scalars from mindboggle.guts.compute import median_abs_dev from mindboggle.guts.paths import find_max_values from mindboggle.guts.mesh import find_neighbors_from_file, find_complete_faces from mindboggle.guts.paths import find_outer_anchors, connect_points_erosion if isinstance(folds, list): folds = np.array(folds) # Load values, inner anchor threshold, and neighbors: points, indices, lines, faces, curvs, scalar_names, npoints, \ input_vtk = read_vtk(curv_file, True, True) depths, name = read_scalars(depth_file, True, True) values = curvs * depths values0 = [x for x in values if x > 0] thr = np.median(values0) + 2 * median_abs_dev(values0) neighbor_lists = find_neighbors_from_file(curv_file) #------------------------------------------------------------------------- # Loop through folds: #------------------------------------------------------------------------- t1 = time() skeletons = [] unique_fold_IDs = [x for x in np.unique(folds) if x != -1] if len(unique_fold_IDs) == 1: print("Extract a fundus from 1 fold...") else: print("Extract a fundus from each of {0} folds...".
format(len(unique_fold_IDs))) for fold_ID in unique_fold_IDs: indices_fold = [i for i,x in enumerate(folds) if x == fold_ID] if indices_fold: print(' Fold {0}:'.format(int(fold_ID))) #----------------------------------------------------------------- # Find outer anchor points on the boundary of the surface region, # to serve as fundus endpoints: #----------------------------------------------------------------- outer_anchors, tracks = find_outer_anchors(indices_fold, neighbor_lists, values, depths, min_separation) #----------------------------------------------------------------- # Find inner anchor points: #----------------------------------------------------------------- inner_anchors = find_max_values(points, values, min_separation, thr) #----------------------------------------------------------------- # Connect anchor points to create skeleton: #----------------------------------------------------------------- B = -1 * np.ones(npoints) B[indices_fold] = 1 skeleton = connect_points_erosion(B, neighbor_lists, outer_anchors, inner_anchors, values, erode_ratio, erode_min_size, save_steps=[], save_vtk='') if skeleton: skeletons.extend(skeleton) #----------------------------------------------------------------- # Remove fundus vertices if they complete triangle faces: #----------------------------------------------------------------- Iremove = find_complete_faces(skeletons, faces) if Iremove: skeletons = list(frozenset(skeletons).difference(Iremove)) indices_skel = [x for x in skeletons if folds[x] != -1] fundus_per_fold = -1 * np.ones(npoints) fundus_per_fold[indices_skel] = folds[indices_skel] n_fundi_in_folds = len([x for x in np.unique(fundus_per_fold) if x != -1]) if n_fundi_in_folds == 1: sdum = 'fold fundus' else: sdum = 'fold fundi' print(' ...Extracted {0} {1}; {2} total ({3:.2f} seconds)'. format(n_fundi_in_folds, sdum, n_fundi_in_folds, time() - t1)) #------------------------------------------------------------------------- # Return fundi, number of fundi, and file name: #------------------------------------------------------------------------- if n_fundi_in_folds > 0: fundus_per_fold = [int(x) for x in fundus_per_fold] if save_file: fundus_per_fold_file = os.path.join(os.getcwd(), 'fundus_per_fold.vtk') rewrite_scalars(curv_file, fundus_per_fold_file, fundus_per_fold, 'fundi', folds) if not os.path.exists(fundus_per_fold_file): raise(IOError(fundus_per_fold_file + " not found")) else: fundus_per_fold_file = None return fundus_per_fold, n_fundi_in_folds, fundus_per_fold_file
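# Quick check of the inner-anchor threshold used above: the median of the
# positive values plus two median absolute deviations. median_abs_dev here
# is a local stand-in for mindboggle.guts.compute.median_abs_dev (which may
# apply an additional scale factor); the input values are made up.
import numpy as np

def median_abs_dev(x):
    x = np.asarray(x)
    return float(np.median(np.abs(x - np.median(x))))

values0 = [0.1, 0.2, 0.2, 0.3, 1.5]
thr = np.median(values0) + 2 * median_abs_dev(values0)
print(round(float(thr), 2))  # 0.4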
def spectrum_per_label(vtk_file, spectrum_size=10, exclude_labels=[-1], normalization='areaindex', area_file='', largest_segment=True, verbose=False): """ Compute Laplace-Beltrami spectrum per labeled region in a file. Parameters ---------- vtk_file : string name of VTK surface mesh file containing index scalars (labels) spectrum_size : integer number of eigenvalues to be computed (the length of the spectrum) exclude_labels : list of integers labels to be excluded normalization : string the method used to normalize eigenvalues if None, no normalization is used if "area", use area of the 2D structure as in Reuter et al. 2006 if "index", divide eigenvalue by index to account for linear trend if "areaindex", do both (default) area_file : string (optional) name of VTK file with surface area scalar values largest_segment : bool compute spectrum only for largest segment with a given label? verbose : bool print statements? Returns ------- spectrum_lists : list of lists first eigenvalues for each label's Laplace-Beltrami spectrum label_list : list of integers list of unique labels for which spectra are obtained Examples -------- >>> # Uncomment "if label==22:" below to run example: >>> # Spectrum for Twins-2-1 left postcentral (22) pial surface: >>> import numpy as np >>> from mindboggle.shapes.laplace_beltrami import spectrum_per_label >>> from mindboggle.mio.fetch_data import prep_tests >>> urls, fetch_data = prep_tests() >>> vtk_file = fetch_data(urls['left_freesurfer_labels'], '', '.vtk') >>> area_file = fetch_data(urls['left_area'], '', '.vtk') >>> spectrum_size = 6 >>> exclude_labels = [0] #[-1] >>> largest_segment = True >>> verbose = False >>> spectrum_lists, label_list = spectrum_per_label(vtk_file, ... spectrum_size, exclude_labels, None, area_file, largest_segment, ... verbose) >>> [np.float("{0:.{1}f}".format(x, 5)) for x in spectrum_lists[0]] [0.0, 0.00054, 0.00244, 0.00291, 0.00456, 0.00575] >>> label_list[0:10] [1029, 1005, 1011, 1021, 1008, 1025, 999, 1013, 1007, 1022] """ from mindboggle.mio.vtks import read_vtk, read_scalars from mindboggle.guts.mesh import keep_faces, reindex_faces_points from mindboggle.shapes.laplace_beltrami import fem_laplacian,\ spectrum_of_largest # Read VTK surface mesh file: points, indices, lines, faces, labels, scalar_names, npoints, \ input_vtk = read_vtk(vtk_file) # Area file: if area_file: areas, u1 = read_scalars(area_file) else: areas = None # Loop through labeled regions: ulabels = [] [ ulabels.append(int(x)) for x in labels if x not in ulabels if x not in exclude_labels ] label_list = [] spectrum_lists = [] for label in ulabels: #if label == 22: # print("DEBUG: COMPUTE FOR ONLY ONE LABEL") # Determine the indices per label: Ilabel = [i for i, x in enumerate(labels) if x == label] if verbose: print('{0} vertices for label {1}'.format(len(Ilabel), label)) # Remove background faces: pick_faces = keep_faces(faces, Ilabel) pick_faces, pick_points, o1 = reindex_faces_points(pick_faces, points) # Compute Laplace-Beltrami spectrum for the label: if largest_segment: exclude_labels_inner = [-1] spectrum = spectrum_of_largest(pick_points, pick_faces, spectrum_size, exclude_labels_inner, normalization, areas, verbose) else: spectrum = fem_laplacian(pick_points, pick_faces, spectrum_size, normalization, verbose) # Append to a list of lists of spectra: spectrum_lists.append(spectrum) label_list.append(label) return spectrum_lists, label_list
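# Hedged sketch of the normalization options documented above, assuming
# "index" divides the i-th eigenvalue by i (leaving the first, near-zero
# eigenvalue alone) and "area" multiplies by surface area as in Reuter et
# al. 2006; mindboggle's fem_laplacian applies its own version internally,
# so this is illustration only.
def normalize_spectrum(eigenvalues, area, method='areaindex'):
    out = list(eigenvalues)
    if 'index' in method:
        out = [x / i if i else x for i, x in enumerate(out)]
    if 'area' in method:
        out = [x * area for x in out]
    return out

print(normalize_spectrum([0.0, 0.2, 0.6], area=2.0))  # [0.0, 0.4, 0.6]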
def plot_mask_surface(vtk_file, mask_file='', nonmask_value=-1, masked_output='', remove_nonmask=False, program='vtkviewer', use_colormap=False, colormap_file=''): """ Use vtkviewer or mayavi2 to visualize VTK surface mesh data. If a mask_file is provided, a temporary masked file is saved, and it is this file that is viewed. If using vtkviewer, can optionally provide colormap file or set $COLORMAP environment variable. Parameters ---------- vtk_file : string name of VTK surface mesh file mask_file : string name of VTK surface mesh file to mask vtk_file vertices nonmask_value : integer nonmask (usually background) value masked_output : string temporary masked output file name remove_nonmask : Boolean remove vertices that are not in mask? (otherwise assign nonmask_value) program : string {'vtkviewer', 'mayavi2'} program to visualize VTK file use_colormap : Boolean use Paraview-style XML colormap file set by $COLORMAP env variable? colormap_file : string use colormap in given file if use_colormap==True? if empty and use_colormap==True, use file set by $COLORMAP environment variable Examples -------- >>> import os >>> from mindboggle.mio.plots import plot_mask_surface >>> path = os.environ['MINDBOGGLE_DATA'] >>> vtk_file = os.path.join(path, 'arno', 'labels', 'lh.labels.DKT31.manual.vtk') >>> mask_file = os.path.join(path, 'test_one_label.vtk') >>> nonmask_value = 0 #-1 >>> masked_output = '' >>> remove_nonmask = True >>> program = 'vtkviewer' >>> use_colormap = True >>> colormap_file = '' #'/software/surface_cpp_tools/colormap.xml' >>> plot_mask_surface(vtk_file, mask_file, nonmask_value, masked_output, remove_nonmask, program, use_colormap, colormap_file) """ import os import numpy as np from mindboggle.guts.mesh import remove_faces, reindex_faces_points from mindboggle.guts.utilities import execute from mindboggle.mio.plots import plot_surfaces from mindboggle.mio.vtks import read_scalars, rewrite_scalars, \ read_vtk, write_vtk #------------------------------------------------------------------------- # Filter mesh with non-background values from a second (same-size) mesh: #------------------------------------------------------------------------- if mask_file: mask, name = read_scalars(mask_file, True, True) if not masked_output: masked_output = os.path.join(os.getcwd(), 'temp.vtk') file_to_plot = masked_output #--------------------------------------------------------------------- # Remove nonmask-valued vertices: #--------------------------------------------------------------------- if remove_nonmask: #----------------------------------------------------------------- # Load VTK files: #----------------------------------------------------------------- points, indices, lines, faces, scalars, scalar_names, npoints, \ input_vtk = read_vtk(vtk_file, True, True) #----------------------------------------------------------------- # Find mask indices, remove nonmask faces, and reindex: #----------------------------------------------------------------- Imask = [i for i,x in enumerate(mask) if x != nonmask_value] mask_faces = remove_faces(faces, Imask) mask_faces, points, \ original_indices = reindex_faces_points(mask_faces, points) #----------------------------------------------------------------- # Write VTK file with scalar values: #----------------------------------------------------------------- if np.ndim(scalars) == 1: scalar_type = type(scalars[0]).__name__ elif np.ndim(scalars) == 2: scalar_type = type(scalars[0][0]).__name__ else: raise ValueError("Undefined scalar type: scalars must be a 1-D or 2-D array") write_vtk(file_to_plot,
points, [], [], mask_faces, scalars[original_indices].tolist(), scalar_names, scalar_type=scalar_type) else: scalars, name = read_scalars(vtk_file, True, True) scalars[mask == nonmask_value] = nonmask_value rewrite_scalars(vtk_file, file_to_plot, scalars) else: file_to_plot = vtk_file #------------------------------------------------------------------------- # Display with vtkviewer.py: #------------------------------------------------------------------------- if program == 'vtkviewer': plot_surfaces(file_to_plot, use_colormap=use_colormap, colormap_file=colormap_file) #------------------------------------------------------------------------- # Display with mayavi2: #------------------------------------------------------------------------- elif program == 'mayavi2': cmd = ["mayavi2", "-d", file_to_plot, "-m", "Surface", "&"] execute(cmd, 'os')
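# Minimal illustration of the non-removal masking branch above: scalars
# outside the mask are simply overwritten with nonmask_value before the
# surface is rewritten. The arrays are toy stand-ins for VTK scalars.
import numpy as np

scalars = np.array([3.0, 5.0, 7.0, 9.0])
mask = np.array([-1, 1, 1, -1])
nonmask_value = -1
scalars[mask == nonmask_value] = nonmask_value
print(scalars)  # [-1.  5.  7. -1.]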
def spectrum_from_file(vtk_file, spectrum_size=10, exclude_labels=[-1], normalization=None, area_file=''): """ Compute Laplace-Beltrami spectrum of a 3D shape in a VTK file. Parameters ---------- vtk_file : string the input vtk file spectrum_size : integer number of eigenvalues to be computed (the length of the spectrum) exclude_labels : list of integers labels to be excluded normalization : string the method used to normalize eigenvalues ('area' or None) if "area", use area of the 2D structure as in Reuter et al. 2006 area_file : string name of VTK file with surface area scalar values Returns ------- spectrum : list of floats first spectrum_size eigenvalues of the Laplace-Beltrami spectrum Examples -------- >>> # Spectrum for entire left hemisphere of Twins-2-1: >>> import os >>> from mindboggle.shapes.laplace_beltrami import spectrum_from_file >>> path = os.environ['MINDBOGGLE_DATA'] >>> vtk_file = os.path.join(path, 'arno', 'labels', 'lh.labels.DKT25.manual.vtk') >>> spectrum_from_file(vtk_file, spectrum_size=6) [4.829758648026223e-18, 0.00012841730024671977, 0.0002715181572272744, 0.00032051508471594173, 0.000470162807048644, 0.0005768904023010327] >>> # Spectrum for Twins-2-1 left postcentral pial surface (22) >>> # (after running explode_scalars() with reindex=True): >>> import os >>> from mindboggle.shapes.laplace_beltrami import spectrum_from_file >>> path = os.environ['MINDBOGGLE_DATA'] >>> vtk_file = os.path.join(path, 'arno', 'labels', 'label22.vtk') >>> spectrum_from_file(vtk_file, spectrum_size=6) [6.3469513010430304e-18, 0.0005178862383467463, 0.0017434911095630772, 0.003667561767487686, 0.005429017880363784, 0.006309346984678924] >>> # Loop through all Mindboggle-101 brains (header is a path to their >>> # directory, not defined here; skip test): >>> from mindboggle.shapes.laplace_beltrami import spectrum_from_file >>> for hemidir in os.listdir(header): # doctest: +SKIP ... print(hemidir) ... sulci_file = os.path.join(header, hemidir, "sulci.vtk") ... spectrum = spectrum_from_file(sulci_file) """ from mindboggle.mio.vtks import read_vtk, read_scalars from mindboggle.shapes.laplace_beltrami import spectrum_of_largest faces, u1, u2, points, u4, u5, u6, u7 = read_vtk(vtk_file) # Area file: if area_file: areas, u1 = read_scalars(area_file) else: areas = None spectrum = spectrum_of_largest(points, faces, spectrum_size, exclude_labels, normalization, areas) return spectrum
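# One common use of these spectra, not shown in this module: compare two
# shapes by a distance between their truncated spectra ("shape-DNA"-style).
# A minimal sketch under that assumption:
import numpy as np

def spectrum_distance(s1, s2):
    # Compare only the eigenvalues both spectra share:
    n = min(len(s1), len(s2))
    return float(np.linalg.norm(np.array(s1[:n]) - np.array(s2[:n])))

print(spectrum_distance([0.0, 1.0, 2.0], [0.0, 1.5, 2.0]))  # 0.5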
def extract_sulci(labels_file, folds_or_file, hemi, min_boundary=1, sulcus_names=[], verbose=False): """ Identify sulci from folds in a brain surface according to a labeling protocol that includes a list of label pairs defining each sulcus. A fold is a group of connected, deep vertices. Steps for each fold :: 1. Remove fold if it has fewer than two labels. 2. Remove fold if its labels do not contain a sulcus label pair. 3. Find vertices with labels that are in only one of the fold's label boundary pairs. Assign the vertices the sulcus with the label pair if they are connected to the label boundary for that pair. 4. If there are remaining vertices, segment into sets of vertices connected to label boundaries, and assign a unique ID to each set. Parameters ---------- labels_file : string file name for surface mesh VTK containing labels for all vertices folds_or_file : list or string fold number for each vertex / name of VTK file containing fold scalars hemi : string hemisphere abbreviation in {'lh', 'rh'} for sulcus labels min_boundary : integer minimum number of vertices for a sulcus label boundary segment sulcus_names : list of strings names of sulci verbose : bool print statements? Returns ------- sulci : list of integers sulcus numbers for all vertices (-1 for non-sulcus vertices) n_sulci : integers number of sulci sulci_file : string output VTK file with sulcus numbers (-1 for non-sulcus vertices) Examples -------- >>> from mindboggle.features.sulci import extract_sulci >>> from mindboggle.mio.vtks import read_scalars >>> from mindboggle.mio.fetch_data import prep_tests >>> urls, fetch_data = prep_tests() >>> # Load labels, folds, neighbor lists, and sulcus names and label pairs >>> labels_file = fetch_data(urls['left_freesurfer_labels']) >>> folds_file = fetch_data(urls['left_folds']) >>> folds_or_file, name = read_scalars(folds_file) >>> hemi = 'lh' >>> min_boundary = 10 >>> sulcus_names = [] >>> verbose = False >>> sulci, n_sulci, sulci_file = extract_sulci(labels_file, folds_or_file, ... hemi, min_boundary, sulcus_names, verbose) >>> n_sulci 23 >>> lens = [len([x for x in sulci if x == y]) for y in range(n_sulci)] >>> lens[0:10] [0, 6573, 3366, 6689, 5358, 4049, 6379, 3551, 2632, 4225] >>> lens[10::] [754, 3724, 2197, 5823, 1808, 5122, 513, 2153, 1445, 418, 0, 3556, 1221] View result (skip test): >>> from mindboggle.mio.plots import plot_surfaces >>> plot_surfaces('sulci.vtk') # doctest: +SKIP """ import os from time import time import numpy as np from mindboggle.mio.vtks import read_scalars, read_vtk, rewrite_scalars from mindboggle.guts.mesh import find_neighbors from mindboggle.guts.segment import extract_borders, propagate, segment from mindboggle.mio.labels import DKTprotocol # Load fold numbers if folds_or_file is a string: if isinstance(folds_or_file, str): folds, name = read_scalars(folds_or_file) elif isinstance(folds_or_file, list): folds = folds_or_file dkt = DKTprotocol() if hemi == 'lh': pair_lists = dkt.left_sulcus_label_pair_lists elif hemi == 'rh': pair_lists = dkt.right_sulcus_label_pair_lists else: raise IOError("Warning: hemisphere not properly specified ('lh' or 'rh').") # Load points, faces, and neighbors: points, indices, lines, faces, labels, scalar_names, npoints, \ input_vtk = read_vtk(labels_file) neighbor_lists = find_neighbors(faces, npoints) # Array of sulcus IDs for fold vertices, initialized as -1. 
# Since we do not touch gyral vertices and vertices whose labels # are not in the label list, or vertices having only one label, # their sulcus IDs will remain -1: sulci = -1 * np.ones(npoints) #------------------------------------------------------------------------- # Loop through folds #------------------------------------------------------------------------- fold_numbers = [int(x) for x in np.unique(folds) if x != -1] n_folds = len(fold_numbers) if verbose: print("Extract sulci from {0} folds...".format(n_folds)) t0 = time() for n_fold in fold_numbers: fold = [i for i,x in enumerate(folds) if x == n_fold] len_fold = len(fold) # List the labels in this fold: fold_labels = [labels[x] for x in fold] unique_fold_labels = [int(x) for x in np.unique(fold_labels) if x != -1] #--------------------------------------------------------------------- # NO MATCH -- fold has fewer than two labels #--------------------------------------------------------------------- if verbose and len(unique_fold_labels) < 2: # Ignore: sulci already initialized with -1 values: if not unique_fold_labels: print(" Fold {0} ({1} vertices): " "NO MATCH -- fold has no labels". format(n_fold, len_fold)) else: print(" Fold {0} ({1} vertices): " "NO MATCH -- fold has only one label ({2})". format(n_fold, len_fold, unique_fold_labels[0])) # Ignore: sulci already initialized with -1 values else: # Find all label boundary pairs within the fold: indices_fold_pairs, fold_pairs, unique_fold_pairs = \ extract_borders(fold, labels, neighbor_lists, ignore_values=[], return_label_pairs=True) # Find fold label pairs in the protocol (pairs are already sorted): fold_pairs_in_protocol = [x for x in unique_fold_pairs if x in dkt.unique_sulcus_label_pairs] if verbose and unique_fold_labels: print(" Fold {0} labels: {1} ({2} vertices)".format(n_fold, ', '.join([str(x) for x in unique_fold_labels]), len_fold)) #----------------------------------------------------------------- # NO MATCH -- fold has no sulcus label pair #----------------------------------------------------------------- if verbose and not fold_pairs_in_protocol: print(" Fold {0}: NO MATCH -- fold has no sulcus label pair". format(n_fold, len_fold)) #----------------------------------------------------------------- # Possible matches #----------------------------------------------------------------- else: if verbose: print(" Fold {0} label pairs in protocol: {1}".format(n_fold, ', '.join([str(x) for x in fold_pairs_in_protocol]))) # Labels in the protocol (includes repeats across label pairs): labels_in_pairs = [x for lst in fold_pairs_in_protocol for x in lst] # Labels that appear in one or more sulcus label boundary: unique_labels = [] nonunique_labels = [] for label in np.unique(labels_in_pairs): if len([x for x in labels_in_pairs if x == label]) == 1: unique_labels.append(label) else: nonunique_labels.append(label) #------------------------------------------------------------- # Vertices whose labels are in only one sulcus label pair #------------------------------------------------------------- # Find vertices with a label that is in only one of the fold's # label pairs (the other label in the pair can exist in other # pairs). Assign the vertices the sulcus with the label pair # if they are connected to the label boundary for that pair. 
#------------------------------------------------------------- if unique_labels: for pair in fold_pairs_in_protocol: # If one or both labels in label pair is/are unique: unique_labels_in_pair = [x for x in pair if x in unique_labels] n_unique = len(unique_labels_in_pair) if n_unique: ID = None for i, pair_list in enumerate(pair_lists): if not isinstance(pair_list, list): pair_list = [pair_list] if pair in pair_list: ID = i break if ID is not None: # (ID may be 0, so test against None) # Seeds from label boundary vertices # (fold_pairs and pair already sorted): indices_pair = [x for i,x in enumerate(indices_fold_pairs) if fold_pairs[i] == pair] # Vertices with unique label(s) in pair: indices_unique_labels = [fold[i] for i,x in enumerate(fold_labels) if x in unique_labels_in_pair] # Propagate from seeds to labels in label pair: sulci2 = segment(indices_unique_labels, neighbor_lists, min_region_size=1, seed_lists=[indices_pair], keep_seeding=False, spread_within_labels=True, labels=labels) sulci[sulci2 != -1] = ID # Print statement: if verbose: if n_unique == 1: ps1 = '1 label' else: ps1 = 'Both labels' if len(sulcus_names): ps2 = sulcus_names[ID] else: ps2 = '' print(" {0} unique to one fold pair: " "{1} {2}". format(ps1, ps2, unique_labels_in_pair)) #------------------------------------------------------------- # Vertex labels shared by multiple label pairs #------------------------------------------------------------- # Propagate labels from label borders to vertices with labels # that are shared by multiple label pairs in the fold. #------------------------------------------------------------- if len(nonunique_labels): # For each label shared by different label pairs: for label in nonunique_labels: # Print statement: if verbose: print(" Propagate sulcus borders with label {0}". format(int(label))) # Construct seeds from label boundary vertices: seeds = -1 * np.ones(len(points)) for ID, pair_list in enumerate(pair_lists): if not isinstance(pair_list, list): pair_list = [pair_list] label_pairs = [x for x in pair_list if label in x] for label_pair in label_pairs: indices_pair = [x for i,x in enumerate(indices_fold_pairs) if np.sort(fold_pairs[i]). tolist() == label_pair] if indices_pair: # Do not include short boundary segments: if min_boundary > 1: indices_pair2 = [] seeds2 = segment(indices_pair, neighbor_lists) useeds2 = [x for x in np.unique(seeds2) if x != -1] for seed2 in useeds2: iseed2 = [i for i,x in enumerate(seeds2) if x == seed2] if len(iseed2) >= min_boundary: indices_pair2.extend(iseed2) elif verbose: if len(iseed2) == 1: print(" Remove " "assignment " "of ID {0} from " "1 vertex". format(seed2)) else: print(" Remove " "assignment " "of ID {0} from " "{1} vertices".
format(seed2, len(iseed2))) indices_pair = indices_pair2 # Assign sulcus IDs to seeds: seeds[indices_pair] = ID # Identify vertices with the label: label_array = -1 * np.ones(len(points)) indices_label = [fold[i] for i,x in enumerate(fold_labels) if x == label] if len(indices_label): label_array[indices_label] = 1 # Propagate from seeds to vertices with label: #indices_seeds = [] #for seed in range(int(max(seeds))+1): # indices_seeds.append([i for i,x # in enumerate(seeds) # if x == seed]) #sulci2 = segment(indices_label, neighbor_lists, # 50, indices_seeds, False, True, # labels) sulci2 = propagate(points, faces, label_array, seeds, sulci, max_iters=10000, tol=0.001, sigma=5) sulci[sulci2 != -1] = sulci2[sulci2 != -1] sulcus_numbers = [int(x) for x in np.unique(sulci) if x != -1] # if not np.isnan(x)] n_sulci = len(sulcus_numbers) #------------------------------------------------------------------------- # Print statements #------------------------------------------------------------------------- if verbose: print("Extracted {0} sulci from {1} folds ({2:.1f}s):". format(n_sulci, n_folds, time()-t0)) if sulcus_names: for sulcus_number in sulcus_numbers: print(" {0}: {1}".format(sulcus_number, sulcus_names[sulcus_number])) elif sulcus_numbers: print(" " + ", ".join([str(x) for x in sulcus_numbers])) unresolved = [i for i in range(len(pair_lists)) if i not in sulcus_numbers] if len(unresolved) == 1: print("The following sulcus is unaccounted for:") else: print("The following {0} sulci are unaccounted for:". format(len(unresolved))) if sulcus_names: for sulcus_number in unresolved: print(" {0}: {1}".format(sulcus_number, sulcus_names[sulcus_number])) else: print(" " + ", ".join([str(x) for x in unresolved])) #------------------------------------------------------------------------- # Return sulci, number of sulci, and file name #------------------------------------------------------------------------- sulci = [int(x) for x in sulci] sulci_file = os.path.join(os.getcwd(), 'sulci.vtk') rewrite_scalars(labels_file, sulci_file, sulci, 'sulci', sulci) if not os.path.exists(sulci_file): raise IOError(sulci_file + " not found") return sulci, n_sulci, sulci_file
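# Small illustration of the sorted-pair matching used above: a fold's
# boundary label pair is compared with protocol pairs after sorting, so
# the order of the two labels does not matter. Toy label values only.
import numpy as np

fold_pair = [1035, 1030]
protocol_pair = [1030, 1035]
print(np.sort(fold_pair).tolist() == protocol_pair)  # True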
def extract_fundi(folds, curv_file, depth_file, min_separation=10, erode_ratio=0.1, erode_min_size=1, save_file=False, output_file='', background_value=-1, verbose=False): """ Extract fundi from folds. A fundus is a branching curve that runs along the deepest and most highly curved portions of a fold. This function extracts one fundus from each fold by finding the deepest vertices inside the fold, finding endpoints along the edge of the fold, and connecting the former to the latter with tracks that run along deep and curved paths (through vertices with high values of travel depth multiplied by curvature), and a final filtration step. The deepest vertices are those with values at least two median absolute deviations above the median (non-zero) value, with the higher value chosen if two of the vertices are within (a default of) 10 edges from each other (to reduce the number of possible fundus paths as well as computation time). To find the endpoints, the find_outer_endpoints function propagates multiple tracks from seed vertices at median depth in the fold through concentric rings toward the fold’s edge, selecting maximal values within each ring, and terminating at candidate endpoints. The final endpoints are those candidates at the end of tracks that have a high median value, with the higher value chosen if two candidate endpoints are within (a default of) 10 edges from each other (otherwise, the resulting fundi can have spurious branching at the fold’s edge). The connect_points_erosion function connects the deepest fold vertices to the endpoints with a skeleton of 1-vertex-thick curves by erosion. It erodes by iteratively removing simple topological points and endpoints in order of lowest to highest values, where a simple topological point is a vertex that when added to or removed from an object on a surface mesh (such as a fundus curve) does not alter the object's topology. Steps :: 1. Find fundus endpoints (outer anchors) with find_outer_endpoints(). 2. Include inner anchor points. 3. Connect anchor points using connect_points_erosion(); inner anchors are removed if they result in endpoints. Note :: Follow this with segment_by_region() to segment fundi by sulci. Parameters ---------- folds : numpy array or list of integers fold number for each vertex curv_file : string surface mesh file in VTK format with mean curvature values depth_file : string surface mesh file in VTK format with rescaled depth values likelihoods : list of integers fundus likelihood value for each vertex min_separation : integer minimum number of edges between inner/outer anchor points erode_ratio : float fraction of indices to test for removal at each iteration in connect_points_erosion() save_file : bool save output VTK file? output_file : string output VTK file background_value : integer or float background value verbose : bool print statements? 
Returns ------- fundus_per_fold : list of integers fundus numbers for all vertices, labeled by fold (-1 for non-fundus vertices) n_fundi_in_folds : integer number of fundi fundus_per_fold_file : string (if save_file) output VTK file with fundus numbers (-1 for non-fundus vertices) Examples -------- >>> # Extract fundus from one or more folds: >>> import numpy as np >>> from mindboggle.mio.vtks import read_scalars >>> from mindboggle.features.fundi import extract_fundi >>> from mindboggle.mio.fetch_data import prep_tests >>> urls, fetch_data = prep_tests() >>> curv_file = fetch_data(urls['left_mean_curvature'], '', '.vtk') >>> depth_file = fetch_data(urls['left_travel_depth'], '', '.vtk') >>> folds_file = fetch_data(urls['left_folds'], '', '.vtk') >>> folds, name = read_scalars(folds_file, True, True) >>> # Limit number of folds to speed up the test: >>> limit_folds = True >>> if limit_folds: ... fold_numbers = [4] #[4, 6] ... i0 = [i for i,x in enumerate(folds) if x not in fold_numbers] ... folds[i0] = -1 >>> min_separation = 10 >>> erode_ratio = 0.10 >>> erode_min_size = 10 >>> save_file = True >>> output_file = 'extract_fundi_fold4.vtk' >>> background_value = -1 >>> verbose = False >>> o1, o2, fundus_per_fold_file = extract_fundi(folds, curv_file, ... depth_file, min_separation, erode_ratio, erode_min_size, ... save_file, output_file, background_value, verbose) >>> lens = [len([x for x in o1 if x == y]) ... for y in np.unique(o1) if y != background_value] >>> lens[0:10] # [66, 2914, 100, 363, 73, 331, 59, 30, 1, 14] # (if not limit_folds) [73] View result without background (skip test): >>> from mindboggle.mio.plots import plot_surfaces # doctest: +SKIP >>> from mindboggle.mio.vtks import rewrite_scalars # doctest: +SKIP >>> rewrite_scalars(fundus_per_fold_file, ... 'extract_fundi_fold4_no_background.vtk', o1, ... 'fundus_per_fold', folds) # doctest: +SKIP >>> plot_surfaces('extract_fundi_fold4_no_background.vtk') # doctest: +SKIP """ # Extract a skeleton to connect endpoints in a fold: import os import numpy as np from time import time from mindboggle.mio.vtks import read_scalars, read_vtk, rewrite_scalars from mindboggle.guts.compute import median_abs_dev from mindboggle.guts.paths import find_max_values from mindboggle.guts.mesh import find_neighbors_from_file from mindboggle.guts.mesh import find_complete_faces from mindboggle.guts.paths import find_outer_endpoints from mindboggle.guts.paths import connect_points_erosion if isinstance(folds, list): folds = np.array(folds) # Load values, inner anchor threshold, and neighbors: if os.path.isfile(curv_file): points, indices, lines, faces, curvs, scalar_names, npoints, \ input_vtk = read_vtk(curv_file, True, True) else: raise IOError("{0} doesn't exist!".format(curv_file)) if os.path.isfile(depth_file): depths, name = read_scalars(depth_file, True, True) else: raise IOError("{0} doesn't exist!".format(depth_file)) values = curvs * depths values0 = [x for x in values if x > 0] thr = np.median(values0) + 2 * median_abs_dev(values0) neighbor_lists = find_neighbors_from_file(curv_file) # ------------------------------------------------------------------------ # Loop through folds: # ------------------------------------------------------------------------ t1 = time() skeletons = [] unique_fold_IDs = [x for x in np.unique(folds) if x != background_value] if verbose: if len(unique_fold_IDs) == 1: print("Extract a fundus from 1 fold...") else: print("Extract a fundus from each of {0} folds...".
format(len(unique_fold_IDs))) for fold_ID in unique_fold_IDs: indices_fold = [i for i,x in enumerate(folds) if x == fold_ID] if indices_fold: if verbose: print(' Fold {0}:'.format(int(fold_ID))) # ---------------------------------------------------------------- # Find outer anchor points on the boundary of the surface region, # to serve as fundus endpoints: # ---------------------------------------------------------------- outer_anchors, tracks = find_outer_endpoints(indices_fold, neighbor_lists, values, depths, min_separation, background_value, verbose) # ---------------------------------------------------------------- # Find inner anchor points: # ---------------------------------------------------------------- inner_anchors = find_max_values(points, values, min_separation, thr) # ---------------------------------------------------------------- # Connect anchor points to create skeleton: # ---------------------------------------------------------------- B = background_value * np.ones(npoints) B[indices_fold] = 1 skeleton = connect_points_erosion(B, neighbor_lists, outer_anchors, inner_anchors, values, erode_ratio, erode_min_size, [], '', background_value, verbose) if skeleton: skeletons.extend(skeleton) ## --------------------------------------------------------------- ## Remove fundus vertices if they make complete triangle faces: ## --------------------------------------------------------------- #Iremove = find_complete_faces(skeletons, faces) #if Iremove: # skeletons = list(frozenset(skeletons).difference(Iremove)) indices_skel = [x for x in skeletons if folds[x] != background_value] fundus_per_fold = background_value * np.ones(npoints) fundus_per_fold[indices_skel] = folds[indices_skel] n_fundi_in_folds = len([x for x in np.unique(fundus_per_fold) if x != background_value]) if n_fundi_in_folds == 1: sdum = 'fold fundus' else: sdum = 'fold fundi' if verbose: print(' ...Extracted {0} {1}; {2} total ({3:.2f} seconds)'. format(n_fundi_in_folds, sdum, n_fundi_in_folds, time() - t1)) # ------------------------------------------------------------------------ # Return fundi, number of fundi, and file name: # ------------------------------------------------------------------------ fundus_per_fold_file = None if n_fundi_in_folds > 0: fundus_per_fold = [int(x) for x in fundus_per_fold] if save_file: if output_file: fundus_per_fold_file = output_file else: fundus_per_fold_file = os.path.join(os.getcwd(), 'fundus_per_fold.vtk') rewrite_scalars(curv_file, fundus_per_fold_file, fundus_per_fold, 'fundi', [], background_value) if not os.path.exists(fundus_per_fold_file): raise IOError(fundus_per_fold_file + " not found") return fundus_per_fold, n_fundi_in_folds, fundus_per_fold_file
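
# The inner-anchor threshold above is easy to miss inside the larger
# function, so here is a minimal, self-contained sketch of it, using plain
# NumPy in place of mindboggle.guts.compute.median_abs_dev. The helper name
# is hypothetical, not part of the Mindboggle API.
def _sketch_inner_anchor_threshold(curvs, depths):
    """Minimal sketch: the median plus two median absolute deviations
    threshold that extract_fundi() uses to pick deep, highly curved inner
    anchor points from the positive curvature-times-depth values.

    Examples
    --------
    >>> import numpy as np
    >>> thr = _sketch_inner_anchor_threshold(np.array([0.1, 0.5, 0.2, 0.9]),
    ...                                      np.array([1.0, 2.0, 0.0, 3.0]))
    """
    import numpy as np

    values = np.asarray(curvs) * np.asarray(depths)
    values0 = values[values > 0]
    # Median absolute deviation of the positive values:
    mad = np.median(np.abs(values0 - np.median(values0)))
    return np.median(values0) + 2 * mad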
def extract_fundi(folds, curv_file, depth_file, min_separation=10, erode_ratio=0.1, erode_min_size=1, save_file=False, verbose=False): """ Extract fundi from folds. A fundus is a branching curve that runs along the deepest and most highly curved portions of a fold. Steps :: 1. Find fundus endpoints (outer anchors) with find_outer_anchors(). 2. Include inner anchor points. 3. Connect anchor points using connect_points_erosion(); inner anchors are removed if they result in endpoints. Parameters ---------- folds : numpy array or list of integers fold number for each vertex curv_file : string surface mesh file in VTK format with mean curvature values depth_file : string surface mesh file in VTK format with rescaled depth values likelihoods : list of integers fundus likelihood value for each vertex min_separation : integer minimum number of edges between inner/outer anchor points erode_ratio : float fraction of indices to test for removal at each iteration in connect_points_erosion() save_file : bool save output VTK file? verbose : bool print statements? Returns ------- fundus_per_fold : list of integers fundus numbers for all vertices, labeled by fold (-1 for non-fundus vertices) n_fundi_in_folds : integer number of fundi fundus_per_fold_file : string (if save_file) output VTK file with fundus numbers (-1 for non-fundus vertices) Examples -------- >>> # Extract fundus from one or more folds: >>> single_fold = True >>> from mindboggle.mio.vtks import read_scalars >>> from mindboggle.features.fundi import extract_fundi >>> from mindboggle.mio.fetch_data import prep_tests >>> urls, fetch_data = prep_tests() >>> curv_file = fetch_data(urls['left_mean_curvature']) >>> depth_file = fetch_data(urls['left_travel_depth']) >>> folds_file = fetch_data(urls['left_folds']) >>> folds, name = read_scalars(folds_file, True, True) >>> if single_fold: ... fold_number = 2 #11 ... folds[folds != fold_number] = -1 >>> min_separation = 10 >>> erode_ratio = 0.10 >>> erode_min_size = 10 >>> save_file = True >>> verbose = False >>> o1, o2, fundus_per_fold_file = extract_fundi(folds, curv_file, ... depth_file, min_separation, erode_ratio, erode_min_size, ... save_file, verbose) >>> if single_fold: ... lens = [len([x for x in o1 if x == 2])] ... else: ... 
lens = [len([x for x in o1 if x == y]) for y in range(o2)] >>> lens[0:10] [115] View result (skip test): >>> from mindboggle.mio.plots import plot_surfaces >>> plot_surfaces(fundus_per_fold_file) # doctest: +SKIP """ # Extract a skeleton to connect endpoints in a fold: import os import numpy as np from time import time from mindboggle.mio.vtks import read_scalars, read_vtk, rewrite_scalars from mindboggle.guts.compute import median_abs_dev from mindboggle.guts.paths import find_max_values from mindboggle.guts.mesh import find_neighbors_from_file from mindboggle.guts.mesh import find_complete_faces from mindboggle.guts.paths import find_outer_anchors from mindboggle.guts.paths import connect_points_erosion if isinstance(folds, list): folds = np.array(folds) # Load values, inner anchor threshold, and neighbors: points, indices, lines, faces, curvs, scalar_names, npoints, \ input_vtk = read_vtk(curv_file, True, True) depths, name = read_scalars(depth_file, True, True) values = curvs * depths values0 = [x for x in values if x > 0] thr = np.median(values0) + 2 * median_abs_dev(values0) neighbor_lists = find_neighbors_from_file(curv_file) #------------------------------------------------------------------------- # Loop through folds: #------------------------------------------------------------------------- t1 = time() skeletons = [] unique_fold_IDs = [x for x in np.unique(folds) if x != -1] if verbose: if len(unique_fold_IDs) == 1: print("Extract a fundus from 1 fold...") else: print("Extract a fundus from each of {0} folds...".format( len(unique_fold_IDs))) for fold_ID in unique_fold_IDs: indices_fold = [i for i, x in enumerate(folds) if x == fold_ID] if indices_fold: if verbose: print(' Fold {0}:'.format(int(fold_ID))) #----------------------------------------------------------------- # Find outer anchor points on the boundary of the surface region, # to serve as fundus endpoints: #----------------------------------------------------------------- verbose = False outer_anchors, tracks = find_outer_anchors(indices_fold, neighbor_lists, values, depths, min_separation, verbose) #----------------------------------------------------------------- # Find inner anchor points: #----------------------------------------------------------------- inner_anchors = find_max_values(points, values, min_separation, thr) #----------------------------------------------------------------- # Connect anchor points to create skeleton: #----------------------------------------------------------------- B = -1 * np.ones(npoints) B[indices_fold] = 1 skeleton = connect_points_erosion(B, neighbor_lists, outer_anchors, inner_anchors, values, erode_ratio, erode_min_size, save_steps=[], save_vtk='', verbose=False) if skeleton: skeletons.extend(skeleton) #----------------------------------------------------------------- # Remove fundus vertices if they complete triangle faces: #----------------------------------------------------------------- Iremove = find_complete_faces(skeletons, faces) if Iremove: skeletons = list(frozenset(skeletons).difference(Iremove)) indices_skel = [x for x in skeletons if folds[x] != -1] fundus_per_fold = -1 * np.ones(npoints) fundus_per_fold[indices_skel] = folds[indices_skel] n_fundi_in_folds = len([x for x in np.unique(fundus_per_fold) if x != -1]) if n_fundi_in_folds == 1: sdum = 'fold fundus' else: sdum = 'fold fundi' if verbose: print(' ...Extracted {0} {1}; {2} total ({3:.2f} seconds)'.format( n_fundi_in_folds, sdum, n_fundi_in_folds, time() - t1)) 
#------------------------------------------------------------------------- # Return fundi, number of fundi, and file name: #------------------------------------------------------------------------- if n_fundi_in_folds > 0: fundus_per_fold = [int(x) for x in fundus_per_fold] if save_file: fundus_per_fold_file = os.path.join(os.getcwd(), 'fundus_per_fold.vtk') rewrite_scalars(curv_file, fundus_per_fold_file, fundus_per_fold, 'fundi', folds) if not os.path.exists(fundus_per_fold_file): raise IOError(fundus_per_fold_file + " not found") else: fundus_per_fold_file = None return fundus_per_fold, n_fundi_in_folds, fundus_per_fold_file
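
# Both versions of extract_fundi() hand connect_points_erosion() a per-fold
# mask array B: background everywhere except the vertices of one fold. A
# small stand-alone sketch of that construction; the helper name is
# hypothetical, for illustration only.
def _sketch_fold_mask(folds, fold_ID, background_value=-1):
    """Minimal sketch: build the array B used above, with 1 at the vertices
    of fold_ID and the background value everywhere else.

    Examples
    --------
    >>> _sketch_fold_mask([0, 4, 4, -1], 4).tolist()
    [-1.0, 1.0, 1.0, -1.0]
    """
    import numpy as np

    folds = np.asarray(folds)
    B = background_value * np.ones(folds.shape)
    B[folds == fold_ID] = 1
    return B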
def write_vertex_measures(output_table, labels_or_file, sulci=[], fundi=[],
                          affine_transform_files=[], inverse_booleans=[],
                          transform_format='itk', area_file='',
                          mean_curvature_file='', travel_depth_file='',
                          geodesic_depth_file='', freesurfer_thickness_file='',
                          freesurfer_curvature_file='',
                          freesurfer_sulc_file=''):
    """
    Make a table of shape values per vertex.

    Note ::
        This function is tailored for Mindboggle outputs.

    Parameters
    ----------
    output_table : string
        output file (full path)
    labels_or_file : list or string
        label number for each vertex or name of VTK file with index scalars
    sulci : list of integers
        indices to sulci, one per vertex, with -1 indicating no sulcus
    fundi : list of integers
        indices to fundi, one per vertex, with -1 indicating no fundus
    affine_transform_files : list of strings
        affine transform files to standard space
    inverse_booleans : list of zeros and ones
        for each transform, 1 to take the inverse, else 0
    transform_format : string
        format for transform file
        Ex: 'txt' for text, 'itk' for ITK, and 'mat' for Matlab format
    area_file : string
        name of VTK file with surface area scalar values
    mean_curvature_file : string
        name of VTK file with mean curvature scalar values
    travel_depth_file : string
        name of VTK file with travel depth scalar values
    geodesic_depth_file : string
        name of VTK file with geodesic depth scalar values
    freesurfer_thickness_file : string
        name of VTK file with FreeSurfer thickness scalar values
    freesurfer_curvature_file : string
        name of VTK file with FreeSurfer curvature (curv) scalar values
    freesurfer_sulc_file : string
        name of VTK file with FreeSurfer convexity (sulc) scalar values

    Returns
    -------
    output_table : table file name for vertex shape values

    Examples
    --------
    >>> import os
    >>> from mindboggle.mio.vtks import read_scalars
    >>> from mindboggle.mio.tables import write_vertex_measures
    >>> output_table = ''  # 'vertex_shapes.csv'
    >>> path = os.environ['MINDBOGGLE_DATA']
    >>> labels_or_file = os.path.join(path, 'arno', 'labels',
    ...     'lh.labels.DKT25.manual.vtk')
    >>> sulci_file = os.path.join(path, 'arno', 'features', 'sulci.vtk')
    >>> fundi_file = os.path.join(path, 'arno', 'features', 'fundi.vtk')
    >>> sulci, name = read_scalars(sulci_file)
    >>> fundi, name = read_scalars(fundi_file)
    >>> affine_transform_files = [os.path.join(path, 'arno', 'mri',
    ...     't1weighted_brain.MNI152Affine.txt')]
    >>> inverse_booleans = [1]
    >>> transform_format = 'itk'
    >>> area_file = os.path.join(path, 'arno', 'shapes', 'lh.pial.area.vtk')
    >>> mean_curvature_file = os.path.join(path, 'arno', 'shapes',
    ...     'lh.pial.mean_curvature.vtk')
    >>> travel_depth_file = os.path.join(path, 'arno', 'shapes',
    ...     'lh.pial.travel_depth.vtk')
    >>> geodesic_depth_file = os.path.join(path, 'arno', 'shapes',
    ...     'lh.pial.geodesic_depth.vtk')
    >>> freesurfer_thickness_file = ''
    >>> freesurfer_curvature_file = ''
    >>> freesurfer_sulc_file = ''
    >>> write_vertex_measures(output_table, labels_or_file, sulci, fundi,
    ...     affine_transform_files, inverse_booleans, transform_format,
    ...     area_file, mean_curvature_file, travel_depth_file,
    ...     geodesic_depth_file, freesurfer_thickness_file,
    ...     freesurfer_curvature_file, freesurfer_sulc_file)

    """
    import os
    import numpy as np
    import pandas as pd

    from mindboggle.mio.vtks import read_scalars, read_vtk, \
        apply_affine_transforms

    # Make sure inputs are lists:
    if isinstance(labels_or_file, np.ndarray):
        labels = [int(x) for x in labels_or_file]
    elif isinstance(labels_or_file, list):
        labels = labels_or_file
    elif isinstance(labels_or_file, str):
        labels, name = read_scalars(labels_or_file)
    if isinstance(sulci, np.ndarray):
        sulci = [int(x) for x in sulci]
    if isinstance(fundi, np.ndarray):
        fundi = [int(x) for x in fundi]

    if not labels and not sulci and not fundi:
        import sys
        sys.exit('No feature data to tabulate in write_vertex_measures().')

    # Feature names and corresponding feature lists:
    feature_names = ['label ID', 'sulcus ID', 'fundus ID']
    feature_lists = [labels, sulci, fundi]

    # Shape names corresponding to shape files below:
    shape_names = ['area', 'travel depth', 'geodesic depth',
                   'mean curvature', 'freesurfer curvature',
                   'freesurfer thickness', 'freesurfer convexity (sulc)']

    # Load shape files as a list of numpy arrays of per-vertex shape values:
    shape_files = [area_file, travel_depth_file, geodesic_depth_file,
                   mean_curvature_file, freesurfer_curvature_file,
                   freesurfer_thickness_file, freesurfer_sulc_file]

    # Append columns of per-vertex scalar values:
    columns = []
    column_names = []
    for ifeature, values in enumerate(feature_lists):
        if values:
            columns.append(values)
            column_names.append(feature_names[ifeature])

    first_pass = True
    for ishape, shape_file in enumerate(shape_files):
        if os.path.exists(shape_file):
            if first_pass:

                # Append x,y,z position per vertex to columns:
                u1, u2, u3, points, u4, scalars, u5, u6 = read_vtk(shape_file)
                xyz_positions = np.asarray(points)
                for ixyz, xyz in enumerate(['x', 'y', 'z']):
                    column_names.append('position: {0}'.format(xyz))
                    columns.append(xyz_positions[:, ixyz].tolist())
                first_pass = False

                # Append standard space x,y,z position to columns:
                if affine_transform_files and transform_format:
                    affine_points, \
                        foo1 = apply_affine_transforms(affine_transform_files,
                                   inverse_booleans, transform_format,
                                   points, vtk_file_stem='')
                    xyz_std_positions = affine_points
                    for ixyz, xyz in enumerate(['x', 'y', 'z']):
                        column_names.append('position in standard space:'
                                            ' {0}'.format(xyz))
                        columns.append(xyz_std_positions[:, ixyz].tolist())
            else:
                scalars, name = read_scalars(shape_file)
            if len(scalars):
                columns.append(scalars)
                column_names.append(shape_names[ishape])

    # Write the table, one row per vertex:
    if not output_table:
        output_table = os.path.join(os.getcwd(), 'vertices.csv')

    df = pd.DataFrame(np.transpose(columns), columns=column_names)
    df.to_csv(output_table, index=False)

    if not os.path.exists(output_table):
        raise IOError(output_table + " not found")

    return output_table
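
# The table-writing step of write_vertex_measures() in isolation: stack
# equal-length per-vertex columns and write one CSV row per vertex. A
# minimal sketch with toy column data; the helper name is hypothetical.
def _sketch_vertex_table(columns, column_names, output_table='vertices.csv'):
    """Minimal sketch: write per-vertex columns to a CSV table with pandas.

    Examples
    --------
    >>> _sketch_vertex_table([[1, 2], [0.5, 0.7]], ['label ID', 'area'])
    'vertices.csv'
    """
    import numpy as np
    import pandas as pd

    # One row per vertex, one column per measure:
    df = pd.DataFrame(np.transpose(columns), columns=column_names)
    df.to_csv(output_table, index=False)
    return output_table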
def concatenate_sulcus_scalars(scalar_files, fold_files, label_files, background_value=-1): """ Prepare data for estimating scalar distributions along and outside fundi. Extract (e.g., depth, curvature) scalar values in folds, along sulcus label boundaries as well as outside the sulcus label boundaries. Concatenate these scalar values across multiple files. Parameters ---------- scalar_files : list of strings names of surface mesh VTK files with scalar values to concatenate fold_files : list of strings (corr. to each list in scalar_files) VTK files with fold numbers as scalars (-1 for non-fold vertices) label_files : list of strings (corr. to fold_files) VTK files with label numbers (-1 for unlabeled vertices) background_value : integer or float background value Returns ------- border_scalars : list of floats concatenated scalar values within folds along sulcus label boundaries nonborder_scalars : list of floats concatenated scalar values within folds outside sulcus label boundaries Examples -------- >>> # Concatenate (duplicate) depth scalars: >>> import numpy as np >>> from mindboggle.shapes.likelihood import concatenate_sulcus_scalars >>> from mindboggle.mio.fetch_data import prep_tests >>> urls, fetch_data = prep_tests() >>> depth_file = fetch_data(urls['left_travel_depth'], '', '.vtk') >>> labels_file = fetch_data(urls['left_freesurfer_labels'], '', '.vtk') >>> folds_file = fetch_data(urls['left_folds'], '', '.vtk') >>> scalar_files = [depth_file, depth_file] >>> fold_files = [folds_file, folds_file] >>> label_files = [labels_file, labels_file] >>> background_value = -1 >>> border, nonborder = concatenate_sulcus_scalars(scalar_files, ... fold_files, label_files, background_value) >>> print(np.array_str(np.array(border[0:5]), ... precision=5, suppress_small=True)) [ 3.48284 2.57157 4.27596 4.56549 3.84881] >>> print(np.array_str(np.array(nonborder[0:5]), ... 
precision=5, suppress_small=True)) [ 2.87204 2.89388 3.55364 2.81681 3.70736] """ import numpy as np from mindboggle.mio.vtks import read_scalars from mindboggle.guts.mesh import find_neighbors_from_file from mindboggle.guts.segment import extract_borders from mindboggle.mio.labels import DKTprotocol dkt = DKTprotocol() # Prepare (non-unique) list of sulcus label pairs: protocol_label_pairs = [ x for lst in dkt.sulcus_label_pair_lists for x in lst ] border_scalars = [] nonborder_scalars = [] # Loop through files with the scalar values: for ifile, scalar_file in enumerate(scalar_files): #print(scalar_file) # Load scalars, folds, and labels: folds_file = fold_files[ifile] labels_file = label_files[ifile] scalars, name = read_scalars(scalar_file, True, True) if scalars.shape: folds, name = read_scalars(folds_file) labels, name = read_scalars(labels_file) indices_folds = [ i for i, x in enumerate(folds) if x != background_value ] neighbor_lists = find_neighbors_from_file(labels_file) # Find all label border pairs within the folds: indices_label_pairs, label_pairs, unique_pairs = extract_borders( indices_folds, labels, neighbor_lists, ignore_values=[-1], return_label_pairs=True) indices_label_pairs = np.array(indices_label_pairs) # Find vertices with label pairs in the sulcus labeling protocol: Ipairs_in_protocol = [ i for i, x in enumerate(label_pairs) if x in protocol_label_pairs ] indices_label_pairs = indices_label_pairs[Ipairs_in_protocol] indices_outside_pairs = list( frozenset(indices_folds).difference(indices_label_pairs)) # Store scalar values in folds along label border pairs: border_scalars.extend(scalars[indices_label_pairs].tolist()) # Store scalar values in folds outside label border pairs: nonborder_scalars.extend(scalars[indices_outside_pairs].tolist()) return border_scalars, nonborder_scalars
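
# concatenate_sulcus_scalars() leans on extract_borders(); the core idea is
# simple enough to sketch: a vertex lies on a label border when any of its
# mesh neighbors carries a different label. This is a simplification of
# mindboggle.guts.segment.extract_borders (which also returns sorted label
# pairs and honors ignore values); the helper name is hypothetical.
def _sketch_label_border_indices(labels, neighbor_lists):
    """Minimal sketch: indices of vertices whose neighbors disagree with
    them about the label.

    Examples
    --------
    >>> _sketch_label_border_indices([1, 1, 2], [[1], [0, 2], [1]])
    [1, 2]
    """
    return [i for i, nbrs in enumerate(neighbor_lists)
            if any(labels[n] != labels[i] for n in nbrs)]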
def write_average_face_values_per_label(input_indices_vtk,
                                        input_values_vtk='', area_file='',
                                        output_stem='', exclude_values=[-1],
                                        background_value=-1):
    """
    Write out a separate table of average face values for each integer
    in (the first) scalar list of an input VTK file.
    Optionally average the values drawn from a second VTK file.

    Parameters
    ----------
    input_indices_vtk : string
        path of the input VTK file that contains indices as scalars
    input_values_vtk : string
        path of the input VTK file that contains values as scalars
    area_file : string
        path of the input VTK file that contains surface area scalars
    output_stem : string
        path and stem of the output VTK file
    exclude_values : list or array
        values to exclude
    background_value : integer or float
        background value in output VTK files

    Examples
    --------
    >>> import os
    >>> from mindboggle.mio.tables import write_average_face_values_per_label
    >>> path = '/homedir/mindboggled'
    >>> input_indices_vtk = os.path.join(path, 'Twins-2-1', 'labels',
    ...     'left_cortical_surface', 'freesurfer_cortex_labels.vtk')
    >>> input_values_vtk = os.path.join(path, 'Twins-2-1', 'shapes',
    ...     'left_cortical_surface', 'freesurfer_thickness.vtk')
    >>> area_file = os.path.join(path, 'Twins-2-1', 'shapes',
    ...     'left_cortical_surface', 'area.vtk')
    >>> output_stem = 'labels_thickness'
    >>> exclude_values = [-1]
    >>> background_value = -1
    >>> write_average_face_values_per_label(input_indices_vtk,
    ...     input_values_vtk, area_file, output_stem, exclude_values,
    ...     background_value)
    >>> # View:
    >>> #example_vtk = os.path.join(os.getcwd(), output_stem + '0.vtk')
    >>> #from mindboggle.mio.plots import plot_surfaces
    >>> #plot_surfaces(example_vtk)

    """
    import os
    import numpy as np
    import pandas as pd

    from mindboggle.mio.vtks import read_scalars, read_vtk, write_vtk
    from mindboggle.guts.mesh import remove_faces

    # Load VTK file:
    faces, lines, indices, points, npoints, scalars, scalar_names, \
        foo1 = read_vtk(input_indices_vtk, True, True)
    if area_file:
        area_scalars, name = read_scalars(area_file, True, True)
    print("Explode the scalar list in {0}".
          format(os.path.basename(input_indices_vtk)))
    if input_values_vtk != input_indices_vtk:
        values, name = read_scalars(input_values_vtk, True, True)
        print("Explode the scalar list of values in {0} "
              "with the scalar list of indices in {1}".
              format(os.path.basename(input_values_vtk),
                     os.path.basename(input_indices_vtk)))
    else:
        values = np.copy(scalars)

    # Loop through unique (non-excluded) scalar values:
    unique_scalars = [int(x) for x in np.unique(scalars)
                      if x not in exclude_values]
    for scalar in unique_scalars:

        # Create array and indices for scalar value:
        select_scalars = np.copy(scalars)
        select_scalars[scalars != scalar] = background_value
        scalar_indices = [i for i,x in enumerate(select_scalars)
                          if x == scalar]
        print("  Scalar {0}: {1} vertices".format(scalar,
                                                  len(scalar_indices)))

        # Keep only faces whose vertices all carry this scalar value:
        new_faces = remove_faces(faces, scalar_indices)

        # ---------------------------------------------------------------------
        # For each face, average vertex values:
        # ---------------------------------------------------------------------
        output_table = os.path.join(os.getcwd(),
                                    output_stem + str(scalar) + '.csv')
        columns = []
        for face in new_faces:
            face_values = []
            for index in face:
                # Average the (optionally area-normalized) vertex values:
                if area_file:
                    face_values.append(values[index] / area_scalars[index])
                else:
                    face_values.append(values[index])
            columns.append(np.mean(face_values))

        # -----------------------------------------------------------------
        # Write to table:
        # -----------------------------------------------------------------
        df = pd.DataFrame({'': columns})
        df.to_csv(output_table, index=False)

        # Write VTK file with scalar value:
        #output_vtk = os.path.join(os.getcwd(), output_stem + str(scalar) + '.vtk')
        #write_vtk(output_vtk, points, indices, lines, new_faces,
        #          [select_values.tolist()], [output_scalar_name])

        if not os.path.exists(output_table):
            raise IOError(output_table + " not found")
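
# write_average_face_values_per_label() relies on remove_faces() to restrict
# the mesh to one label. A vectorized sketch of that restriction, assuming
# triangular faces stored as an (n, 3) integer array; the helper name is
# hypothetical, and remove_faces() itself may differ in edge cases.
def _sketch_faces_within_label(faces, labels_per_vertex, label):
    """Minimal sketch: keep only the faces whose three vertices all carry
    the given label.

    Examples
    --------
    >>> _sketch_faces_within_label([[0, 1, 2], [1, 2, 3]],
    ...                            [5, 5, 5, 6], 5).tolist()
    [[0, 1, 2]]
    """
    import numpy as np

    faces = np.asarray(faces)
    keep = np.all(np.asarray(labels_per_vertex)[faces] == label, axis=1)
    return faces[keep]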
def compute_likelihood(trained_file, depth_file, curvature_file, folds, save_file=False, background_value=-1): """ Compute likelihoods based on input values, folds, estimated parameters. Compute likelihood values for a given VTK surface mesh file, after training on distributions of depth and curvature values from multiple files. Parameters ---------- trained_file : pickle compressed file depth_border, curv_border, depth_nonborder, and curv_nonborder are dictionaries containing lists of floats (estimates of depth or curvature means, sigmas, and weights trained on fold vertices either on or off sulcus label borders) depth_file : string VTK surface mesh file with depth values in [0,1] for all vertices curvature_file : string VTK surface mesh file with curvature values in [-1,1] for all vertices folds : list of integers fold number for all vertices (-1 for non-fold vertices) save_file : bool save output VTK file? background_value : integer or float background value Returns ------- likelihoods : list of floats likelihood values for all vertices (0 for non-fold vertices) likelihoods_file : string (if save_file) name of output VTK file with likelihood scalars (-1 for non-fold vertices) Notes ----- The depth_curv_border_nonborder_parameters.pkl file needs to be updated. Examples -------- >>> from mindboggle.mio.vtks import read_scalars >>> from mindboggle.shapes.likelihood import compute_likelihood >>> from mindboggle.mio.fetch_data import prep_tests >>> urls, fetch_data = prep_tests() >>> depth_file = fetch_data(urls['left_travel_depth'], '', '.vtk') >>> curvature_file = fetch_data(urls['left_mean_curvature'], '', '.vtk') >>> folds_file = fetch_data(urls['left_folds'], '', '.vtk') >>> trained_file = fetch_data(urls[ ... 'depth_curv_border_nonborder_parameters'], '', '.pkl') # doctest: +SKIP >>> folds, name = read_scalars(folds_file) >>> save_file = True >>> background_value = -1 >>> likelihoods, likelihoods_file = compute_likelihood(trained_file, ... 
depth_file, curvature_file, folds, save_file, background_value) # doctest: +SKIP View result (skip test): >>> from mindboggle.mio.plots import plot_surfaces # doctest: +SKIP >>> plot_surfaces('likelihoods.vtk', folds_file) # doctest: +SKIP """ import os import numpy as np from math import pi import pickle from io import open from mindboggle.mio.vtks import read_scalars, rewrite_scalars # Initialize variables: tiny = 0.000000001 L = np.zeros(len(folds)) probs_border = np.zeros(len(folds)) probs_nonborder = np.zeros(len(folds)) # Load estimated depth and curvature distribution parameters: depth_border, curv_border, depth_nonborder, \ curv_nonborder = pickle.load(open(trained_file, "rb")) # Load depths, curvatures: depths, name = read_scalars(depth_file, True, True) curvatures, name = read_scalars(curvature_file, True, True) # Prep for below: n = 2 twopiexp = (2 * pi)**(n / 2) border_sigmas = depth_border['sigmas'] * curv_border['sigmas'] nonborder_sigmas = depth_nonborder['sigmas'] * curv_nonborder['sigmas'] norm_border = 1 / (twopiexp * border_sigmas + tiny) norm_nonborder = 1 / (twopiexp * nonborder_sigmas + tiny) I = [i for i, x in enumerate(folds) if x != background_value] N = depth_border['sigmas'].shape[0] for j in range(N): # Border: expB = depth_border['weights'][j] * \ ((depths[I]-depth_border['means'][j])**2) / \ depth_border['sigmas'][j]**2 expB += curv_border['weights'][j] * \ ((curvatures[I]-curv_border['means'][j])**2) / \ curv_border['sigmas'][j]**2 expB = -expB / 2 probs_border[I] = probs_border[I] + norm_border[j] * np.exp(expB) # Non-border: expNB = depth_nonborder['weights'][j] * \ ((depths[I]-depth_nonborder['means'][j])**2) / \ depth_nonborder['sigmas'][j]**2 expNB += curv_nonborder['weights'][j] * \ ((curvatures[I]-curv_nonborder['means'][j])**2) / \ curv_nonborder['sigmas'][j]**2 expNB = -expNB / 2 probs_nonborder[I] = probs_nonborder[I] + \ norm_nonborder[j] * np.exp(expNB) likelihoods = probs_border / (probs_nonborder + probs_border + tiny) likelihoods = likelihoods.tolist() # ------------------------------------------------------------------------ # Return likelihoods and output file name # ------------------------------------------------------------------------ if save_file: likelihoods_file = os.path.join(os.getcwd(), 'likelihoods.vtk') rewrite_scalars(depth_file, likelihoods_file, likelihoods, 'likelihoods', likelihoods, background_value) if not os.path.exists(likelihoods_file): raise IOError(likelihoods_file + " not found") else: likelihoods_file = None return likelihoods, likelihoods_file
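
# The likelihood in compute_likelihood() is a ratio of class probabilities,
# P(border) / (P(border) + P(non-border)), accumulated over N weighted
# depth/curvature components. Below is a one-component, one-dimensional
# sketch of the same ratio; the (mean, sigma) tuples are hypothetical toy
# parameters, not the pickled Mindboggle parameters.
def _sketch_border_likelihood(x, border_mu_sigma, nonborder_mu_sigma):
    """Minimal sketch: two-class Gaussian likelihood ratio in 1-D.

    Examples
    --------
    >>> round(float(_sketch_border_likelihood(0.9, (1.0, 0.2), (0.0, 0.2))), 3)
    1.0
    """
    import numpy as np

    def gauss(x, mu, sigma):
        return (np.exp(-0.5 * ((x - mu) / sigma) ** 2)
                / (sigma * np.sqrt(2 * np.pi)))

    tiny = 1e-9
    p_border = gauss(np.asarray(x, dtype=float), *border_mu_sigma)
    p_nonborder = gauss(np.asarray(x, dtype=float), *nonborder_mu_sigma)
    return p_border / (p_border + p_nonborder + tiny)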
def rescale_by_label(input_vtk, labels_or_file, save_file=False,
                     output_filestring='rescaled_scalars'):
    """
    Rescale scalars for each label (such as depth values within each fold).

    Default is to rescale the scalar values of a VTK file by the maximum
    value within each label.

    Parameters
    ----------
    input_vtk : string
        name of VTK file with a scalar value for each vertex
    labels_or_file : list or string
        label number for each vertex or name of VTK file with index scalars
    save_file : Boolean
        save output VTK file?
    output_filestring : string (if save_file)
        name of output file

    Returns
    -------
    rescaled_scalars : list of floats
        scalar values rescaled for each label,
        for label numbers not equal to -1
    rescaled_scalars_file : string (if save_file)
        name of output VTK file with rescaled scalar values for each label

    Examples
    --------
    >>> # Rescale depths within each label:
    >>> import os
    >>> from mindboggle.guts.mesh import rescale_by_label
    >>> from mindboggle.mio.vtks import read_scalars, rewrite_scalars
    >>> from mindboggle.mio.plots import plot_surfaces
    >>> path = os.environ['MINDBOGGLE_DATA']
    >>> input_vtk = os.path.join(path, 'arno', 'shapes',
    ...     'lh.pial.travel_depth.vtk')
    >>> labels_or_file = os.path.join(path, 'arno', 'features', 'subfolds.vtk')
    >>> save_file = True
    >>> output_filestring = 'rescaled_scalars'
    >>> rescaled_scalars, rescaled_scalars_file = rescale_by_label(input_vtk,
    ...     labels_or_file, save_file, output_filestring)
    >>> # View rescaled scalar values per fold:
    >>> folds_file = os.path.join(path, 'arno', 'features', 'folds.vtk')
    >>> folds, name = read_scalars(folds_file)
    >>> rewrite_scalars(rescaled_scalars_file, rescaled_scalars_file,
    ...     rescaled_scalars, 'rescaled_depths', folds)
    >>> plot_surfaces(rescaled_scalars_file)
    """
    import os
    import numpy as np

    from mindboggle.mio.vtks import read_scalars, rewrite_scalars

    # Load scalars:
    scalars, name = read_scalars(input_vtk, True, True)
    print("  Rescaling scalar values within each label...")

    # Load label numbers:
    if isinstance(labels_or_file, str):
        labels, name = read_scalars(labels_or_file, True, True)
    elif isinstance(labels_or_file, list):
        labels = labels_or_file
    unique_labels = np.unique(labels)
    unique_labels = [x for x in unique_labels if x >= 0]

    # Loop through labels:
    for label in unique_labels:
        indices = [i for i,x in enumerate(labels) if x == label]
        if indices:

            # Rescale by the maximum label scalar value:
            scalars[indices] = scalars[indices] / np.max(scalars[indices])

    rescaled_scalars = scalars.tolist()

    #-------------------------------------------------------------------------
    # Return rescaled scalars and file name
    #-------------------------------------------------------------------------
    if save_file:
        rescaled_scalars_file = os.path.join(os.getcwd(),
                                             output_filestring + '.vtk')
        rewrite_scalars(input_vtk, rescaled_scalars_file,
                        rescaled_scalars, 'rescaled_scalars', labels)
        if not os.path.exists(rescaled_scalars_file):
            raise IOError(rescaled_scalars_file + " not found")
    else:
        rescaled_scalars_file = None

    return rescaled_scalars, rescaled_scalars_file
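
# rescale_by_label() divides each vertex value by the maximum value within
# its label. The same operation as a compact, stand-alone sketch, with a
# guard against an all-zero label (an assumption the original does not
# make); the helper name is hypothetical.
def _sketch_rescale_by_label(scalars, labels):
    """Minimal sketch: per-label max normalization; labels < 0 untouched.

    Examples
    --------
    >>> _sketch_rescale_by_label([1.0, 2.0, 3.0], [0, 0, 1]).tolist()
    [0.5, 1.0, 1.0]
    """
    import numpy as np

    scalars = np.array(scalars, dtype=float)
    labels = np.asarray(labels)
    for label in np.unique(labels[labels >= 0]):
        mask = labels == label
        label_max = scalars[mask].max()
        if label_max != 0:
            scalars[mask] /= label_max
    return scalars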
def realign_boundaries_to_fundus_lines(surf_file, init_label_file,
                                       fundus_lines_file, thickness_file,
                                       out_label_file=None):
    """
    Fix label boundaries to fundus lines.

    Parameters
    ----------
    surf_file : file containing the surface geometry in vtk format
    init_label_file : file containing scalars that represent the
                      initial guess at labels
    fundus_lines_file : file containing scalars representing fundus lines
    thickness_file : file containing cortical thickness scalar data
                     (for masking out the medial wall only)
    out_label_file : if specified, the realigned labels will be written
                     to this file

    Returns
    -------
    numpy array representing the realigned label for each surface vertex
    """
    import numpy as np

    from mindboggle.guts.segment import extract_borders
    import mindboggle.guts.graph as go
    from mindboggle.mio.vtks import read_vtk, read_scalars, write_vtk
    from mindboggle.guts.mesh import find_neighbors
    # Note: this is a Python 2-style implicit relative import of the
    # sibling propagate_fundus_lines module; under Python 3 it would need
    # a package-qualified or explicit relative import.
    import propagate_fundus_lines

    ## read files
    points, indices, lines, faces, scalars, scalar_names, num_points, \
        input_vtk = read_vtk(surf_file, return_first=True, return_array=True)
    indices = range(num_points)

    init_labels, _ = read_scalars(init_label_file,
                                  return_first=True, return_array=True)

    fundus_lines, _ = read_scalars(fundus_lines_file,
                                   return_first=True, return_array=True)

    thickness, _ = read_scalars(thickness_file,
                                return_first=True, return_array=True)

    # remove labels from vertices with zero thickness (get around
    # DKT40 annotations having the label '3' for all the Corpus
    # Callosum vertices).
    cc_inds = [x for x in indices if thickness[x] < 0.001]
    init_labels[cc_inds] = 0

    ## setup seeds from initial label boundaries
    neighbor_lists = find_neighbors(faces, num_points)

    # extract all vertices that are on a boundary between labels
    boundary_indices, label_pairs, _ = extract_borders(
        indices, init_labels, neighbor_lists,
        return_label_pairs=True)

    # split boundary vertices into segments with common boundary pairs.
boundary_segments = {} for boundary_index, label_pair in zip(boundary_indices, label_pairs): key = ((label_pair[0], label_pair[1]) if label_pair[0] < label_pair[1] else (label_pair[1], label_pair[0])) if key not in boundary_segments: boundary_segments[key] = [] boundary_segments[key].append(boundary_index) boundary_matrix, boundary_matrix_keys = _build_boundary_matrix( boundary_segments, num_points) # build the affinity matrix affinity_matrix = go.weight_graph(np.array(points), indices, np.array(faces), sigma=10, add_to_graph=False) ## propagate boundaries to fundus line vertices learned_matrix = _propagate_labels(affinity_matrix, boundary_matrix, boundary_indices, 100, 1) # assign labels to fundus line vertices based on highest probability new_boundaries = -1 * np.ones(init_labels.shape) fundus_line_indices = [i for i, x in enumerate(fundus_lines) if x > 0.5] # tile the surface into connected components delimited by fundus lines closed_fundus_lines, _, _ = propagate_fundus_lines.propagate_fundus_lines( points, faces, fundus_line_indices, thickness) closed_fundus_line_indices = np.where(closed_fundus_lines > 0)[0] # split surface into connected components connected_component_faces = _remove_boundary_faces( points, faces, closed_fundus_line_indices) # label components based on most probable label assignment new_labels = _label_components(connected_component_faces, num_points, boundary_indices, learned_matrix, boundary_matrix_keys) # propagate new labels to fill holes label_matrix, label_map = _build_label_matrix(new_labels) new_learned_matrix = _propagate_labels( affinity_matrix, label_matrix, [i for i in range(num_points) if new_labels[i] >= 0], 100, 1) # assign most probable labels for idx in [i for i in range(num_points) if new_labels[i] == -1]: max_idx = np.argmax(new_learned_matrix[idx]) new_labels[idx] = label_map[max_idx] # save if out_label_file is not None: write_vtk(out_label_file, points, faces=faces, scalars=[int(x) for x in new_labels], scalar_type='int') return new_labels
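
# realign_boundaries_to_fundus_lines() defers to a private
# _propagate_labels() helper that is not shown in this file. The sketch
# below is one plausible reading of such a semi-supervised scheme: diffuse
# seed labels over a row-normalized affinity matrix and re-clamp the seeds
# after every step. This is an assumption about the idea, not the actual
# implementation, and it assumes a dense affinity matrix.
def _sketch_label_propagation(affinity, seed_matrix, n_iters=100):
    """Minimal sketch: iterative graph-based label propagation.
    Rows of seed_matrix are vertices, columns are candidate labels;
    seeded rows (any nonzero entry) stay fixed."""
    import numpy as np

    probs = np.array(seed_matrix, dtype=float)
    seeded = probs.sum(axis=1) > 0
    # Turn the affinity matrix into a row-stochastic transition matrix:
    W = np.asarray(affinity, dtype=float)
    W = W / (W.sum(axis=1, keepdims=True) + 1e-12)
    for _ in range(n_iters):
        probs = W @ probs                                # one diffusion step
        probs[seeded] = np.asarray(seed_matrix)[seeded]  # clamp the seeds
    return probs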
def rescale_by_neighborhood(input_vtk, indices=[], nedges=10, p=99,
                            set_max_to_1=True, save_file=False,
                            output_filestring='rescaled_scalars',
                            background_value=-1):
    """
    Rescale the scalar values of a VTK file by a percentile value
    in each vertex's surface mesh neighborhood.

    Parameters
    ----------
    input_vtk : string
        name of VTK file with a scalar value for each vertex
    indices : list of integers (optional)
        indices of scalars to normalize
    nedges : integer
        number of edges from vertex, defining the size of its neighborhood
    p : float in range of [0,100]
        percentile used to normalize each scalar
    set_max_to_1 : Boolean
        set all rescaled values greater than 1 to 1.0?
    save_file : Boolean
        save output VTK file?
    output_filestring : string (if save_file)
        name of output file
    background_value : integer
        background value

    Returns
    -------
    rescaled_scalars : list of floats
        rescaled scalar values
    rescaled_scalars_file : string (if save_file)
        name of output VTK file with rescaled scalar values

    Examples
    --------
    >>> import os
    >>> from mindboggle.guts.mesh import rescale_by_neighborhood
    >>> from mindboggle.mio.vtks import read_scalars, rewrite_scalars
    >>> from mindboggle.mio.plots import plot_surfaces
    >>> path = os.environ['MINDBOGGLE_DATA']
    >>> input_vtk = os.path.join(path, 'arno', 'shapes',
    ...     'lh.pial.travel_depth.vtk')
    >>> indices = []
    >>> nedges = 10
    >>> p = 99
    >>> set_max_to_1 = True
    >>> save_file = True
    >>> output_filestring = 'rescaled_scalars'
    >>> background_value = -1
    >>> rescaled_scalars, rescaled_scalars_file = rescale_by_neighborhood(
    ...     input_vtk, indices, nedges, p, set_max_to_1, save_file,
    ...     output_filestring, background_value)
    >>> # View rescaled scalar values per fold:
    >>> folds_file = os.path.join(path, 'arno', 'features', 'folds.vtk')
    >>> folds, name = read_scalars(folds_file)
    >>> rewrite_scalars(rescaled_scalars_file, rescaled_scalars_file,
    ...     rescaled_scalars, 'rescaled_depths', folds)
    >>> plot_surfaces(rescaled_scalars_file)
    """
    import os
    import numpy as np

    from mindboggle.mio.vtks import read_scalars, rewrite_scalars
    from mindboggle.guts.mesh import find_neighbors_from_file, \
        find_neighborhood

    # Load scalars and vertex neighbor lists:
    scalars, name = read_scalars(input_vtk, True, True)
    if not indices:
        indices = [i for i,x in enumerate(scalars) if x != background_value]
    print("  Rescaling {0} scalar values by neighborhood...".
          format(len(indices)))
    neighbor_lists = find_neighbors_from_file(input_vtk)

    # Loop through vertices:
    rescaled_scalars = scalars.copy()
    for index in indices:

        # Determine the scalars in the vertex's neighborhood:
        neighborhood = find_neighborhood(neighbor_lists, [index], nedges)

        # Compute a high neighborhood percentile to normalize vertex's value:
        normalization_factor = np.percentile(scalars[neighborhood], p)
        rescaled_scalar = scalars[index] / normalization_factor
        rescaled_scalars[index] = rescaled_scalar

    # Make any rescaled value greater than 1 equal to 1:
    if set_max_to_1:
        rescaled_scalars[[x for x in indices
                          if rescaled_scalars[x] > 1.0]] = 1
    rescaled_scalars = rescaled_scalars.tolist()

    #-------------------------------------------------------------------------
    # Return rescaled scalars and file name
    #-------------------------------------------------------------------------
    if save_file:
        rescaled_scalars_file = os.path.join(os.getcwd(),
                                             output_filestring + '.vtk')
        rewrite_scalars(input_vtk, rescaled_scalars_file,
                        rescaled_scalars, 'rescaled_scalars')
        if not os.path.exists(rescaled_scalars_file):
            raise IOError(rescaled_scalars_file + " not found")
    else:
        rescaled_scalars_file = None

    return rescaled_scalars, rescaled_scalars_file
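
# The per-vertex step of rescale_by_neighborhood(), pulled out as a tiny
# sketch: divide a value by a high percentile of its neighborhood values
# and optionally cap the result at 1. The helper name is hypothetical.
def _sketch_percentile_rescale(value, neighborhood_values, p=99,
                               set_max_to_1=True):
    """Minimal sketch: percentile-based rescaling of one vertex value.

    Examples
    --------
    >>> _sketch_percentile_rescale(2.0, [1.0, 2.0, 4.0], p=100)
    0.5
    """
    import numpy as np

    rescaled = float(value / np.percentile(neighborhood_values, p))
    return min(rescaled, 1.0) if set_max_to_1 else rescaled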
def extract_sulci(labels_file, folds_or_file, hemi, min_boundary=1, sulcus_names=[]): """ Identify sulci from folds in a brain surface according to a labeling protocol that includes a list of label pairs defining each sulcus. A fold is a group of connected, deep vertices. Steps for each fold :: 1. Remove fold if it has fewer than two labels. 2. Remove fold if its labels do not contain a sulcus label pair. 3. Find vertices with labels that are in only one of the fold's label boundary pairs. Assign the vertices the sulcus with the label pair if they are connected to the label boundary for that pair. 4. If there are remaining vertices, segment into sets of vertices connected to label boundaries, and assign a unique ID to each set. Parameters ---------- labels_file : string file name for surface mesh VTK containing labels for all vertices folds_or_file : list or string fold number for each vertex / name of VTK file containing fold scalars hemi : string hemisphere abbreviation in {'lh', 'rh'} for sulcus labels min_boundary : integer minimum number of vertices for a sulcus label boundary segment sulcus_names : list of strings names of sulci Returns ------- sulci : list of integers sulcus numbers for all vertices (-1 for non-sulcus vertices) n_sulci : integers number of sulci sulci_file : string output VTK file with sulcus numbers (-1 for non-sulcus vertices) Examples -------- >>> import os >>> from mindboggle.mio.vtks import read_scalars, rewrite_scalars >>> from mindboggle.features.sulci import extract_sulci >>> from mindboggle.mio.plots import plot_surfaces >>> path = os.environ['MINDBOGGLE_DATA'] >>> # Load labels, folds, neighbor lists, and sulcus names and label pairs >>> labels_file = os.path.join(path, 'arno', 'labels', 'relabeled_lh.DKTatlas40.gcs.vtk') >>> folds_file = os.path.join(path, 'arno', 'features', 'folds.vtk') >>> folds_or_file, name = read_scalars(folds_file) >>> hemi = 'lh' >>> min_boundary = 10 >>> sulcus_names = [] >>> # >>> sulci, n_sulci, sulci_file = extract_sulci(labels_file, folds_or_file, hemi, min_boundary, sulcus_names) >>> # View: >>> plot_surfaces('sulci.vtk') """ import os from time import time import numpy as np from mindboggle.mio.vtks import read_scalars, read_vtk, rewrite_scalars from mindboggle.guts.mesh import find_neighbors from mindboggle.guts.segment import extract_borders, propagate, segment from mindboggle.mio.labels import DKTprotocol # Load fold numbers if folds_or_file is a string: if isinstance(folds_or_file, str): folds, name = read_scalars(folds_or_file) elif isinstance(folds_or_file, list): folds = folds_or_file dkt = DKTprotocol() if hemi == 'lh': pair_lists = dkt.left_sulcus_label_pair_lists elif hemi == 'rh': pair_lists = dkt.right_sulcus_label_pair_lists else: print("Warning: hemisphere not properly specified ('lh' or 'rh').") # Load points, faces, and neighbors: faces, o1, o2, points, npoints, labels, o3, o4 = read_vtk(labels_file) neighbor_lists = find_neighbors(faces, npoints) # Array of sulcus IDs for fold vertices, initialized as -1. 
# Since we do not touch gyral vertices and vertices whose labels # are not in the label list, or vertices having only one label, # their sulcus IDs will remain -1: sulci = -1 * np.ones(npoints) #------------------------------------------------------------------------- # Loop through folds #------------------------------------------------------------------------- fold_numbers = [int(x) for x in np.unique(folds) if x != -1] n_folds = len(fold_numbers) print("Extract sulci from {0} folds...".format(n_folds)) t0 = time() for n_fold in fold_numbers: fold = [i for i,x in enumerate(folds) if x == n_fold] len_fold = len(fold) # List the labels in this fold: fold_labels = [labels[x] for x in fold] unique_fold_labels = [int(x) for x in np.unique(fold_labels) if x != -1] #--------------------------------------------------------------------- # NO MATCH -- fold has fewer than two labels #--------------------------------------------------------------------- if len(unique_fold_labels) < 2: # Ignore: sulci already initialized with -1 values: if not unique_fold_labels: print(" Fold {0} ({1} vertices): " "NO MATCH -- fold has no labels". format(n_fold, len_fold)) else: print(" Fold {0} ({1} vertices): " "NO MATCH -- fold has only one label ({2})". format(n_fold, len_fold, unique_fold_labels[0])) # Ignore: sulci already initialized with -1 values else: # Find all label boundary pairs within the fold: indices_fold_pairs, fold_pairs, unique_fold_pairs = \ extract_borders(fold, labels, neighbor_lists, ignore_values=[], return_label_pairs=True) # Find fold label pairs in the protocol (pairs are already sorted): fold_pairs_in_protocol = [x for x in unique_fold_pairs if x in dkt.unique_sulcus_label_pairs] if unique_fold_labels: print(" Fold {0} labels: {1} ({2} vertices)".format(n_fold, ', '.join([str(x) for x in unique_fold_labels]), len_fold)) #----------------------------------------------------------------- # NO MATCH -- fold has no sulcus label pair #----------------------------------------------------------------- if not fold_pairs_in_protocol: print(" Fold {0}: NO MATCH -- fold has no sulcus label pair". format(n_fold, len_fold)) #----------------------------------------------------------------- # Possible matches #----------------------------------------------------------------- else: print(" Fold {0} label pairs in protocol: {1}".format(n_fold, ', '.join([str(x) for x in fold_pairs_in_protocol]))) # Labels in the protocol (includes repeats across label pairs): labels_in_pairs = [x for lst in fold_pairs_in_protocol for x in lst] # Labels that appear in one or more sulcus label boundary: unique_labels = [] nonunique_labels = [] for label in np.unique(labels_in_pairs): if len([x for x in labels_in_pairs if x == label]) == 1: unique_labels.append(label) else: nonunique_labels.append(label) #------------------------------------------------------------- # Vertices whose labels are in only one sulcus label pair #------------------------------------------------------------- # Find vertices with a label that is in only one of the fold's # label pairs (the other label in the pair can exist in other # pairs). Assign the vertices the sulcus with the label pair # if they are connected to the label boundary for that pair. 
                #-------------------------------------------------------------
                if unique_labels:
                    for pair in fold_pairs_in_protocol:

                        # If one or both labels in label pair is/are unique:
                        unique_labels_in_pair = [x for x in pair
                                                 if x in unique_labels]
                        n_unique = len(unique_labels_in_pair)
                        if n_unique:

                            # Find the sulcus ID for this label pair
                            # (test `is not None`: 0 is a valid sulcus ID):
                            ID = None
                            for i, pair_list in enumerate(pair_lists):
                                if not isinstance(pair_list, list):
                                    pair_list = [pair_list]
                                if pair in pair_list:
                                    ID = i
                                    break
                            if ID is not None:

                                # Seeds from label boundary vertices
                                # (fold_pairs and pair already sorted):
                                indices_pair = [x for i,x
                                    in enumerate(indices_fold_pairs)
                                    if fold_pairs[i] == pair]

                                # Vertices with unique label(s) in pair:
                                indices_unique_labels = [fold[i] for i,x
                                    in enumerate(fold_labels)
                                    if x in unique_labels_in_pair]

                                # Propagate from seeds to labels in pair:
                                sulci2 = segment(indices_unique_labels,
                                                 neighbor_lists,
                                                 min_region_size=1,
                                                 seed_lists=[indices_pair],
                                                 keep_seeding=False,
                                                 spread_within_labels=True,
                                                 labels=labels)
                                sulci[sulci2 != -1] = ID

                                # Print statement:
                                if n_unique == 1:
                                    ps1 = '1 label'
                                else:
                                    ps1 = 'Both labels'
                                if len(sulcus_names):
                                    ps2 = sulcus_names[ID]
                                else:
                                    ps2 = ''
                                print("    {0} unique to one fold pair: "
                                      "{1} {2}".
                                      format(ps1, ps2,
                                             unique_labels_in_pair))

                #-------------------------------------------------------------
                # Vertex labels shared by multiple label pairs
                #-------------------------------------------------------------
                # Propagate labels from label borders to vertices with labels
                # that are shared by multiple label pairs in the fold.
                #-------------------------------------------------------------
                if len(nonunique_labels):

                    # For each label shared by different label pairs:
                    for label in nonunique_labels:

                        # Print statement:
                        print("    Propagate sulcus borders with label {0}".
                              format(int(label)))

                        # Construct seeds from label boundary vertices:
                        seeds = -1 * np.ones(len(points))
                        for ID, pair_list in enumerate(pair_lists):
                            if not isinstance(pair_list, list):
                                pair_list = [pair_list]
                            label_pairs = [x for x in pair_list if label in x]
                            for label_pair in label_pairs:
                                indices_pair = [x for i,x
                                    in enumerate(indices_fold_pairs)
                                    if np.sort(fold_pairs[i]).
                                    tolist() == label_pair]
                                if indices_pair:

                                    # Do not include short boundary segments:
                                    if min_boundary > 1:
                                        indices_pair2 = []
                                        seeds2 = segment(indices_pair,
                                                         neighbor_lists)
                                        useeds2 = [x for x
                                                   in np.unique(seeds2)
                                                   if x != -1]
                                        for seed2 in useeds2:
                                            iseed2 = [i for i,x
                                                      in enumerate(seeds2)
                                                      if x == seed2]
                                            if len(iseed2) >= min_boundary:
                                                indices_pair2.extend(iseed2)
                                            elif len(iseed2) == 1:
                                                print("    Remove assignment "
                                                      "of ID {0} from "
                                                      "1 vertex".
                                                      format(seed2))
                                            else:
                                                print("    Remove assignment "
                                                      "of ID {0} from "
                                                      "{1} vertices".
                                                      format(seed2,
                                                             len(iseed2)))
                                        indices_pair = indices_pair2

                                    # Assign sulcus IDs to seeds:
                                    seeds[indices_pair] = ID

                        # Identify vertices with the label:
                        label_array = -1 * np.ones(len(points))
                        indices_label = [fold[i] for i,x
                                         in enumerate(fold_labels)
                                         if x == label]
                        if len(indices_label):
                            label_array[indices_label] = 1

                            # Propagate from seeds to vertices with label:
                            #indices_seeds = []
                            #for seed in range(int(max(seeds))+1):
                            #    indices_seeds.append([i for i,x
                            #                          in enumerate(seeds)
                            #                          if x == seed])
                            #sulci2 = segment(indices_label, neighbor_lists,
                            #                 50, indices_seeds, False, True,
                            #                 labels)
                            sulci2 = propagate(points, faces,
                                               label_array, seeds, sulci,
                                               max_iters=10000,
                                               tol=0.001, sigma=5)
                            sulci[sulci2 != -1] = sulci2[sulci2 != -1]

    #-------------------------------------------------------------------------
    # Print out assigned sulci
    #-------------------------------------------------------------------------
    sulcus_numbers = [int(x) for x in np.unique(sulci) if x != -1]
    n_sulci = len(sulcus_numbers)
    print("Extracted {0} sulci from {1} folds ({2:.1f}s):".
          format(n_sulci, n_folds, time()-t0))
    if sulcus_names:
        for sulcus_number in sulcus_numbers:
            print("  {0}: {1}".format(sulcus_number,
                                      sulcus_names[sulcus_number]))
    elif sulcus_numbers:
        print("  " + ", ".join([str(x) for x in sulcus_numbers]))

    #-------------------------------------------------------------------------
    # Print out unresolved sulci
    #-------------------------------------------------------------------------
    unresolved = [i for i in range(len(pair_lists))
                  if i not in sulcus_numbers]
    if len(unresolved) == 1:
        print("The following sulcus is unaccounted for:")
    else:
        print("The following {0} sulci are unaccounted for:".
              format(len(unresolved)))
    if sulcus_names:
        for sulcus_number in unresolved:
            print("  {0}: {1}".format(sulcus_number,
                                      sulcus_names[sulcus_number]))
    else:
        print("  " + ", ".join([str(x) for x in unresolved]))

    #-------------------------------------------------------------------------
    # Return sulci, number of sulci, and file name
    #-------------------------------------------------------------------------
    sulci = [int(x) for x in sulci]
    sulci_file = os.path.join(os.getcwd(), 'sulci.vtk')
    rewrite_scalars(labels_file, sulci_file, sulci, 'sulci', sulci)
    if not os.path.exists(sulci_file):
        raise IOError(sulci_file + " not found")

    return sulci, n_sulci, sulci_file
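
# How extract_sulci() maps a sorted label pair to a sulcus ID, isolated as
# a sketch: the ID is simply the index of the pair list that contains the
# pair. Note the `is not None` test, since 0 is a valid sulcus ID (the
# reason a bare truth test on ID is avoided above). The helper name is
# hypothetical.
def _sketch_sulcus_id_for_pair(pair, pair_lists):
    """Minimal sketch: look up the sulcus ID for a sorted label pair.

    Examples
    --------
    >>> _sketch_sulcus_id_for_pair([1, 2], [[[1, 2]], [[3, 4], [3, 5]]])
    0
    """
    for ID, pair_list in enumerate(pair_lists):
        if not isinstance(pair_list, list):
            pair_list = [pair_list]
        if pair in pair_list:
            return ID
    return None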
def write_face_vertex_averages(input_file, output_table='', area_file=''): """ Make table of average vertex values per face (divided by face area if area_file provided). Parameters ---------- input_file : string name of VTK file with scalars to average area_file : string name of VTK file with surface area scalar values output_table : string output table filename Returns ------- output_table : string output table filename Examples -------- >>> from mindboggle.mio.tables import write_face_vertex_averages >>> from mindboggle.mio.fetch_data import prep_tests >>> urls, fetch_data = prep_tests() >>> input_file = fetch_data(urls['left_travel_depth'], '', '.vtk') >>> area_file = fetch_data(urls['left_area'], '', '.vtk') >>> output_table = '' >>> output_table = write_face_vertex_averages(input_file, output_table, ... area_file) """ import os import numpy as np import pandas as pd from mindboggle.mio.vtks import read_vtk, read_scalars points, indices, lines, faces, scalars, scalar_names, \ npoints, input_vtk = read_vtk(input_file, True, True) if area_file: area_scalars, name = read_scalars(area_file, True, True) # -------------------------------------------------------------------- # For each face, average vertex values: # -------------------------------------------------------------------- columns = [] for face in faces: values = [] for index in face: if area_file: values.append(scalars[index] / area_scalars[index]) else: values.append(scalars[index]) columns.append(np.mean(values)) # ---------------------------------------------------------------- # Write to table: # ---------------------------------------------------------------- if not output_table: output_table = os.path.join(os.getcwd(), 'average_face_values.csv') df = pd.DataFrame({'': columns}) df.to_csv(output_table, index=False, encoding='utf-8') if not os.path.exists(output_table): raise IOError(output_table + " not found") return output_table
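
# A vectorized equivalent of the per-face averaging loop in
# write_face_vertex_averages(), assuming triangular faces stored as an
# (n, 3) integer array. The helper name is hypothetical; results should
# match the loop up to floating-point order of operations.
def _sketch_face_vertex_averages(faces, scalars, area_scalars=None):
    """Minimal sketch: mean of the three vertex values for every face,
    optionally dividing each vertex value by its surface area first.

    Examples
    --------
    >>> _sketch_face_vertex_averages([[0, 1, 2]], [3.0, 6.0, 9.0]).tolist()
    [6.0]
    """
    import numpy as np

    faces = np.asarray(faces)
    values = np.asarray(scalars, dtype=float)
    if area_scalars is not None:
        values = values / np.asarray(area_scalars, dtype=float)
    return values[faces].mean(axis=1)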
def concatenate_sulcus_scalars(scalar_files, fold_files, label_files, background_value=-1): """ Prepare data for estimating scalar distributions along and outside fundi. Extract (e.g., depth, curvature) scalar values in folds, along sulcus label boundaries as well as outside the sulcus label boundaries. Concatenate these scalar values across multiple files. Parameters ---------- scalar_files : list of strings names of surface mesh VTK files with scalar values to concatenate fold_files : list of strings (corr. to each list in scalar_files) VTK files with fold numbers as scalars (-1 for non-fold vertices) label_files : list of strings (corr. to fold_files) VTK files with label numbers (-1 for unlabeled vertices) background_value : integer or float background value Returns ------- border_scalars : list of floats concatenated scalar values within folds along sulcus label boundaries nonborder_scalars : list of floats concatenated scalar values within folds outside sulcus label boundaries Examples -------- >>> # Concatenate (duplicate) depth scalars: >>> import numpy as np >>> from mindboggle.shapes.likelihood import concatenate_sulcus_scalars >>> from mindboggle.mio.fetch_data import prep_tests >>> urls, fetch_data = prep_tests() >>> depth_file = fetch_data(urls['left_travel_depth'], '', '.vtk') >>> labels_file = fetch_data(urls['left_freesurfer_labels'], '', '.vtk') >>> folds_file = fetch_data(urls['left_folds'], '', '.vtk') >>> scalar_files = [depth_file, depth_file] >>> fold_files = [folds_file, folds_file] >>> label_files = [labels_file, labels_file] >>> background_value = -1 >>> border, nonborder = concatenate_sulcus_scalars(scalar_files, ... fold_files, label_files, background_value) >>> print(np.array_str(np.array(border[0:5]), ... precision=5, suppress_small=True)) [ 3.48284 2.57157 4.27596 4.56549 3.84881] >>> print(np.array_str(np.array(nonborder[0:5]), ... 
precision=5, suppress_small=True)) [ 2.87204 2.89388 3.55364 2.81681 3.70736] """ import numpy as np from mindboggle.mio.vtks import read_scalars from mindboggle.guts.mesh import find_neighbors_from_file from mindboggle.guts.segment import extract_borders from mindboggle.mio.labels import DKTprotocol dkt = DKTprotocol() # Prepare (non-unique) list of sulcus label pairs: protocol_label_pairs = [x for lst in dkt.sulcus_label_pair_lists for x in lst] border_scalars = [] nonborder_scalars = [] # Loop through files with the scalar values: for ifile, scalar_file in enumerate(scalar_files): #print(scalar_file) # Load scalars, folds, and labels: folds_file = fold_files[ifile] labels_file = label_files[ifile] scalars, name = read_scalars(scalar_file, True, True) if scalars.shape: folds, name = read_scalars(folds_file) labels, name = read_scalars(labels_file) indices_folds = [i for i,x in enumerate(folds) if x != background_value] neighbor_lists = find_neighbors_from_file(labels_file) # Find all label border pairs within the folds: indices_label_pairs, label_pairs, unique_pairs = extract_borders( indices_folds, labels, neighbor_lists, ignore_values=[-1], return_label_pairs=True) indices_label_pairs = np.array(indices_label_pairs) # Find vertices with label pairs in the sulcus labeling protocol: Ipairs_in_protocol = [i for i,x in enumerate(label_pairs) if x in protocol_label_pairs] indices_label_pairs = indices_label_pairs[Ipairs_in_protocol] indices_outside_pairs = list(frozenset(indices_folds).difference( indices_label_pairs)) # Store scalar values in folds along label border pairs: border_scalars.extend(scalars[indices_label_pairs].tolist()) # Store scalar values in folds outside label border pairs: nonborder_scalars.extend(scalars[indices_outside_pairs].tolist()) return border_scalars, nonborder_scalars
def evaluate_deep_features(features_file, labels_file, sulci_file='',
                           hemi='', excludeIDs=[-1], output_vtk_name='',
                           verbose=True):
    """
    Evaluate deep surface features by computing the minimum distance from
    each label border vertex to all of the feature vertices in the same
    sulcus, and from each feature vertex to all of the label border vertices
    in the same sulcus. The label borders run along the deepest parts of
    sulci and correspond to fundi in the DKT cortical labeling protocol.

    Parameters
    ----------
    features_file : string
        VTK surface file with feature numbers for vertex scalars
    labels_file : string
        VTK surface file with label numbers for vertex scalars
    sulci_file : string
        VTK surface file with sulcus numbers for vertex scalars
    hemi : string
        hemisphere in {'lh', 'rh'}, used to select sulcus label pair lists
    excludeIDs : list of integers
        feature/sulcus/label IDs to exclude (background set to -1)
    output_vtk_name : string
        if not empty, output a VTK file beginning with output_vtk_name that
        contains a surface with mean distances as scalars
    verbose : Boolean
        print mean distances to standard output?

    Returns
    -------
    feature_to_border_mean_distances : numpy array [number of features x 1]
        mean distance from each feature to sulcus label border
    feature_to_border_sd_distances : numpy array [number of features x 1]
        standard deviations of feature-to-border distances
    feature_to_border_distances_vtk : string
        VTK surface file containing feature-to-border distances
    border_to_feature_mean_distances : numpy array [number of features x 1]
        mean distances from each sulcus label border to feature
    border_to_feature_sd_distances : numpy array [number of features x 1]
        standard deviations of border-to-feature distances
    border_to_feature_distances_vtk : string
        VTK surface file containing border-to-feature distances

    """
    import os
    import sys
    import numpy as np
    from mindboggle.mio.vtks import read_vtk, read_scalars, write_vtk
    from mindboggle.guts.mesh import find_neighbors, remove_faces
    from mindboggle.guts.segment import extract_borders
    from mindboggle.guts.compute import source_to_target_distances
    from mindboggle.mio.labels import DKTprotocol

    dkt = DKTprotocol()

    #-------------------------------------------------------------------------
    # Load labels, features, and sulci:
    #-------------------------------------------------------------------------
    points, indices, lines, faces, labels, scalar_names, npoints, \
        input_vtk = read_vtk(labels_file, True, True)
    features, name = read_scalars(features_file, True, True)
    if sulci_file:
        sulci, name = read_scalars(sulci_file, True, True)
        # List of indices to sulcus vertices:
        sulcus_indices = [i for i, x in enumerate(sulci) if x != -1]
        segmentIDs = sulci
        sulcus_faces = remove_faces(faces, sulcus_indices)
    else:
        sulcus_indices = list(range(len(labels)))
        segmentIDs = []
        sulcus_faces = faces

    #-------------------------------------------------------------------------
    # Prepare neighbors, label pairs, border IDs, and outputs:
    #-------------------------------------------------------------------------
    # Calculate neighbor lists for all points:
    print('Find neighbors for all vertices...')
    neighbor_lists = find_neighbors(faces, npoints)

    # Find label border points in any of the sulci:
    print('Find label border points in any of the sulci...')
    border_indices, border_label_tuples, unique_border_label_tuples = \
        extract_borders(sulcus_indices, labels, neighbor_lists,
                        ignore_values=[], return_label_pairs=True)
    if not len(border_indices):
        sys.exit('There are no label border points!')

    # Initialize an array of label border IDs
    # (label border vertices that define sulci in the labeling protocol):
    print('Build an array of label border IDs...')
    label_borders = -1 * np.ones(npoints)

    if hemi == 'lh':
        nsulcus_lists = len(dkt.left_sulcus_label_pair_lists)
    else:
        nsulcus_lists = len(dkt.right_sulcus_label_pair_lists)
    feature_to_border_mean_distances = -1 * np.ones(nsulcus_lists)
    feature_to_border_sd_distances = -1 * np.ones(nsulcus_lists)
    border_to_feature_mean_distances = -1 * np.ones(nsulcus_lists)
    border_to_feature_sd_distances = -1 * np.ones(nsulcus_lists)
    feature_to_border_distances_vtk = ''
    border_to_feature_distances_vtk = ''

    #-------------------------------------------------------------------------
    # Loop through sulci:
    #-------------------------------------------------------------------------
    # For each list of sorted label pairs (corresponding to a sulcus):
    for isulcus, label_pairs in enumerate(dkt.sulcus_label_pair_lists):

        # Keep the border points with label pair labels:
        label_pair_border_indices = [
            x for i, x in enumerate(border_indices)
            if np.unique(border_label_tuples[i]).tolist() in label_pairs]

        # Store the points as sulcus IDs in the border IDs array:
        if label_pair_border_indices:
            label_borders[label_pair_border_indices] = isulcus

    if len(np.unique(label_borders)) > 1:

        #---------------------------------------------------------------------
        # Construct a feature-to-border distance matrix and VTK file:
        #---------------------------------------------------------------------
        # Construct a distance matrix:
        print('Construct a feature-to-border distance matrix...')
        sourceIDs = features
        targetIDs = label_borders
        distances, distance_matrix = source_to_target_distances(
            sourceIDs, targetIDs, points, segmentIDs, excludeIDs)

        # Compute mean distances for each feature:
        nfeatures = min(np.shape(distance_matrix)[1], nsulcus_lists)
        for ifeature in range(nfeatures):
            feature_distances = [x for x in distance_matrix[:, ifeature]
                                 if x != -1]
            feature_to_border_mean_distances[ifeature] = \
                np.mean(feature_distances)
            feature_to_border_sd_distances[ifeature] = \
                np.std(feature_distances)

        if verbose:
            print('Feature-to-border mean distances:')
            print(feature_to_border_mean_distances)
            print('Feature-to-border standard deviations of distances:')
            print(feature_to_border_sd_distances)

        # Write resulting feature-label border distances to VTK file:
        if output_vtk_name:
            feature_to_border_distances_vtk = os.path.join(
                os.getcwd(),
                output_vtk_name + '_feature_to_border_mean_distances.vtk')
            print('Write feature-to-border distances to {0}...'.format(
                feature_to_border_distances_vtk))
            write_vtk(feature_to_border_distances_vtk, points, [], [],
                      sulcus_faces, [distances],
                      ['feature-to-border_distances'], 'float')

        #---------------------------------------------------------------------
        # Construct a border-to-feature distance matrix and VTK file:
        #---------------------------------------------------------------------
        # Construct a distance matrix:
        print('Construct a border-to-feature distance matrix...')
        sourceIDs = label_borders
        targetIDs = features
        distances, distance_matrix = source_to_target_distances(
            sourceIDs, targetIDs, points, segmentIDs, excludeIDs)

        # Compute mean distances for each feature:
        nfeatures = min(np.shape(distance_matrix)[1], nsulcus_lists)
        for ifeature in range(nfeatures):
            border_distances = [x for x in distance_matrix[:, ifeature]
                                if x != -1]
            border_to_feature_mean_distances[ifeature] = \
                np.mean(border_distances)
            border_to_feature_sd_distances[ifeature] = \
                np.std(border_distances)

        if verbose:
            print('Border-to-feature mean distances:')
            print(border_to_feature_mean_distances)
            print('Border-to-feature standard deviations of distances:')
            print(border_to_feature_sd_distances)

        # Write resulting feature-label border distances to VTK file:
        if output_vtk_name:
            border_to_feature_distances_vtk = os.path.join(
                os.getcwd(),
                output_vtk_name + '_border_to_feature_mean_distances.vtk')
            print('Write border-to-feature distances to {0}...'.format(
                border_to_feature_distances_vtk))
            write_vtk(border_to_feature_distances_vtk, points, [], [],
                      sulcus_faces, [distances],
                      ['border-to-feature_distances'], 'float')

    #-------------------------------------------------------------------------
    # Return outputs:
    #-------------------------------------------------------------------------
    return feature_to_border_mean_distances, feature_to_border_sd_distances, \
        feature_to_border_distances_vtk, \
        border_to_feature_mean_distances, border_to_feature_sd_distances, \
        border_to_feature_distances_vtk
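# Illustrative usage sketch (added for this edit, not part of the original
# module). It assumes the prep_tests/fetch_data test fixtures used in the
# doctests elsewhere in this file, and the 'left_fundus_per_sulcus',
# 'left_freesurfer_labels', and 'left_sulci' URL keys they provide.
def _demo_evaluate_deep_features():
    from mindboggle.mio.fetch_data import prep_tests
    urls, fetch_data = prep_tests()
    features_file = fetch_data(urls['left_fundus_per_sulcus'], '', '.vtk')
    labels_file = fetch_data(urls['left_freesurfer_labels'], '', '.vtk')
    sulci_file = fetch_data(urls['left_sulci'], '', '.vtk')
    return evaluate_deep_features(features_file, labels_file, sulci_file,
                                  hemi='lh', excludeIDs=[-1],
                                  output_vtk_name='', verbose=False)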
def write_shape_stats(labels_or_file=[], sulci=[], fundi=[], affine_transform_files=[], inverse_booleans=[], transform_format='itk', area_file='', normalize_by_area=False, mean_curvature_file='', travel_depth_file='', geodesic_depth_file='', freesurfer_thickness_file='', freesurfer_curvature_file='', freesurfer_sulc_file='', labels_spectra=[], labels_spectra_IDs=[], sulci_spectra=[], sulci_spectra_IDs=[], labels_zernike=[], labels_zernike_IDs=[], sulci_zernike=[], sulci_zernike_IDs=[], exclude_labels=[-1]): """ Make tables of shape statistics per label, sulcus, and/or fundus. Note :: This function is tailored for Mindboggle outputs. Parameters ---------- labels_or_file : list or string label number for each vertex or name of VTK file with index scalars sulci : list of integers indices to sulci, one per vertex, with -1 indicating no sulcus fundi : list of integers indices to fundi, one per vertex, with -1 indicating no fundus affine_transform_files : list of strings affine transform files to standard space inverse_booleans : list of of zeros and ones for each transform, 1 to take the inverse, else 0 transform_format : string format for transform file Ex: 'txt' for text, 'itk' for ITK, and 'mat' for Matlab format area_file : string name of VTK file with surface area scalar values normalize_by_area : Boolean normalize all shape measures by area of label/feature? (UNTESTED) mean_curvature_file : string name of VTK file with mean curvature scalar values travel_depth_file : string name of VTK file with travel depth scalar values geodesic_depth_file : string name of VTK file with geodesic depth scalar values freesurfer_thickness_file : string name of VTK file with FreeSurfer thickness scalar values freesurfer_curvature_file : string name of VTK file with FreeSurfer curvature (curv) scalar values freesurfer_sulc_file : string name of VTK file with FreeSurfer convexity (sulc) scalar values labels_spectra : list of lists of floats Laplace-Beltrami spectra for each labeled region labels_spectra_IDs : list of integers unique labels for labels_spectra sulci_spectra : list of lists of floats Laplace-Beltrami spectra for each sulcus sulci_spectra_IDs : list of integers unique sulcus IDs for sulci_spectra labels_zernike : list of lists of floats Zernike moments for each labeled region labels_zernike_IDs : list of integers unique labels for labels_zernike sulci_zernike : list of lists of floats Zernike moments for each sulcus sulci_zernike_IDs : list of integers unique sulcus IDs for sulci_zernike exclude_labels : list of lists of integers indices to be excluded (in addition to -1) Returns ------- label_table : string output table filename for label shapes sulcus_table : string output table filename for sulcus shapes fundus_table : string output table filename for fundus shapes Examples -------- >>> import os >>> from mindboggle.mio.vtks import read_scalars >>> from mindboggle.mio.tables import write_shape_stats >>> path = '/homedir/mindboggled/Twins-2-1' >>> labels_or_file = os.path.join(path, 'labels', 'left_cortical_surface', 'freesurfer_cortex_labels.vtk') >>> sulci_file = os.path.join(path, 'features', 'left_cortical_surface', 'sulci.vtk') >>> fundi_file = os.path.join(path, 'features', 'left_cortical_surface', 'fundus_per_sulcus.vtk') >>> sulci, name = read_scalars(sulci_file) >>> fundi, name = read_scalars(fundi_file) >>> affine_transform_files = [] #os.path.join(path, 'arno', 'mri', 't1weighted_brain.MNI152Affine.txt') >>> inverse_booleans = [] >>> #transform_format = 'mat' >>> transform_format = 
'itk' >>> area_file = os.path.join(path, 'shapes', 'left_cortical_surface', 'area.vtk') >>> normalize_by_area = False >>> mean_curvature_file = os.path.join(path, 'shapes', 'left_cortical_surface', 'mean_curvature.vtk') >>> travel_depth_file = os.path.join(path, 'shapes', 'left_cortical_surface', 'travel_depth.vtk') >>> geodesic_depth_file = os.path.join(path, 'shapes', 'left_cortical_surface', 'geodesic_depth.vtk') >>> freesurfer_thickness_file = '' >>> freesurfer_curvature_file = '' >>> freesurfer_sulc_file = '' >>> # >>> labels, name = read_scalars(labels_or_file) >>> labels_spectra = [] >>> labels_spectra_IDs = [] >>> sulci_spectra = [] >>> sulci_spectra_IDs = [] >>> labels_zernike = [] >>> labels_zernike_IDs = [] >>> sulci_zernike = [] >>> sulci_zernike_IDs = [] >>> exclude_labels = [-1] >>> # >>> write_shape_stats(labels_or_file, sulci, fundi, >>> affine_transform_files, inverse_booleans, transform_format, >>> area_file, normalize_by_area, >>> mean_curvature_file, travel_depth_file, geodesic_depth_file, >>> freesurfer_thickness_file, freesurfer_curvature_file, >>> freesurfer_sulc_file, >>> labels_spectra, labels_spectra_IDs, >>> sulci_spectra, sulci_spectra_IDs, >>> labels_zernike, labels_zernike_IDs, >>> sulci_zernike, sulci_zernike_IDs, >>> exclude_labels) """ import os import numpy as np import pandas as pd from mindboggle.guts.compute import means_per_label, stats_per_label, \ sum_per_label from mindboggle.mio.vtks import read_scalars, read_vtk, \ apply_affine_transforms from mindboggle.mio.labels import DKTprotocol dkt = DKTprotocol() # Make sure inputs are lists: if isinstance(labels_or_file, np.ndarray): labels = [int(x) for x in labels_or_file] elif isinstance(labels_or_file, list): labels = labels_or_file elif isinstance(labels_or_file, str): labels, name = read_scalars(labels_or_file) if isinstance(sulci, np.ndarray): sulci = [int(x) for x in sulci] if isinstance(fundi, np.ndarray): fundi = [int(x) for x in fundi] if not labels and not sulci and not fundi: import sys sys.exit('No feature data to tabulate in write_shape_stats().') spectrum_start = 1 # Store all columns of spectral components (0), # or start from higher frequency components (>=1) #------------------------------------------------------------------------- # Feature lists, shape names, and shape files: #------------------------------------------------------------------------- # Feature lists: feature_lists = [labels, sulci, fundi] feature_names = ['label', 'sulcus', 'fundus'] spectra_lists = [labels_spectra, sulci_spectra] spectra_ID_lists = [labels_spectra_IDs, sulci_spectra_IDs] zernike_lists = [labels_zernike, sulci_zernike] zernike_ID_lists = [labels_zernike_IDs, sulci_zernike_IDs] table_names = ['label_shapes.csv', 'sulcus_shapes.csv', 'fundus_shapes.csv'] # Shape names corresponding to shape files below: shape_names = ['area', 'travel depth', 'geodesic depth', 'mean curvature', 'freesurfer curvature', 'freesurfer thickness', 'freesurfer convexity (sulc)'] # Load shape files as a list of numpy arrays of per-vertex shape values: shape_files = [area_file, travel_depth_file, geodesic_depth_file, mean_curvature_file, freesurfer_curvature_file, freesurfer_thickness_file, freesurfer_sulc_file] shape_arrays = [] first_pass = True area_array = [] for ishape, shape_file in enumerate(shape_files): if os.path.exists(shape_file): if first_pass: points, indices, lines, faces, scalars_array, scalar_names, \ npoints, input_vtk = read_vtk(shape_file, True, True) points = np.array(points) first_pass = False if 
affine_transform_files and transform_format: affine_points, \ foo1 = apply_affine_transforms(affine_transform_files, inverse_booleans, transform_format, points, vtk_file_stem='') else: scalars_array, name = read_scalars(shape_file, True, True) if scalars_array.size: shape_arrays.append(scalars_array) # Store area array: if ishape == 0: area_array = scalars_array.copy() if normalize_by_area: use_area = area_array else: use_area = [] # Initialize table file names: label_table = '' sulcus_table = '' fundus_table = '' # Loop through features / tables: for itable, feature_list in enumerate(feature_lists): column_names = [] #----------------------------------------------------------------- # Label names: #----------------------------------------------------------------- label_title = 'name' if itable == 0: label_numbers = dkt.cerebrum_cortex_DKT31_numbers label_names = dkt.cerebrum_cortex_DKT31_names elif itable in [1, 2]: label_numbers = dkt.sulcus_numbers label_names = dkt.sulcus_names else: label_numbers = [] label_names = [] include_labels = label_numbers nlabels = len(label_numbers) #--------------------------------------------------------------------- # For each feature, construct a table of average shape values: #--------------------------------------------------------------------- if feature_list: feature_name = feature_names[itable] columns = [] #----------------------------------------------------------------- # Loop through shape measures: #----------------------------------------------------------------- column_names.extend(column_names[:]) for ishape, shape_array in enumerate(shape_arrays): shape = shape_names[ishape] print(' Compute statistics on {0} {1}...'. format(feature_name, shape)) #------------------------------------------------------------- # Append feature areas to columns: #------------------------------------------------------------- if ishape == 0 and np.size(area_array): sums, label_list = sum_per_label(shape_array, feature_list, include_labels, exclude_labels) column_names.append(shape) columns.append(sums) #------------------------------------------------------------- # Append feature shape statistics to columns: #------------------------------------------------------------- else: medians, mads, means, sdevs, skews, kurts, \ lower_quarts, upper_quarts, \ label_list = stats_per_label(shape_array, feature_list, include_labels, exclude_labels, area_array, precision=1) column_names.append(shape + ': median') column_names.append(shape + ': MAD') column_names.append(shape + ': mean') column_names.append(shape + ': SD') column_names.append(shape + ': skew') column_names.append(shape + ': kurtosis') column_names.append(shape + ': 25%') column_names.append(shape + ': 75%') columns.append(medians) columns.append(mads) columns.append(means) columns.append(sdevs) columns.append(skews) columns.append(kurts) columns.append(lower_quarts) columns.append(upper_quarts) #----------------------------------------------------------------- # Mean positions in the original space: #----------------------------------------------------------------- # Compute mean position per feature: positions, sdevs, label_list, foo = means_per_label(points, feature_list, include_labels, exclude_labels, use_area) # Append mean x,y,z position per feature to columns: xyz_positions = np.asarray(positions) for ixyz, xyz in enumerate(['x','y','z']): column_names.append('mean position: {0}'.format(xyz)) columns.append(xyz_positions[:, ixyz].tolist()) 
#----------------------------------------------------------------- # Mean positions in standard space: #----------------------------------------------------------------- if affine_transform_files and transform_format: # Compute standard space mean position per feature: standard_positions, sdevs, label_list, \ foo = means_per_label(affine_points, feature_list, include_labels, exclude_labels, use_area) # Append standard space x,y,z position per feature to columns: xyz_std_positions = np.asarray(standard_positions) for ixyz, xyz in enumerate(['x','y','z']): column_names.append('mean position in standard space:' ' {0}'.format(xyz)) columns.append(xyz_std_positions[:, ixyz].tolist()) #----------------------------------------------------------------- # Laplace-Beltrami spectra: #----------------------------------------------------------------- if itable in [0, 1]: spectra = spectra_lists[itable] if spectra: spectra_IDs = spectra_ID_lists[itable] # Construct a matrix of spectra: len_spectrum = len(spectra[0]) spectrum_matrix = np.zeros((nlabels, len_spectrum)) for ilabel, label in enumerate(include_labels): if label in spectra_IDs: spectrum = spectra[spectra_IDs.index(label)] spectrum_matrix[ilabel, 0:len_spectrum] = spectrum # Append spectral shape name and values to columns: for ispec in range(spectrum_start, len_spectrum): columns.append(spectrum_matrix[:, ispec].tolist()) column_names.append('Laplace-Beltrami spectrum:' ' component {0}'.format(ispec+1)) #----------------------------------------------------------------- # Zernike moments: #----------------------------------------------------------------- if itable in [0, 1]: zernike = zernike_lists[itable] if zernike: zernike_IDs = zernike_ID_lists[itable] # Construct a matrix of Zernike moments: len_moments = len(zernike[0]) moments_matrix = np.zeros((nlabels, len_moments)) for ilabel, label in enumerate(include_labels): if label in zernike_IDs: moments = zernike[zernike_IDs.index(label)] moments_matrix[ilabel, 0:len_moments] = moments # Append Zernike shape name and values to columns: for imoment in range(0, len_moments): columns.append(moments_matrix[:, imoment].tolist()) column_names.append('Zernike moments: component {0}'. format(imoment+1)) #----------------------------------------------------------------- # Write labels/IDs and values to table: #----------------------------------------------------------------- # Write labels/IDs to table: output_table = os.path.join(os.getcwd(), table_names[itable]) if columns: df1 = pd.DataFrame({'ID': label_numbers}) df2 = pd.DataFrame(np.transpose(columns), columns = column_names) df = pd.concat([df1, df2], axis=1) if label_names: df0 = pd.DataFrame({'name': label_names}) df = pd.concat([df0, df], axis=1) df.to_csv(output_table, index=False) if not os.path.exists(output_table): raise(IOError(output_table + " not found")) #----------------------------------------------------------------- # Return correct table file name: #----------------------------------------------------------------- if itable == 0: label_table = output_table elif itable == 1: sulcus_table = output_table elif itable == 2: fundus_table = output_table return label_table, sulcus_table, fundus_table
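# Minimal usage sketch (illustrative, not part of the original module): it
# mirrors the docstring example above but uses the fetch_data fixtures from
# elsewhere in this file instead of hard-coded paths. Only labels, sulci,
# and two shape files are supplied; all other arguments keep their defaults.
def _demo_write_shape_stats():
    from mindboggle.mio.vtks import read_scalars
    from mindboggle.mio.fetch_data import prep_tests
    urls, fetch_data = prep_tests()
    labels_file = fetch_data(urls['left_freesurfer_labels'], '', '.vtk')
    sulci_file = fetch_data(urls['left_sulci'], '', '.vtk')
    area_file = fetch_data(urls['left_area'], '', '.vtk')
    depth_file = fetch_data(urls['left_travel_depth'], '', '.vtk')
    sulci, _ = read_scalars(sulci_file)
    return write_shape_stats(labels_or_file=labels_file, sulci=sulci,
                             area_file=area_file,
                             travel_depth_file=depth_file)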
def rescale_by_label(input_vtk, labels_or_file, save_file=False, output_filestring='rescaled_scalars'): """ Rescale scalars for each label (such as depth values within each fold). Default is to normalize the scalar values of a VTK file by a percentile value in each vertex's surface mesh for each label. Parameters ---------- input_vtk : string name of VTK file with a scalar value for each vertex labels_or_file : list or string label number for each vertex or name of VTK file with index scalars save_file : Boolean save output VTK file? output_filestring : string (if save_file) name of output file Returns ------- rescaled_scalars : list of floats scalar values rescaled for each label, for label numbers not equal to -1 rescaled_scalars_file : string (if save_file) name of output VTK file with rescaled scalar values for each label Examples -------- >>> # Rescale depths by neighborhood within each label: >>> import os >>> from mindboggle.guts.mesh import rescale_by_label >>> from mindboggle.mio.vtks import read_scalars, rewrite_scalars >>> from mindboggle.mio.plots import plot_surfaces >>> path = os.environ['MINDBOGGLE_DATA'] >>> input_vtk = os.path.join(path, 'arno', 'shapes', 'lh.pial.travel_depth.vtk') >>> labels_or_file = os.path.join(path, 'arno', 'features', 'subfolds.vtk') >>> save_file = True >>> output_filestring = 'rescaled_scalars' >>> # >>> rescaled_scalars, rescaled_scalars_file = rescale_by_label(input_vtk, >>> labels_or_file, save_file, output_filestring) >>> # >>> # View rescaled scalar values per fold: >>> folds_file = os.path.join(path, 'arno', 'features', 'folds.vtk') >>> folds, name = read_scalars(folds_file) >>> # >>> rewrite_scalars(rescaled_scalars_file, rescaled_scalars_file, >>> rescaled_scalars, 'rescaled_depths', folds) >>> plot_surfaces(rescaled_scalars_file) """ import os import numpy as np from mindboggle.mio.vtks import read_scalars, rewrite_scalars # Load scalars and vertex neighbor lists: scalars, name = read_scalars(input_vtk, True, True) print(" Rescaling scalar values within each label...") # Load label numbers: if isinstance(labels_or_file, str): labels, name = read_scalars(labels_or_file, True, True) elif isinstance(labels_or_file, list): labels = labels_or_file unique_labels = np.unique(labels) unique_labels = [x for x in unique_labels if x >= 0] # Loop through labels: for label in unique_labels: #print(" Rescaling scalar values within label {0} of {1} labels...".format( # int(label), len(unique_labels))) indices = [i for i,x in enumerate(labels) if x == label] if indices: # Rescale by the maximum label scalar value: scalars[indices] = scalars[indices] / np.max(scalars[indices]) rescaled_scalars = scalars.tolist() #------------------------------------------------------------------------- # Return rescaled scalars and file name #------------------------------------------------------------------------- if save_file: rescaled_scalars_file = os.path.join(os.getcwd(), output_filestring + '.vtk') rewrite_scalars(input_vtk, rescaled_scalars_file, rescaled_scalars, 'rescaled_scalars', labels) if not os.path.exists(rescaled_scalars_file): raise(IOError(rescaled_scalars_file + " not found")) else: rescaled_scalars_file = None return rescaled_scalars, rescaled_scalars_file
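# Illustrative sketch (not part of the original module): rescale travel
# depth within each fold, treating fold numbers as the per-vertex "labels".
# Assumes the fetch_data fixtures and the 'left_travel_depth' and
# 'left_folds' URL keys used in doctests elsewhere in this file.
def _demo_rescale_by_label():
    from mindboggle.mio.fetch_data import prep_tests
    urls, fetch_data = prep_tests()
    depth_file = fetch_data(urls['left_travel_depth'], '', '.vtk')
    folds_file = fetch_data(urls['left_folds'], '', '.vtk')
    rescaled, rescaled_file = rescale_by_label(depth_file, folds_file,
                                               save_file=True)
    return rescaled, rescaled_file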
def write_face_vertex_averages(input_file, output_table='', area_file=''): """ Make table of average vertex values per face (divided by face area if area_file provided). Parameters ---------- input_file : string name of VTK file with scalars to average area_file : string name of VTK file with surface area scalar values output_table : string output table filename Returns ------- output_table : string output table filename Examples -------- >>> import os >>> from mindboggle.mio.tables import write_face_vertex_averages >>> path = '/homedir/mindboggled' >>> input_file = os.path.join(path, 'Twins-2-1', 'shapes', 'left_cortical_surface', 'freesurfer_thickness.vtk') >>> area_file = os.path.join(path, 'Twins-2-1', 'shapes', 'left_cortical_surface', 'area.vtk') >>> output_table = '' >>> # >>> write_face_vertex_averages(input_file, output_table, area_file) """ import os import numpy as np import pandas as pd from mindboggle.mio.vtks import read_vtk, read_scalars points, indices, lines, faces, scalars, scalar_names, \ npoints, input_vtk = read_vtk(input_file, True, True) if area_file: area_scalars, name = read_scalars(area_file, True, True) #--------------------------------------------------------------------- # For each face, average vertex values: #--------------------------------------------------------------------- columns = [] for face in faces: values = [] for index in face: if area_file: values.append(scalars[index] / area_scalars[index]) else: values.append(scalars[index]) columns.append(np.mean(values)) #----------------------------------------------------------------- # Write to table: #----------------------------------------------------------------- if not output_table: output_table = os.path.join(os.getcwd(), 'average_face_values.csv') df = pd.DataFrame({'': columns}) df.to_csv(output_table, index=False) if not os.path.exists(output_table): raise(IOError(output_table + " not found")) return output_table
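# Illustrative sketch (not part of the original module): average per-vertex
# thickness over each face and read the resulting one-column table back with
# pandas. Assumes the fetch_data fixtures and the 'left_freesurfer_thickness'
# and 'left_area' URL keys used in doctests elsewhere in this file.
def _demo_write_face_vertex_averages():
    import pandas as pd
    from mindboggle.mio.fetch_data import prep_tests
    urls, fetch_data = prep_tests()
    input_file = fetch_data(urls['left_freesurfer_thickness'], '', '.vtk')
    area_file = fetch_data(urls['left_area'], '', '.vtk')
    table = write_face_vertex_averages(input_file, '', area_file)
    return pd.read_csv(table)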
def write_vertex_measures(output_table, labels_or_file, sulci=[], fundi=[], affine_transform_files=[], inverse_booleans=[], transform_format='itk', area_file='', mean_curvature_file='', travel_depth_file='', geodesic_depth_file='', freesurfer_thickness_file='', freesurfer_curvature_file='', freesurfer_sulc_file=''): """ Make a table of shape values per vertex. Note :: This function is tailored for Mindboggle outputs. Parameters ---------- output_table : string output file (full path) labels_or_file : list or string label number for each vertex or name of VTK file with index scalars sulci : list of integers indices to sulci, one per vertex, with -1 indicating no sulcus fundi : list of integers indices to fundi, one per vertex, with -1 indicating no fundus affine_transform_files : list of strings affine transform files to standard space inverse_booleans : list of of zeros and ones for each transform, 1 to take the inverse, else 0 transform_format : string format for transform file Ex: 'txt' for text, 'itk' for ITK, and 'mat' for Matlab format area_file : string name of VTK file with surface area scalar values mean_curvature_file : string name of VTK file with mean curvature scalar values travel_depth_file : string name of VTK file with travel depth scalar values geodesic_depth_file : string name of VTK file with geodesic depth scalar values freesurfer_thickness_file : string name of VTK file with FreeSurfer thickness scalar values freesurfer_curvature_file : string name of VTK file with FreeSurfer curvature (curv) scalar values freesurfer_sulc_file : string name of VTK file with FreeSurfer convexity (sulc) scalar values Returns ------- output_table : table file name for vertex shape values Examples -------- >>> import os >>> from mindboggle.mio.vtks import read_scalars >>> from mindboggle.mio.tables import write_vertex_measures >>> output_table = '' #vertex_shapes.csv' >>> from mindboggle.mio.fetch_data import prep_tests >>> urls, fetch_data = prep_tests() >>> labels_or_file = fetch_data(urls['left_freesurfer_labels'], '', '.vtk') >>> sulci_file = fetch_data(urls['left_sulci'], '', '.vtk') >>> fundi_file = fetch_data(urls['left_fundus_per_sulcus'], '', '.vtk') >>> mean_curvature_file = fetch_data(urls['left_mean_curvature'], '', '.vtk') >>> travel_depth_file = fetch_data(urls['left_travel_depth'], '', '.vtk') >>> geodesic_depth_file = fetch_data(urls['left_geodesic_depth'], '', '.vtk') >>> area_file = fetch_data(urls['left_area'], '', '.vtk') >>> freesurfer_thickness_file = fetch_data(urls['left_freesurfer_thickness'], '', '.vtk') >>> freesurfer_curvature_file = fetch_data(urls['left_freesurfer_curvature'], '', '.vtk') >>> freesurfer_sulc_file = fetch_data(urls['left_freesurfer_sulc'], '', '.vtk') >>> sulci, name = read_scalars(sulci_file) >>> if fundi_file: ... fundi, name = read_scalars(fundi_file) ... else: ... 
fundi = [] >>> affine_transform_file = fetch_data(urls['affine_mni_transform'], '', '.txt') >>> inverse_booleans = [1] >>> transform_format = 'itk' >>> swap_xy = True >>> affine_rename = affine_transform_file + '.txt' >>> os.rename(affine_transform_file, affine_rename) >>> os.rename(labels_or_file, labels_or_file + '.vtk') >>> os.rename(area_file, area_file + '.vtk') >>> os.rename(mean_curvature_file, mean_curvature_file + '.vtk') >>> os.rename(travel_depth_file, travel_depth_file + '.vtk') >>> os.rename(geodesic_depth_file, geodesic_depth_file + '.vtk') >>> os.rename(freesurfer_thickness_file, freesurfer_thickness_file + '.vtk') >>> os.rename(freesurfer_curvature_file, freesurfer_curvature_file + '.vtk') >>> os.rename(freesurfer_sulc_file, freesurfer_sulc_file + '.vtk') >>> labels_or_file = labels_or_file + '.vtk' >>> area_file = area_file + '.vtk' >>> mean_curvature_file = mean_curvature_file + '.vtk' >>> travel_depth_file = travel_depth_file + '.vtk' >>> geodesic_depth_file = geodesic_depth_file + '.vtk' >>> freesurfer_thickness_file = freesurfer_thickness_file + '.vtk' >>> freesurfer_curvature_file = freesurfer_curvature_file + '.vtk' >>> freesurfer_sulc_file = freesurfer_sulc_file + '.vtk' >>> affine_transform_files = [] # [affine_rename] # requires ANTs to test >>> output_table = write_vertex_measures(output_table, labels_or_file, ... sulci, fundi, affine_transform_files, inverse_booleans, ... transform_format, area_file, mean_curvature_file, ... travel_depth_file, geodesic_depth_file, freesurfer_thickness_file, ... freesurfer_curvature_file, freesurfer_sulc_file) """ import os import numpy as np import pandas as pd from mindboggle.mio.vtks import read_scalars, read_vtk, \ apply_affine_transforms # Make sure inputs are lists: if isinstance(labels_or_file, np.ndarray): labels = [int(x) for x in labels_or_file] elif isinstance(labels_or_file, list): labels = labels_or_file elif isinstance(labels_or_file, str): labels, name = read_scalars(labels_or_file) if isinstance(sulci, np.ndarray): sulci = [int(x) for x in sulci] if isinstance(fundi, np.ndarray): fundi = [int(x) for x in fundi] if not labels and not sulci and not fundi: raise IOError( 'No feature data to tabulate in write_vertex_measures().') # Feature names and corresponding feature lists: feature_names = ['label ID', 'sulcus ID', 'fundus ID'] feature_lists = [labels, sulci, fundi] # Shape names corresponding to shape files below: shape_names = [ 'area', 'travel depth', 'geodesic depth', 'mean curvature', 'freesurfer curvature', 'freesurfer thickness', 'freesurfer convexity (sulc)' ] # Load shape files as a list of numpy arrays of per-vertex shape values: shape_files = [ area_file, travel_depth_file, geodesic_depth_file, mean_curvature_file, freesurfer_curvature_file, freesurfer_thickness_file, freesurfer_sulc_file ] # Append columns of per-vertex scalar values: columns = [] column_names = [] for ifeature, values in enumerate(feature_lists): if values: columns.append(values) column_names.append(feature_names[ifeature]) first_pass = True for ishape, shape_file in enumerate(shape_files): if os.path.exists(shape_file): if first_pass: # Append x,y,z position per vertex to columns: points, indices, lines, faces, scalars, scalar_names, \ npoints, input_vtk = read_vtk(shape_file) xyz_positions = np.asarray(points) for ixyz, xyz in enumerate(['x', 'y', 'z']): column_names.append('position: {0}'.format(xyz)) columns.append(xyz_positions[:, ixyz].tolist()) first_pass = False # Append standard space x,y,z position to columns: if 
affine_transform_files and transform_format: affine_points, \ foo1 = apply_affine_transforms(affine_transform_files, inverse_booleans, transform_format, points, vtk_file_stem='') xyz_std_positions = affine_points for ixyz, xyz in enumerate(['x', 'y', 'z']): column_names.append('position in standard space:' ' {0}'.format(xyz)) columns.append(xyz_std_positions[:, ixyz].tolist()) else: scalars, name = read_scalars(shape_file) if len(scalars): columns.append(scalars) column_names.append(shape_names[ishape]) # Prepend with column of indices and write table if not output_table: output_table = os.path.join(os.getcwd(), 'vertices.csv') df = pd.DataFrame(np.transpose(columns), columns=column_names) df.to_csv(output_table, index=False, encoding='utf-8') if not os.path.exists(output_table): raise IOError(output_table + " not found") return output_table
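# Illustrative follow-up (not part of the original module): the docstring
# example above already shows how to call write_vertex_measures(); this
# sketch only shows how the resulting table can be inspected, assuming
# pandas and a table produced by that call.
def _inspect_vertex_table(output_table):
    import pandas as pd
    df = pd.read_csv(output_table)
    print('{0} vertices x {1} columns'.format(df.shape[0], df.shape[1]))
    print(df.columns.tolist())
    return df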
def extract_fundi(folds, curv_file, depth_file, min_separation=10,
                  erode_ratio=0.1, erode_min_size=1, save_file=False,
                  output_file='', background_value=-1, verbose=False):
    """
    Extract fundi from folds.

    A fundus is a branching curve that runs along the deepest and most
    highly curved portions of a fold. This function extracts one fundus
    from each fold by finding the deepest vertices inside the fold,
    finding endpoints along the edge of the fold, and connecting the former
    to the latter with tracks that run along deep and curved paths (through
    vertices with high values of travel depth multiplied by curvature),
    and a final filtration step.

    The deepest vertices are those with values at least two median
    absolute deviations above the median (non-zero) value, with the higher
    value chosen if two of the vertices are within (a default of) 10 edges
    from each other (to reduce the number of possible fundus paths as well
    as computation time).

    To find the endpoints, the find_outer_endpoints function propagates
    multiple tracks from seed vertices at median depth in the fold through
    concentric rings toward the fold's edge, selecting maximal values within
    each ring, and terminating at candidate endpoints. The final endpoints
    are those candidates at the end of tracks that have a high median value,
    with the higher value chosen if two candidate endpoints are within
    (a default of) 10 edges from each other (otherwise, the resulting fundi
    can have spurious branching at the fold's edge).

    The connect_points_erosion function connects the deepest fold vertices
    to the endpoints with a skeleton of 1-vertex-thick curves by erosion.
    It erodes by iteratively removing simple topological points and
    endpoints in order of lowest to highest values, where a simple
    topological point is a vertex that when added to or removed from an
    object on a surface mesh (such as a fundus curve) does not alter the
    object's topology.

    Steps ::
        1. Find fundus endpoints (outer anchors) with find_outer_endpoints().
        2. Include inner anchor points.
        3. Connect anchor points using connect_points_erosion();
           inner anchors are removed if they result in endpoints.

    Note ::
        Follow this with segment_by_region() to segment fundi by sulci.

    Parameters
    ----------
    folds : numpy array or list of integers
        fold number for each vertex
    curv_file : string
        surface mesh file in VTK format with mean curvature values
    depth_file : string
        surface mesh file in VTK format with rescaled depth values
    min_separation : integer
        minimum number of edges between inner/outer anchor points
    erode_ratio : float
        fraction of indices to test for removal at each iteration
        in connect_points_erosion()
    erode_min_size : integer
        minimum number of indices to test for removal at each iteration
        in connect_points_erosion()
    save_file : bool
        save output VTK file?
    output_file : string
        output VTK file
    background_value : integer or float
        background value
    verbose : bool
        print statements?

    Returns
    -------
    fundus_per_fold : list of integers
        fundus numbers for all vertices, labeled by fold
        (-1 for non-fundus vertices)
    n_fundi_in_folds : integer
        number of fundi
    fundus_per_fold_file : string (if save_file)
        output VTK file with fundus numbers (-1 for non-fundus vertices)

    Examples
    --------
    >>> # Extract fundus from one or more folds:
    >>> import numpy as np
    >>> from mindboggle.mio.vtks import read_scalars
    >>> from mindboggle.features.fundi import extract_fundi
    >>> from mindboggle.mio.fetch_data import prep_tests
    >>> urls, fetch_data = prep_tests()
    >>> curv_file = fetch_data(urls['left_mean_curvature'], '', '.vtk')
    >>> depth_file = fetch_data(urls['left_travel_depth'], '', '.vtk')
    >>> folds_file = fetch_data(urls['left_folds'], '', '.vtk')
    >>> folds, name = read_scalars(folds_file, True, True)
    >>> # Limit number of folds to speed up the test:
    >>> limit_folds = True
    >>> if limit_folds:
    ...     fold_numbers = [4] #[4, 6]
    ...     i0 = [i for i,x in enumerate(folds) if x not in fold_numbers]
    ...     folds[i0] = -1
    >>> min_separation = 10
    >>> erode_ratio = 0.10
    >>> erode_min_size = 10
    >>> save_file = True
    >>> output_file = 'extract_fundi_fold4.vtk'
    >>> background_value = -1
    >>> verbose = False
    >>> o1, o2, fundus_per_fold_file = extract_fundi(folds, curv_file,
    ...     depth_file, min_separation, erode_ratio, erode_min_size,
    ...     save_file, output_file, background_value, verbose)
    >>> lens = [len([x for x in o1 if x == y])
    ...     for y in np.unique(o1) if y != background_value]
    >>> lens[0:10] # [66, 2914, 100, 363, 73, 331, 59, 30, 1, 14] # (if not limit_folds)
    [73]

    View result without background (skip test):

    >>> from mindboggle.mio.plots import plot_surfaces # doctest: +SKIP
    >>> from mindboggle.mio.vtks import rewrite_scalars # doctest: +SKIP
    >>> rewrite_scalars(fundus_per_fold_file,
    ...     'extract_fundi_fold4_no_background.vtk', o1,
    ...     'fundus_per_fold', folds) # doctest: +SKIP
    >>> plot_surfaces('extract_fundi_fold4_no_background.vtk') # doctest: +SKIP

    """
    # Extract a skeleton to connect endpoints in a fold:
    import os
    import numpy as np
    from time import time

    from mindboggle.mio.vtks import read_scalars, read_vtk, rewrite_scalars
    from mindboggle.guts.compute import median_abs_dev
    from mindboggle.guts.paths import find_max_values
    from mindboggle.guts.mesh import find_neighbors_from_file
    #from mindboggle.guts.mesh import find_complete_faces
    from mindboggle.guts.paths import find_outer_endpoints
    from mindboggle.guts.paths import connect_points_erosion

    if isinstance(folds, list):
        folds = np.array(folds)

    # Load values, inner anchor threshold, and neighbors
    # (the original listing checked curv_file twice; the second check
    # should test depth_file):
    if os.path.isfile(curv_file):
        points, indices, lines, faces, curvs, scalar_names, npoints, \
            input_vtk = read_vtk(curv_file, True, True)
    else:
        raise IOError("{0} doesn't exist!".format(curv_file))
    if os.path.isfile(depth_file):
        depths, name = read_scalars(depth_file, True, True)
    else:
        raise IOError("{0} doesn't exist!".format(depth_file))
    values = curvs * depths
    values0 = [x for x in values if x > 0]
    thr = np.median(values0) + 2 * median_abs_dev(values0)
    neighbor_lists = find_neighbors_from_file(curv_file)

    # ------------------------------------------------------------------------
    # Loop through folds:
    # ------------------------------------------------------------------------
    t1 = time()
    skeletons = []
    unique_fold_IDs = [x for x in np.unique(folds) if x != background_value]
    if verbose:
        if len(unique_fold_IDs) == 1:
            print("Extract a fundus from 1 fold...")
        else:
            print("Extract a fundus from each of {0} folds...".format(
                len(unique_fold_IDs)))
    for fold_ID in unique_fold_IDs:
        indices_fold = [i for i, x in enumerate(folds) if x == fold_ID]
        if indices_fold:
            if verbose:
                print('  Fold {0}:'.format(int(fold_ID)))

            # ----------------------------------------------------------------
            # Find outer anchor points on the boundary of the surface region,
            # to serve as fundus endpoints:
            # ----------------------------------------------------------------
            outer_anchors, tracks = find_outer_endpoints(
                indices_fold, neighbor_lists, values, depths,
                min_separation, background_value, verbose)

            # ----------------------------------------------------------------
            # Find inner anchor points:
            # ----------------------------------------------------------------
            inner_anchors = find_max_values(points, values, min_separation,
                                            thr)

            # ----------------------------------------------------------------
            # Connect anchor points to create skeleton:
            # ----------------------------------------------------------------
            B = background_value * np.ones(npoints)
            B[indices_fold] = 1
            skeleton = connect_points_erosion(
                B, neighbor_lists, outer_anchors, inner_anchors, values,
                erode_ratio, erode_min_size, [], '', background_value,
                verbose)
            if skeleton:
                skeletons.extend(skeleton)

            ## ---------------------------------------------------------------
            ## Remove fundus vertices if they make complete triangle faces:
            ## ---------------------------------------------------------------
            #Iremove = find_complete_faces(skeletons, faces)
            #if Iremove:
            #    skeletons = list(frozenset(skeletons).difference(Iremove))

    indices_skel = [x for x in skeletons if folds[x] != background_value]
    fundus_per_fold = background_value * np.ones(npoints)
    fundus_per_fold[indices_skel] = folds[indices_skel]
    n_fundi_in_folds = len([x for x in np.unique(fundus_per_fold)
                            if x != background_value])
    if n_fundi_in_folds == 1:
        sdum = 'fold fundus'
    else:
        sdum = 'fold fundi'
    if verbose:
        print('  ...Extracted {0} {1} ({2:.2f} seconds)'.format(
            n_fundi_in_folds, sdum, time() - t1))

    # ------------------------------------------------------------------------
    # Return fundi, number of fundi, and file name:
    # ------------------------------------------------------------------------
    fundus_per_fold_file = None
    if n_fundi_in_folds > 0:
        fundus_per_fold = [int(x) for x in fundus_per_fold]
        if save_file:
            if output_file:
                fundus_per_fold_file = output_file
            else:
                fundus_per_fold_file = os.path.join(os.getcwd(),
                                                    'fundus_per_fold.vtk')
            rewrite_scalars(curv_file, fundus_per_fold_file,
                            fundus_per_fold, 'fundi', [], background_value)
            if not os.path.exists(fundus_per_fold_file):
                raise IOError(fundus_per_fold_file + " not found")

    return fundus_per_fold, n_fundi_in_folds, fundus_per_fold_file
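# Illustrative follow-up (not part of the original module): summarize the
# output of extract_fundi() by counting fundus vertices per fold.
# fundus_per_fold and background_value are assumed to come from a prior
# extract_fundi() call.
def _fundus_sizes(fundus_per_fold, background_value=-1):
    import numpy as np
    fundi = np.asarray(fundus_per_fold)
    return {int(y): int(np.sum(fundi == y))
            for y in np.unique(fundi) if y != background_value}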
def write_average_face_values_per_label(input_indices_vtk,
                                        input_values_vtk='', area_file='',
                                        output_stem='', exclude_values=[-1],
                                        background_value=-1, verbose=False):
    """
    Write out a separate csv table file for each integer
    in (the first) scalar list of an input VTK file.
    Optionally write the values drawn from a second VTK file.

    Parameters
    ----------
    input_indices_vtk : string
        path of the input VTK file that contains indices as scalars
    input_values_vtk : string
        path of the input VTK file that contains values as scalars
    area_file : string
        path of the input VTK file that contains per-vertex surface areas
    output_stem : string
        path and stem of the output VTK file
    exclude_values : list or array
        values to exclude
    background_value : integer or float
        background value in output VTK files
    verbose : bool
        print statements?

    Examples
    --------
    >>> import os
    >>> from mindboggle.mio.tables import write_average_face_values_per_label
    >>> from mindboggle.mio.fetch_data import prep_tests
    >>> urls, fetch_data = prep_tests()
    >>> input_indices_vtk = fetch_data(urls['left_freesurfer_labels'], '', '.vtk')
    >>> input_values_vtk = fetch_data(urls['left_mean_curvature'], '', '.vtk')
    >>> area_file = fetch_data(urls['left_area'], '', '.vtk')
    >>> output_stem = 'labels_thickness'
    >>> exclude_values = [-1]
    >>> background_value = -1
    >>> verbose = False
    >>> write_average_face_values_per_label(input_indices_vtk,
    ...     input_values_vtk, area_file, output_stem, exclude_values,
    ...     background_value, verbose)

    View vtk file (skip test):

    >>> from mindboggle.mio.plots import plot_surfaces
    >>> example_vtk = os.path.join(os.getcwd(), output_stem + '0.vtk')
    >>> plot_surfaces(example_vtk) # doctest: +SKIP

    """
    import os
    import numpy as np
    import pandas as pd

    from mindboggle.mio.vtks import read_scalars, read_vtk, write_vtk
    from mindboggle.guts.mesh import keep_faces

    # Load VTK file:
    points, indices, lines, faces, scalars, scalar_names, npoints, \
        input_vtk = read_vtk(input_indices_vtk, True, True)
    if area_file:
        area_scalars, name = read_scalars(area_file, True, True)
    if verbose:
        print("Explode the scalar list in {0}".format(
            os.path.basename(input_indices_vtk)))
    # Read the values to tabulate; the original listing announced this read
    # but never performed it, so per-face averages were computed from the
    # index scalars themselves:
    if input_values_vtk != input_indices_vtk:
        values, name = read_scalars(input_values_vtk, True, True)
        if verbose:
            print("Explode the scalar list of values in {0} "
                  "with the scalar list of indices in {1}".format(
                      os.path.basename(input_values_vtk),
                      os.path.basename(input_indices_vtk)))
    else:
        values = np.copy(scalars)

    # Loop through unique (non-excluded) scalar values:
    unique_scalars = [int(x) for x in np.unique(scalars)
                      if x not in exclude_values]
    for scalar in unique_scalars:

        # Create array and indices for scalar value:
        select_scalars = np.copy(scalars)
        select_scalars[scalars != scalar] = background_value
        scalar_indices = [i for i, x in enumerate(select_scalars)
                          if x == scalar]
        # Keep only faces whose vertices carry this scalar value
        # (the original listing kept every face for every scalar):
        new_faces = keep_faces(faces, scalar_indices)
        if verbose:
            print("  Scalar {0}: {1} vertices".format(scalar,
                                                      len(scalar_indices)))

        # --------------------------------------------------------------------
        # For each face, average vertex values:
        # --------------------------------------------------------------------
        output_table = os.path.join(os.getcwd(),
                                    output_stem + str(scalar) + '.csv')
        columns = []
        for face in new_faces:
            face_values = []
            for index in face:
                if area_file:
                    face_values.append(values[index] / area_scalars[index])
                else:
                    face_values.append(values[index])
            columns.append(np.mean(face_values))

        # --------------------------------------------------------------------
        # Write to table:
        # --------------------------------------------------------------------
        df = pd.DataFrame({'': columns})
        df.to_csv(output_table, index=False, encoding='utf-8')
        if not os.path.exists(output_table):
            raise IOError(output_table + " not found")
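# Illustrative follow-up (not part of the original module): collect the
# per-label tables written above into a dict of DataFrames. output_stem and
# the label numbers are assumed to match a prior call to
# write_average_face_values_per_label().
def _read_label_tables(output_stem, labels):
    import os
    import pandas as pd
    tables = {}
    for label in labels:
        path = os.path.join(os.getcwd(), output_stem + str(label) + '.csv')
        if os.path.exists(path):
            tables[label] = pd.read_csv(path)
    return tables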
def plot_mask_surface(vtk_file, mask_file='', nonmask_value=-1, masked_output='', remove_nonmask=False, program='vtkviewer', use_colormap=False, colormap_file='', background_value=-1): """ Use vtkviewer or mayavi2 to visualize VTK surface mesh data. If a mask_file is provided, a temporary masked file is saved, and it is this file that is viewed. If using vtkviewer, optionally provide colormap file or set $COLORMAP environment variable. Parameters ---------- vtk_file : string name of VTK surface mesh file mask_file : string name of VTK surface mesh file to mask vtk_file vertices nonmask_value : integer nonmask (usually background) value masked_output : string temporary masked output file name remove_nonmask : bool remove vertices that are not in mask? (otherwise assign nonmask_value) program : string {'vtkviewer', 'mayavi2'} program to visualize VTK file use_colormap : bool use Paraview-style XML colormap file set by $COLORMAP env variable? colormap_file : string use colormap in given file if use_colormap==True? if empty and use_colormap==True, use file set by $COLORMAP environment variable background_value : integer or float background value Examples -------- >>> import os >>> from mindboggle.mio.plots import plot_mask_surface >>> from mindboggle.mio.fetch_data import prep_tests >>> urls, fetch_data = prep_tests() >>> vtk_file = fetch_data(urls['freesurfer_labels'], '', '.vtk') >>> os.rename(vtk_file, vtk_file + '.nii.gz') >>> vtk_file = vtk_file + '.nii.gz' >>> mask_file = '' >>> nonmask_value = 0 #-1 >>> masked_output = '' >>> remove_nonmask = True >>> program = 'vtkviewer' >>> use_colormap = True >>> colormap_file = '' >>> background_value = -1 >>> plot_mask_surface(vtk_file, mask_file, nonmask_value, masked_output, ... remove_nonmask, program, use_colormap, colormap_file, ... 
background_value) # doctest: +SKIP """ import os import numpy as np from mindboggle.guts.mesh import keep_faces, reindex_faces_points from mindboggle.guts.utilities import execute from mindboggle.mio.plots import plot_surfaces from mindboggle.mio.vtks import read_scalars, rewrite_scalars, \ read_vtk, write_vtk # ------------------------------------------------------------------------ # Filter mesh with non-background values from a second (same-size) mesh: # ------------------------------------------------------------------------ if mask_file: mask, name = read_scalars(mask_file, True, True) if not masked_output: masked_output = os.path.join(os.getcwd(), 'temp.vtk') file_to_plot = masked_output # -------------------------------------------------------------------- # Remove nonmask-valued vertices: # -------------------------------------------------------------------- if remove_nonmask: # ---------------------------------------------------------------- # Load VTK files: # ---------------------------------------------------------------- points, indices, lines, faces, scalars, scalar_names, npoints, \ input_vtk = read_vtk(vtk_file, True, True) # ---------------------------------------------------------------- # Find mask indices, remove nonmask faces, and reindex: # ---------------------------------------------------------------- Imask = [i for i, x in enumerate(mask) if x != nonmask_value] mask_faces = keep_faces(faces, Imask) mask_faces, points, \ original_indices = reindex_faces_points(mask_faces, points) # ---------------------------------------------------------------- # Write VTK file with scalar values: # ---------------------------------------------------------------- if np.ndim(scalars) == 1: scalar_type = type(scalars[0]).__name__ elif np.ndim(scalars) == 2: scalar_type = type(scalars[0][0]).__name__ else: print("Undefined scalar type!") write_vtk(file_to_plot, points, [], [], mask_faces, scalars[original_indices].tolist(), scalar_names, scalar_type=scalar_type) else: scalars, name = read_scalars(vtk_file, True, True) scalars[mask == nonmask_value] = nonmask_value rewrite_scalars(vtk_file, file_to_plot, scalars, ['scalars'], [], background_value) else: file_to_plot = vtk_file # ------------------------------------------------------------------------ # Display with vtkviewer.py: # ------------------------------------------------------------------------ if program == 'vtkviewer': plot_surfaces(file_to_plot, use_colormap=use_colormap, colormap_file=colormap_file) # ------------------------------------------------------------------------ # Display with mayavi2: # ------------------------------------------------------------------------ elif program == 'mayavi2': cmd = ["mayavi2", "-d", file_to_plot, "-m", "Surface", "&"] execute(cmd, 'os')
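# Illustrative usage sketch (not part of the original module): view a labeled
# surface masked by its own labels, removing vertices equal to nonmask_value.
# Assumes the fetch_data fixtures and the 'left_freesurfer_labels' URL key
# used elsewhere in this file; plotting requires vtkviewer, so this is only
# a sketch.
def _demo_plot_mask_surface():
    from mindboggle.mio.fetch_data import prep_tests
    urls, fetch_data = prep_tests()
    vtk_file = fetch_data(urls['left_freesurfer_labels'], '', '.vtk')
    plot_mask_surface(vtk_file, mask_file=vtk_file, nonmask_value=-1,
                      remove_nonmask=True, program='vtkviewer')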
def run_mindboggle_curvature(surface_file, neighborhood, method=0):
    """
    Run Mindboggle's C++ curvature code on a surface and write the
    resulting per-vertex curvatures to a semicolon-separated CSV file.
    """
    # Imports needed to make this function self-contained (none were shown
    # in the original listing); the import path for the curvature() wrapper
    # is assumed:
    import os
    import numpy as np
    import pandas as pd
    from mindboggle.mio.vtks import read_points, read_scalars
    from mindboggle.shapes.surface_shapes import curvature  # assumed path

    # Process the arguments and make output names:
    ccode_path = os.environ['vtk_cpp_tools']
    command = os.path.join(ccode_path, 'curvature', 'CurvatureMain')
    basename = os.path.splitext(os.path.basename(surface_file))[0]
    extended_basename = '{}.mindboggle_m{}_n{}'.format(basename, method,
                                                       neighborhood)
    stem = os.path.join(os.getcwd(), extended_basename)
    mean_curv_file = '{}_mean_curv.vtk'.format(stem)
    arguments = '-n {}'.format(neighborhood)
    if method == 0 or method == 1:
        gauss_curv_file = '{}_gauss_curv.vtk'.format(stem)
        arguments = '{} -g {}'.format(arguments, gauss_curv_file)
    if method == 0:
        max_curv_file = '{}_kappa1.vtk'.format(stem)
        min_curv_file = '{}_kappa2.vtk'.format(stem)
        min_dir_file = '{}_T2.txt'.format(stem)
        arguments = '{} -x {} -i {} -d {}'.format(
            arguments, max_curv_file, min_curv_file, min_dir_file)
    verbose = False
    curvatures_file = '{}_curvatures.csv'.format(stem)

    # Run the method:
    default_mean_curv_file, _, _, _, _ = curvature(
        command, method, arguments, surface_file, verbose)

    # Rename mean curvature output file:
    os.rename(default_mean_curv_file, mean_curv_file)

    # Remove unneeded output file:
    os.remove(os.path.join(os.getcwd(), 'output.nipype'))

    # Get the curvatures from VTK files:
    # [[x1, y1, z1], [x2, y2, z2], ...]
    points = np.array(read_points(mean_curv_file))
    xyz = points.T  # transposed: [[x1, x2, ...], [y1, y2, ...], [z1, z2, ...]]
    mean_curv, _ = read_scalars(mean_curv_file, return_first=True,
                                return_array=True)
    assert xyz.shape[1] == mean_curv.size
    print('number of points: {}'.format(mean_curv.size))
    if method == 0 or method == 1:
        gauss_curv, _ = read_scalars(gauss_curv_file, return_first=True,
                                     return_array=True)
    if method == 0:
        max_curv, _ = read_scalars(max_curv_file, return_first=True,
                                   return_array=True)
        min_curv, _ = read_scalars(min_curv_file, return_first=True,
                                   return_array=True)

    # Write the curvatures to a CSV file:
    df = pd.DataFrame()
    df['x'] = xyz[0]
    df['y'] = xyz[1]
    df['z'] = xyz[2]
    df['mean_curvature'] = mean_curv
    if method == 0 or method == 1:
        df['gauss_curvature'] = gauss_curv
    if method == 0:
        df['kappa1'] = max_curv
        df['kappa2'] = min_curv
    df.to_csv(curvatures_file, sep=';')
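# Illustrative usage sketch (not part of the original module):
# run_mindboggle_curvature() requires the $vtk_cpp_tools environment
# variable to point at Mindboggle's compiled C++ tools, so this is only a
# sketch; 'lh.pial.vtk' is a placeholder file name. Note the semicolon
# separator when reading the CSV back.
def _demo_run_mindboggle_curvature():
    import pandas as pd
    run_mindboggle_curvature('lh.pial.vtk', neighborhood=2, method=0)
    # The CSV name is derived from the input stem, method, and neighborhood:
    return pd.read_csv('lh.pial.mindboggle_m0_n2_curvatures.csv', sep=';')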
def extract_sulci(labels_file, folds_or_file, hemi, min_boundary=1, sulcus_names=[], save_file=False, output_file='', background_value=-1, verbose=False): """ Identify sulci from folds in a brain surface according to a labeling protocol that includes a list of label pairs defining each sulcus. Since folds are defined as deep, connected areas of a surface, and since folds may be connected to each other in ways that differ across brains, there usually does not exist a one-to-one mapping between folds of one brain and those of another. To address the correspondence problem then, we need to find just those portions of the folds that correspond across brains. To accomplish this, Mindboggle segments folds into sulci, which do have a one-to-one correspondence across non-pathological brains. Mindboggle defines a sulcus as a folded portion of cortex whose opposing banks are labeled with one or more sulcus label pairs in the DKT labeling protocol, where each label pair is unique to one sulcus and represents a boundary between two adjacent gyri, and each vertex has one gyrus label. This function assigns vertices in a fold to a sulcus in one of two cases. In the first case, vertices whose labels are in only one label pair in the fold are assigned to the label pair’s sulcus if they are connected through similarly labeled vertices to the boundary between the two labels. In the second case, the segment_regions function propagates labels from label borders to vertices whose labels are in multiple label pairs in the fold. Steps for each fold :: 1. Remove fold if it has fewer than two labels. 2. Remove fold if its labels do not contain a sulcus label pair. 3. Find vertices with labels that are in only one of the fold's label boundary pairs. Assign the vertices the sulcus with the label pair if they are connected to the label boundary for that pair. 4. If there are remaining vertices, segment into sets of vertices connected to label boundaries, and assign a unique ID to each set. Parameters ---------- labels_file : string file name for surface mesh VTK containing labels for all vertices folds_or_file : numpy array, list or string fold number for each vertex / name of VTK file containing fold scalars hemi : string hemisphere abbreviation in {'lh', 'rh'} for sulcus labels min_boundary : integer minimum number of vertices for a sulcus label boundary segment sulcus_names : list of strings names of sulci save_file : bool save output VTK file? output_file : string name of output file in VTK format background_value : integer or float background value verbose : bool print statements? 
    Returns
    -------
    sulci : list of integers
        sulcus numbers for all vertices (-1 for non-sulcus vertices)
    n_sulci : integer
        number of sulci
    sulci_file : string
        output VTK file with sulcus numbers (-1 for non-sulcus vertices)

    Examples
    --------
    >>> # Example 1: Extract sulcus from a fold with one sulcus label pair:
    >>> import numpy as np
    >>> from mindboggle.features.sulci import extract_sulci
    >>> from mindboggle.mio.vtks import read_scalars
    >>> from mindboggle.mio.fetch_data import prep_tests
    >>> urls, fetch_data = prep_tests()
    >>> # Load labels, folds, neighbor lists, and sulcus names and label pairs
    >>> labels_file = fetch_data(urls['left_freesurfer_labels'], '', '.vtk')
    >>> folds_file = fetch_data(urls['left_folds'], '', '.vtk')
    >>> folds_or_file, name = read_scalars(folds_file, True, True)
    >>> save_file = True
    >>> output_file = 'extract_sulci_fold4_1sulcus.vtk'
    >>> background_value = -1
    >>> # Limit number of folds to speed up the test:
    >>> limit_folds = True
    >>> if limit_folds:
    ...     fold_numbers = [4] #[4, 6]
    ...     i0 = [i for i,x in enumerate(folds_or_file) if x not in fold_numbers]
    ...     folds_or_file[i0] = background_value
    >>> hemi = 'lh'
    >>> min_boundary = 10
    >>> sulcus_names = []
    >>> verbose = False
    >>> sulci, n_sulci, sulci_file = extract_sulci(labels_file, folds_or_file,
    ...     hemi, min_boundary, sulcus_names, save_file, output_file,
    ...     background_value, verbose)
    >>> n_sulci # 23 # (if not limit_folds)
    1
    >>> lens = [len([x for x in sulci if x==y])
    ...         for y in np.unique(sulci) if y != -1]
    >>> lens[0:10] # [6358, 3288, 7612, 5205, 4414, 6251, 3493, 2566, 4436, 739] # (if not limit_folds)
    [1151]

    View result without background (skip test):

    >>> from mindboggle.mio.plots import plot_surfaces # doctest: +SKIP
    >>> from mindboggle.mio.vtks import rewrite_scalars # doctest: +SKIP
    >>> output = 'extract_sulci_fold4_1sulcus_no_background.vtk'
    >>> rewrite_scalars(sulci_file, output, sulci,
    ...     'sulci', sulci) # doctest: +SKIP
    >>> plot_surfaces(output) # doctest: +SKIP

    Example 2: Extract sulcus from a fold with multiple sulcus label pairs:

    >>> folds_or_file, name = read_scalars(folds_file, True, True)
    >>> output_file = 'extract_sulci_fold7_2sulci.vtk'
    >>> # Limit number of folds to speed up the test:
    >>> limit_folds = True
    >>> if limit_folds:
    ...     fold_numbers = [7] #[4, 6]
    ...     i0 = [i for i,x in enumerate(folds_or_file) if x not in fold_numbers]
    ...     folds_or_file[i0] = background_value
    >>> sulci, n_sulci, sulci_file = extract_sulci(labels_file, folds_or_file,
    ...     hemi, min_boundary, sulcus_names, save_file, output_file,
    ...     background_value, verbose)
    >>> n_sulci # 23 # (if not limit_folds)
    2
    >>> lens = [len([x for x in sulci if x==y])
    ...         for y in np.unique(sulci) if y != -1]
    >>> lens[0:10] # [6358, 3288, 7612, 5205, 4414, 6251, 3493, 2566, 4436, 739] # (if not limit_folds)
    [369, 93]

    View result without background (skip test):

    >>> from mindboggle.mio.plots import plot_surfaces # doctest: +SKIP
    >>> from mindboggle.mio.vtks import rewrite_scalars # doctest: +SKIP
    >>> output = 'extract_sulci_fold7_2sulci_no_background.vtk'
    >>> rewrite_scalars(sulci_file, output, sulci,
    ...     'sulci', sulci) # doctest: +SKIP
    >>> plot_surfaces(output) # doctest: +SKIP

    """
    import os
    from time import time

    import numpy as np

    from mindboggle.mio.vtks import read_scalars, read_vtk, rewrite_scalars
    from mindboggle.guts.mesh import find_neighbors
    from mindboggle.guts.segment import extract_borders, propagate, \
        segment_regions
    from mindboggle.mio.labels import DKTprotocol

    # Load fold numbers if folds_or_file is a string:
    if isinstance(folds_or_file, str):
        folds, name = read_scalars(folds_or_file)
    elif isinstance(folds_or_file, list):
        folds = folds_or_file
    elif isinstance(folds_or_file, np.ndarray):
        folds = folds_or_file.tolist()

    dkt = DKTprotocol()

    if hemi == 'lh':
        pair_lists = dkt.left_sulcus_label_pair_lists
    elif hemi == 'rh':
        pair_lists = dkt.right_sulcus_label_pair_lists
    else:
        raise ValueError("hemisphere not properly specified ('lh' or 'rh').")

    # Load points, faces, and neighbors:
    points, indices, lines, faces, labels, scalar_names, npoints, \
        input_vtk = read_vtk(labels_file)
    neighbor_lists = find_neighbors(faces, npoints)

    # Array of sulcus IDs for fold vertices, initialized with the background
    # value. Since we do not touch gyral vertices, vertices whose labels
    # are not in the label list, or vertices having only one label,
    # their sulcus IDs will remain at the background value:
    sulci = background_value * np.ones(npoints)

    # ------------------------------------------------------------------------
    # Loop through folds
    # ------------------------------------------------------------------------
    fold_numbers = [int(x) for x in np.unique(folds) if x != background_value]
    n_folds = len(fold_numbers)
    if verbose:
        print("Extract sulci from {0} folds...".format(n_folds))
    t0 = time()
    for n_fold in fold_numbers:
        fold_indices = [i for i, x in enumerate(folds) if x == n_fold]
        len_fold = len(fold_indices)

        # List the labels in this fold:
        fold_labels = [labels[x] for x in fold_indices]
        unique_fold_labels = [int(x) for x in np.unique(fold_labels)
                              if x != background_value]

        # --------------------------------------------------------------------
        # NO MATCH -- fold has fewer than two labels
        # --------------------------------------------------------------------
        if len(unique_fold_labels) < 2:
            # Ignore: sulci already initialized with background values:
            if verbose:
                if not unique_fold_labels:
                    print("  Fold {0} ({1} vertices): "
                          "NO MATCH -- fold has no labels".
                          format(n_fold, len_fold))
                else:
                    print("  Fold {0} ({1} vertices): "
                          "NO MATCH -- fold has only one label ({2})".
                          format(n_fold, len_fold, unique_fold_labels[0]))
        else:
            # Find all label boundary pairs within the fold:
            indices_fold_pairs, fold_pairs, unique_fold_pairs = \
                extract_borders(fold_indices, labels, neighbor_lists,
                                ignore_values=[], return_label_pairs=True)

            # Find fold label pairs in the protocol (pairs are already sorted):
            fold_pairs_in_protocol = [x for x in unique_fold_pairs
                                      if x in dkt.unique_sulcus_label_pairs]

            if verbose and unique_fold_labels:
                print("  Fold {0} labels: {1} ({2} vertices)".
                      format(n_fold,
                             ', '.join([str(x) for x in unique_fold_labels]),
                             len_fold))

            # ----------------------------------------------------------------
            # NO MATCH -- fold has no sulcus label pair
            # ----------------------------------------------------------------
            if not fold_pairs_in_protocol:
                if verbose:
                    print("  Fold {0}: NO MATCH -- fold has no sulcus "
                          "label pair".format(n_fold))

            # ----------------------------------------------------------------
            # Possible matches
            # ----------------------------------------------------------------
            else:
                if verbose:
                    print("  Fold {0} label pairs in protocol: {1}".
                          format(n_fold, ', '.join(
                              [str(x) for x in fold_pairs_in_protocol])))

                # Labels in the protocol (includes repeats across label pairs):
                labels_in_pairs = [x for lst in fold_pairs_in_protocol
                                   for x in lst]

                # Separate labels that appear in only one label pair from
                # labels shared by multiple label pairs:
                unique_labels = []
                nonunique_labels = []
                for label in np.unique(labels_in_pairs):
                    if len([x for x in labels_in_pairs if x == label]) == 1:
                        unique_labels.append(label)
                    else:
                        nonunique_labels.append(label)

                # ------------------------------------------------------------
                # Vertices whose labels are in only one sulcus label pair
                # ------------------------------------------------------------
                # Find vertices with a label that is in only one of the fold's
                # label pairs (the other label in the pair can exist in other
                # pairs). Assign the vertices the sulcus with the label pair
                # if they are connected to the label boundary for that pair.
                # ------------------------------------------------------------
                if unique_labels:
                    for pair in fold_pairs_in_protocol:

                        # If one or both labels in label pair is/are unique:
                        unique_labels_in_pair = [x for x in pair
                                                 if x in unique_labels]
                        n_unique = len(unique_labels_in_pair)
                        if n_unique:

                            # Find the sulcus ID for this label pair:
                            ID = None
                            for i, pair_list in enumerate(pair_lists):
                                if not isinstance(pair_list, list):
                                    pair_list = [pair_list]
                                if pair in pair_list:
                                    ID = i
                                    break
                            # (test against None so that sulcus ID 0 counts):
                            if ID is not None:

                                # Seeds from label boundary vertices
                                # (fold_pairs and pair already sorted):
                                indices_pair = [
                                    x for i, x in enumerate(indices_fold_pairs)
                                    if fold_pairs[i] == pair]

                                # Vertices with unique label(s) in pair:
                                indices_unique_labels = [
                                    fold_indices[i]
                                    for i, x in enumerate(fold_labels)
                                    if x in unique_labels_in_pair]

                                # Propagate sulcus ID from seeds to vertices
                                # with "unique" labels (only exist in one
                                # label pair in a fold); propagation ensures
                                # that sulci consist of contiguous vertices
                                # for each label boundary:
                                sulci2 = segment_regions(
                                    indices_unique_labels, neighbor_lists,
                                    min_region_size=1,
                                    seed_lists=[indices_pair],
                                    keep_seeding=False,
                                    spread_within_labels=True, labels=labels,
                                    label_lists=[], values=[], max_steps='',
                                    background_value=background_value,
                                    verbose=False)
                                sulci[sulci2 != background_value] = ID

                                # Print statement:
                                if verbose:
                                    if n_unique == 1:
                                        ps1 = 'One label'
                                    else:
                                        ps1 = 'Both labels'
                                    if len(sulcus_names):
                                        ps2 = sulcus_names[ID]
                                    else:
                                        ps2 = ''
                                    print("    {0} unique to one fold pair: "
                                          "{1} {2}".
                                          format(ps1, ps2,
                                                 unique_labels_in_pair))

                # ------------------------------------------------------------
                # Vertex labels shared by multiple label pairs
                # ------------------------------------------------------------
                # Propagate labels from label borders to vertices with labels
                # that are shared by multiple label pairs in the fold.
                # ------------------------------------------------------------
                if len(nonunique_labels):
                    # For each label shared by different label pairs:
                    for label in nonunique_labels:

                        # Print statement:
                        if verbose:
                            print("    Propagate sulcus borders with "
                                  "label {0}".format(int(label)))

                        # Construct seeds from label boundary vertices:
                        seeds = background_value * np.ones(npoints)
                        for ID, pair_list in enumerate(pair_lists):
                            if not isinstance(pair_list, list):
                                pair_list = [pair_list]
                            label_pairs = [x for x in pair_list if label in x]
                            for label_pair in label_pairs:
                                indices_pair = [
                                    x for i, x in enumerate(indices_fold_pairs)
                                    if np.sort(fold_pairs[i]).tolist()
                                    == label_pair]
                                if indices_pair:

                                    # Do not include short boundary segments:
                                    if min_boundary > 1:
                                        indices_pair2 = []
                                        seeds2 = segment_regions(
                                            indices_pair, neighbor_lists, 1,
                                            [], False, False, [], [], [], '',
                                            background_value, verbose)
                                        useeds2 = [x for x in np.unique(seeds2)
                                                   if x != background_value]
                                        for seed2 in useeds2:
                                            iseed2 = [
                                                i for i, x
                                                in enumerate(seeds2)
                                                if x == seed2]
                                            if len(iseed2) >= min_boundary:
                                                indices_pair2.extend(iseed2)
                                            elif verbose:
                                                if len(iseed2) == 1:
                                                    print("    Remove "
                                                          "assignment of "
                                                          "ID {0} from "
                                                          "1 vertex".
                                                          format(seed2))
                                                else:
                                                    print("    Remove "
                                                          "assignment of "
                                                          "ID {0} from "
                                                          "{1} vertices".
                                                          format(seed2,
                                                                 len(iseed2)))
                                        indices_pair = indices_pair2

                                    # Assign sulcus IDs to seeds:
                                    seeds[indices_pair] = ID

                        # Identify vertices with the label:
                        indices_label = [fold_indices[i]
                                         for i, x in enumerate(fold_labels)
                                         if x == label]
                        if len(indices_label):

                            # Propagate sulcus ID from seeds to vertices with
                            # a given shared label, by either graph-based
                            # segmentation or probabilistic propagation:
                            seg_vs_prop = False
                            if seg_vs_prop:
                                indices_seeds = []
                                for seed in [x for x in np.unique(seeds)
                                             if x != background_value]:
                                    indices_seeds.append(
                                        [i for i, x in enumerate(seeds)
                                         if x == seed])
                                sulci2 = segment_regions(
                                    indices_label, neighbor_lists, 50,
                                    indices_seeds, False, True, labels,
                                    [], [], '', background_value, verbose)
                            else:
                                label_array = background_value * \
                                    np.ones(npoints)
                                label_array[indices_label] = 1
                                sulci2 = propagate(
                                    points, faces, label_array, seeds, sulci,
                                    max_iters=10000, tol=0.001, sigma=5,
                                    background_value=background_value,
                                    verbose=verbose)
                            sulci[sulci2 != background_value] = \
                                sulci2[sulci2 != background_value]

    sulcus_numbers = [int(x) for x in np.unique(sulci)
                      if x != background_value]
    n_sulci = len(sulcus_numbers)

    # ------------------------------------------------------------------------
    # Print statements
    # ------------------------------------------------------------------------
    if verbose:
        if n_sulci == 1:
            sulcus_str = 'sulcus'
        else:
            sulcus_str = 'sulci'
        if n_folds == 1:
            folds_str = 'fold'
        else:
            folds_str = 'folds'
        print("Extracted {0} {1} from {2} {3} ({4:.1f}s):".
              format(n_sulci, sulcus_str, n_folds, folds_str, time() - t0))
        if sulcus_names:
            for sulcus_number in sulcus_numbers:
                print("  {0}: {1}".format(sulcus_number,
                                          sulcus_names[sulcus_number]))
        elif sulcus_numbers:
            print("  " + ", ".join([str(x) for x in sulcus_numbers]))

        unresolved = [i for i in range(len(pair_lists))
                      if i not in sulcus_numbers]
        if unresolved:
            if len(unresolved) == 1:
                print("The following sulcus is unaccounted for:")
            else:
                print("The following {0} sulci are unaccounted for:".
                      format(len(unresolved)))
            if sulcus_names:
                for sulcus_number in unresolved:
                    print("  {0}: {1}".format(sulcus_number,
                                              sulcus_names[sulcus_number]))
            else:
                print("  " + ", ".join([str(x) for x in unresolved]))

    # ------------------------------------------------------------------------
    # Return sulci, number of sulci, and file name
    # ------------------------------------------------------------------------
    sulci = [int(x) for x in sulci]
    sulci_file = os.path.join(os.getcwd(), 'sulci.vtk')
    rewrite_scalars(labels_file, sulci_file, sulci, 'sulci', [],
                    background_value)
    if not os.path.exists(sulci_file):
        raise IOError(sulci_file + " not found")

    return sulci, n_sulci, sulci_file
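
# A minimal sketch (not part of Mindboggle) of the label-pair lookup inside
# extract_sulci(): a sulcus ID is the index of the protocol pair list that
# contains a fold's sorted label boundary pair. The pair lists below are toy
# values, not the DKT protocol; a match at index 0 is why the code above
# tests `ID is not None` rather than the truthiness of `ID`.
def find_sulcus_id(pair, pair_lists):
    """Return the index of the pair list containing a sorted label pair.

    Examples
    --------
    >>> toy_pair_lists = [[[1, 2], [1, 3]], [[4, 5]]]
    >>> find_sulcus_id([1, 2], toy_pair_lists)
    0
    >>> find_sulcus_id([4, 5], toy_pair_lists)
    1
    >>> find_sulcus_id([2, 3], toy_pair_lists) is None
    True
    """
    for i, pair_list in enumerate(pair_lists):
        if not isinstance(pair_list, list):
            pair_list = [pair_list]
        if pair in pair_list:
            return i
    return None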
def write_vertex_measures(output_table, labels_or_file, sulci=[], fundi=[],
                          affine_transform_files=[], inverse_booleans=[],
                          transform_format='itk', area_file='',
                          mean_curvature_file='', travel_depth_file='',
                          geodesic_depth_file='',
                          freesurfer_thickness_file='',
                          freesurfer_curvature_file='',
                          freesurfer_sulc_file=''):
    """
    Make a table of shape values per vertex.

    Note :: This function is tailored for Mindboggle outputs.

    Parameters
    ----------
    output_table : string
        output file (full path)
    labels_or_file : list or string
        label number for each vertex or name of VTK file with index scalars
    sulci : list of integers
        indices to sulci, one per vertex, with -1 indicating no sulcus
    fundi : list of integers
        indices to fundi, one per vertex, with -1 indicating no fundus
    affine_transform_files : list of strings
        affine transform files to standard space
    inverse_booleans : list of zeros and ones
        for each transform, 1 to take the inverse, else 0
    transform_format : string
        format for transform file
        Ex: 'txt' for text, 'itk' for ITK, and 'mat' for Matlab format
    area_file : string
        name of VTK file with surface area scalar values
    mean_curvature_file : string
        name of VTK file with mean curvature scalar values
    travel_depth_file : string
        name of VTK file with travel depth scalar values
    geodesic_depth_file : string
        name of VTK file with geodesic depth scalar values
    freesurfer_thickness_file : string
        name of VTK file with FreeSurfer thickness scalar values
    freesurfer_curvature_file : string
        name of VTK file with FreeSurfer curvature (curv) scalar values
    freesurfer_sulc_file : string
        name of VTK file with FreeSurfer convexity (sulc) scalar values

    Returns
    -------
    output_table : table file name for vertex shape values

    Examples
    --------
    >>> import os
    >>> from mindboggle.mio.vtks import read_scalars
    >>> from mindboggle.mio.tables import write_vertex_measures
    >>> output_table = ''  # vertex_shapes.csv
    >>> path = os.environ['MINDBOGGLE_DATA']
    >>> labels_or_file = os.path.join(path, 'arno', 'labels',
    ...     'lh.labels.DKT25.manual.vtk')
    >>> sulci_file = os.path.join(path, 'arno', 'features', 'sulci.vtk')
    >>> fundi_file = os.path.join(path, 'arno', 'features', 'fundi.vtk')
    >>> sulci, name = read_scalars(sulci_file)
    >>> fundi, name = read_scalars(fundi_file)
    >>> affine_transform_files = [os.path.join(path, 'arno', 'mri',
    ...     't1weighted_brain.MNI152Affine.txt')]
    >>> inverse_booleans = [1]
    >>> transform_format = 'itk'
    >>> area_file = os.path.join(path, 'arno', 'shapes', 'lh.pial.area.vtk')
    >>> mean_curvature_file = os.path.join(path, 'arno', 'shapes',
    ...     'lh.pial.mean_curvature.vtk')
    >>> travel_depth_file = os.path.join(path, 'arno', 'shapes',
    ...     'lh.pial.travel_depth.vtk')
    >>> geodesic_depth_file = os.path.join(path, 'arno', 'shapes',
    ...     'lh.pial.geodesic_depth.vtk')
    >>> freesurfer_thickness_file = ''
    >>> freesurfer_curvature_file = ''
    >>> freesurfer_sulc_file = ''
    >>> write_vertex_measures(output_table, labels_or_file, sulci, fundi,
    ...     affine_transform_files, inverse_booleans, transform_format,
    ...     area_file, mean_curvature_file, travel_depth_file,
    ...     geodesic_depth_file, freesurfer_thickness_file,
    ...     freesurfer_curvature_file, freesurfer_sulc_file)

    """
    import os

    import numpy as np
    import pandas as pd

    from mindboggle.mio.vtks import read_scalars, read_vtk, \
        apply_affine_transforms

    # Make sure inputs are lists:
    if isinstance(labels_or_file, np.ndarray):
        labels = [int(x) for x in labels_or_file]
    elif isinstance(labels_or_file, list):
        labels = labels_or_file
    elif isinstance(labels_or_file, str):
        labels, name = read_scalars(labels_or_file)
    if isinstance(sulci, np.ndarray):
        sulci = [int(x) for x in sulci]
    if isinstance(fundi, np.ndarray):
        fundi = [int(x) for x in fundi]

    if not labels and not sulci and not fundi:
        raise ValueError('No feature data to tabulate in '
                         'write_vertex_measures().')

    # Feature names and corresponding feature lists:
    feature_names = ['label ID', 'sulcus ID', 'fundus ID']
    feature_lists = [labels, sulci, fundi]

    # Shape names corresponding to shape files below:
    shape_names = ['area', 'travel depth', 'geodesic depth',
                   'mean curvature', 'freesurfer curvature',
                   'freesurfer thickness', 'freesurfer convexity (sulc)']

    # Load shape files as a list of numpy arrays of per-vertex shape values:
    shape_files = [area_file, travel_depth_file, geodesic_depth_file,
                   mean_curvature_file, freesurfer_curvature_file,
                   freesurfer_thickness_file, freesurfer_sulc_file]

    # Append columns of per-vertex scalar values:
    columns = []
    column_names = []
    for ifeature, values in enumerate(feature_lists):
        if values:
            columns.append(values)
            column_names.append(feature_names[ifeature])

    first_pass = True
    for ishape, shape_file in enumerate(shape_files):
        if os.path.exists(shape_file):
            if first_pass:

                # Append x,y,z position per vertex to columns:
                points, indices, lines, faces, scalars, scalar_names, \
                    npoints, input_vtk = read_vtk(shape_file)
                xyz_positions = np.asarray(points)
                for ixyz, xyz in enumerate(['x', 'y', 'z']):
                    column_names.append('position: {0}'.format(xyz))
                    columns.append(xyz_positions[:, ixyz].tolist())
                first_pass = False

                # Append standard space x,y,z position to columns:
                if affine_transform_files and transform_format:
                    affine_points, _ = apply_affine_transforms(
                        affine_transform_files, inverse_booleans,
                        transform_format, points, vtk_file_stem='')
                    xyz_std_positions = affine_points
                    for ixyz, xyz in enumerate(['x', 'y', 'z']):
                        column_names.append('position in standard space:'
                                            ' {0}'.format(xyz))
                        columns.append(xyz_std_positions[:, ixyz].tolist())
            else:
                scalars, name = read_scalars(shape_file)
            if len(scalars):
                columns.append(scalars)
                column_names.append(shape_names[ishape])

    # Write the table:
    if not output_table:
        output_table = os.path.join(os.getcwd(), 'vertices.csv')
    df = pd.DataFrame(np.transpose(columns), columns=column_names)
    df.to_csv(output_table, index=False)

    if not os.path.exists(output_table):
        raise IOError(output_table + " not found")

    return output_table
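
# A small illustration (not Mindboggle code) of how write_vertex_measures()
# assembles its table: each entry of `columns` holds one column of per-vertex
# values, so the list is transposed before handing it to pandas, which
# expects one row per vertex. The values below are made up.
def _demo_vertex_table():
    import numpy as np
    import pandas as pd
    columns = [[0, 0, 1],        # e.g., sulcus ID per vertex
               [1.2, 3.4, 5.6]]  # e.g., travel depth per vertex
    column_names = ['sulcus ID', 'travel depth']
    # np.transpose turns the 2 x 3 list into 3 rows (vertices) x 2 columns;
    # note that mixing ints and floats upcasts everything to float:
    df = pd.DataFrame(np.transpose(columns), columns=column_names)
    return df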
def write_face_vertex_averages(input_file, output_table='', area_file=''):
    """
    Make table of average vertex values per face
    (divided by vertex area if area_file provided).

    Parameters
    ----------
    input_file : string
        name of VTK file with scalars to average
    output_table : string
        output table filename
    area_file : string
        name of VTK file with surface area scalar values

    Returns
    -------
    output_table : string
        output table filename

    Examples
    --------
    >>> from mindboggle.mio.tables import write_face_vertex_averages
    >>> from mindboggle.mio.fetch_data import prep_tests
    >>> urls, fetch_data = prep_tests()
    >>> input_file = fetch_data(urls['left_travel_depth'], '', '.vtk')
    >>> area_file = fetch_data(urls['left_area'], '', '.vtk')
    >>> output_table = ''
    >>> output_table = write_face_vertex_averages(input_file, output_table,
    ...                                           area_file)

    """
    import os

    import numpy as np
    import pandas as pd

    from mindboggle.mio.vtks import read_vtk, read_scalars

    points, indices, lines, faces, scalars, scalar_names, \
        npoints, input_vtk = read_vtk(input_file, True, True)
    if area_file:
        area_scalars, name = read_scalars(area_file, True, True)

    #---------------------------------------------------------------------
    # For each face, average vertex values:
    #---------------------------------------------------------------------
    columns = []
    for face in faces:
        values = []
        for index in face:
            if area_file:
                values.append(scalars[index] / area_scalars[index])
            else:
                values.append(scalars[index])
        columns.append(np.mean(values))

    #-----------------------------------------------------------------
    # Write to table:
    #-----------------------------------------------------------------
    if not output_table:
        output_table = os.path.join(os.getcwd(), 'average_face_values.csv')
    df = pd.DataFrame({'': columns})
    df.to_csv(output_table, index=False)

    if not os.path.exists(output_table):
        raise IOError(output_table + " not found")

    return output_table
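
# The per-face loop above can also be written with numpy fancy indexing,
# since `faces` holds triples of vertex indices. A minimal sketch of that
# alternative formulation (not the Mindboggle implementation); it assumes
# triangular faces and, if given, per-vertex areas as in
# write_face_vertex_averages():
def average_face_values(faces, scalars, area_scalars=None):
    """Average per-vertex values over each triangle's three vertices."""
    import numpy as np
    faces = np.asarray(faces)            # shape: (n_faces, 3)
    values = np.asarray(scalars, float)  # shape: (n_vertices,)
    if area_scalars is not None:
        values = values / np.asarray(area_scalars, float)
    return values[faces].mean(axis=1)    # shape: (n_faces,)

# Example with made-up values:
# average_face_values([[0, 1, 2], [1, 2, 3]], [1.0, 2.0, 3.0, 4.0])
# -> array([2., 3.])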
def write_average_face_values_per_label(input_indices_vtk,
                                        input_values_vtk='', area_file='',
                                        output_stem='', exclude_values=[-1],
                                        background_value=-1):
    """
    Write out a separate table of average face values for each integer
    in (the first) scalar list of an input VTK file.
    Optionally average values drawn from a second VTK file.

    Parameters
    ----------
    input_indices_vtk : string
        path of the input VTK file that contains indices as scalars
    input_values_vtk : string
        path of the input VTK file that contains values as scalars
    output_stem : string
        path and stem of each output table
    exclude_values : list or array
        values to exclude
    background_value : integer or float
        background value in output VTK files

    Examples
    --------
    >>> import os
    >>> from mindboggle.mio.tables import write_average_face_values_per_label
    >>> path = '/homedir/mindboggled'
    >>> input_indices_vtk = os.path.join(path, 'Twins-2-1', 'labels',
    ...     'left_cortical_surface', 'freesurfer_cortex_labels.vtk')
    >>> input_values_vtk = os.path.join(path, 'Twins-2-1', 'shapes',
    ...     'left_cortical_surface', 'freesurfer_thickness.vtk')
    >>> area_file = os.path.join(path, 'Twins-2-1', 'shapes',
    ...     'left_cortical_surface', 'area.vtk')
    >>> output_stem = 'labels_thickness'
    >>> exclude_values = [-1]
    >>> background_value = -1
    >>> write_average_face_values_per_label(input_indices_vtk,
    ...     input_values_vtk, area_file, output_stem, exclude_values,
    ...     background_value)

    View:

    >>> #example_vtk = os.path.join(os.getcwd(), output_stem + '0.vtk')
    >>> #from mindboggle.mio.plots import plot_surfaces
    >>> #plot_surfaces(example_vtk)

    """
    import os

    import numpy as np
    import pandas as pd

    from mindboggle.mio.vtks import read_scalars, read_vtk
    from mindboggle.guts.mesh import remove_faces

    # Load VTK file:
    points, indices, lines, faces, scalars, scalar_names, npoints, \
        input_vtk = read_vtk(input_indices_vtk, True, True)
    if area_file:
        area_scalars, name = read_scalars(area_file, True, True)
    print("Explode the scalar list in {0}".
          format(os.path.basename(input_indices_vtk)))
    if input_values_vtk != input_indices_vtk:
        values, name = read_scalars(input_values_vtk, True, True)
        print("Explode the scalar list of values in {0} "
              "with the scalar list of indices in {1}".
              format(os.path.basename(input_values_vtk),
                     os.path.basename(input_indices_vtk)))
    else:
        values = np.copy(scalars)

    # Loop through unique (non-excluded) scalar values:
    unique_scalars = [int(x) for x in np.unique(scalars)
                      if x not in exclude_values]
    for scalar in unique_scalars:

        # Keep only faces whose vertices all carry this scalar value:
        scalar_indices = [i for i, x in enumerate(scalars) if x == scalar]
        new_faces = remove_faces(faces, scalar_indices)
        print("  Scalar {0}: {1} vertices".format(scalar,
                                                  len(scalar_indices)))

        #---------------------------------------------------------------------
        # For each face, average vertex values:
        #---------------------------------------------------------------------
        output_table = os.path.join(os.getcwd(),
                                    output_stem + str(scalar) + '.csv')
        columns = []
        for face in new_faces:
            face_values = []
            for index in face:
                if area_file:
                    face_values.append(values[index] / area_scalars[index])
                else:
                    face_values.append(values[index])
            columns.append(np.mean(face_values))

        #-----------------------------------------------------------------
        # Write to table:
        #-----------------------------------------------------------------
        df = pd.DataFrame({'': columns})
        df.to_csv(output_table, index=False)

        if not os.path.exists(output_table):
            raise IOError(output_table + " not found")
def write_average_face_values_per_label(input_indices_vtk,
                                        input_values_vtk='', area_file='',
                                        output_stem='', exclude_values=[-1],
                                        background_value=-1, verbose=False):
    """
    Write out a separate table of average face values for each integer
    in (the first) scalar list of an input VTK file.
    Optionally average values drawn from a second VTK file.

    Parameters
    ----------
    input_indices_vtk : string
        path of the input VTK file that contains indices as scalars
    input_values_vtk : string
        path of the input VTK file that contains values as scalars
    output_stem : string
        path and stem of each output table
    exclude_values : list or array
        values to exclude
    background_value : integer or float
        background value in output VTK files
    verbose : bool
        print statements?

    Examples
    --------
    >>> import os
    >>> from mindboggle.mio.tables import write_average_face_values_per_label
    >>> from mindboggle.mio.fetch_data import prep_tests
    >>> urls, fetch_data = prep_tests()
    >>> input_indices_vtk = fetch_data(urls['left_freesurfer_labels'], '',
    ...                                '.vtk')
    >>> input_values_vtk = fetch_data(urls['left_mean_curvature'], '', '.vtk')
    >>> area_file = fetch_data(urls['left_area'], '', '.vtk')
    >>> output_stem = 'labels_thickness'
    >>> exclude_values = [-1]
    >>> background_value = -1
    >>> verbose = False
    >>> write_average_face_values_per_label(input_indices_vtk,
    ...     input_values_vtk, area_file, output_stem, exclude_values,
    ...     background_value, verbose)

    View vtk file (skip test):

    >>> from mindboggle.mio.plots import plot_surfaces
    >>> example_vtk = os.path.join(os.getcwd(), output_stem + '0.vtk')
    >>> plot_surfaces(example_vtk) # doctest: +SKIP

    """
    import os

    import numpy as np
    import pandas as pd

    from mindboggle.mio.vtks import read_scalars, read_vtk
    from mindboggle.guts.mesh import keep_faces

    # Load VTK file:
    points, indices, lines, faces, scalars, scalar_names, npoints, \
        input_vtk = read_vtk(input_indices_vtk, True, True)
    if area_file:
        area_scalars, name = read_scalars(area_file, True, True)
    if verbose:
        print("Explode the scalar list in {0}".
              format(os.path.basename(input_indices_vtk)))
    if input_values_vtk != input_indices_vtk:
        values, name = read_scalars(input_values_vtk, True, True)
        if verbose:
            print("Explode the scalar list of values in {0} "
                  "with the scalar list of indices in {1}".
                  format(os.path.basename(input_values_vtk),
                         os.path.basename(input_indices_vtk)))
    else:
        values = np.copy(scalars)

    # Loop through unique (non-excluded) scalar values:
    unique_scalars = [int(x) for x in np.unique(scalars)
                      if x not in exclude_values]
    for scalar in unique_scalars:

        # Keep only faces whose vertices all carry this scalar value:
        scalar_indices = [i for i, x in enumerate(scalars) if x == scalar]
        new_faces = keep_faces(faces, scalar_indices)
        if verbose:
            print("  Scalar {0}: {1} vertices".format(scalar,
                                                      len(scalar_indices)))

        #---------------------------------------------------------------------
        # For each face, average vertex values:
        #---------------------------------------------------------------------
        output_table = os.path.join(os.getcwd(),
                                    output_stem + str(scalar) + '.csv')
        columns = []
        for face in new_faces:
            face_values = []
            for index in face:
                if area_file:
                    face_values.append(values[index] / area_scalars[index])
                else:
                    face_values.append(values[index])
            columns.append(np.mean(face_values))

        #-----------------------------------------------------------------
        # Write to table:
        #-----------------------------------------------------------------
        df = pd.DataFrame({'': columns})
        df.to_csv(output_table, index=False)

        if not os.path.exists(output_table):
            raise IOError(output_table + " not found")
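
# A short usage sketch (hypothetical stem and label value) showing how to
# read back one of the per-label tables written above; the single column is
# written with an empty-string name, so it is selected by position:
def _demo_read_face_averages(output_stem='labels_thickness', label=1002):
    import os
    import pandas as pd
    table = os.path.join(os.getcwd(), output_stem + str(label) + '.csv')
    face_averages = pd.read_csv(table).iloc[:, 0]  # one value per face
    return face_averages.describe()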