def spectrum_per_label(vtk_file, spectrum_size=10, exclude_labels=[-1],
                       normalization='areaindex', area_file='',
                       largest_segment=True, verbose=False):
    """
    Compute Laplace-Beltrami spectrum per labeled region in a file.

    Parameters
    ----------
    vtk_file : string
        name of VTK surface mesh file containing index scalars (labels)
    spectrum_size : integer
        number of eigenvalues to be computed (the length of the spectrum)
    exclude_labels : list of integers
        labels to be excluded
    normalization : string
        the method used to normalize eigenvalues
        if None, no normalization is used
        if "area", use area of the 2D structure as in Reuter et al. 2006
        if "index", divide eigenvalue by index to account for linear trend
        if "areaindex", do both (default)
    area_file : string (optional)
        name of VTK file with surface area scalar values
    largest_segment : bool
        compute spectrum only for largest segment with a given label?
    verbose : bool
        print statements?

    Returns
    -------
    spectrum_lists : list of lists
        first eigenvalues for each label's Laplace-Beltrami spectrum
    label_list : list of integers
        list of unique labels for which spectra are obtained

    Examples
    --------
    >>> # Uncomment "if label==22:" below to run example:
    >>> # Spectrum for Twins-2-1 left postcentral (22) pial surface:
    >>> import numpy as np
    >>> from mindboggle.shapes.laplace_beltrami import spectrum_per_label
    >>> from mindboggle.mio.fetch_data import prep_tests
    >>> urls, fetch_data = prep_tests()
    >>> vtk_file = fetch_data(urls['left_freesurfer_labels'], '', '.vtk')
    >>> area_file = fetch_data(urls['left_area'], '', '.vtk')
    >>> spectrum_size = 6
    >>> exclude_labels = [0] #[-1]
    >>> largest_segment = True
    >>> verbose = False
    >>> spectrum_lists, label_list = spectrum_per_label(vtk_file,
    ...     spectrum_size, exclude_labels, None, area_file, largest_segment,
    ...     verbose)
    >>> [float("{0:.{1}f}".format(x, 5)) for x in spectrum_lists[0]]
    [0.0, 0.00054, 0.00244, 0.00291, 0.00456, 0.00575]
    >>> label_list[0:10]
    [1029, 1005, 1011, 1021, 1008, 1025, 999, 1013, 1007, 1022]

    """
    from mindboggle.mio.vtks import read_vtk, read_scalars
    from mindboggle.guts.mesh import keep_faces, reindex_faces_points
    from mindboggle.shapes.laplace_beltrami import fem_laplacian, \
        spectrum_of_largest

    # Read VTK surface mesh file:
    points, indices, lines, faces, labels, scalar_names, npoints, \
        input_vtk = read_vtk(vtk_file)

    # Area file:
    if area_file:
        areas, u1 = read_scalars(area_file)
    else:
        areas = None

    # Collect unique, non-excluded labels:
    ulabels = []
    for x in labels:
        if x not in ulabels and x not in exclude_labels:
            ulabels.append(int(x))

    # Loop through labeled regions:
    label_list = []
    spectrum_lists = []
    for label in ulabels:
        #if label == 22:
        #    print("DEBUG: COMPUTE FOR ONLY ONE LABEL")

        # Determine the indices per label:
        Ilabel = [i for i, x in enumerate(labels) if x == label]
        if verbose:
            print('{0} vertices for label {1}'.format(len(Ilabel), label))

        # Remove background faces:
        pick_faces = keep_faces(faces, Ilabel)
        pick_faces, pick_points, o1 = reindex_faces_points(pick_faces,
                                                           points)

        # Compute Laplace-Beltrami spectrum for the label:
        if largest_segment:
            exclude_labels_inner = [-1]
            spectrum = spectrum_of_largest(pick_points, pick_faces,
                                           spectrum_size,
                                           exclude_labels_inner,
                                           normalization, areas, verbose)
        else:
            spectrum = fem_laplacian(pick_points, pick_faces,
                                     spectrum_size, normalization, verbose)

        # Append to a list of lists of spectra:
        spectrum_lists.append(spectrum)
        label_list.append(label)

    return spectrum_lists, label_list

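# A minimal standalone sketch (an assumption based on the docstring above,
# not Mindboggle's implementation): one plausible reading of the "index"
# normalization is to divide the k-th nonzero eigenvalue by k, removing the
# roughly linear growth of a Laplace-Beltrami spectrum. The eigenvalues in
# the doctest below are reused from the example output above.

def _index_normalize_sketch(spectrum):
    """Divide the k-th nonzero eigenvalue by k (illustrative only).

    >>> spectrum = [0.0, 0.00054, 0.00244, 0.00291, 0.00456, 0.00575]
    >>> [float("{0:.5f}".format(x)) for x in _index_normalize_sketch(spectrum)]
    [0.0, 0.00054, 0.00122, 0.00097, 0.00114, 0.00115]
    """
    import numpy as np
    spectrum = np.asarray(spectrum, dtype=float)
    normalized = spectrum.copy()
    # Skip the first (zero) eigenvalue; divide the rest by their index:
    normalized[1:] = spectrum[1:] / np.arange(1, len(spectrum))
    return normalized.tolist()
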
def write_average_face_values_per_label(input_indices_vtk,
                                        input_values_vtk='', area_file='',
                                        output_stem='', exclude_values=[-1],
                                        background_value=-1, verbose=False):
    """
    Write out a separate csv table file for each integer
    in (the first) scalar list of an input VTK file.
    Optionally write the values drawn from a second VTK file.

    Parameters
    ----------
    input_indices_vtk : string
        path of the input VTK file that contains indices as scalars
    input_values_vtk : string
        path of the input VTK file that contains values as scalars
    area_file : string (optional)
        path of the input VTK file that contains surface areas as scalars
    output_stem : string
        path and stem of the output table files
    exclude_values : list or array
        values to exclude
    background_value : integer or float
        background value in output VTK files
    verbose : bool
        print statements?

    Examples
    --------
    >>> from mindboggle.mio.tables import write_average_face_values_per_label
    >>> from mindboggle.mio.fetch_data import prep_tests
    >>> urls, fetch_data = prep_tests()
    >>> input_indices_vtk = fetch_data(urls['left_freesurfer_labels'], '',
    ...                                '.vtk')
    >>> input_values_vtk = fetch_data(urls['left_mean_curvature'], '',
    ...                               '.vtk')
    >>> area_file = fetch_data(urls['left_area'], '', '.vtk')
    >>> output_stem = 'labels_thickness'
    >>> exclude_values = [-1]
    >>> background_value = -1
    >>> verbose = False
    >>> write_average_face_values_per_label(input_indices_vtk,
    ...     input_values_vtk, area_file, output_stem, exclude_values,
    ...     background_value, verbose)

    """
    import os
    import numpy as np
    import pandas as pd

    from mindboggle.mio.vtks import read_scalars, read_vtk
    from mindboggle.guts.mesh import keep_faces

    # Load VTK file:
    points, indices, lines, faces, scalars, scalar_names, npoints, \
        input_vtk = read_vtk(input_indices_vtk, True, True)
    if area_file:
        area_scalars, name = read_scalars(area_file, True, True)
    if verbose:
        print("Explode the scalar list in {0}".
              format(os.path.basename(input_indices_vtk)))
    if input_values_vtk and input_values_vtk != input_indices_vtk:
        if verbose:
            print("Explode the scalar list of values in {0} "
                  "with the scalar list of indices in {1}".
                  format(os.path.basename(input_values_vtk),
                         os.path.basename(input_indices_vtk)))
        # Read the values to be averaged from the second VTK file:
        values, name = read_scalars(input_values_vtk, True, True)
    else:
        # Otherwise average the index scalars themselves:
        values = scalars

    # Loop through unique (non-excluded) scalar values:
    unique_scalars = [int(x) for x in np.unique(scalars)
                      if x not in exclude_values]
    for scalar in unique_scalars:

        # Create array and indices for scalar value:
        select_scalars = np.copy(scalars)
        select_scalars[scalars != scalar] = background_value
        scalar_indices = [i for i, x in enumerate(select_scalars)
                          if x == scalar]
        if verbose:
            print("  Scalar {0}: {1} vertices".format(scalar,
                                                      len(scalar_indices)))

        # Keep only faces whose vertices all have this scalar value:
        new_faces = keep_faces(faces, scalar_indices)

        # --------------------------------------------------------------------
        # For each face, average vertex values:
        # --------------------------------------------------------------------
        output_table = os.path.join(os.getcwd(),
                                    output_stem + str(scalar) + '.csv')
        columns = []
        for face in new_faces:
            face_values = []
            for index in face:
                if area_file:
                    face_values.append(values[index] / area_scalars[index])
                else:
                    face_values.append(values[index])
            columns.append(np.mean(face_values))

        # ----------------------------------------------------------------
        # Write to table:
        # ----------------------------------------------------------------
        df = pd.DataFrame({'': columns})
        df.to_csv(output_table, index=False, encoding='utf-8')
        if not os.path.exists(output_table):
            raise IOError(output_table + " not found")

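# A small follow-up sketch (not part of Mindboggle): read back one of the
# per-label csv tables written by write_average_face_values_per_label().
# The default file name 'labels_thickness1029.csv' is hypothetical, built
# from the example's output_stem plus one label value.

def _read_label_table_sketch(table_file='labels_thickness1029.csv'):
    """Return the mean of the per-face averages in one output table."""
    import os
    import pandas as pd
    table = os.path.join(os.getcwd(), table_file)
    df = pd.read_csv(table)
    # Each row holds one face's average vertex value:
    return float(df.iloc[:, 0].mean())
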
def zernike_moments_per_label(vtk_file, order=10, exclude_labels=[-1],
                              scale_input=True, decimate_fraction=0,
                              decimate_smooth=25, verbose=False):
    """
    Compute the Zernike moments per labeled region in a file.

    Optionally decimate the input mesh.

    Parameters
    ----------
    vtk_file : string
        name of VTK surface mesh file containing index scalars (labels)
    order : integer
        number of moments to compute
    exclude_labels : list of integers
        labels to be excluded
    scale_input : bool
        translate and scale each object so it is bounded by a unit sphere?
        (this is the expected input to zernike_moments())
    decimate_fraction : float
        fraction of mesh faces to remove for decimation
        (1 for no decimation)
    decimate_smooth : integer
        number of smoothing steps for decimation
    verbose : bool
        print statements?

    Returns
    -------
    descriptors_lists : list of lists of floats
        Zernike descriptors per label
    label_list : list of integers
        list of unique labels for which moments are computed

    Examples
    --------
    >>> # Zernike moments per label of a FreeSurfer-labeled left cortex.
    >>> # Uncomment "if label==22:" below to run example
    >>> # for left postcentral (22) pial surface:
    >>> import numpy as np
    >>> from mindboggle.shapes.zernike.zernike import zernike_moments_per_label
    >>> from mindboggle.mio.fetch_data import prep_tests
    >>> urls, fetch_data = prep_tests()
    >>> vtk_file = fetch_data(urls['left_freesurfer_labels'], '', '.vtk')
    >>> order = 3
    >>> exclude_labels = [-1]
    >>> scale_input = True
    >>> verbose = False
    >>> descriptors_lists, label_list = zernike_moments_per_label(vtk_file,
    ...     order, exclude_labels, scale_input, verbose=verbose)
    >>> label_list[0:10]
    [999, 1001, 1002, 1003, 1005, 1006, 1007, 1008, 1009, 1010]
    >>> print(np.array_str(np.array(descriptors_lists[0]),
    ...       precision=5, suppress_small=True))
    [ 0.00587  0.01143  0.0031   0.00881  0.00107  0.00041]
    >>> print(np.array_str(np.array(descriptors_lists[1]),
    ...       precision=5, suppress_small=True))
    [ 0.00004  0.00009  0.00003  0.00009  0.00002  0.00001]
    >>> print(np.array_str(np.array(descriptors_lists[2]),
    ...       precision=5, suppress_small=True))
    [ 0.00144  0.00232  0.00128  0.00304  0.00084  0.00051]
    >>> print(np.array_str(np.array(descriptors_lists[3]),
    ...       precision=5, suppress_small=True))
    [ 0.00393  0.006    0.00371  0.00852  0.00251  0.00153]
    >>> print(np.array_str(np.array(descriptors_lists[4]),
    ...       precision=5, suppress_small=True))
    [ 0.00043  0.0003   0.00095  0.00051  0.00115  0.00116]

    """
    import numpy as np
    from mindboggle.mio.vtks import read_vtk
    from mindboggle.guts.mesh import keep_faces
    from mindboggle.shapes.zernike.zernike import zernike_moments

    min_points_faces = 4

    # ------------------------------------------------------------------------
    # Read VTK surface mesh file:
    # ------------------------------------------------------------------------
    points, indices, lines, faces, labels, scalar_names, npoints, \
        input_vtk = read_vtk(vtk_file)

    # ------------------------------------------------------------------------
    # Loop through labeled regions:
    # ------------------------------------------------------------------------
    ulabels = [x for x in np.unique(labels) if x not in exclude_labels]
    label_list = []
    descriptors_lists = []
    for label in ulabels:
        #if label == 1022:  # 22:
        #    print("DEBUG: COMPUTE FOR ONLY ONE LABEL")

        # --------------------------------------------------------------------
        # Determine the indices per label:
        # --------------------------------------------------------------------
        Ilabel = [i for i, x in enumerate(labels) if x == label]
        if verbose:
            print('  {0} vertices for label {1}'.format(len(Ilabel), label))
        if len(Ilabel) > min_points_faces:

            # ----------------------------------------------------------------
            # Remove background faces:
            # ----------------------------------------------------------------
            pick_faces = keep_faces(faces, Ilabel)
            if len(pick_faces) > min_points_faces:

                # ------------------------------------------------------------
                # Compute Zernike moments for the label:
                # ------------------------------------------------------------
                descriptors = zernike_moments(points, pick_faces,
                                              order, scale_input,
                                              decimate_fraction,
                                              decimate_smooth, verbose)

                # ------------------------------------------------------------
                # Append to a list of lists of descriptors:
                # ------------------------------------------------------------
                descriptors_lists.append(descriptors)
                label_list.append(label)

    return descriptors_lists, label_list

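# A downstream sketch (an assumption, not Mindboggle code): since
# zernike_moments_per_label() returns one fixed-length descriptor list per
# label, labeled regions can be compared with a plain Euclidean distance
# between descriptors. The two descriptor lists in the doctest are copied
# from the example output above.

def _descriptor_distance_sketch(descriptors1, descriptors2):
    """Euclidean distance between two same-length Zernike descriptor lists.

    >>> d0 = [0.00587, 0.01143, 0.0031, 0.00881, 0.00107, 0.00041]
    >>> d2 = [0.00144, 0.00232, 0.00128, 0.00304, 0.00084, 0.00051]
    >>> round(_descriptor_distance_sketch(d0, d2), 4)
    0.0118
    """
    import numpy as np
    return float(np.linalg.norm(np.asarray(descriptors1) -
                                np.asarray(descriptors2)))
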
def plot_mask_surface(vtk_file, mask_file='', nonmask_value=-1,
                      masked_output='', remove_nonmask=False,
                      program='vtkviewer',
                      use_colormap=False, colormap_file='',
                      background_value=-1):
    """
    Use vtkviewer or mayavi2 to visualize VTK surface mesh data.

    If a mask_file is provided, a temporary masked file is saved,
    and it is this file that is viewed.

    If using vtkviewer, optionally provide colormap file
    or set $COLORMAP environment variable.

    Parameters
    ----------
    vtk_file : string
        name of VTK surface mesh file
    mask_file : string
        name of VTK surface mesh file to mask vtk_file vertices
    nonmask_value : integer
        nonmask (usually background) value
    masked_output : string
        temporary masked output file name
    remove_nonmask : bool
        remove vertices that are not in mask? (otherwise assign nonmask_value)
    program : string {'vtkviewer', 'mayavi2'}
        program to visualize VTK file
    use_colormap : bool
        use Paraview-style XML colormap file set by $COLORMAP env variable?
    colormap_file : string
        use colormap in given file if use_colormap==True?
        if empty and use_colormap==True,
        use file set by $COLORMAP environment variable
    background_value : integer or float
        background value

    Examples
    --------
    >>> import os
    >>> from mindboggle.mio.plots import plot_mask_surface
    >>> from mindboggle.mio.fetch_data import prep_tests
    >>> urls, fetch_data = prep_tests()
    >>> vtk_file = fetch_data(urls['freesurfer_labels'], '', '.vtk')
    >>> os.rename(vtk_file, vtk_file + '.nii.gz')
    >>> vtk_file = vtk_file + '.nii.gz'
    >>> mask_file = ''
    >>> nonmask_value = 0 #-1
    >>> masked_output = ''
    >>> remove_nonmask = True
    >>> program = 'vtkviewer'
    >>> use_colormap = True
    >>> colormap_file = ''
    >>> background_value = -1
    >>> plot_mask_surface(vtk_file, mask_file, nonmask_value, masked_output,
    ...     remove_nonmask, program, use_colormap, colormap_file,
    ...     background_value) # doctest: +SKIP

    """
    import os
    import numpy as np

    from mindboggle.guts.mesh import keep_faces, reindex_faces_points
    from mindboggle.guts.utilities import execute
    from mindboggle.mio.plots import plot_surfaces
    from mindboggle.mio.vtks import read_scalars, rewrite_scalars, \
        read_vtk, write_vtk

    # ------------------------------------------------------------------------
    # Filter mesh with non-background values from a second (same-size) mesh:
    # ------------------------------------------------------------------------
    if mask_file:
        mask, name = read_scalars(mask_file, True, True)
        if not masked_output:
            masked_output = os.path.join(os.getcwd(), 'temp.vtk')
        file_to_plot = masked_output

        # --------------------------------------------------------------------
        # Remove nonmask-valued vertices:
        # --------------------------------------------------------------------
        if remove_nonmask:
            # ----------------------------------------------------------------
            # Load VTK files:
            # ----------------------------------------------------------------
            points, indices, lines, faces, scalars, scalar_names, npoints, \
                input_vtk = read_vtk(vtk_file, True, True)
            # ----------------------------------------------------------------
            # Find mask indices, remove nonmask faces, and reindex:
            # ----------------------------------------------------------------
            Imask = [i for i, x in enumerate(mask) if x != nonmask_value]
            mask_faces = keep_faces(faces, Imask)
            mask_faces, points, \
                original_indices = reindex_faces_points(mask_faces, points)
            # ----------------------------------------------------------------
            # Write VTK file with scalar values:
            # ----------------------------------------------------------------
            if np.ndim(scalars) == 1:
                scalar_type = type(scalars[0]).__name__
            elif np.ndim(scalars) == 2:
                scalar_type = type(scalars[0][0]).__name__
            else:
                print("Undefined scalar type!")
            write_vtk(file_to_plot, points, [], [], mask_faces,
                      scalars[original_indices].tolist(), scalar_names,
                      scalar_type=scalar_type)
        else:
            scalars, name = read_scalars(vtk_file, True, True)
            scalars[mask == nonmask_value] = nonmask_value
            rewrite_scalars(vtk_file, file_to_plot, scalars, ['scalars'], [],
                            background_value)
    else:
        file_to_plot = vtk_file

    # ------------------------------------------------------------------------
    # Display with vtkviewer.py:
    # ------------------------------------------------------------------------
    if program == 'vtkviewer':
        plot_surfaces(file_to_plot, use_colormap=use_colormap,
                      colormap_file=colormap_file)
    # ------------------------------------------------------------------------
    # Display with mayavi2:
    # ------------------------------------------------------------------------
    elif program == 'mayavi2':
        cmd = ["mayavi2", "-d", file_to_plot, "-m", "Surface", "&"]
        execute(cmd, 'os')

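# A toy sketch (not Mindboggle code) of the two masking behaviors that
# plot_mask_surface() supports: with remove_nonmask=False every vertex is
# kept and scalars outside the mask are overwritten, while with
# remove_nonmask=True the nonmask vertices (and their faces) are dropped.

def _mask_scalars_sketch():
    """Show reassignment vs. removal on a four-vertex toy mask.

    >>> _mask_scalars_sketch()
    ([3.0, -1.0, 7.0, -1.0], [0, 2])
    """
    import numpy as np
    scalars = np.array([3., 5., 7., 9.])
    mask = np.array([1, -1, 1, -1])  # -1 marks nonmask vertices
    nonmask_value = -1
    # remove_nonmask=False: overwrite scalars outside the mask:
    reassigned = np.copy(scalars)
    reassigned[mask == nonmask_value] = nonmask_value
    # remove_nonmask=True: indices that keep_faces/reindexing would retain:
    kept = [i for i, x in enumerate(mask) if x != nonmask_value]
    return reassigned.tolist(), kept
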
def evaluate_deep_features(features_file, labels_file, sulci_file='',
                           hemi='', excludeIDs=[-1], output_vtk_name='',
                           verbose=True):
    """
    Evaluate deep surface features by computing the minimum distance from
    each label border vertex to all of the feature vertices in the same
    sulcus, and from each feature vertex to all of the label border
    vertices in the same sulcus. The label borders run along the deepest
    parts of sulci and correspond to fundi in the DKT cortical labeling
    protocol.

    Parameters
    ----------
    features_file : string
        VTK surface file with feature numbers for vertex scalars
    labels_file : string
        VTK surface file with label numbers for vertex scalars
    sulci_file : string
        VTK surface file with sulcus numbers for vertex scalars
    hemi : string
        hemisphere ('lh' or 'rh')
    excludeIDs : list of integers
        feature/sulcus/label IDs to exclude (background set to -1)
    output_vtk_name : string
        if not empty, output a VTK file beginning with output_vtk_name that
        contains a surface with mean distances as scalars
    verbose : bool
        print mean distances to standard output?

    Returns
    -------
    feature_to_border_mean_distances : numpy array [number of features x 1]
        mean distance from each feature to sulcus label border
    feature_to_border_sd_distances : numpy array [number of features x 1]
        standard deviations of feature-to-border distances
    feature_to_border_distances_vtk : string
        VTK surface file containing feature-to-border distances
    border_to_feature_mean_distances : numpy array [number of features x 1]
        mean distances from each sulcus label border to feature
    border_to_feature_sd_distances : numpy array [number of features x 1]
        standard deviations of border-to-feature distances
    border_to_feature_distances_vtk : string
        VTK surface file containing border-to-feature distances

    """
    import os
    import sys
    import numpy as np

    from mindboggle.mio.vtks import read_vtk, read_scalars, write_vtk
    from mindboggle.guts.mesh import find_neighbors, keep_faces
    from mindboggle.guts.segment import extract_borders
    from mindboggle.guts.compute import source_to_target_distances
    from mindboggle.mio.labels import DKTprotocol

    dkt = DKTprotocol()

    # ------------------------------------------------------------------------
    # Load labels, features, and sulci:
    # ------------------------------------------------------------------------
    points, indices, lines, faces, labels, scalar_names, npoints, \
        input_vtk = read_vtk(labels_file, True, True)
    features, name = read_scalars(features_file, True, True)
    if sulci_file:
        sulci, name = read_scalars(sulci_file, True, True)
        # List of indices to sulcus vertices:
        sulcus_indices = [i for i, x in enumerate(sulci)
                          if x not in excludeIDs]
        segmentIDs = sulci
        sulcus_faces = keep_faces(faces, sulcus_indices)
    else:
        sulcus_indices = list(range(len(labels)))
        segmentIDs = []
        sulcus_faces = faces

    # ------------------------------------------------------------------------
    # Prepare neighbors, label pairs, border IDs, and outputs:
    # ------------------------------------------------------------------------
    # Calculate neighbor lists for all points:
    print('Find neighbors for all vertices...')
    neighbor_lists = find_neighbors(faces, npoints)

    # Find label border points in any of the sulci:
    print('Find label border points in any of the sulci...')
    border_indices, border_label_tuples, unique_border_label_tuples = \
        extract_borders(sulcus_indices, labels, neighbor_lists,
                        ignore_values=[], return_label_pairs=True)
    if not len(border_indices):
        sys.exit('There are no label border points!')

    # Initialize an array of label border IDs
    # (label border vertices that define sulci in the labeling protocol):
    print('Build an array of label border IDs...')
    label_borders = -1 * np.ones(npoints)

    if hemi == 'lh':
        nsulcus_lists = len(dkt.left_sulcus_label_pair_lists)
    else:
        nsulcus_lists = len(dkt.right_sulcus_label_pair_lists)
    feature_to_border_mean_distances = -1 * np.ones(nsulcus_lists)
    feature_to_border_sd_distances = -1 * np.ones(nsulcus_lists)
    border_to_feature_mean_distances = -1 * np.ones(nsulcus_lists)
    border_to_feature_sd_distances = -1 * np.ones(nsulcus_lists)
    feature_to_border_distances_vtk = ''
    border_to_feature_distances_vtk = ''

    # ------------------------------------------------------------------------
    # Loop through sulci:
    # ------------------------------------------------------------------------
    # For each list of sorted label pairs (corresponding to a sulcus):
    for isulcus, label_pairs in enumerate(dkt.sulcus_label_pair_lists):

        # Keep the border points with label pair labels:
        label_pair_border_indices = [x for i, x in enumerate(border_indices)
            if np.unique(border_label_tuples[i]).tolist() in label_pairs]

        # Store the points as sulcus IDs in the border IDs array:
        if label_pair_border_indices:
            label_borders[label_pair_border_indices] = isulcus

    if len(np.unique(label_borders)) > 1:

        # --------------------------------------------------------------------
        # Construct a feature-to-border distance matrix and VTK file:
        # --------------------------------------------------------------------
        # Construct a distance matrix:
        print('Construct a feature-to-border distance matrix...')
        sourceIDs = features
        targetIDs = label_borders
        distances, distance_matrix = source_to_target_distances(
            sourceIDs, targetIDs, points, segmentIDs, excludeIDs)

        # Compute mean distances for each feature:
        nfeatures = min(np.shape(distance_matrix)[1], nsulcus_lists)
        for ifeature in range(nfeatures):
            feature_distances = [x for x in distance_matrix[:, ifeature]
                                 if x != -1]
            feature_to_border_mean_distances[ifeature] = \
                np.mean(feature_distances)
            feature_to_border_sd_distances[ifeature] = \
                np.std(feature_distances)

        if verbose:
            print('Feature-to-border mean distances:')
            print(feature_to_border_mean_distances)
            print('Feature-to-border standard deviations of distances:')
            print(feature_to_border_sd_distances)

        # Write resulting feature-label border distances to VTK file:
        if output_vtk_name:
            feature_to_border_distances_vtk = os.path.join(
                os.getcwd(),
                output_vtk_name + '_feature_to_border_mean_distances.vtk')
            print('Write feature-to-border distances to {0}...'.
                  format(feature_to_border_distances_vtk))
            write_vtk(feature_to_border_distances_vtk, points,
                      [], [], sulcus_faces, [distances],
                      ['feature-to-border_distances'], 'float')

        # --------------------------------------------------------------------
        # Construct a border-to-feature distance matrix and VTK file:
        # --------------------------------------------------------------------
        # Construct a distance matrix:
        print('Construct a border-to-feature distance matrix...')
        sourceIDs = label_borders
        targetIDs = features
        distances, distance_matrix = source_to_target_distances(
            sourceIDs, targetIDs, points, segmentIDs, excludeIDs)

        # Compute mean distances for each feature:
        nfeatures = min(np.shape(distance_matrix)[1], nsulcus_lists)
        for ifeature in range(nfeatures):
            border_distances = [x for x in distance_matrix[:, ifeature]
                                if x != -1]
            border_to_feature_mean_distances[ifeature] = \
                np.mean(border_distances)
            border_to_feature_sd_distances[ifeature] = \
                np.std(border_distances)

        if verbose:
            print('border-to-feature mean distances:')
            print(border_to_feature_mean_distances)
            print('border-to-feature standard deviations of distances:')
            print(border_to_feature_sd_distances)

        # Write resulting feature-label border distances to VTK file:
        if output_vtk_name:
            border_to_feature_distances_vtk = os.path.join(
                os.getcwd(),
                output_vtk_name + '_border_to_feature_mean_distances.vtk')
            print('Write border-to-feature distances to {0}...'.
                  format(border_to_feature_distances_vtk))
            write_vtk(border_to_feature_distances_vtk, points,
                      [], [], sulcus_faces, [distances],
                      ['border-to-feature_distances'], 'float')

    # ------------------------------------------------------------------------
    # Return outputs:
    # ------------------------------------------------------------------------
    return feature_to_border_mean_distances, feature_to_border_sd_distances,\
        feature_to_border_distances_vtk,\
        border_to_feature_mean_distances, border_to_feature_sd_distances,\
        border_to_feature_distances_vtk

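# An illustrative sketch (an assumption about the underlying idea, not the
# actual mindboggle.guts.compute.source_to_target_distances()
# implementation): the core of the evaluation above is the minimum
# Euclidean distance from each source vertex to any vertex in a target set.

def _min_distances_sketch(source_points, target_points):
    """Minimum distance from each source point to a target point set.

    >>> _min_distances_sketch([[0, 0, 0], [1, 1, 1]], [[0, 0, 1], [2, 2, 2]])
    [1.0, 1.4142135623730951]
    """
    import numpy as np
    source = np.asarray(source_points, dtype=float)
    target = np.asarray(target_points, dtype=float)
    # Pairwise differences (nsources x ntargets x 3), then min over targets:
    diffs = source[:, np.newaxis, :] - target[np.newaxis, :, :]
    return np.sqrt((diffs ** 2).sum(axis=2)).min(axis=1).tolist()
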
def close_surface_pair(faces, points1, points2, scalars, background_value=-1):
    """
    Close a surface patch by connecting its border vertices with
    corresponding vertices in a second surface file.

    Assumes no lines or indices when reading VTK files in.

    Note ::

        Scalar values different than background define the surface patch.
        The two sets of points have a 1-to-1 mapping; they are from
        two surfaces whose corresponding vertices are shifted in position.
        For pial vs. gray-white matter, the two surfaces are not parallel,
        so connecting the vertices leads to intersecting faces.

    Parameters
    ----------
    faces : list of lists of integers
        each sublist contains 3 indices of vertices that form a face
        on a surface mesh
    points1 : list of lists of floats
        each sublist contains 3-D coordinates of a vertex on a surface mesh
    points2 : list of lists of floats
        points from second surface with 1-to-1 correspondence with points1
    scalars : numpy array of integers
        labels used to find foreground vertices
    background_value : integer
        scalar value for background vertices

    Returns
    -------
    closed_faces : list of lists of integers
        indices of vertices that form a face on the closed surface mesh
    closed_points : list of lists of floats
        3-D coordinates from points1 and points2
    closed_scalars : list of integers
        scalar values for points1 and points2

    Examples
    --------
    >>> # Build a cube by closing two parallel planes:
    >>> from mindboggle.guts.morph import close_surface_pair
    >>> # Build plane:
    >>> background_value = -1
    >>> n = 10  # plane edge length
    >>> points1 = []
    >>> for x in range(n):
    ...     for y in range(n):
    ...         points1.append([x, y, 0])
    >>> points2 = [[x[0], x[1], 1] for x in points1]
    >>> scalars = [background_value for x in range(len(points1))]
    >>> p = n * (n - 1) // 2 - 1
    >>> for i in [p, p + 1, p + n, p + n + 1]:
    ...     scalars[i] = 1
    >>> faces = []
    >>> for x in range(n - 1):
    ...     for y in range(n - 1):
    ...         faces.append([x + y*n, x + n + y*n, x + n + 1 + y*n])
    ...         faces.append([x + y*n, x + 1 + y*n, x + n + 1 + y*n])
    >>> #write_vtk('plane.vtk', points1, [], [], faces, scalars)
    >>> #plot_surfaces('plane.vtk')
    >>> closed_faces, closed_points, closed_scalars = close_surface_pair(faces,
    ...     points1, points2, scalars, background_value)
    >>> closed_faces[0:4]
    [[44, 54, 55], [44, 45, 55], [144, 154, 155], [144, 145, 155]]

    View cube (skip test):

    >>> from mindboggle.mio.plots import plot_surfaces # doctest: +SKIP
    >>> from mindboggle.mio.vtks import write_vtk # doctest: +SKIP
    >>> write_vtk('cube.vtk', closed_points, [], [], closed_faces,
    ...     closed_scalars, 'int') # doctest: +SKIP
    >>> plot_surfaces('cube.vtk') # doctest: +SKIP

    """
    import sys
    import numpy as np

    from mindboggle.guts.mesh import find_neighbors, keep_faces
    from mindboggle.guts.segment import extract_borders

    if isinstance(scalars, list):
        scalars = np.array(scalars)

    N = len(points1)
    closed_points = points1 + points2

    # Find all vertex neighbors and surface patch border vertices:
    neighbor_lists = find_neighbors(faces, N)
    I = np.where(scalars != background_value)[0]
    scalars[scalars == background_value] = background_value + 1
    scalars[I] = background_value + 2
    scalars = scalars.tolist()
    borders, u1, u2 = extract_borders(list(range(N)), scalars,
                                      neighbor_lists)
    if not len(borders):
        sys.exit('There are no border vertices!')
    borders = [x for x in borders if x in I]

    # Reindex copy of faces and combine with original (both zero-index):
    indices = list(range(N))
    indices2 = list(range(N, 2 * N))
    reindex = dict([(index, indices2[i]) for i, index in enumerate(indices)])
    faces = keep_faces(faces, I)
    faces2 = [[reindex[i] for i in face] for face in faces]
    closed_faces = faces + faces2

    # Connect border vertices between surface patches and add new faces:
    add_faces = []
    taken_already = []
    for index in borders:
        if index not in taken_already:
            neighbors = list(set(neighbor_lists[index]).intersection(borders))
            taken_already.append(index)
            #taken_already.extend([index] + neighbors)
            for neighbor in neighbors:
                add_faces.append([index, index + N, neighbor])
                add_faces.append([index + N, neighbor, neighbor + N])
    closed_faces = closed_faces + add_faces

    closed_scalars = scalars * 2

    return closed_faces, closed_points, closed_scalars

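# A minimal sketch (for illustration only, not part of Mindboggle) of the
# stitching rule used in close_surface_pair() above: every border edge
# (index, neighbor) between the patch and its offset copy contributes two
# triangles that together form one quadrilateral side wall.

def _stitch_border_edge_sketch(index, neighbor, n_points):
    """Return the two side-wall triangles for one border edge.

    The second copy of the patch is offset by n_points, so vertex i in the
    first surface pairs with vertex i + n_points in the second.

    >>> _stitch_border_edge_sketch(0, 1, 4)
    [[0, 4, 1], [4, 1, 5]]
    """
    return [[index, index + n_points, neighbor],
            [index + n_points, neighbor, neighbor + n_points]]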