def main(argv):
    """Command-line entry point.

    argv[1:4] are forwarded as the three inputs of propagate_fundus_lines;
    argv[4] names the output VTK file.
    """
    from mindboggle.mio.vtks import write_vtk

    # NOTE(review): relies on propagate_fundus_lines being defined at
    # module level elsewhere in this file -- confirm.
    closed_fundus_lines, points, faces = propagate_fundus_lines(*argv[1:4])
    write_vtk(argv[4], points, faces=faces,
              scalars=closed_fundus_lines, scalar_type='int')
def main(argv):
    """Command-line entry point (duplicate definition; same behavior as above).

    Reads three inputs from argv[1..3], closes the fundus lines, and writes
    the result as integer scalars to the VTK file named by argv[4].
    """
    from mindboggle.mio.vtks import write_vtk

    result = propagate_fundus_lines(argv[1], argv[2], argv[3])
    closed_fundus_lines, points, faces = result
    write_vtk(argv[4],
              points,
              faces=faces,
              scalars=closed_fundus_lines,
              scalar_type='int')
def downsample_vtk(vtk_file, sample_rate):
    """Downsample a VTK surface mesh in place.

    Parameters
    ----------
    vtk_file : string
        VTK surface mesh file; overwritten with the downsampled mesh
    sample_rate : number
        fraction of the mesh to keep; must satisfy 0 <= sample_rate <= 1
    """
    from mindboggle.mio.vtks import read_vtk, write_vtk
    from mindboggle.guts.mesh import decimate_file

    if sample_rate < 0 or sample_rate > 1:
        raise ValueError('0 <= sample_rate <= 1; you input %f' % sample_rate)

    # Decimate the mesh, writing the result back over the input file:
    decimate_file(vtk_file, reduction=1 - sample_rate,
                  output_vtk=vtk_file, save_vtk=True, smooth_steps=0)

    # Hack: re-read and re-write so the file ends up in mindboggle's own
    # VTK format (drop the trailing npoints/input_vtk return values).
    vtk_data = read_vtk(vtk_file)
    write_vtk(vtk_file, *vtk_data[:-2])
def relabel_surface(vtk_file, hemi='', old_labels=[], new_labels=[],
                    erase_remaining=True, erase_labels=[], erase_value=-1,
                    output_file=''):
    """
    Relabel surface in a VTK file.

    Parameters
    ----------
    vtk_file : string
        input labeled VTK file
    hemi : string
        hemisphere ('lh' or 'rh' or '')
        if set, add 1000 to left and 2000 to right hemisphere labels;
    old_labels : list of integers
        old labels (empty list if labels drawn from vtk scalars);
        may be used in conjunction with hemi
    new_labels : list of integers
        new labels (empty list if labels drawn from vtk scalars);
        may be used in conjunction with hemi
    erase_remaining : bool
        set all values not in old_labels to erase_value?
    erase_labels : list of integers
        values to erase (set to erase_value)
    erase_value : integer
        set vertices with labels in erase_labels to this value
    output_file : string
        new vtk file name

    Returns
    -------
    output_file : string
        new vtk file name

    Examples
    --------
    >>> import numpy as np
    >>> from mindboggle.guts.relabel import relabel_surface
    >>> from mindboggle.mio.vtks import read_scalars
    >>> from mindboggle.mio.fetch_data import prep_tests
    >>> urls, fetch_data = prep_tests()
    >>> vtk_file = fetch_data(urls['left_freesurfer_labels'])
    >>> hemi = 'lh'
    >>> old_labels = [1003,1009,1030]
    >>> new_labels = [0,500,1000]
    >>> erase_remaining = True
    >>> erase_labels = [0]
    >>> erase_value = -1
    >>> output_file = ''
    >>> output_file = relabel_surface(vtk_file, hemi, old_labels, new_labels,
    ...     erase_remaining, erase_labels, erase_value, output_file)
    >>> labels, name = read_scalars(output_file, True, True)
    >>> np.unique(labels)
    array([  -1, 1000, 1500, 2000])

    View relabeled surface file (skip test):

    >>> from mindboggle.mio.plots import plot_surfaces
    >>> plot_surfaces(output_file) # doctest: +SKIP

    """
    import os
    import numpy as np
    from mindboggle.mio.vtks import read_vtk, write_vtk

    # Load labeled vtk surfaces:
    points, indices, lines, faces, scalars, scalar_names, npoints, \
        input_vtk = read_vtk(vtk_file, return_first=True, return_array=True)
    # BUG FIX: scalars is a numpy array (return_array=True), so
    # "scalars[:]" is a *view*, not a copy -- writing into it would also
    # mutate scalars and corrupt the np.where(scalars == label) lookups
    # below when a new label collides with a not-yet-processed old label.
    new_scalars = np.copy(scalars)

    # Raise an error if inputs set incorrectly:
    if (new_labels and not old_labels) or \
            (hemi and hemi not in ['lh', 'rh']) or \
            (new_labels and len(old_labels) != len(new_labels)) or \
            (erase_remaining and not old_labels):
        raise IOError("Please check inputs for relabel_surface().")

    # Loop through unique labels in scalars:
    ulabels = np.unique(scalars)
    for label in ulabels:
        I = np.where(scalars == label)[0]

        # If label in erase_labels list, replace with erase_value:
        if label in erase_labels:
            new_scalars[I] = erase_value

        # If label in old_labels list, replace with corresponding new label,
        # and if hemi set, add 1000 or 2000 to the new label:
        elif label in old_labels and (len(old_labels) == len(new_labels)):
            new_label = new_labels[old_labels.index(label)]
            if hemi == 'lh':
                new_scalars[I] = 1000 + new_label
            elif hemi == 'rh':
                new_scalars[I] = 2000 + new_label
            else:
                new_scalars[I] = new_label

        # If labels not set then optionally add hemi value:
        elif hemi and not new_labels:
            if hemi == 'lh':
                new_scalars[I] = 1000 + label
            elif hemi == 'rh':
                new_scalars[I] = 2000 + label

        # If label unaccounted for and erase_remaining, set to erase_value:
        elif erase_remaining:
            new_scalars[I] = erase_value

    # Ensure that the new scalars are integer values:
    new_scalars = [int(x) for x in new_scalars]

    # Write output VTK file:
    if not output_file:
        output_file = os.path.join(os.getcwd(),
                                   'relabeled_' + os.path.basename(vtk_file))
    write_vtk(output_file, points, indices, lines, faces,
              [new_scalars], ['Labels'], scalar_type='int')
    if not os.path.exists(output_file):
        raise IOError("relabel_surface() did not create " + output_file + ".")

    return output_file
def evaluate_deep_features(features_file, labels_file, sulci_file='',
                           hemi='', excludeIDs=[-1], output_vtk_name='',
                           verbose=True):
    """
    Evaluate deep surface features by computing the minimum distance from each
    label border vertex to all of the feature vertices in the same sulcus,
    and from each feature vertex to all of the label border vertices in the
    same sulcus.  The label borders run along the deepest parts of sulci
    and correspond to fundi in the DKT cortical labeling protocol.

    Parameters
    ----------
    features_file : string
        VTK surface file with feature numbers for vertex scalars
    labels_file : string
        VTK surface file with label numbers for vertex scalars
    sulci_file : string
        VTK surface file with sulcus numbers for vertex scalars
    hemi : string
        hemisphere ('lh' or 'rh'); 'lh' selects the left-hemisphere sulcus
        label pair lists to size the output arrays, anything else the right
    excludeIDs : list of integers
        feature/sulcus/label IDs to exclude (background set to -1)
    output_vtk_name : Boolean
        if not empty, output a VTK file beginning with output_vtk_name that
        contains a surface with mean distances as scalars
    verbose : Boolean
        print mean distances to standard output?

    Returns
    -------
    feature_to_border_mean_distances : numpy array [number of features x 1]
        mean distance from each feature to sulcus label border
    feature_to_border_sd_distances : numpy array [number of features x 1]
        standard deviations of feature-to-border distances
    feature_to_border_distances_vtk : string
        VTK surface file containing feature-to-border distances
    border_to_feature_mean_distances : numpy array [number of features x 1]
        mean distances from each sulcus label border to feature
    border_to_feature_sd_distances : numpy array [number of features x 1]
        standard deviations of border-to-feature distances
    border_to_feature_distances_vtk : string
        VTK surface file containing border-to-feature distances
    """
    import os
    import sys
    import numpy as np
    from mindboggle.mio.vtks import read_vtk, read_scalars, write_vtk
    from mindboggle.guts.mesh import find_neighbors, remove_faces
    from mindboggle.guts.segment import extract_borders
    from mindboggle.guts.compute import source_to_target_distances
    from mindboggle.mio.labels import DKTprotocol

    dkt = DKTprotocol()

    #-------------------------------------------------------------------------
    # Load labels, features, and sulci:
    #-------------------------------------------------------------------------
    faces, lines, indices, points, npoints, labels, scalar_names, \
        input_vtk = read_vtk(labels_file, True, True)
    features, name = read_scalars(features_file, True, True)
    if sulci_file:
        sulci, name = read_scalars(sulci_file, True, True)

        # List of indices to sulcus vertices (background sulcus value is -1):
        sulcus_indices = [i for i, x in enumerate(sulci) if x != -1]
        segmentIDs = sulci
        sulcus_faces = remove_faces(faces, sulcus_indices)
    else:
        # No sulcus file: use every vertex and keep all faces.
        sulcus_indices = range(len(labels))
        segmentIDs = []
        sulcus_faces = faces

    #-------------------------------------------------------------------------
    # Prepare neighbors, label pairs, border IDs, and outputs:
    #-------------------------------------------------------------------------
    # Calculate neighbor lists for all points:
    print('Find neighbors for all vertices...')
    neighbor_lists = find_neighbors(faces, npoints)

    # Find label border points in any of the sulci:
    print('Find label border points in any of the sulci...')
    border_indices, border_label_tuples, unique_border_label_tuples = \
        extract_borders(sulcus_indices, labels, neighbor_lists,
                        ignore_values=[], return_label_pairs=True)
    if not len(border_indices):
        sys.exit('There are no label border points!')

    # Initialize an array of label border IDs
    # (label border vertices that define sulci in the labeling protocol):
    print('Build an array of label border IDs...')
    label_borders = -1 * np.ones(npoints)

    # Output arrays are sized by the protocol's per-hemisphere sulcus count:
    if hemi == 'lh':
        nsulcus_lists = len(dkt.left_sulcus_label_pair_lists)
    else:
        nsulcus_lists = len(dkt.right_sulcus_label_pair_lists)
    feature_to_border_mean_distances = -1 * np.ones(nsulcus_lists)
    feature_to_border_sd_distances = -1 * np.ones(nsulcus_lists)
    border_to_feature_mean_distances = -1 * np.ones(nsulcus_lists)
    border_to_feature_sd_distances = -1 * np.ones(nsulcus_lists)
    feature_to_border_distances_vtk = ''
    border_to_feature_distances_vtk = ''

    #-------------------------------------------------------------------------
    # Loop through sulci:
    #-------------------------------------------------------------------------
    # For each list of sorted label pairs (corresponding to a sulcus):
    for isulcus, label_pairs in enumerate(dkt.sulcus_label_pair_lists):

        # Keep the border points with label pair labels:
        label_pair_border_indices = [x for i, x in enumerate(border_indices)
                                     if np.unique(
                                         border_label_tuples[i]).tolist()
                                     in label_pairs]

        # Store the points as sulcus IDs in the border IDs array:
        if label_pair_border_indices:
            label_borders[label_pair_border_indices] = isulcus

    # Proceed only if at least one border vertex was assigned a sulcus ID
    # (label_borders started out all -1):
    if len(np.unique(label_borders)) > 1:

        #---------------------------------------------------------------------
        # Construct a feature-to-border distance matrix and VTK file:
        #---------------------------------------------------------------------
        # Construct a distance matrix:
        print('Construct a feature-to-border distance matrix...')
        sourceIDs = features
        targetIDs = label_borders
        distances, distance_matrix = source_to_target_distances(
            sourceIDs, targetIDs, points, segmentIDs, excludeIDs)

        # Compute mean distances for each feature (-1 marks missing entries):
        nfeatures = min(np.shape(distance_matrix)[1], nsulcus_lists)
        for ifeature in range(nfeatures):
            feature_distances = [x for x in distance_matrix[:, ifeature]
                                 if x != -1]
            feature_to_border_mean_distances[ifeature] = \
                np.mean(feature_distances)
            feature_to_border_sd_distances[ifeature] = \
                np.std(feature_distances)

        if verbose:
            print('Feature-to-border mean distances:')
            print(feature_to_border_mean_distances)
            print('Feature-to-border standard deviations of distances:')
            print(feature_to_border_sd_distances)

        # Write resulting feature-label border distances to VTK file:
        if output_vtk_name:
            feature_to_border_distances_vtk = os.path.join(
                os.getcwd(),
                output_vtk_name + '_feature_to_border_mean_distances.vtk')
            print('Write feature-to-border distances to {0}...'.format(
                feature_to_border_distances_vtk))
            write_vtk(feature_to_border_distances_vtk, points,
                      [], [], sulcus_faces, [distances],
                      ['feature-to-border_distances'], 'float')

        #---------------------------------------------------------------------
        # Construct a border-to-feature distance matrix and VTK file:
        #---------------------------------------------------------------------
        # Construct a distance matrix (sources/targets swapped):
        print('Construct a border-to-feature distance matrix...')
        sourceIDs = label_borders
        targetIDs = features
        distances, distance_matrix = source_to_target_distances(
            sourceIDs, targetIDs, points, segmentIDs, excludeIDs)

        # Compute mean distances for each feature:
        nfeatures = min(np.shape(distance_matrix)[1], nsulcus_lists)
        for ifeature in range(nfeatures):
            border_distances = [x for x in distance_matrix[:, ifeature]
                                if x != -1]
            border_to_feature_mean_distances[ifeature] = \
                np.mean(border_distances)
            border_to_feature_sd_distances[ifeature] = \
                np.std(border_distances)

        if verbose:
            print('border-to-feature mean distances:')
            print(border_to_feature_mean_distances)
            print('border-to-feature standard deviations of distances:')
            print(border_to_feature_sd_distances)

        # Write resulting feature-label border distances to VTK file:
        if output_vtk_name:
            border_to_feature_distances_vtk = os.path.join(
                os.getcwd(),
                output_vtk_name + '_border_to_feature_mean_distances.vtk')
            print('Write border-to-feature distances to {0}...'.format(
                border_to_feature_distances_vtk))
            write_vtk(border_to_feature_distances_vtk, points,
                      [], [], sulcus_faces, [distances],
                      ['border-to-feature_distances'], 'float')

    #-------------------------------------------------------------------------
    # Return outputs:
    #-------------------------------------------------------------------------
    return feature_to_border_mean_distances, feature_to_border_sd_distances,\
        feature_to_border_distances_vtk,\
        border_to_feature_mean_distances, border_to_feature_sd_distances,\
        border_to_feature_distances_vtk
def evaluate_deep_features(features_file, labels_file, sulci_file='',
                           hemi='', excludeIDs=[-1], output_vtk_name='',
                           verbose=True):
    """
    Evaluate deep surface features by computing the minimum distance from each
    label border vertex to all of the feature vertices in the same sulcus,
    and from each feature vertex to all of the label border vertices in the
    same sulcus.  The label borders run along the deepest parts of sulci
    and correspond to fundi in the DKT cortical labeling protocol.

    NOTE(review): this is a duplicate of the evaluate_deep_features
    definition earlier in this file (same tokens, different formatting).

    Parameters
    ----------
    features_file : string
        VTK surface file with feature numbers for vertex scalars
    labels_file : string
        VTK surface file with label numbers for vertex scalars
    sulci_file : string
        VTK surface file with sulcus numbers for vertex scalars
    hemi : string
        hemisphere ('lh' or 'rh'); 'lh' selects the left-hemisphere sulcus
        label pair lists to size the output arrays, anything else the right
    excludeIDs : list of integers
        feature/sulcus/label IDs to exclude (background set to -1)
    output_vtk_name : Boolean
        if not empty, output a VTK file beginning with output_vtk_name that
        contains a surface with mean distances as scalars
    verbose : Boolean
        print mean distances to standard output?

    Returns
    -------
    feature_to_border_mean_distances : numpy array [number of features x 1]
        mean distance from each feature to sulcus label border
    feature_to_border_sd_distances : numpy array [number of features x 1]
        standard deviations of feature-to-border distances
    feature_to_border_distances_vtk : string
        VTK surface file containing feature-to-border distances
    border_to_feature_mean_distances : numpy array [number of features x 1]
        mean distances from each sulcus label border to feature
    border_to_feature_sd_distances : numpy array [number of features x 1]
        standard deviations of border-to-feature distances
    border_to_feature_distances_vtk : string
        VTK surface file containing border-to-feature distances
    """
    import os
    import sys
    import numpy as np
    from mindboggle.mio.vtks import read_vtk, read_scalars, write_vtk
    from mindboggle.guts.mesh import find_neighbors, remove_faces
    from mindboggle.guts.segment import extract_borders
    from mindboggle.guts.compute import source_to_target_distances
    from mindboggle.mio.labels import DKTprotocol

    dkt = DKTprotocol()

    #-------------------------------------------------------------------------
    # Load labels, features, and sulci:
    #-------------------------------------------------------------------------
    faces, lines, indices, points, npoints, labels, scalar_names, \
        input_vtk = read_vtk(labels_file, True, True)
    features, name = read_scalars(features_file, True, True)
    if sulci_file:
        sulci, name = read_scalars(sulci_file, True, True)

        # List of indices to sulcus vertices (background sulcus value is -1):
        sulcus_indices = [i for i, x in enumerate(sulci) if x != -1]
        segmentIDs = sulci
        sulcus_faces = remove_faces(faces, sulcus_indices)
    else:
        # No sulcus file: use every vertex and keep all faces.
        sulcus_indices = range(len(labels))
        segmentIDs = []
        sulcus_faces = faces

    #-------------------------------------------------------------------------
    # Prepare neighbors, label pairs, border IDs, and outputs:
    #-------------------------------------------------------------------------
    # Calculate neighbor lists for all points:
    print('Find neighbors for all vertices...')
    neighbor_lists = find_neighbors(faces, npoints)

    # Find label border points in any of the sulci:
    print('Find label border points in any of the sulci...')
    border_indices, border_label_tuples, unique_border_label_tuples = \
        extract_borders(sulcus_indices, labels, neighbor_lists,
                        ignore_values=[], return_label_pairs=True)
    if not len(border_indices):
        sys.exit('There are no label border points!')

    # Initialize an array of label border IDs
    # (label border vertices that define sulci in the labeling protocol):
    print('Build an array of label border IDs...')
    label_borders = -1 * np.ones(npoints)

    # Output arrays are sized by the protocol's per-hemisphere sulcus count:
    if hemi == 'lh':
        nsulcus_lists = len(dkt.left_sulcus_label_pair_lists)
    else:
        nsulcus_lists = len(dkt.right_sulcus_label_pair_lists)
    feature_to_border_mean_distances = -1 * np.ones(nsulcus_lists)
    feature_to_border_sd_distances = -1 * np.ones(nsulcus_lists)
    border_to_feature_mean_distances = -1 * np.ones(nsulcus_lists)
    border_to_feature_sd_distances = -1 * np.ones(nsulcus_lists)
    feature_to_border_distances_vtk = ''
    border_to_feature_distances_vtk = ''

    #-------------------------------------------------------------------------
    # Loop through sulci:
    #-------------------------------------------------------------------------
    # For each list of sorted label pairs (corresponding to a sulcus):
    for isulcus, label_pairs in enumerate(dkt.sulcus_label_pair_lists):

        # Keep the border points with label pair labels:
        label_pair_border_indices = [
            x for i, x in enumerate(border_indices)
            if np.unique(border_label_tuples[i]).tolist() in label_pairs
        ]

        # Store the points as sulcus IDs in the border IDs array:
        if label_pair_border_indices:
            label_borders[label_pair_border_indices] = isulcus

    # Proceed only if at least one border vertex was assigned a sulcus ID
    # (label_borders started out all -1):
    if len(np.unique(label_borders)) > 1:

        #---------------------------------------------------------------------
        # Construct a feature-to-border distance matrix and VTK file:
        #---------------------------------------------------------------------
        # Construct a distance matrix:
        print('Construct a feature-to-border distance matrix...')
        sourceIDs = features
        targetIDs = label_borders
        distances, distance_matrix = source_to_target_distances(
            sourceIDs, targetIDs, points, segmentIDs, excludeIDs)

        # Compute mean distances for each feature (-1 marks missing entries):
        nfeatures = min(np.shape(distance_matrix)[1], nsulcus_lists)
        for ifeature in range(nfeatures):
            feature_distances = [
                x for x in distance_matrix[:, ifeature] if x != -1
            ]
            feature_to_border_mean_distances[ifeature] = \
                np.mean(feature_distances)
            feature_to_border_sd_distances[ifeature] = \
                np.std(feature_distances)

        if verbose:
            print('Feature-to-border mean distances:')
            print(feature_to_border_mean_distances)
            print('Feature-to-border standard deviations of distances:')
            print(feature_to_border_sd_distances)

        # Write resulting feature-label border distances to VTK file:
        if output_vtk_name:
            feature_to_border_distances_vtk = os.path.join(
                os.getcwd(),
                output_vtk_name + '_feature_to_border_mean_distances.vtk')
            print('Write feature-to-border distances to {0}...'.format(
                feature_to_border_distances_vtk))
            write_vtk(feature_to_border_distances_vtk, points, [], [],
                      sulcus_faces, [distances],
                      ['feature-to-border_distances'], 'float')

        #---------------------------------------------------------------------
        # Construct a border-to-feature distance matrix and VTK file:
        #---------------------------------------------------------------------
        # Construct a distance matrix (sources/targets swapped):
        print('Construct a border-to-feature distance matrix...')
        sourceIDs = label_borders
        targetIDs = features
        distances, distance_matrix = source_to_target_distances(
            sourceIDs, targetIDs, points, segmentIDs, excludeIDs)

        # Compute mean distances for each feature:
        nfeatures = min(np.shape(distance_matrix)[1], nsulcus_lists)
        for ifeature in range(nfeatures):
            border_distances = [
                x for x in distance_matrix[:, ifeature] if x != -1
            ]
            border_to_feature_mean_distances[ifeature] = \
                np.mean(border_distances)
            border_to_feature_sd_distances[ifeature] = \
                np.std(border_distances)

        if verbose:
            print('border-to-feature mean distances:')
            print(border_to_feature_mean_distances)
            print('border-to-feature standard deviations of distances:')
            print(border_to_feature_sd_distances)

        # Write resulting feature-label border distances to VTK file:
        if output_vtk_name:
            border_to_feature_distances_vtk = os.path.join(
                os.getcwd(),
                output_vtk_name + '_border_to_feature_mean_distances.vtk')
            print('Write border-to-feature distances to {0}...'.format(
                border_to_feature_distances_vtk))
            write_vtk(border_to_feature_distances_vtk, points, [], [],
                      sulcus_faces, [distances],
                      ['border-to-feature_distances'], 'float')

    #-------------------------------------------------------------------------
    # Return outputs:
    #-------------------------------------------------------------------------
    return feature_to_border_mean_distances, feature_to_border_sd_distances,\
        feature_to_border_distances_vtk,\
        border_to_feature_mean_distances, border_to_feature_sd_distances,\
        border_to_feature_distances_vtk
def plot_mask_surface(vtk_file, mask_file='', nonmask_value=-1,
                      masked_output='', remove_nonmask=False,
                      program='vtkviewer',
                      use_colormap=False, colormap_file='',
                      background_value=-1):
    """
    Use vtkviewer or mayavi2 to visualize VTK surface mesh data.

    If a mask_file is provided, a temporary masked file is saved,
    and it is this file that is viewed.

    If using vtkviewer, optionally provide colormap file
    or set $COLORMAP environment variable.

    Parameters
    ----------
    vtk_file : string
        name of VTK surface mesh file
    mask_file : string
        name of VTK surface mesh file to mask vtk_file vertices
    nonmask_value : integer
        nonmask (usually background) value
    masked_output : string
        temporary masked output file name
    remove_nonmask : bool
        remove vertices that are not in mask? (otherwise assign nonmask_value)
    program : string {'vtkviewer', 'mayavi2'}
        program to visualize VTK file
    use_colormap : bool
        use Paraview-style XML colormap file set by $COLORMAP env variable?
    colormap_file : string
        use colormap in given file if use_colormap==True?  if empty and
        use_colormap==True, use file set by $COLORMAP environment variable
    background_value : integer or float
        background value

    Examples
    --------
    >>> import os
    >>> from mindboggle.mio.plots import plot_mask_surface
    >>> from mindboggle.mio.fetch_data import prep_tests
    >>> urls, fetch_data = prep_tests()
    >>> vtk_file = fetch_data(urls['freesurfer_labels'], '', '.vtk')
    >>> os.rename(vtk_file, vtk_file + '.nii.gz')
    >>> vtk_file = vtk_file + '.nii.gz'
    >>> mask_file = ''
    >>> nonmask_value = 0 #-1
    >>> masked_output = ''
    >>> remove_nonmask = True
    >>> program = 'vtkviewer'
    >>> use_colormap = True
    >>> colormap_file = ''
    >>> background_value = -1
    >>> plot_mask_surface(vtk_file, mask_file, nonmask_value, masked_output,
    ...     remove_nonmask, program, use_colormap, colormap_file,
    ...     background_value) # doctest: +SKIP

    """
    import os
    import numpy as np

    from mindboggle.guts.mesh import keep_faces, reindex_faces_points
    from mindboggle.guts.utilities import execute
    from mindboggle.mio.plots import plot_surfaces
    from mindboggle.mio.vtks import read_scalars, rewrite_scalars, \
        read_vtk, write_vtk

    # ------------------------------------------------------------------------
    # Filter mesh with non-background values from a second (same-size) mesh:
    # ------------------------------------------------------------------------
    if mask_file:
        mask, name = read_scalars(mask_file, True, True)
        if not masked_output:
            masked_output = os.path.join(os.getcwd(), 'temp.vtk')
        file_to_plot = masked_output

        # --------------------------------------------------------------------
        # Remove nonmask-valued vertices:
        # --------------------------------------------------------------------
        if remove_nonmask:
            # ----------------------------------------------------------------
            # Load VTK files:
            # ----------------------------------------------------------------
            points, indices, lines, faces, scalars, scalar_names, npoints, \
                input_vtk = read_vtk(vtk_file, True, True)
            # ----------------------------------------------------------------
            # Find mask indices, remove nonmask faces, and reindex:
            # ----------------------------------------------------------------
            Imask = [i for i, x in enumerate(mask) if x != nonmask_value]
            mask_faces = keep_faces(faces, Imask)
            mask_faces, points, \
                original_indices = reindex_faces_points(mask_faces, points)
            # ----------------------------------------------------------------
            # Write VTK file with scalar values:
            # ----------------------------------------------------------------
            if np.ndim(scalars) == 1:
                scalar_type = type(scalars[0]).__name__
            elif np.ndim(scalars) == 2:
                scalar_type = type(scalars[0][0]).__name__
            else:
                # BUG FIX: this branch used to only print a message and left
                # scalar_type unbound, so the write_vtk() call below crashed
                # with an UnboundLocalError.  Fail fast with a clear error.
                raise ValueError("Undefined scalar type!")
            write_vtk(file_to_plot, points, [], [], mask_faces,
                      scalars[original_indices].tolist(), scalar_names,
                      scalar_type=scalar_type)
        else:
            scalars, name = read_scalars(vtk_file, True, True)
            scalars[mask == nonmask_value] = nonmask_value
            rewrite_scalars(vtk_file, file_to_plot, scalars, ['scalars'], [],
                            background_value)
    else:
        file_to_plot = vtk_file

    # ------------------------------------------------------------------------
    # Display with vtkviewer.py:
    # ------------------------------------------------------------------------
    if program == 'vtkviewer':
        plot_surfaces(file_to_plot, use_colormap=use_colormap,
                      colormap_file=colormap_file)
    # ------------------------------------------------------------------------
    # Display with mayavi2:
    # ------------------------------------------------------------------------
    elif program == 'mayavi2':
        cmd = ["mayavi2", "-d", file_to_plot, "-m", "Surface", "&"]
        execute(cmd, 'os')
def realign_boundaries_to_fundus_lines(surf_file, init_label_file,
                                       fundus_lines_file, thickness_file,
                                       out_label_file=None):
    """
    Fix label boundaries to fundus lines.

    Parameters
    ----------
    surf_file : file containing the surface geometry in vtk format
    init_label_file : file containing scalars that represent the initial
        guess at labels
    fundus_lines_file : file containing scalars representing fundus lines.
    thickness_file: file containing cortical thickness scalar data
        (for masking out the medial wall only)
    out_label_file : if specified, the realigned labels will be writen to
        this file

    Returns
    -------
    numpy array representing the realigned label for each surface vertex.
    """
    import numpy as np
    from mindboggle.guts.segment import extract_borders
    import mindboggle.guts.graph as go
    from mindboggle.mio.vtks import read_vtk, read_scalars, write_vtk
    from mindboggle.guts.mesh import find_neighbors
    # NOTE(review): Python-2-style implicit relative import; under Python 3
    # this only works if propagate_fundus_lines is importable from sys.path.
    import propagate_fundus_lines

    ## read files
    points, indices, lines, faces, scalars, scalar_names, num_points, \
        input_vtk = read_vtk(surf_file, return_first=True, return_array=True)
    # Replace the indices returned by read_vtk with all vertex indices:
    indices = range(num_points)

    init_labels, _ = read_scalars(init_label_file,
                                  return_first=True, return_array=True)

    fundus_lines, _ = read_scalars(fundus_lines_file,
                                   return_first=True, return_array=True)

    thickness, _ = read_scalars(thickness_file,
                                return_first=True, return_array=True)

    # remove labels from vertices with zero thickness (get around
    # DKT40 annotations having the label '3' for all the Corpus
    # Callosum vertices).
    cc_inds = [x for x in indices if thickness[x] < 0.001]
    init_labels[cc_inds] = 0

    ## setup seeds from initial label boundaries
    neighbor_lists = find_neighbors(faces, num_points)

    # extract all vertices that are on a boundary between labels
    boundary_indices, label_pairs, _ = extract_borders(
        indices, init_labels, neighbor_lists,
        return_label_pairs=True)

    # split boundary vertices into segments with common boundary pairs.
    # Keys are label pairs normalized to (smaller, larger) order.
    boundary_segments = {}
    for boundary_index, label_pair in zip(boundary_indices, label_pairs):
        key = ((label_pair[0], label_pair[1]) if label_pair[0] < label_pair[1]
               else (label_pair[1], label_pair[0]))
        if key not in boundary_segments:
            boundary_segments[key] = []
        boundary_segments[key].append(boundary_index)

    boundary_matrix, boundary_matrix_keys = _build_boundary_matrix(
        boundary_segments, num_points)

    # build the affinity matrix
    affinity_matrix = go.weight_graph(np.array(points), indices,
                                      np.array(faces), sigma=10,
                                      add_to_graph=False)

    ## propagate boundaries to fundus line vertices
    learned_matrix = _propagate_labels(affinity_matrix, boundary_matrix,
                                       boundary_indices, 100, 1)

    # assign labels to fundus line vertices based on highest probability
    # NOTE(review): new_boundaries is assigned here but never used below.
    new_boundaries = -1 * np.ones(init_labels.shape)
    fundus_line_indices = [i for i, x in enumerate(fundus_lines) if x > 0.5]

    # tile the surface into connected components delimited by fundus lines
    closed_fundus_lines, _, _ = propagate_fundus_lines.propagate_fundus_lines(
        points, faces, fundus_line_indices, thickness)

    closed_fundus_line_indices = np.where(closed_fundus_lines > 0)[0]

    # split surface into connected components
    connected_component_faces = _remove_boundary_faces(
        points, faces, closed_fundus_line_indices)

    # label components based on most probable label assignment
    new_labels = _label_components(
        connected_component_faces, num_points, boundary_indices,
        learned_matrix, boundary_matrix_keys)

    # propagate new labels to fill holes
    label_matrix, label_map = _build_label_matrix(new_labels)
    new_learned_matrix = _propagate_labels(
        affinity_matrix, label_matrix,
        [i for i in range(num_points) if new_labels[i] >= 0], 100, 1)

    # assign most probable labels to the vertices still unlabeled (-1)
    for idx in [i for i in range(num_points) if new_labels[i] == -1]:
        max_idx = np.argmax(new_learned_matrix[idx])
        new_labels[idx] = label_map[max_idx]

    # save
    if out_label_file is not None:
        write_vtk(out_label_file, points, faces=faces,
                  scalars=[int(x) for x in new_labels],
                  scalar_type='int')

    return new_labels
def relabel_surface(vtk_file, hemi='', old_labels=[], new_labels=[],
                    erase_remaining=True, erase_labels=[], erase_value=-1,
                    output_file=''):
    """
    Relabel surface in a VTK file.

    Parameters
    ----------
    vtk_file : string
        input labeled VTK file
    hemi : string
        hemisphere ('lh' or 'rh' or '')
        if set, add 1000 to left and 2000 to right hemisphere labels;
    old_labels : list of integers
        old labels (empty list if labels drawn from vtk scalars);
        may be used in conjunction with hemi
    new_labels : list of integers
        new labels (empty list if labels drawn from vtk scalars);
        may be used in conjunction with hemi
    erase_remaining : Boolean
        set all values not in old_labels to erase_value?
    erase_labels : list of integers
        values to erase (set to erase_value)
    erase_value : integer
        set vertices with labels in erase_labels to this value
    output_file : string
        new vtk file name

    Returns
    -------
    output_file : string
        new vtk file name

    Examples
    --------
    >>> import os
    >>> from mindboggle.guts.relabel import relabel_surface
    >>> from mindboggle.mio.plots import plot_surfaces
    >>> path = os.environ['MINDBOGGLE_DATA']
    >>> vtk_file = os.path.join(path, 'arno', 'labels', 'lh.labels.DKT25.manual.vtk')
    >>> hemi = 'lh'
    >>> old_labels = [1003,1009,1030]
    >>> new_labels = [3,9,30]
    >>> erase_remaining = False
    >>> erase_labels = [0]
    >>> erase_value = -1
    >>> output_file = ''
    >>> #
    >>> relabel_surface(vtk_file, hemi, old_labels, new_labels, erase_remaining, erase_labels, erase_value, output_file)
    >>> # View
    >>> plot_surfaces('relabeled_FreeSurfer_cortex_labels.vtk')
    >>> #plot_surfaces('relabeled_rh.labels.DKT31.manual.vtk')

    """
    import os
    import numpy as np
    from mindboggle.mio.vtks import read_vtk, write_vtk

    # Load labeled vtk surfaces:
    # NOTE(review): this unpack order (faces first) differs from the other
    # relabel_surface copy in this file (points first) -- it matches an
    # older read_vtk return order; confirm against the mindboggle version
    # actually in use.
    faces, lines, indices, points, npoints, scalars, \
        name, input_vtk = read_vtk(vtk_file, return_first=True,
                                   return_array=True)
    # BUG FIX: scalars is a numpy array (return_array=True), so
    # "scalars[:]" is a *view*, not a copy -- writing into it would also
    # mutate scalars and corrupt the np.where(scalars == label) lookups
    # below when a new label collides with a not-yet-processed old label.
    new_scalars = np.copy(scalars)

    # Raise an error if inputs set incorrectly:
    if (new_labels and not old_labels) or \
            (hemi and hemi not in ['lh', 'rh']) or \
            (new_labels and len(old_labels) != len(new_labels)) or \
            (erase_remaining and not old_labels):
        raise IOError("Please check inputs for relabel_surface().")

    # Loop through unique labels in scalars:
    ulabels = np.unique(scalars)
    for label in ulabels:
        I = np.where(scalars == label)[0]

        # If label in erase_labels list, replace with erase_value:
        if label in erase_labels:
            new_scalars[I] = erase_value

        # If label in old_labels list, replace with corresponding new label,
        # and if hemi set, add 1000 or 2000 to the new label:
        elif label in old_labels and (len(old_labels) == len(new_labels)):
            new_label = new_labels[old_labels.index(label)]
            if hemi == 'lh':
                new_scalars[I] = 1000 + new_label
            elif hemi == 'rh':
                new_scalars[I] = 2000 + new_label
            else:
                new_scalars[I] = new_label

        # If labels not set then optionally add hemi value:
        elif hemi and not new_labels:
            if hemi == 'lh':
                new_scalars[I] = 1000 + label
            elif hemi == 'rh':
                new_scalars[I] = 2000 + label

        # If label unaccounted for and erase_remaining, set to erase_value:
        elif erase_remaining:
            new_scalars[I] = erase_value

    # Ensure that the new scalars are integer values:
    new_scalars = [int(x) for x in new_scalars]

    # Write output VTK file:
    if not output_file:
        output_file = os.path.join(os.getcwd(),
                                   'relabeled_' + os.path.basename(vtk_file))
    write_vtk(output_file, points, indices, lines, faces,
              [new_scalars], ['Labels'], scalar_type='int')
    if not os.path.exists(output_file):
        s = "relabel_surface() did not create " + output_file + "."
        raise IOError(s)

    return output_file
def plot_mask_surface(vtk_file, mask_file='', nonmask_value=-1,
                      masked_output='', remove_nonmask=False,
                      program='vtkviewer',
                      use_colormap=False, colormap_file=''):
    """
    Use vtkviewer or mayavi2 to visualize VTK surface mesh data.

    If a mask_file is provided, a temporary masked file is saved,
    and it is this file that is viewed.

    If using vtkviewer, can optionally provide colormap file
    or set $COLORMAP environment variable.

    Parameters
    ----------
    vtk_file : string
        name of VTK surface mesh file
    mask_file : string
        name of VTK surface mesh file to mask vtk_file vertices
    nonmask_value : integer
        nonmask (usually background) value
    masked_output : string
        temporary masked output file name
    remove_nonmask : Boolean
        remove vertices that are not in mask? (otherwise assign nonmask_value)
    program : string {'vtkviewer', 'mayavi2'}
        program to visualize VTK file
    use_colormap : Boolean
        use Paraview-style XML colormap file set by $COLORMAP env variable?
    colormap_file : string
        use colormap in given file if use_colormap==True?
        if empty and use_colormap==True, use file set by $COLORMAP
        environment variable

    Examples
    --------
    >>> import os
    >>> from mindboggle.mio.plots import plot_mask_surface
    >>> path = os.environ['MINDBOGGLE_DATA']
    >>> vtk_file = os.path.join(path, 'arno', 'labels', 'lh.labels.DKT31.manual.vtk')
    >>> mask_file = os.path.join(path, 'test_one_label.vtk')
    >>> nonmask_value = 0 #-1
    >>> masked_output = ''
    >>> remove_nonmask = True
    >>> program = 'vtkviewer'
    >>> use_colormap = True
    >>> colormap_file = '' #'/software/surface_cpp_tools/colormap.xml'
    >>> plot_mask_surface(vtk_file, mask_file, nonmask_value, masked_output, remove_nonmask, program, use_colormap, colormap_file)
    """
    import os
    import numpy as np
    from mindboggle.guts.mesh import remove_faces, reindex_faces_points
    from mindboggle.guts.utilities import execute
    from mindboggle.mio.plots import plot_surfaces
    from mindboggle.mio.vtks import read_scalars, rewrite_scalars, \
        read_vtk, write_vtk

    #-------------------------------------------------------------------------
    # Filter mesh with non-background values from a second (same-size) mesh:
    #-------------------------------------------------------------------------
    if mask_file:
        mask, name = read_scalars(mask_file, True, True)
        if not masked_output:
            masked_output = os.path.join(os.getcwd(), 'temp.vtk')
        file_to_plot = masked_output

        #---------------------------------------------------------------------
        # Remove nonmask-valued vertices:
        #---------------------------------------------------------------------
        if remove_nonmask:
            #-----------------------------------------------------------------
            # Load VTK files:
            #-----------------------------------------------------------------
            points, indices, lines, faces, scalars, scalar_names, npoints, \
                input_vtk = read_vtk(vtk_file, True, True)
            #-----------------------------------------------------------------
            # Find mask indices, remove nonmask faces, and reindex:
            #-----------------------------------------------------------------
            Imask = [i for i, x in enumerate(mask) if x != nonmask_value]
            mask_faces = remove_faces(faces, Imask)
            mask_faces, points, \
                original_indices = reindex_faces_points(mask_faces, points)
            #-----------------------------------------------------------------
            # Write VTK file with scalar values:
            #-----------------------------------------------------------------
            if np.ndim(scalars) == 1:
                scalar_type = type(scalars[0]).__name__
            elif np.ndim(scalars) == 2:
                scalar_type = type(scalars[0][0]).__name__
            else:
                # BUGFIX: previously this branch only printed a warning and
                # then crashed with a NameError on the unbound scalar_type;
                # fail explicitly instead.
                raise ValueError("Undefined scalar type!")
            write_vtk(file_to_plot, points, [], [], mask_faces,
                      scalars[original_indices].tolist(), scalar_names,
                      scalar_type=scalar_type)
        else:
            scalars, name = read_scalars(vtk_file, True, True)
            scalars[mask == nonmask_value] = nonmask_value
            rewrite_scalars(vtk_file, file_to_plot, scalars)
    else:
        file_to_plot = vtk_file

    #-------------------------------------------------------------------------
    # Display with vtkviewer.py:
    #-------------------------------------------------------------------------
    if program == 'vtkviewer':
        plot_surfaces(file_to_plot, use_colormap=use_colormap,
                      colormap_file=colormap_file)
    #-------------------------------------------------------------------------
    # Display with mayavi2:
    #-------------------------------------------------------------------------
    elif program == 'mayavi2':
        cmd = ["mayavi2", "-d", file_to_plot, "-m", "Surface", "&"]
        execute(cmd, 'os')
def realign_boundaries_to_fundus_lines(
    surf_file, init_label_file, fundus_lines_file, thickness_file,
    out_label_file=None):
    """
    Fix label boundaries to fundus lines.

    Propagates the initial label boundaries to fundus-line vertices, tiles
    the surface into connected components delimited by (closed) fundus
    lines, relabels each component by its most probable label, then fills
    the remaining holes by label propagation.

    Parameters
    ----------
    surf_file : file containing the surface geometry in vtk format
    init_label_file : file containing scalars that represent the initial
                      guess at labels
    fundus_lines_file : file containing scalars representing fundus lines.
    thickness_file: file containing cortical thickness scalar data
                    (for masking out the medial wall only)
    out_label_file : if specified, the realigned labels will be writen to
                     this file

    Returns
    -------
    numpy array representing the realigned label for each surface vertex.
    """
    import numpy as np
    from mindboggle.guts.segment import extract_borders
    import mindboggle.guts.graph as go
    from mindboggle.mio.vtks import read_vtk, read_scalars, write_vtk
    from mindboggle.guts.mesh import find_neighbors
    # NOTE(review): Python-2-style implicit relative import; under Python 3
    # this needs "from . import propagate_fundus_lines" (or an absolute
    # package path) -- confirm against the package layout.
    import propagate_fundus_lines

    ## read files
    faces, _, indices, points, num_points, _, _, _ = read_vtk(
        surf_file, return_first=True, return_array=True)
    # Overwrite indices with all vertex ids (0..num_points-1):
    indices = range(num_points)

    init_labels, _ = read_scalars(init_label_file,
                                  return_first=True, return_array=True)

    fundus_lines, _ = read_scalars(fundus_lines_file,
                                   return_first=True, return_array=True)

    thickness, _ = read_scalars(thickness_file,
                                return_first=True, return_array=True)

    # remove labels from vertices with zero thickness (get around
    # DKT40 annotations having the label '3' for all the Corpus
    # Callosum vertices).
    cc_inds = [x for x in indices if thickness[x] < 0.001]
    init_labels[cc_inds] = 0

    ## setup seeds from initial label boundaries
    neighbor_lists = find_neighbors(faces, num_points)

    # extract all vertices that are on a boundary between labels
    boundary_indices, label_pairs, _ = extract_borders(
        indices, init_labels, neighbor_lists,
        return_label_pairs=True)

    # split boundary vertices into segments with common boundary pairs.
    # Keys are sorted (low, high) label pairs so (a, b) and (b, a) merge.
    boundary_segments = {}
    for boundary_index, label_pair in zip(boundary_indices, label_pairs):
        key = ((label_pair[0], label_pair[1]) if label_pair[0] < label_pair[1]
               else (label_pair[1], label_pair[0]))
        if key not in boundary_segments:
            boundary_segments[key] = []
        boundary_segments[key].append(boundary_index)

    boundary_matrix, boundary_matrix_keys = _build_boundary_matrix(
        boundary_segments, num_points)

    # build the affinity matrix
    affinity_matrix = go.weight_graph(
       np.array(points), indices, np.array(faces), sigma=10,
       add_to_graph=False)

    ## propagate boundaries to fundus line vertices
    learned_matrix = _propagate_labels(
       affinity_matrix, boundary_matrix, boundary_indices, 100, 1)

    # assign labels to fundus line vertices based on highest probability
    # NOTE(review): new_boundaries appears unused below -- leftover?
    new_boundaries = -1 * np.ones(init_labels.shape)
    fundus_line_indices = [i for i, x in enumerate(fundus_lines) if x > 0.5]

    # tile the surface into connected components delimited by fundus lines
    closed_fundus_lines, _, _ = propagate_fundus_lines.propagate_fundus_lines(
        points, faces, fundus_line_indices, thickness)

    closed_fundus_line_indices = np.where(closed_fundus_lines > 0)[0]

    # split surface into connected components
    connected_component_faces = _remove_boundary_faces(
        points, faces, closed_fundus_line_indices)

    # label components based on most probable label assignment
    new_labels = _label_components(
        connected_component_faces, num_points, boundary_indices,
        learned_matrix, boundary_matrix_keys)

    # propagate new labels to fill holes
    label_matrix, label_map = _build_label_matrix(new_labels)
    new_learned_matrix = _propagate_labels(
        affinity_matrix, label_matrix,
        [i for i in range(num_points) if new_labels[i] >= 0], 100, 1)

    # assign most probable labels
    for idx in [i for i in range(num_points) if new_labels[i] == -1]:
        max_idx = np.argmax(new_learned_matrix[idx])
        new_labels[idx] = label_map[max_idx]

    # save
    if out_label_file is not None:
        write_vtk(out_label_file, points, faces=faces,
                  scalars=[int(x) for x in new_labels], scalar_type='int')

    return new_labels
def close_surface_pair_from_files(patch_surface1, whole_surface2,
                                  background_value=-1, output_vtk=''):
    """
    Close a surface patch by connecting its border vertices with
    corresponding vertices in a second surface file.

    Assumes no lines or indices when reading VTK files in.

    Note ::

        The first VTK file contains scalar values different than background
        for a surface patch.  The second VTK file contains the (entire)
        surface whose corresponding vertices are shifted in position.
        For pial vs. gray-white matter, the two surfaces are not parallel,
        so connecting the vertices leads to intersecting faces.

    Parameters
    ----------
    patch_surface1 : string
        vtk file with surface patch of non-background scalar values
    whole_surface2 : string
        second vtk file with 1-to-1 vertex correspondence with patch_surface1
        (whole surface so as to derive vertex neighbor lists)
    background_value : integer
        scalar value for background vertices
    output_vtk : string
        output vtk file name with closed surface patch

    Returns
    -------
    output_vtk : string
        output vtk file name with closed surface patch

    Examples
    --------
    >>> import os
    >>> from mindboggle.guts.morph import close_surface_pair_from_files
    >>> from mindboggle.mio.plots import plot_surfaces
    >>> from mindboggle.mio.vtks import read_scalars, read_vtk, read_points, write_vtk
    >>> path = os.environ['MINDBOGGLE_DATA']
    >>> patch_surface1 = 'fold.pial.vtk'
    >>> whole_surface2 = 'fold.white.vtk'
    >>> # Select a single fold:
    >>> folds_file = os.path.join(path, 'arno', 'features', 'folds.vtk')
    >>> points = read_points(folds_file)
    >>> folds, name = read_scalars(folds_file, True, True)
    >>> fold_number = 11
    >>> folds[folds != fold_number] = -1
    >>> white_surface = os.path.join(path, 'arno', 'freesurfer', 'lh.white.vtk')
    >>> points2, indices, lines, faces, scalars, scalar_names, npoints, input_vtk = read_vtk(white_surface)
    >>> write_vtk(patch_surface1, points, [], [], faces, folds, name)
    >>> write_vtk(whole_surface2, points2, [], [], faces, folds, name)
    >>> background_value = -1
    >>> output_vtk = ''
    >>> close_surface_pair_from_files(patch_surface1, whole_surface2, background_value, output_vtk)
    >>> # View:
    >>> plot_surfaces('closed.vtk') # doctest: +SKIP
    """
    import os
    import numpy as np
    from mindboggle.mio.vtks import read_vtk, write_vtk
    from mindboggle.guts.morph import close_surface_pair

    # Read VTK surface mesh files:
    points1, indices, lines, faces, scalars, scalar_names, npoints, \
        input_vtk = read_vtk(patch_surface1, True, True)
    # BUGFIX: the second read previously reused the same variable names and
    # clobbered the patch's scalars/scalar_names, so close_surface_pair was
    # given the whole surface's scalars rather than the patch's (contrary to
    # the docstring and to the sibling version of this function).  Keep the
    # second file's values under distinct names:
    points2, indices2, lines2, faces2, scalars2, scalar_names2, npoints2, \
        input_vtk2 = read_vtk(whole_surface2, True, True)

    # Close surface using the patch's scalar values:
    closed_faces, closed_points, closed_scalars = close_surface_pair(
        faces, points1, points2, scalars, background_value)

    # Write output file:
    if not output_vtk:
        output_vtk = os.path.join(os.getcwd(), 'closed.vtk')
    # closed_scalars is a list
    if np.ndim(closed_scalars) == 1:
        scalar_type = type(closed_scalars[0]).__name__
    elif np.ndim(closed_scalars) == 2:
        scalar_type = type(closed_scalars[0][0]).__name__
    else:
        # BUGFIX: previously only printed a warning and then hit a NameError
        # on the unbound scalar_type; fail explicitly.
        raise ValueError("Undefined scalar type!")
    write_vtk(output_vtk, closed_points, [], [], closed_faces, closed_scalars,
              scalar_names, scalar_type=scalar_type)

    return output_vtk
def relabel_surface(
    vtk_file,
    hemi="",
    old_labels=[],
    new_labels=[],
    erase_remaining=True,
    erase_labels=[],
    erase_value=-1,
    output_file="",
):
    """
    Relabel surface in a VTK file.

    Parameters
    ----------
    vtk_file : string
        input labeled VTK file
    hemi : string
        hemisphere ('lh' or 'rh' or '')
        if set, add 1000 to left and 2000 to right hemisphere labels;
    old_labels : list of integers
        old labels (empty list if labels drawn from vtk scalars);
        may be used in conjunction with hemi
    new_labels : list of integers
        new labels (empty list if labels drawn from vtk scalars);
        may be used in conjunction with hemi
    erase_remaining : Boolean
        set all values not in old_labels to erase_value?
    erase_labels : list of integers
        values to erase (set to erase_value)
    erase_value : integer
        set vertices with labels in erase_labels to this value
    output_file : string
        new vtk file name

    Returns
    -------
    output_file : string
        new vtk file name

    Examples
    --------
    >>> import os
    >>> from mindboggle.guts.relabel import relabel_surface
    >>> from mindboggle.mio.plots import plot_surfaces
    >>> path = os.environ['MINDBOGGLE_DATA']
    >>> vtk_file = os.path.join(path, 'arno', 'labels', 'lh.labels.DKT25.manual.vtk')
    >>> hemi = 'lh'
    >>> old_labels = [1003,1009,1030]
    >>> new_labels = [3,9,30]
    >>> erase_remaining = False
    >>> erase_labels = [0]
    >>> erase_value = -1
    >>> output_file = ''
    >>> #
    >>> relabel_surface(vtk_file, hemi, old_labels, new_labels, erase_remaining, erase_labels, erase_value, output_file)
    >>> # View
    >>> plot_surfaces('relabeled_FreeSurfer_cortex_labels.vtk')
    >>> #plot_surfaces('relabeled_rh.labels.DKT31.manual.vtk')
    """
    import os
    import numpy as np
    from mindboggle.mio.vtks import read_vtk, write_vtk

    # Load labeled vtk surfaces.
    # NOTE(review): this unpacking order (faces first) differs from other
    # read_vtk call sites in this file, which unpack points first -- confirm
    # it matches the read_vtk version this function targets.
    faces, lines, indices, points, npoints, scalars, name, input_vtk = read_vtk(
        vtk_file, return_first=True, return_array=True
    )

    # BUGFIX: with return_array=True, scalars is a numpy array and
    # "scalars[:]" returns a *view*, not a copy.  Assignments through
    # new_scalars[I] below then mutated scalars itself, corrupting the
    # np.where(scalars == label) lookups on later loop iterations whenever
    # a newly written value collided with a not-yet-processed label.
    new_scalars = np.array(scalars)

    # Raise an error if inputs set incorrectly:
    if (
        (new_labels and not old_labels)
        or (hemi and hemi not in ["lh", "rh"])
        or (new_labels and len(old_labels) != len(new_labels))
        or (erase_remaining and not old_labels)
    ):
        raise IOError("Please check inputs for relabel_surface().")

    # Loop through unique labels in scalars:
    ulabels = np.unique(scalars)
    for label in ulabels:
        I = np.where(scalars == label)[0]

        # If label in erase_labels list, replace with erase_value:
        if label in erase_labels:
            new_scalars[I] = erase_value

        # If label in old_labels list, replace with corresponding new label,
        # and if hemi set, add 1000 or 2000 to the new label:
        elif label in old_labels and (len(old_labels) == len(new_labels)):
            new_label = new_labels[old_labels.index(label)]
            if hemi == "lh":
                new_scalars[I] = 1000 + new_label
            elif hemi == "rh":
                new_scalars[I] = 2000 + new_label
            else:
                new_scalars[I] = new_label

        # If labels not set then optionally add hemi value:
        elif hemi and not new_labels:
            if hemi == "lh":
                new_scalars[I] = 1000 + label
            elif hemi == "rh":
                new_scalars[I] = 2000 + label

        # If label unaccounted for and erase_remaining, set to erase_value:
        elif erase_remaining:
            new_scalars[I] = erase_value

    # Ensure that the new scalars are integer values:
    new_scalars = [int(x) for x in new_scalars]

    # Write output VTK file:
    if not output_file:
        output_file = os.path.join(os.getcwd(), "relabeled_" + os.path.basename(vtk_file))
    write_vtk(output_file, points, indices, lines, faces, [new_scalars], ["Labels"], scalar_type="int")

    if not os.path.exists(output_file):
        raise IOError("relabel_surface() did not create " + output_file + ".")

    return output_file
def relabel_surface(vtk_file, hemi='', old_labels=[], new_labels=[],
                    erase_remaining=True, erase_labels=[], erase_value=-1,
                    output_file=''):
    """
    Relabel surface in a VTK file.

    Parameters
    ----------
    vtk_file : string
        input labeled VTK file
    hemi : string
        hemisphere ('lh' or 'rh' or '')
        if set, add 1000 to left and 2000 to right hemisphere labels;
    old_labels : list of integers
        old labels (empty list if labels drawn from vtk scalars);
        may be used in conjunction with hemi
    new_labels : list of integers
        new labels (empty list if labels drawn from vtk scalars);
        may be used in conjunction with hemi
    erase_remaining : bool
        set all values not in old_labels to erase_value?
    erase_labels : list of integers
        values to erase (set to erase_value)
    erase_value : integer
        set vertices with labels in erase_labels to this value
    output_file : string
        new vtk file name

    Returns
    -------
    output_file : string
        new vtk file name

    Examples
    --------
    >>> import numpy as np
    >>> from mindboggle.guts.relabel import relabel_surface
    >>> from mindboggle.mio.vtks import read_scalars
    >>> from mindboggle.mio.fetch_data import prep_tests
    >>> urls, fetch_data = prep_tests()
    >>> vtk_file = fetch_data(urls['left_freesurfer_labels'])
    >>> hemi = 'lh'
    >>> old_labels = [1003,1009,1030]
    >>> new_labels = [0,500,1000]
    >>> erase_remaining = True
    >>> erase_labels = [0]
    >>> erase_value = -1
    >>> output_file = ''
    >>> output_file = relabel_surface(vtk_file, hemi, old_labels, new_labels,
    ...     erase_remaining, erase_labels, erase_value, output_file)
    >>> labels, name = read_scalars(output_file, True, True)
    >>> np.unique(labels)
    array([  -1, 1000, 1500, 2000])

    View relabeled surface file (skip test):

    >>> from mindboggle.mio.plots import plot_surfaces
    >>> plot_surfaces(output_file) # doctest: +SKIP
    """
    import os
    import numpy as np
    from mindboggle.mio.vtks import read_vtk, write_vtk

    # Load labeled vtk surfaces:
    points, indices, lines, faces, scalars, scalar_names, npoints, \
        input_vtk = read_vtk(vtk_file, return_first=True, return_array=True)

    # BUGFIX: with return_array=True, scalars is a numpy array and
    # "scalars[:]" returns a *view*, not a copy.  Assignments through
    # new_scalars[I] below then mutated scalars itself, corrupting the
    # np.where(scalars == label) lookups on later loop iterations whenever
    # a newly written value collided with a not-yet-processed label.
    new_scalars = np.array(scalars)

    # Raise an error if inputs set incorrectly:
    if (new_labels and not old_labels) or \
            (hemi and hemi not in ['lh', 'rh']) or \
            (new_labels and len(old_labels) != len(new_labels)) or \
            (erase_remaining and not old_labels):
        raise IOError("Please check inputs for relabel_surface().")

    # Loop through unique labels in scalars:
    ulabels = np.unique(scalars)
    for label in ulabels:
        I = np.where(scalars == label)[0]

        # If label in erase_labels list, replace with erase_value:
        if label in erase_labels:
            new_scalars[I] = erase_value

        # If label in old_labels list, replace with corresponding new label,
        # and if hemi set, add 1000 or 2000 to the new label:
        elif label in old_labels and (len(old_labels) == len(new_labels)):
            new_label = new_labels[old_labels.index(label)]
            if hemi == 'lh':
                new_scalars[I] = 1000 + new_label
            elif hemi == 'rh':
                new_scalars[I] = 2000 + new_label
            else:
                new_scalars[I] = new_label

        # If labels not set then optionally add hemi value:
        elif hemi and not new_labels:
            if hemi == 'lh':
                new_scalars[I] = 1000 + label
            elif hemi == 'rh':
                new_scalars[I] = 2000 + label

        # If label unaccounted for and erase_remaining, set to erase_value:
        elif erase_remaining:
            new_scalars[I] = erase_value

    # Ensure that the new scalars are integer values:
    new_scalars = [int(x) for x in new_scalars]

    # Write output VTK file:
    if not output_file:
        output_file = os.path.join(os.getcwd(),
                                   'relabeled_' + os.path.basename(vtk_file))
    write_vtk(output_file, points, indices, lines, faces,
              [new_scalars], ['Labels'], scalar_type='int')

    if not os.path.exists(output_file):
        raise IOError("relabel_surface() did not create " + output_file + ".")

    return output_file
def close_surface_pair_from_files(patch_surface1, whole_surface2,
                                  background_value=-1, output_vtk=''):
    """
    Close a surface patch by connecting its border vertices with
    corresponding vertices in a second surface file.

    Assumes no lines or indices when reading VTK files in.

    Note ::

        The first VTK file contains scalar values different than background
        for a surface patch.  The second VTK file contains the (entire)
        surface whose corresponding vertices are shifted in position.
        For pial vs. gray-white matter, the two surfaces are not parallel,
        so connecting the vertices leads to intersecting faces.

    Parameters
    ----------
    patch_surface1 : string
        vtk file with surface patch of non-background scalar values
    whole_surface2 : string
        second vtk file with 1-to-1 vertex correspondence with patch_surface1
        (whole surface so as to derive vertex neighbor lists)
    background_value : integer
        scalar value for background vertices
    output_vtk : string
        output vtk file name with closed surface patch

    Returns
    -------
    output_vtk : string
        output vtk file name with closed surface patch

    Examples
    --------
    >>> import os
    >>> from mindboggle.guts.morph import close_surface_pair_from_files
    >>> from mindboggle.mio.plots import plot_surfaces
    >>> from mindboggle.mio.vtks import read_scalars, read_vtk, read_points, write_vtk
    >>> path = os.environ['MINDBOGGLE_DATA']
    >>> patch_surface1 = 'fold.pial.vtk'
    >>> whole_surface2 = 'fold.white.vtk'
    >>> # Select a single fold:
    >>> folds_file = os.path.join(path, 'arno', 'features', 'folds.vtk')
    >>> points = read_points(folds_file)
    >>> folds, name = read_scalars(folds_file, True, True)
    >>> fold_number = 11
    >>> folds[folds != fold_number] = -1
    >>> white_surface = os.path.join(path, 'arno', 'freesurfer', 'lh.white.vtk')
    >>> faces, u1, u2, points2, N, u3, u4, u5 = read_vtk(white_surface)
    >>> write_vtk(patch_surface1, points, [], [], faces, folds, name)
    >>> write_vtk(whole_surface2, points2, [], [], faces, folds, name)
    >>> background_value = -1
    >>> output_vtk = ''
    >>> close_surface_pair_from_files(patch_surface1, whole_surface2, background_value, output_vtk)
    >>> # View:
    >>> plot_surfaces('closed.vtk') # doctest: +SKIP
    """
    import os
    import numpy as np
    from mindboggle.mio.vtks import read_vtk, write_vtk
    from mindboggle.guts.morph import close_surface_pair

    # Read VTK surface mesh files: scalars/name come from the patch file,
    # faces/points2 from the whole surface (this read_vtk version returns
    # faces first):
    u1, u2, u3, points1, N, scalars, name, u4 = read_vtk(patch_surface1,
                                                         True, True)
    faces, u1, u2, points2, N, u3, u4, u5 = read_vtk(whole_surface2)

    # Close surface:
    closed_faces, closed_points, closed_scalars = close_surface_pair(
        faces, points1, points2, scalars, background_value)

    # Write output file:
    if not output_vtk:
        output_vtk = os.path.join(os.getcwd(), 'closed.vtk')
    # closed_scalars is a list
    if np.ndim(closed_scalars) == 1:
        scalar_type = type(closed_scalars[0]).__name__
    elif np.ndim(closed_scalars) == 2:
        scalar_type = type(closed_scalars[0][0]).__name__
    else:
        # BUGFIX: previously only printed a warning and then hit a NameError
        # on the unbound scalar_type; fail explicitly.
        raise ValueError("Undefined scalar type!")
    write_vtk(output_vtk, closed_points, [], [], closed_faces, closed_scalars,
              name, scalar_type=scalar_type)

    return output_vtk