def plot_volumes(volume_files):
    """
    Use fslview to visualize image volume data.

    Parameters
    ----------
    volume_files : list of strings
        names of image volume files

    Examples
    --------
    >>> import os
    >>> from mindboggle.utils.plots import plot_volumes
    >>> path = os.environ['MINDBOGGLE_DATA']
    >>> volume_file1 = os.path.join(path, 'arno', 'mri', 't1weighted.nii.gz')
    >>> volume_file2 = os.path.join(path, 'arno', 'mri', 't1weighted_brain.nii.gz')
    >>> volume_files = [volume_file1, volume_file2]
    >>> plot_volumes(volume_files)

    """
    import sys
    from mindboggle.utils.utils import execute

    if isinstance(volume_files, str):
        volume_files = [volume_files]
    elif not isinstance(volume_files, list):
        # sys has no error() function; exit with an error message instead:
        sys.exit('plot_volumes() requires volume_files to be a list or string.')

    cmd = ["fslview"]
    cmd.extend(volume_files)
    cmd.append('&')

    execute(cmd, 'os')
def ComposeMultiTransform(transform_files, inverse_Booleans, output_transform_file='', ext='.txt'): """ Run ANTs ComposeMultiTransform function to create a single transform. Parameters ---------- transform_files : list of strings transform file names inverse_Booleans : list of Booleans Booleans to indicate which transforms to take the inverse of output_transform_file : string transform file name ext : string '.txt' to save transform file as text, '.mat' for data file Returns ------- output_transform_file : string single composed transform file name Examples -------- >>> from mindboggle.utils.ants import ComposeMultiTransform >>> transform_files = ['affine1.mat', 'affine2.mat'] >>> transform_files = ['/data/Brains/Mindboggle101/antsCorticalThickness/OASIS-TRT-20_volumes/OASIS-TRT-20-1/antsTemplateToSubject0GenericAffine.mat','/data/Brains/Mindboggle101/antsCorticalThickness/OASIS-TRT-20_volumes/OASIS-TRT-20-1/antsTemplateToSubject0GenericAffine.mat'] >>> inverse_Booleans = [False, False] >>> output_transform_file = '' >>> ext = '.txt' >>> ComposeMultiTransform(transform_files, inverse_Booleans, output_transform_file, ext) """ import os from mindboggle.utils.utils import execute if not output_transform_file: output_transform_file = os.path.join(os.getcwd(), 'affine' + ext) xfms = [] for ixfm, xfm in enumerate(transform_files): if inverse_Booleans[ixfm]: xfms.append('-i') xfms.append(xfm) cmd = ['ComposeMultiTransform 3', output_transform_file, ' '.join(xfms)] print(cmd) execute(cmd, 'os') #if not os.path.exists(output_transform_file): # raise(IOError(output_transform_file + " not found")) return output_transform_file
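# Illustrative sketch (not part of the original module): it mirrors the loop
# inside ComposeMultiTransform() to show how inverse_Booleans interleave '-i'
# flags with the transform file names. The file names below are hypothetical.
def _example_compose_multi_transform_args():
    transform_files = ['affine1.mat', 'affine2.mat']   # hypothetical files
    inverse_Booleans = [True, False]                   # invert only the first
    xfms = []
    for ixfm, xfm in enumerate(transform_files):
        if inverse_Booleans[ixfm]:
            xfms.append('-i')
        xfms.append(xfm)
    # xfms == ['-i', 'affine1.mat', 'affine2.mat'], so the call becomes:
    # ComposeMultiTransform 3 affine.txt -i affine1.mat affine2.mat
    return xfms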
def ThresholdImage(volume, output_file='', threshlo=1, threshhi=10000): """ Use the ThresholdImage function in ANTs to threshold image volume:: Usage: ThresholdImage ImageDimension ImageIn.ext outImage.ext threshlo threshhi <insideValue> <outsideValue> Parameters ---------- volume : string nibabel-readable image volume output_file : string nibabel-readable image volume threshlo : integer lower threshold threshhi : integer upper threshold Returns ------- output_file : string name of output nibabel-readable image volume Examples -------- >>> import os >>> from mindboggle.utils.ants import ThresholdImage >>> from mindboggle.utils.plots import plot_volumes >>> path = os.path.join(os.environ['MINDBOGGLE_DATA']) >>> volume = os.path.join(path, 'arno', 'mri', 't1weighted.nii.gz') >>> output_file = '' >>> threshlo = 500 >>> threshhi = 10000 >>> output_file = ThresholdImage(volume, output_file, threshlo, threshhi) >>> # View >>> plot_volumes(output_file) """ import os from mindboggle.utils.utils import execute if not output_file: output_file = os.path.join(os.getcwd(), 'threshold_' + os.path.basename(volume)) cmd = 'ThresholdImage 3 {0} {1} {2} {3}'.format(volume, output_file, threshlo, threshhi) execute(cmd, 'os') if not os.path.exists(output_file): raise (IOError("ThresholdImage did not create " + output_file + ".")) return output_file
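# Illustrative sketch (not part of the original module): a NumPy equivalent of
# the ThresholdImage argument pattern used throughout this module, where voxels
# whose values fall within [threshlo, threshhi] receive inside_value and all
# other voxels receive outside_value. For example, the arguments '0 1 0 1'
# (used later to binarize a mask) map values in [0, 1] to 0 and everything
# else to 1, while '1 1 0 1' inverts a binary image.
def _threshold_like_ThresholdImage(data, threshlo, threshhi,
                                   inside_value=1, outside_value=0):
    import numpy as np
    data = np.asarray(data)
    inside = (data >= threshlo) & (data <= threshhi)
    return np.where(inside, inside_value, outside_value)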
def plot_vtk(vtk_file, mask_file='', masked_output=''):
    """
    Use mayavi2 to visualize VTK surface mesh data.

    Parameters
    ----------
    vtk_file : string
        name of VTK surface mesh file
    mask_file : string
        name of VTK surface mesh file to mask vtk_file vertices
    masked_output : string
        temporary masked output file name

    Examples
    --------
    >>> import os
    >>> from mindboggle.utils.plots import plot_vtk
    >>> path = os.environ['MINDBOGGLE_DATA']
    >>> vtk_file = os.path.join(path, 'arno', 'shapes', 'lh.pial.mean_curvature.vtk')
    >>> mask_file = os.path.join(path, 'arno', 'features', 'folds.vtk')
    >>> masked_output = ''
    >>> plot_vtk(vtk_file, mask_file, masked_output)

    """
    from mindboggle.utils.utils import execute

    # Filter mesh with the non -1 values from a second (same-size) mesh:
    if mask_file:
        from mindboggle.utils.io_vtk import read_scalars, rewrite_scalars
        scalars, name = read_scalars(vtk_file)
        mask, name = read_scalars(mask_file)
        if not masked_output:
            masked_output = 'temp.vtk'
        rewrite_scalars(vtk_file, masked_output, scalars, 'masked', mask)
        cmd = ["mayavi2", "-d", masked_output, "-m", "Surface"]
    else:
        cmd = ["mayavi2", "-d", vtk_file, "-m", "Surface"]
    cmd.append('&')

    execute(cmd, 'os')
def plot_volumes(volume_files, command='fslview'): """ Use fslview to visualize image volume data. Parameters ---------- volume_files : list of strings names of image volume files command : string plotting software command Examples -------- >>> import os >>> from mindboggle.utils.plots import plot_volumes >>> path = os.environ['MINDBOGGLE_DATA'] >>> volume_file1 = os.path.join(path, 'Twins-2-1', 'mri', 't1weighted.nii.gz') >>> volume_file2 = os.path.join(path, 'Twins-2-1', 'mri', 't1weighted_brain.nii.gz') >>> volume_files = [volume_file1, volume_file2] >>> command = 'fslview' >>> command = '/Applications/ITK-SNAP.app/Contents/MacOS/InsightSNAP' >>> plot_volumes(volume_files, command=command) """ from mindboggle.utils.utils import execute if isinstance(volume_files, str): volume_files = [volume_files] elif not isinstance(volume_files, list): raise(IOError('plot_volumes() requires volume_files to be a list or string.')) if not isinstance(command, str): raise(IOError('plot_volumes() requires command to be a string.')) else: command = [command] command.extend(volume_files) command.extend('&') execute(command, 'os')
def plot_mask_surface(vtk_file, mask_file='', nonmask_value=-1, masked_output='', remove_nonmask=False, program='vtkviewer', use_colormap=False, colormap_file=''): """ Use vtkviewer or mayavi2 to visualize VTK surface mesh data. If a mask_file is provided, a temporary masked file is saved, and it is this file that is viewed. If using vtkviewer, can optionally provide colormap file or set $COLORMAP environment variable. Parameters ---------- vtk_file : string name of VTK surface mesh file mask_file : string name of VTK surface mesh file to mask vtk_file vertices nonmask_value : integer nonmask (usually background) value masked_output : string temporary masked output file name remove_nonmask : Boolean remove vertices that are not in mask? (otherwise assign nonmask_value) program : string {'vtkviewer', 'mayavi2'} program to visualize VTK file use_colormap : Boolean use Paraview-style XML colormap file set by $COLORMAP env variable? colormap_file : string use colormap in given file if use_colormap==True? if empty and use_colormap==True, use file set by $COLORMAP environment variable Examples -------- >>> import os >>> from mindboggle.utils.plots import plot_mask_surface >>> path = os.environ['MINDBOGGLE_DATA'] >>> vtk_file = os.path.join(path, 'arno', 'labels', 'lh.labels.DKT31.manual.vtk') >>> mask_file = os.path.join(path, 'test_one_label.vtk') >>> nonmask_value = 0 #-1 >>> masked_output = '' >>> remove_nonmask = True >>> program = 'vtkviewer' >>> use_colormap = True >>> colormap_file = '' #'/software/mindboggle_tools/colormap.xml' >>> plot_mask_surface(vtk_file, mask_file, nonmask_value, masked_output, remove_nonmask, program, use_colormap, colormap_file) """ import os import numpy as np from mindboggle.utils.mesh import remove_faces, reindex_faces_points from mindboggle.utils.utils import execute from mindboggle.utils.plots import plot_surfaces from mindboggle.utils.io_vtk import read_scalars, rewrite_scalars, \ read_vtk, write_vtk #------------------------------------------------------------------------- # Filter mesh with non-background values from a second (same-size) mesh: #------------------------------------------------------------------------- if mask_file: mask, name = read_scalars(mask_file, True, True) if not masked_output: masked_output = os.path.join(os.getcwd(), 'temp.vtk') file_to_plot = masked_output #--------------------------------------------------------------------- # Remove nonmask-valued vertices: #--------------------------------------------------------------------- if remove_nonmask: #----------------------------------------------------------------- # Load VTK files: #----------------------------------------------------------------- faces, lines, indices, points, npoints, scalars, scalar_names, \ o1 = read_vtk(vtk_file, True, True) #----------------------------------------------------------------- # Find mask indices, remove nonmask faces, and reindex: #----------------------------------------------------------------- Imask = [i for i,x in enumerate(mask) if x != nonmask_value] mask_faces = remove_faces(faces, Imask) mask_faces, points, \ original_indices = reindex_faces_points(mask_faces, points) #----------------------------------------------------------------- # Write VTK file with scalar values: #----------------------------------------------------------------- if np.ndim(scalars) == 1: scalar_type = type(scalars[0]).__name__ elif np.ndim(scalars) == 2: scalar_type = type(scalars[0][0]).__name__ else: print("Undefined scalar type!") write_vtk(file_to_plot, 
points, [], [], mask_faces, scalars[original_indices].tolist(), scalar_names, scalar_type=scalar_type) else: scalars, name = read_scalars(vtk_file, True, True) scalars[mask == nonmask_value] = nonmask_value rewrite_scalars(vtk_file, file_to_plot, scalars) else: file_to_plot = vtk_file #------------------------------------------------------------------------- # Display with vtkviewer.py: #------------------------------------------------------------------------- if program == 'vtkviewer': plot_surfaces(file_to_plot, use_colormap=use_colormap, colormap_file=colormap_file) #------------------------------------------------------------------------- # Display with mayavi2: #------------------------------------------------------------------------- elif program == 'mayavi2': cmd = ["mayavi2", "-d", file_to_plot, "-m", "Surface", "&"] execute(cmd, 'os')
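# Illustrative sketch (not part of the original module): the masking step in
# plot_mask_surface() keeps only vertices whose mask value differs from
# nonmask_value and discards faces that reference removed vertices. The toy
# mesh below is hypothetical; the real code uses remove_faces() and
# reindex_faces_points() from mindboggle.utils.mesh.
def _example_mask_faces():
    nonmask_value = -1
    mask = [3, -1, 3, 3]                   # per-vertex mask values
    faces = [[0, 1, 2], [0, 2, 3]]         # triangles as vertex indices
    Imask = [i for i, x in enumerate(mask) if x != nonmask_value]
    # Keep only faces whose vertices all survive the mask:
    mask_faces = [f for f in faces if all(v in Imask for v in f)]
    # mask_faces == [[0, 2, 3]]; vertex 1 was masked out.
    return mask_faces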
def thickinthehead(segmented_file, labeled_file, cortex_value=2, noncortex_value=3, labels=[], names=[], resize=True, propagate=True, output_dir='', save_table=False): """ Compute a simple thickness measure for each labeled cortex region. Note:: - Cortex, noncortex, and labeled files are the same coregistered brain. - Calls ANTs functions: ImageMath, Threshold, ResampleImageBySpacing Example preprocessing steps :: 1. Run Freesurfer and antsCorticalThickness.sh on T1-weighted image. 2. Convert FreeSurfer volume labels (e.g., wmparc.mgz or aparc+aseg.mgz) to cortex (2) and noncortex (3) segments using relabel_volume() function [refer to LABELS.py or FreeSurferColorLUT labels file]. 3. Convert ANTs Atropos-segmented volume (tmpBrainSegmentation.nii.gz) to cortex and noncortex segments, by converting 1-labels to 0 and 4-labels to 3 with the relabel_volume() function (the latter is to include deep-gray matter with noncortical tissues). 4. Combine FreeSurfer and ANTs segmentation volumes to obtain a single cortex (2) and noncortex (3) segmentation file using the function combine_2labels_in_2volumes(). This function takes the union of cortex voxels from the segmentations, the union of the noncortex voxels from the segmentations, and overwrites intersecting cortex and noncortex voxels with noncortex (3) labels. ANTs tends to include more cortical gray matter at the periphery of the brain than Freesurfer, and FreeSurfer tends to include more white matter that extends deep into gyral folds than ANTs, so the above attempts to remedy their differences by overlaying ANTs cortical gray with FreeSurfer white matter. 5. Optional, see Step 2 below: Fill segmented cortex with cortex labels and noncortex with noncortex labels using the PropagateLabelsThroughMask() function (which calls ImageMath ... PropagateLabelsThroughMask in ANTs). The labels can be initialized using FreeSurfer (e.g. wmparc.mgz) or ANTs (by applying the nonlinear inverse transform generated by antsCorticalThickness.sh to labels in the Atropos template space). [Note: Any further labeling steps may be applied, such as overwriting cerebrum with intersecting cerebellum labels.] Steps :: 1. Extract noncortex and cortex. 2. Either mask labels with cortex or fill cortex with labels. 3. Resample cortex and noncortex files from 1x1x1 to 0.5x0.5x0.5 to better represent the contours of the boundaries of the cortex. 4. Extract outer and inner boundary voxels of the cortex, by eroding 1 (resampled) voxel for cortex voxels (2) bordering the outside of the brain (0) and bordering noncortex (3). 5. Estimate middle cortical surface area by the average volume of the outer and inner boundary voxels of the cortex. 6. Compute the volume of a labeled region of cortex. 7. Estimate the thickness of the labeled cortical region as the volume of the labeled region (#6) divided by the surface area (#5). Parameters ---------- segmented_file : string image volume with cortex and noncortex (and any other) labels labeled_file : string corresponding image volume with index labels cortex_value : integer cortex label value in segmented_file noncortex_value : integer noncortex label value in segmented_file labels : list of integers label indices names : list of strings label names resize : Boolean resize (2x) segmented_file for more accurate thickness estimates? propagate : Boolean propagate labels through cortex? output_dir : string output directory save_table : Boolean save output table file with labels and thickness values? 
Returns ------- label_volume_thickness : list of lists of integers and floats label indices, volumes, and thickness values (default -1) output_table : string name of output thickness table file (if save_table==True) Examples -------- >>> from mindboggle.utils.ants import thickinthehead >>> segmented_file = '/Users/arno/Data/antsCorticalThickness/OASIS-TRT-20-1/tmp23314/tmpBrainSegmentation.nii.gz' >>> labeled_file = '/appsdir/freesurfer/subjects/OASIS-TRT-20-1/mri/labels.DKT31.manual.nii.gz' >>> cortex_value = 2 >>> noncortex_value = 3 >>> #labels = [2] >>> labels = range(1002,1036) + range(2002,2036) >>> labels.remove(1004) >>> labels.remove(2004) >>> labels.remove(1032) >>> labels.remove(2032) >>> labels.remove(1033) >>> labels.remove(2033) >>> names = [] >>> resize = True >>> propagate = False >>> output_dir = '' >>> save_table = True >>> label_volume_thickness, output_table = thickinthehead(segmented_file, labeled_file, cortex_value, noncortex_value, labels, names, resize, propagate, output_dir, save_table) """ import os import numpy as np import nibabel as nb from mindboggle.utils.utils import execute #------------------------------------------------------------------------- # Output files: #------------------------------------------------------------------------- if output_dir: if not os.path.exists(output_dir): os.mkdir(output_dir) else: output_dir = os.getcwd() cortex = os.path.join(output_dir, 'cortex.nii.gz') noncortex = os.path.join(output_dir, 'noncortex.nii.gz') temp = os.path.join(output_dir, 'temp.nii.gz') inner_edge = os.path.join(output_dir, 'cortex_inner_edge.nii.gz') use_outer_edge = True if use_outer_edge: outer_edge = os.path.join(output_dir, 'cortex_outer_edge.nii.gz') if save_table: output_table = os.path.join(os.getcwd(), 'thickinthehead.csv') fid = open(output_table, 'w') if names: fid.write("Label name, Label number, Volume, " "Thickness (thickinthehead)\n") else: fid.write("Label number, Volume, Thickness (thickinthehead)\n") else: output_table = '' #------------------------------------------------------------------------- # Extract noncortex and cortex: #------------------------------------------------------------------------- cmd = ['ThresholdImage 3', segmented_file, noncortex, str(noncortex_value), str(noncortex_value), '1 0'] execute(cmd) cmd = ['ThresholdImage 3', segmented_file, cortex, str(cortex_value), str(cortex_value), '1 0'] execute(cmd) #------------------------------------------------------------------------- # Either mask labels with cortex or fill cortex with labels: #------------------------------------------------------------------------- if propagate: cmd = ['ImageMath', '3', cortex, 'PropagateLabelsThroughMask', cortex, labeled_file] execute(cmd) else: cmd = ['ImageMath 3', cortex, 'm', cortex, labeled_file] execute(cmd) #------------------------------------------------------------------------- # Resample cortex and noncortex files from 1x1x1 to 0.5x0.5x0.5 # to better represent the contours of the boundaries of the cortex: #------------------------------------------------------------------------- if resize: rescale = 2.0 dims = ' '.join([str(1/rescale), str(1/rescale), str(1/rescale)]) cmd = ['ResampleImageBySpacing 3', cortex, cortex, dims, '0 0 1'] execute(cmd) cmd = ['ResampleImageBySpacing 3', noncortex, noncortex, dims, '0 0 1'] execute(cmd) #------------------------------------------------------------------------- # Extract outer and inner boundary voxels of the cortex, # by eroding 1 (resampled) voxel for cortex voxels (2) bordering # 
the outside of the brain (0) and bordering noncortex (3):
    #-------------------------------------------------------------------------
    cmd = ['ImageMath 3', inner_edge, 'MD', noncortex, '1']
    execute(cmd)
    cmd = ['ImageMath 3', inner_edge, 'm', cortex, inner_edge]
    execute(cmd)
    if use_outer_edge:
        cmd = ['ThresholdImage 3', cortex, outer_edge, '1 10000 1 0']
        execute(cmd)
        cmd = ['ImageMath 3', outer_edge, 'ME', outer_edge, '1']
        execute(cmd)
        cmd = ['ThresholdImage 3', outer_edge, outer_edge, '1 1 0 1']
        execute(cmd)
        cmd = ['ImageMath 3', outer_edge, 'm', cortex, outer_edge]
        execute(cmd)
        cmd = ['ThresholdImage 3', inner_edge, temp, '1 10000 1 0']
        execute(cmd)
        cmd = ['ThresholdImage 3', temp, temp, '1 1 0 1']
        execute(cmd)
        cmd = ['ImageMath 3', outer_edge, 'm', temp, outer_edge]
        execute(cmd)

    #-------------------------------------------------------------------------
    # Load data:
    #-------------------------------------------------------------------------
    compute_real_volume = True
    if compute_real_volume:
        img = nb.load(cortex)
        hdr = img.get_header()
        vv = np.prod(hdr.get_zooms())
        cortex_data = img.get_data().ravel()
    else:
        vv = 1
        cortex_data = nb.load(cortex).get_data().ravel()

    inner_edge_data = nb.load(inner_edge).get_data().ravel()
    if use_outer_edge:
        outer_edge_data = nb.load(outer_edge).get_data().ravel()

    #-------------------------------------------------------------------------
    # Loop through labels:
    #-------------------------------------------------------------------------
    if not labels:
        labeled_data = nb.load(labeled_file).get_data().ravel()
        labels = np.unique(labeled_data)
        labels = [int(x) for x in labels]
    label_volume_thickness = -1 * np.ones((len(labels), 3))
    label_volume_thickness[:, 0] = labels

    for ilabel, label in enumerate(labels):
        if names:
            name = names[ilabel]

        #---------------------------------------------------------------------
        # Compute thickness as a ratio of label volume and edge volume:
        #   - Estimate middle cortical surface area by the average volume
        #     of the outer and inner boundary voxels of the cortex.
        #   - Compute the volume of a labeled region of cortex.
        #   - Estimate the thickness of the labeled cortical region as the
        #     volume of the labeled region divided by the surface area.
        #---------------------------------------------------------------------
        label_cortex_volume = vv * len(np.where(cortex_data==label)[0])
        label_inner_edge_volume = vv * len(np.where(inner_edge_data==label)[0])
        if label_inner_edge_volume:
            if use_outer_edge:
                label_outer_edge_volume = \
                    vv * len(np.where(outer_edge_data==label)[0])
                label_area = (label_inner_edge_volume +
                              label_outer_edge_volume) / 2.0
            else:
                label_area = label_inner_edge_volume
            thickness = label_cortex_volume / label_area
            label_volume_thickness[ilabel, 1] = label_cortex_volume
            label_volume_thickness[ilabel, 2] = thickness

            if names:
                print('{0} ({1}) volume={2:2.2f}, thickness={3:2.2f}mm'.
                      format(name, label, label_cortex_volume, thickness))
            else:
                # Only three format arguments, so use indices 0-2:
                print('{0}, volume={1:2.2f}, thickness={2:2.2f}mm'.
                      format(label, label_cortex_volume, thickness))
            if save_table:
                if names:
                    fid.write('{0}, {1}, {2:2.4f}, {3:2.4f}\n'.format(
                        name, label, label_cortex_volume, thickness))
                else:
                    fid.write('{0}, {1:2.4f}, {2:2.4f}\n'.format(
                        label, label_cortex_volume, thickness))

    label_volume_thickness = label_volume_thickness.transpose().tolist()

    return label_volume_thickness, output_table
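# Illustrative sketch (not part of the original module): the thickness
# arithmetic used by thickinthehead(), with made-up voxel counts. Surface
# area is approximated by the mean of the inner and outer cortical edge
# volumes, and thickness is label volume divided by that area.
def _example_thickinthehead_arithmetic():
    vv = 0.125                              # voxel volume (mm^3) after 0.5 mm resampling
    label_cortex_volume = vv * 20000        # 20000 cortex voxels -> 2500 mm^3
    label_inner_edge_volume = vv * 6400     # inner edge voxels  ->  800 mm^3
    label_outer_edge_volume = vv * 7600     # outer edge voxels  ->  950 mm^3
    label_area = (label_inner_edge_volume + label_outer_edge_volume) / 2.0
    thickness = label_cortex_volume / label_area   # 2500 / 875 = ~2.86 mm
    return thickness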
def ImageMath(volume1, volume2, operator='m', output_file=''): """ Use the ImageMath function in ANTs to perform operation on two volumes:: m : Multiply --- use vm for vector multiply + : Add --- use v+ for vector add - : Subtract --- use v- for vector subtract / : Divide ^ : Power exp : Take exponent exp(imagevalue*value) addtozero : add image-b to image-a only over points where image-a has zero values overadd : replace image-a pixel with image-b pixel if image-b pixel is non-zero abs : absolute value total : Sums up values in an image or in image1*image2 (img2 is the probability mask) mean : Average of values in an image or in image1*image2 (img2 is the probability mask) vtotal : Sums up volumetrically weighted values in an image or in image1*image2 (img2 is the probability mask) Decision : Computes result=1./(1.+exp(-1.0*( pix1-0.25)/pix2)) Neg : Produce image negative Parameters ---------- volume1 : string nibabel-readable image volume volume2 : string nibabel-readable image volume operator : string ImageMath string corresponding to mathematical operator output_file : string nibabel-readable image volume Returns ------- output_file : string name of output nibabel-readable image volume Examples -------- >>> import os >>> from mindboggle.utils.ants import ImageMath >>> from mindboggle.utils.plots import plot_volumes >>> path = os.path.join(os.environ['MINDBOGGLE_DATA']) >>> volume1 = os.path.join(path, 'arno', 'mri', 't1weighted.nii.gz') >>> volume2 = os.path.join(path, 'arno', 'mri', 'mask.nii.gz') >>> operator = 'm' >>> output_file = '' >>> output_file = ImageMath(volume1, volume2, operator, output_file) >>> # View >>> plot_volumes(output_file) """ import os from mindboggle.utils.utils import execute if not output_file: output_file = os.path.join( os.getcwd(), os.path.basename(volume1) + '_' + os.path.basename(volume2)) cmd = ['ImageMath', '3', output_file, operator, volume1, volume2] execute(cmd, 'os') if not os.path.exists(output_file): raise (IOError("ImageMath did not create " + output_file + ".")) return output_file
def antsApplyTransformsToPoints(points, transform_files,
                                inverse_booleans=[0]):
    """
    Run ANTs antsApplyTransformsToPoints function to transform points.
    (Creates pre- and post-transformed .csv points files for ANTs.)

    Parameters
    ----------
    points : list of lists of three integers
        point coordinate data
    transform_files : list
        transform file names
    inverse_booleans : list
        for each transform, one to apply inverse of transform
        (otherwise zero)

    Returns
    -------
    transformed_points : list of lists of three integers
        transformed point coordinate data

    Examples
    --------
    >>> from mindboggle.utils.ants import antsApplyTransformsToPoints
    >>> from mindboggle.utils.io_vtk import read_vtk
    >>> transform_files = ['/Users/arno/mindboggle_working/OASIS-TRT-20-1/Mindboggle/Compose_affine_transform/affine.txt']
    >>> vtk_file = '/Users/arno/mindboggle_working/OASIS-TRT-20-1/Mindboggle/_hemi_lh/Surface_to_vtk/lh.pial.vtk'
    >>> faces, lines, indices, points, npoints, scalars, name, foo1 = read_vtk(vtk_file)
    >>> inverse_booleans = [0]
    >>> transformed_points = antsApplyTransformsToPoints(points, transform_files, inverse_booleans)

    """
    import os
    from mindboggle.utils.utils import execute

    #-------------------------------------------------------------------------
    # Write points (x,y,z,1) to a .csv file:
    #-------------------------------------------------------------------------
    points_file = os.path.join(os.getcwd(), 'points.csv')
    fid = open(points_file, 'w')
    fid.write('x,y,z\n')
    for point in points:
        fid.write(','.join([str(x) for x in point]) + '\n')
    fid.close()

    #-------------------------------------------------------------------------
    # Apply transforms to points in .csv file:
    #-------------------------------------------------------------------------
    transformed_points_file = os.path.join(os.getcwd(),
                                           'transformed_points.csv')
    transform_string = ''
    for ixfm, transform_file in enumerate(transform_files):
        # Separate consecutive transforms with a space:
        transform_string += " --t [{0},{1}]".format(transform_file,
                                                    str(inverse_booleans[ixfm]))
    cmd = ['antsApplyTransformsToPoints', '-d', '3', '-i', points_file,
           '-o', transformed_points_file, transform_string]
    execute(cmd, 'os')
    if not os.path.exists(transformed_points_file):
        raise(IOError(transformed_points_file + " not found"))

    #-------------------------------------------------------------------------
    # Return transformed points:
    #-------------------------------------------------------------------------
    fid = open(transformed_points_file, 'r')
    lines = fid.readlines()
    fid.close()
    transformed_points = []
    for iline, line in enumerate(lines):
        if iline > 0:
            point_xyz1 = [float(x) for x in line.split(',')]
            transformed_points.append(point_xyz1[0:3])

    return transformed_points
def PropagateLabelsThroughMask(mask_volume, label_volume, output_file='', binarize=True): """ Use ANTs to fill a binary volume mask with initial labels. This program uses ThresholdImage and the ImageMath PropagateLabelsThroughMask functions in ANTS. ThresholdImage ImageDimension ImageIn.ext outImage.ext threshlo threshhi <insideValue> <outsideValue> PropagateLabelsThroughMask: Final output is the propagated label image. ImageMath ImageDimension Out.ext PropagateLabelsThroughMask speed/binaryimagemask.nii.gz initiallabelimage.nii.gz ... Parameters ---------- label_volume : string nibabel-readable image volume with integer labels mask_volume : string nibabel-readable image volume output_file : string nibabel-readable labeled image volume binarize : Boolean binarize mask_volume? Returns ------- output_file : string name of labeled output nibabel-readable image volume Examples -------- >>> import os >>> from mindboggle.utils.ants import PropagateLabelsThroughMask >>> from mindboggle.utils.plots import plot_volumes >>> path = os.path.join(os.environ['MINDBOGGLE_DATA']) >>> label_volume = os.path.join(path, 'arno', 'labels', 'labels.DKT25.manual.nii.gz') >>> mask_volume = os.path.join(path, 'arno', 'mri', 't1weighted_brain.nii.gz') >>> output_file = '' >>> binarize = True >>> output_file = PropagateLabelsThroughMask(mask_volume, label_volume, >>> output_file, binarize) >>> # View >>> plot_volumes(output_file) """ import os from mindboggle.utils.utils import execute if not output_file: output_file = os.path.join(os.getcwd(), 'propagated_labels.nii.gz') # Binarize image volume: if binarize: temp_file = os.path.join(os.getcwd(), 'propagated_labels.nii.gz') cmd = ['ThresholdImage', '3', mask_volume, temp_file, '0 1 0 1'] execute(cmd, 'os') mask_volume = temp_file # Propagate labels: cmd = ['ImageMath', '3', output_file, 'PropagateLabelsThroughMask', mask_volume, label_volume] execute(cmd, 'os') if not os.path.exists(output_file): raise(IOError(output_file + " not found")) return output_file
def thickinthehead(segmented_file, labeled_file, cortex_value=2, noncortex_value=3, labels=[], names=[], resize=True, propagate=True, output_dir='', save_table=False, output_table=''): """ Compute a simple thickness measure for each labeled cortex region. Note:: - Cortex, noncortex, & label files are from the same coregistered brain. - Calls ANTs functions: ImageMath, Threshold, ResampleImageBySpacing - There may be slight discrepancies between volumes computed by thickinthehead() and volumes computed by volume_per_label(); in 31 of 600+ ADNI 1.5T images, some volume_per_label() volumes were slightly larger (in the third decimal place), presumably due to label propagation through the cortex. Example preprocessing steps :: 1. Run Freesurfer and antsCorticalThickness.sh on T1-weighted image. 2. Convert FreeSurfer volume labels (e.g., wmparc.mgz or aparc+aseg.mgz) to cortex (2) and noncortex (3) segments using relabel_volume() function [refer to LABELS.py or FreeSurferColorLUT labels file]. 3. Convert ANTs Atropos-segmented volume (tmpBrainSegmentation.nii.gz) to cortex and noncortex segments, by converting 1-labels to 0 and 4-labels to 3 with the relabel_volume() function (the latter is to include deep-gray matter with noncortical tissues). 4. Combine FreeSurfer and ANTs segmentation volumes to obtain a single cortex (2) and noncortex (3) segmentation file using the function combine_2labels_in_2volumes(). This function takes the union of cortex voxels from the segmentations, the union of the noncortex voxels from the segmentations, and overwrites intersecting cortex and noncortex voxels with noncortex (3) labels. ANTs tends to include more cortical gray matter at the periphery of the brain than Freesurfer, and FreeSurfer tends to include more white matter that extends deep into gyral folds than ANTs, so the above attempts to remedy their differences by overlaying ANTs cortical gray with FreeSurfer white matter. 5. Optional, see Step 2 below: Fill segmented cortex with cortex labels and noncortex with noncortex labels using the PropagateLabelsThroughMask() function (which calls ImageMath ... PropagateLabelsThroughMask in ANTs). The labels can be initialized using FreeSurfer (e.g. wmparc.mgz) or ANTs (by applying the nonlinear inverse transform generated by antsCorticalThickness.sh to labels in the Atropos template space). [Note: Any further labeling steps may be applied, such as overwriting cerebrum with intersecting cerebellum labels.] Steps :: 1. Extract noncortex and cortex. 2. Either mask labels with cortex or fill cortex with labels. 3. Resample cortex and noncortex files from 1x1x1 to 0.5x0.5x0.5 to better represent the contours of the boundaries of the cortex. 4. Extract outer and inner boundary voxels of the cortex, by eroding 1 (resampled) voxel for cortex voxels (2) bordering the outside of the brain (0) and bordering noncortex (3). 5. Estimate middle cortical surface area by the average volume of the outer and inner boundary voxels of the cortex. 6. Compute the volume of a labeled region of cortex. 7. Estimate the thickness of the labeled cortical region as the volume of the labeled region (#6) divided by the surface area (#5). 
Parameters ---------- segmented_file : string image volume with cortex and noncortex (and any other) labels labeled_file : string corresponding image volume with index labels cortex_value : integer cortex label value in segmented_file noncortex_value : integer noncortex label value in segmented_file labels : list of integers label indices names : list of strings label names resize : Boolean resize (2x) segmented_file for more accurate thickness estimates? propagate : Boolean propagate labels through cortex? output_dir : string output directory save_table : Boolean save output table file with label volumes and thickness values? output_table : string name of output table file with label volumes and thickness values Returns ------- label_volume_thickness : list of lists of integers and floats label indices, volumes, and thickness values (default -1) output_table : string name of output table file with label volumes and thickness values Examples -------- >>> from mindboggle.utils.ants import thickinthehead >>> segmented_file = '/Users/arno/Data/antsCorticalThickness/OASIS-TRT-20-1/antsBrainSegmentation.nii.gz' >>> labeled_file = '/appsdir/freesurfer/subjects/OASIS-TRT-20-1/mri/labels.DKT31.manual.nii.gz' >>> cortex_value = 2 >>> noncortex_value = 3 >>> #labels = [2] >>> labels = range(1002,1036) + range(2002,2036) >>> labels.remove(1004) >>> labels.remove(2004) >>> labels.remove(1032) >>> labels.remove(2032) >>> labels.remove(1033) >>> labels.remove(2033) >>> names = [] >>> resize = True >>> propagate = False >>> output_dir = '' >>> save_table = True >>> output_table = '' >>> label_volume_thickness, output_table = thickinthehead(segmented_file, labeled_file, cortex_value, noncortex_value, labels, names, resize, propagate, output_dir, save_table, output_table) """ import os import numpy as np import nibabel as nb from mindboggle.utils.utils import execute #------------------------------------------------------------------------- # Output files: #------------------------------------------------------------------------- if output_dir: if not os.path.exists(output_dir): os.mkdir(output_dir) else: output_dir = os.getcwd() cortex = os.path.join(output_dir, 'cortex.nii.gz') noncortex = os.path.join(output_dir, 'noncortex.nii.gz') temp = os.path.join(output_dir, 'temp.nii.gz') inner_edge = os.path.join(output_dir, 'cortex_inner_edge.nii.gz') use_outer_edge = True if use_outer_edge: outer_edge = os.path.join(output_dir, 'cortex_outer_edge.nii.gz') if save_table: if output_table: output_table = os.path.join(os.getcwd(), output_table) else: output_table = os.path.join(os.getcwd(), 'thickinthehead_per_label.csv') fid = open(output_table, 'w') if names: fid.write( "Label name,Label number,Volume,Thickness (thickinthehead)\n") else: fid.write("Label number,Volume,Thickness (thickinthehead)\n") else: output_table = '' #------------------------------------------------------------------------- # Extract noncortex and cortex: #------------------------------------------------------------------------- cmd = [ 'ThresholdImage 3', segmented_file, noncortex, str(noncortex_value), str(noncortex_value), '1 0' ] execute(cmd) cmd = [ 'ThresholdImage 3', segmented_file, cortex, str(cortex_value), str(cortex_value), '1 0' ] execute(cmd) #------------------------------------------------------------------------- # Either mask labels with cortex or fill cortex with labels: #------------------------------------------------------------------------- if propagate: cmd = [ 'ImageMath', '3', cortex, 'PropagateLabelsThroughMask', 
cortex, labeled_file ] execute(cmd) else: cmd = ['ImageMath 3', cortex, 'm', cortex, labeled_file] execute(cmd) #------------------------------------------------------------------------- # Load data and dimensions: #------------------------------------------------------------------------- if resize: rescale = 2.0 else: rescale = 1.0 compute_real_volume = True if compute_real_volume: img = nb.load(cortex) hdr = img.get_header() vv_orig = np.prod(hdr.get_zooms()) vv = np.prod([x / rescale for x in hdr.get_zooms()]) cortex_data = img.get_data().ravel() else: vv = 1 / rescale cortex_data = nb.load(cortex).get_data().ravel() #------------------------------------------------------------------------- # Resample cortex and noncortex files from 1x1x1 to 0.5x0.5x0.5 # to better represent the contours of the boundaries of the cortex: #------------------------------------------------------------------------- if resize: dims = ' '.join([str(1 / rescale), str(1 / rescale), str(1 / rescale)]) cmd = ['ResampleImageBySpacing 3', cortex, cortex, dims, '0 0 1'] execute(cmd) cmd = ['ResampleImageBySpacing 3', noncortex, noncortex, dims, '0 0 1'] execute(cmd) #------------------------------------------------------------------------- # Extract outer and inner boundary voxels of the cortex, # by eroding 1 (resampled) voxel for cortex voxels (2) bordering # the outside of the brain (0) and bordering noncortex (3): #------------------------------------------------------------------------- cmd = ['ImageMath 3', inner_edge, 'MD', noncortex, '1'] execute(cmd) cmd = ['ImageMath 3', inner_edge, 'm', cortex, inner_edge] execute(cmd) if use_outer_edge: cmd = ['ThresholdImage 3', cortex, outer_edge, '1 10000 1 0'] execute(cmd) cmd = ['ImageMath 3', outer_edge, 'ME', outer_edge, '1'] execute(cmd) cmd = ['ThresholdImage 3', outer_edge, outer_edge, '1 1 0 1'] execute(cmd) cmd = ['ImageMath 3', outer_edge, 'm', cortex, outer_edge] execute(cmd) cmd = ['ThresholdImage 3', inner_edge, temp, '1 10000 1 0'] execute(cmd) cmd = ['ThresholdImage 3', temp, temp, '1 1 0 1'] execute(cmd) cmd = ['ImageMath 3', outer_edge, 'm', temp, outer_edge] execute(cmd) #------------------------------------------------------------------------- # Load data: #------------------------------------------------------------------------- inner_edge_data = nb.load(inner_edge).get_data().ravel() if use_outer_edge: outer_edge_data = nb.load(outer_edge).get_data().ravel() #------------------------------------------------------------------------- # Loop through labels: #------------------------------------------------------------------------- if not labels: labeled_data = nb.load(labeled_file).get_data().ravel() labels = np.unique(labeled_data) labels = [int(x) for x in labels] label_volume_thickness = -1 * np.ones((len(labels), 3)) label_volume_thickness[:, 0] = labels for ilabel, label in enumerate(labels): if names: name = names[ilabel] #--------------------------------------------------------------------- # Compute thickness as a ratio of label volume and edge volume: # - Estimate middle cortical surface area by the average volume # of the outer and inner boundary voxels of the cortex. # - Compute the volume of a labeled region of cortex. # - Estimate the thickness of the labeled cortical region as the # volume of the labeled region divided by the surface area. 
        #---------------------------------------------------------------------
        label_cortex_volume = vv_orig * len(np.where(cortex_data == label)[0])
        label_inner_edge_volume = vv * len(np.where(inner_edge_data == label)[0])
        if label_inner_edge_volume:
            if use_outer_edge:
                label_outer_edge_volume = \
                    vv * len(np.where(outer_edge_data == label)[0])
                label_area = (label_inner_edge_volume +
                              label_outer_edge_volume) / 2.0
            else:
                label_area = label_inner_edge_volume
            thickness = label_cortex_volume / label_area
            label_volume_thickness[ilabel, 1] = label_cortex_volume
            label_volume_thickness[ilabel, 2] = thickness

            if names:
                print('{0} ({1}) volume={2:2.2f}, thickness={3:2.2f}mm'.format(
                    name, label, label_cortex_volume, thickness))
            else:
                # Only three format arguments, so use indices 0-2:
                print('{0}, volume={1:2.2f}, thickness={2:2.2f}mm'.format(
                    label, label_cortex_volume, thickness))
            if save_table:
                if names:
                    fid.write('{0}, {1}, {2:2.4f}, {3:2.4f}\n'.format(
                        name, label, label_cortex_volume, thickness))
                else:
                    fid.write('{0}, {1:2.4f}, {2:2.4f}\n'.format(
                        label, label_cortex_volume, thickness))

    label_volume_thickness = label_volume_thickness.transpose().tolist()

    return label_volume_thickness, output_table
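# Illustrative sketch (not part of the original module): why the version above
# uses two voxel volumes. The labeled cortex is counted on the original grid
# (vv_orig per voxel), while the inner/outer edges are counted on the 2x
# upsampled grid (vv per voxel), so the two volumes stay in the same units.
def _example_voxel_volume_rescaling():
    zooms = (1.0, 1.0, 1.0)         # hypothetical 1 mm isotropic voxels
    rescale = 2.0
    vv_orig = zooms[0] * zooms[1] * zooms[2]                   # 1.0 mm^3
    vv = (zooms[0] / rescale) * (zooms[1] / rescale) * (zooms[2] / rescale)
    # vv == 0.125 mm^3: one original voxel corresponds to 8 resampled voxels,
    # so 8 * vv == vv_orig and volumes computed on either grid are comparable.
    return vv_orig, vv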
def PropagateLabelsThroughMask(mask, labels, mask_index=None,
                               output_file='', binarize=True, stopvalue=''):
    """
    Use ANTs to fill a binary volume mask with initial labels.

    This program uses ThresholdImage and the ImageMath
    PropagateLabelsThroughMask functions in ANTs.

    ThresholdImage ImageDimension ImageIn.ext outImage.ext
        threshlo threshhi <insideValue> <outsideValue>

    PropagateLabelsThroughMask: Final output is the propagated label image.
        ImageMath ImageDimension Out.ext PropagateLabelsThroughMask
        speed/binaryimagemask.nii.gz initiallabelimage.nii.gz ...

    Parameters
    ----------
    mask : string
        nibabel-readable image volume
    labels : string
        nibabel-readable image volume with integer labels
    mask_index : integer (optional)
        mask with just voxels having this value
    output_file : string
        nibabel-readable labeled image volume
    binarize : Boolean
        binarize mask?
    stopvalue : integer
        stopping value

    Returns
    -------
    output_file : string
        name of labeled output nibabel-readable image volume

    Examples
    --------
    >>> import os
    >>> from mindboggle.utils.ants import PropagateLabelsThroughMask
    >>> from mindboggle.utils.plots import plot_volumes
    >>> path = os.path.join(os.environ['MINDBOGGLE_DATA'])
    >>> labels = os.path.join(path, 'arno', 'labels', 'labels.DKT25.manual.nii.gz')
    >>> mask = os.path.join(path, 'arno', 'mri', 't1weighted_brain.nii.gz')
    >>> mask_index = None
    >>> output_file = ''
    >>> binarize = True
    >>> stopvalue = None
    >>> output_file = PropagateLabelsThroughMask(mask, labels, mask_index, output_file, binarize, stopvalue)
    >>> # View
    >>> plot_volumes(output_file)

    """
    import os
    from mindboggle.utils.utils import execute

    if not output_file:
        output_file = os.path.join(os.getcwd(),
                                   os.path.basename(labels) + '_through_' +
                                   os.path.basename(mask))

    print('mask: {0}, labels: {1}'.format(mask, labels))

    # Binarize image volume:
    if binarize:
        temp_file = os.path.join(os.getcwd(),
                                 'PropagateLabelsThroughMask.nii.gz')
        cmd = ['ThresholdImage', '3', mask, temp_file, '0 1 0 1']
        execute(cmd, 'os')
        mask = temp_file

    # Mask with just voxels having mask_index value:
    if mask_index:
        mask2 = os.path.join(os.getcwd(), 'temp.nii.gz')
        cmd = 'ThresholdImage 3 {0} {1} {2} {3} 1 0'.format(mask, mask2,
                                                            mask_index,
                                                            mask_index)
        execute(cmd)
    else:
        mask2 = mask

    # Propagate labels:
    cmd = ['ImageMath', '3', output_file, 'PropagateLabelsThroughMask',
           mask2, labels]
    if stopvalue:
        # Append the stopping value as a single argument
        # (extend() would split a string into characters):
        cmd.append(str(stopvalue))
    execute(cmd, 'os')
    if not os.path.exists(output_file):
        raise (IOError("ImageMath did not create " + output_file + "."))

    return output_file
def ANTS(source, target, iterations='30x99x11', output_stem=''): """ Use ANTs to register a source image volume to a target image volume. This program uses the ANTs SyN registration method. Parameters ---------- source : string file name of source image volume target : string file name of target image volume iterations : string number of iterations ("0" for affine, "30x99x11" default) output_stem : string file name stem for output transform matrix Returns ------- affine_transform : string file name for affine transform matrix nonlinear_transform : string file name for nonlinear transform nifti file nonlinear_inverse_transform : string file name for nonlinear inverse transform nifti file output_stem : string file name stem for output transform matrix Examples -------- >>> import os >>> from mindboggle.utils.ants import ANTS >>> path = os.environ['MINDBOGGLE_DATA'] >>> source = os.path.join(path, 'arno', 'mri', 't1weighted_brain.nii.gz') >>> target = os.path.join(path, 'atlases', 'MNI152_T1_1mm_brain.nii.gz') >>> iterations = "0" >>> output_stem = "" >>> # >>> ANTS(source, target, iterations, output_stem) """ import os from mindboggle.utils.utils import execute if not output_stem: src = os.path.basename(source).split('.')[0] tgt = os.path.basename(target).split('.')[0] output_stem = os.path.join(os.getcwd(), src+'_to_'+tgt) cmd = ['ANTS', '3', '-m CC[' + target + ',' + source + ',1,2]', '-r Gauss[2,0]', '-t SyN[0.5] -i', iterations, '-o', output_stem, '--use-Histogram-Matching', '--number-of-affine-iterations 10000x10000x10000x10000x10000'] execute(cmd, 'os') affine_transform = output_stem + 'Affine.txt' nonlinear_transform = output_stem + 'Warp.nii.gz' nonlinear_inverse_transform = output_stem + 'InverseWarp.nii.gz' if not os.path.exists(affine_transform): raise(IOError(affine_transform + " not found")) if not os.path.exists(nonlinear_transform): raise(IOError(nonlinear_transform + " not found")) if not os.path.exists(nonlinear_inverse_transform): raise(IOError(nonlinear_inverse_transform + " not found")) return affine_transform, nonlinear_transform,\ nonlinear_inverse_transform, output_stem
def label_with_classifier(subject, hemi, left_classifier='', right_classifier='', annot_file=''): """ Label a brain with the DKT atlas using FreeSurfer's mris_ca_label FreeSurfer documentation :: SYNOPSIS mris_ca_label [options] <subject> <hemi> <surf> <classifier> <output> DESCRIPTION For a single subject, produces an annotation file, in which each cortical surface vertex is assigned a neuroanatomical label. This automatic procedure employs data from a previously-prepared atlas file. An atlas file is created from a training set, capturing region data manually drawn by neuroanatomists combined with statistics on variability correlated to geometric information derived from the cortical model (sulcus and curvature). Besides the atlases provided with FreeSurfer, new ones can be prepared using mris_ca_train). Notes regarding creation and use of FreeSurfer Gaussian classifier atlas: Create the DKT classifier atlas (?h.DKTatlas40.gcs) --NEED TO VERIFY THIS: $ mris_ca_train -t $FREESURFERHOME/average/colortable_desikan_killiany.txt \ $hemi sphere.reg aparcNMMjt.annot $SCANS ./$hemi.DKTatlas40.gcs Label a brain with the DKT atlas (surface annotation file ?h.DKTatlas40.annot): $ mris_ca_label -l ./$x/label/$hemi.cortex.label $x/ $hemi sphere.reg \ ./$hemi.DKTatlas40.gcs ./$x/label/$hemi.DKTatlas40.annot Label the cortex of a subject's segmented volume according to the edited surface labels (?h.aparcNMMjt.annot): $ mri_aparc2aseg --s ./x --volmask --annot aparcNMMjt Label a brain with the DKT atlas using FreeSurfer's mris_ca_label: $ mris_ca_label MMRR-21-1 lh lh.sphere.reg ../lh.DKTatlas40.gcs ../out.annot Parameters ---------- subject : string subject corresponding to FreeSurfer subject directory hemi : string hemisphere ['lh' or 'rh'] left_classifier : string name of left hemisphere FreeSurfer classifier atlas (full path) right_classifier : string name of right hemisphere FreeSurfer classifier atlas (full path) annot_file : string name of output .annot file Returns ------- annot_file : string name of output .annot file Examples -------- >>> # This example requires a FreeSurfer subjects/<subject> subdirectory >>> import os >>> from mindboggle.labels.label_free import label_with_classifier >>> subject = 'Twins-2-1' >>> hemi = 'lh' >>> left_classifier = '/homedir/mindboggle_cache/b28a600a713c269f4c561f66f64337b2/lh.DKTatlas40.gcs' >>> right_classifier = '' >>> annot_file = './lh.classifier.annot' >>> label_with_classifier(subject, hemi, left_classifier, right_classifier, annot_file) >>> # >>> # View: >>> from mindboggle.utils.io_free import annot_to_vtk >>> from mindboggle.utils.plots import plot_vtk >>> path = os.environ['MINDBOGGLE_DATA'] >>> vtk_file = os.path.join(path, 'arno', 'freesurfer', 'lh.pial.vtk') >>> output_vtk = './lh.classifier.vtk' >>> # >>> labels, output_vtk = annot_to_vtk(annot_file, vtk_file, output_vtk) >>> plot_vtk(output_vtk) """ import os from mindboggle.utils.utils import execute if not annot_file: annot_file = os.path.join(os.getcwd(), hemi + '.classifier.annot') if hemi == 'lh': classifier = left_classifier elif hemi == 'rh': classifier = right_classifier else: print("label_with_classifier()'s hemi should be 'lh' or 'rh'") cmd = ['mris_ca_label', subject, hemi, hemi+'.sphere.reg', classifier, annot_file] execute(cmd) if not os.path.exists(annot_file): raise(IOError(annot_file + " not found")) return annot_file
def WarpImageMultiTransform(source, target, output='', interp='--use-NN', xfm_stem='', affine_transform='', nonlinear_transform='', inverse=False, affine_only=False): """ Use ANTs to transform a source image volume to a target image volume. This program uses the ANTs WarpImageMultiTransform function. Parameters ---------- source : string file name of source image volume target : string file name of target (reference) image volume output : string file name of output image volume interp : string interpolation type ("--use-NN" for nearest neighbor) xfm_stem : string file name stem for output transform affine_transform : string file containing affine transform nonlinear_transform : string file containing nonlinear transform inverse : Boolean apply inverse transform? affine_only : Boolean apply only affine transform? Returns ------- output : string output label file name """ import os import sys from mindboggle.utils.utils import execute if xfm_stem: affine_transform = xfm_stem + 'Affine.txt' if inverse: nonlinear_transform = xfm_stem + 'InverseWarp.nii.gz' else: nonlinear_transform = xfm_stem + 'Warp.nii.gz' elif not affine_transform and not nonlinear_transform: sys.exit('Provide either xfm_stem or affine_transform and ' 'nonlinear_transform.') if not output: output = os.path.join(os.getcwd(), 'WarpImageMultiTransform.nii.gz') if not os.path.exists(nonlinear_transform): affine_only = True if affine_only: if inverse: cmd = ['WarpImageMultiTransform', '3', source, output, '-R', target, interp, '-i', affine_transform] else: cmd = ['WarpImageMultiTransform', '3', source, output, '-R', target, interp, affine_transform] else: if inverse: cmd = ['WarpImageMultiTransform', '3', source, output, '-R', target, interp, '-i', affine_transform, nonlinear_transform] else: cmd = ['WarpImageMultiTransform', '3', source, output, '-R', target, interp, nonlinear_transform, affine_transform] execute(cmd, 'os') if not os.path.exists(output): raise(IOError(output + " not found")) return output
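# Illustrative sketch (not part of the original module): a possible call to
# WarpImageMultiTransform() using the transform stem produced by ANTS() above.
# The file names are hypothetical, and the MINDBOGGLE_DATA layout is assumed
# to match the other examples in this module.
def _example_warp_image_multi_transform():
    import os
    source = os.path.join(os.environ['MINDBOGGLE_DATA'],
                          'arno', 'mri', 't1weighted_brain.nii.gz')
    target = os.path.join(os.environ['MINDBOGGLE_DATA'],
                          'atlases', 'MNI152_T1_1mm_brain.nii.gz')
    xfm_stem = 't1weighted_brain_to_MNI152_T1_1mm_brain'   # stem from ANTS()
    return WarpImageMultiTransform(source, target, output='',
                                   interp='--use-NN', xfm_stem=xfm_stem)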
def antsApplyTransformsToPoints(points, transform_files,
                                inverse_booleans=[0]):
    """
    Run ANTs antsApplyTransformsToPoints function to transform points.
    (Creates pre- and post-transformed .csv points files for ANTs.)

    Parameters
    ----------
    points : list of lists of three integers
        point coordinate data
    transform_files : list
        transform file names
    inverse_booleans : list
        for each transform, one to apply inverse of transform
        (otherwise zero)

    Returns
    -------
    transformed_points : list of lists of three integers
        transformed point coordinate data

    Examples
    --------
    >>> from mindboggle.utils.ants import antsApplyTransformsToPoints
    >>> from mindboggle.utils.io_vtk import read_vtk
    >>> transform_files = ['/Users/arno/Data/antsCorticalThickness/Twins-2-1/antsTemplateToSubject1GenericAffine.mat','/Users/arno/Data/antsCorticalThickness/Twins-2-1/antsTemplateToSubject0Warp.nii.gz','/Users/arno/Data/antsCorticalThickness/Twins-2-1/antsSubjectToTemplate0GenericAffine.mat','/Users/arno/Data/antsCorticalThickness/Twins-2-1/antsSubjectToTemplate1Warp.nii.gz']
    >>> transform_files = [transform_files[0],transform_files[1],'/Users/arno/Data/mindboggle_cache/f36e3d5d99f7c4a9bb70e2494ed7340b/OASIS-30_Atropos_template_to_MNI152_affine.txt']
    >>> vtk_file = '/Users/arno/mindboggle_working/Twins-2-1/Mindboggle/_hemi_lh/Surface_to_vtk/lh.pial.vtk'
    >>> faces, lines, indices, points, npoints, scalars, name, foo1 = read_vtk(vtk_file)
    >>> inverse_booleans = [0,0,1]
    >>> transformed_points = antsApplyTransformsToPoints(points, transform_files, inverse_booleans)

    """
    import os
    from mindboggle.utils.utils import execute

    #-------------------------------------------------------------------------
    # Write points (x,y,z,1) to a .csv file:
    #-------------------------------------------------------------------------
    points_file = os.path.join(os.getcwd(), 'points.csv')
    fid = open(points_file, 'w')
    fid.write('x,y,z,t\n')
    for point in points:
        string_of_zeros = (4 - len(point)) * ',0'
        fid.write(','.join([str(x) for x in point]) + string_of_zeros + '\n')
    fid.close()

    #-------------------------------------------------------------------------
    # Apply transforms to points in .csv file:
    #-------------------------------------------------------------------------
    transformed_points_file = os.path.join(os.getcwd(),
                                           'transformed_points.csv')
    transform_string = ''
    for ixfm, transform_file in enumerate(transform_files):
        transform_string += " --t [{0},{1}]".\
            format(transform_file, str(inverse_booleans[ixfm]))
    cmd = ['antsApplyTransformsToPoints', '-d', '3', '-i', points_file,
           '-o', transformed_points_file, transform_string]
    execute(cmd, 'os')
    if not os.path.exists(transformed_points_file):
        str1 = "antsApplyTransformsToPoints did not create "
        raise (IOError(str1 + transformed_points_file + "."))

    #-------------------------------------------------------------------------
    # Return transformed points:
    #-------------------------------------------------------------------------
    fid = open(transformed_points_file, 'r')
    lines = fid.readlines()
    fid.close()
    transformed_points = []
    for iline, line in enumerate(lines):
        if iline > 0:
            point_xyz1 = [float(x) for x in line.split(',')]
            transformed_points.append(point_xyz1[0:3])

    return transformed_points
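# Illustrative sketch (not part of the original module): the .csv format that
# antsApplyTransformsToPoints() writes and reads back. Each surface point is
# written as an x,y,z,t row (t padded with 0), and only the first three
# columns of each transformed row are returned. The points below are made up.
def _example_points_csv_roundtrip():
    points = [[1.5, -2.0, 3.25], [0.0, 0.0, 0.0]]     # hypothetical points
    header = 'x,y,z,t'
    rows = [','.join([str(x) for x in point]) + ',0' for point in points]
    # rows == ['1.5,-2.0,3.25,0', '0.0,0.0,0.0,0']
    # After ANTs rewrites the file, each line is parsed back like this:
    transformed_points = [[float(x) for x in row.split(',')][0:3]
                          for row in rows]
    return header, transformed_points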