Example #1
def main():
    zernike_fn = zernike

    parser = argparse.ArgumentParser()
    parser.add_argument('--debug', nargs='?', default=None, const='debug', choices=['debug', 'info', 'warning', 'error', 'critical']) 
    parser.add_argument('vtk_file', nargs='?', default=None)
    parser.add_argument('-o', '--order', type=int, default=3)
    parser.add_argument('-p', '--profile', nargs='?', default=None, const='stdout')
    parser.add_argument('-t', '--timecall', default=False, action='store_true')
    parser.add_argument('-v', '--validate', default=False, action='store_true')
    ns = parser.parse_args()

    if ns.debug is not None:
        logging.basicConfig(level=getattr(logging, ns.debug.upper()))

#    if ns.profile is not None:
#        filename = ns.profile
#        if ns.profile == 'stdout':
#            filename = None
#        zernike_fn = profilehooks.profile(zernike_fn, immediate=False, filename=filename)

#    if ns.timecall:
#        zernike_fn = profilehooks.timecall(zernike_fn)

    if ns.vtk_file is not None:
        points, indices, lines, faces, depths, scalar_names, npoints, \
            input_vtk = read_vtk(ns.vtk_file)
        print('{0} {1}'.format(len(faces), len(points)))
        X = zernike_fn(points, faces, order=ns.order, scale_input=True)
        if ns.validate:
            Y = zernike_fn(points, faces, order=ns.order, scale_input=True, pl_cls=MultiprocPipeline)
            assert np.allclose(X, Y)
    else:
        example1()
Example #2
def propagate_fundus_lines(surf_file, fundus_lines_file, thickness_file):
    """Propagate fundus lines to tile the surface.

    Parameters
    ----------
    surf_file: file containing the surface geometry in vtk format
    fundus_lines_file: file containing scalars representing fundus lines
    thickness_file: file containing cortical thickness scalar data
    (for masking out the medial wall only)

    Returns
    -------
    scalars indicating whether each vertex is part of the closed
    fundus lines or not
    """
    from mindboggle.mio.vtks import read_vtk, read_scalars

    points, indices, lines, faces, fundus_lines, scalar_names, num_points, \
        input_vtk = read_vtk(surf_file, return_first=True, return_array=True)

    fundus_lines, _ = read_scalars(fundus_lines_file)
    fundus_line_indices = [i for i, x in enumerate(fundus_lines) if x > 0.5]

    thickness, _ = read_scalars(thickness_file,
                                return_first=True, return_array=True)

    # Note: this calls a same-named helper that operates on in-memory
    # points and faces (presumably imported elsewhere), not this
    # file-based wrapper itself:
    return propagate_fundus_lines(
        points, faces, fundus_line_indices, thickness)
Example #3
def propagate_fundus_lines(surf_file, fundus_lines_file, thickness_file):
    """Propagate fundus lines to tile the surface.

    Parameters
    ----------
    surf_file: file containing the surface geometry in vtk format
    fundus_lines_file: file containing scalars representing fundus lines
    thickness_file: file containing cortical thickness scalar data
    (for masking out the medial wall only)

    Returns
    -------
    scalars indicating whether each vertex is part of the closed
    fundus lines or not
    """
    from mindboggle.mio.vtks import read_vtk, read_scalars

    faces, _, _, points, num_points, fundus_lines, _, _ = read_vtk(
        surf_file, return_first=True, return_array=True)

    fundus_lines, _ = read_scalars(fundus_lines_file)
    fundus_line_indices = [i for i, x in enumerate(fundus_lines) if x > 0.5]

    thickness, _ = read_scalars(thickness_file,
                                return_first=True, return_array=True)

    # Note: this calls a same-named helper that operates on in-memory
    # points and faces (presumably imported elsewhere), not this
    # file-based wrapper itself:
    return propagate_fundus_lines(
        points, faces, fundus_line_indices, thickness)
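
Both versions above reduce the fundus-line scalars to vertex indices by
thresholding at 0.5. That step in isolation, as a stand-alone sketch (the
helper name is ours, not mindboggle's):

def scalar_indices(scalars, threshold=0.5):
    """Indices of vertices whose scalar value exceeds threshold."""
    return [i for i, x in enumerate(scalars) if x > threshold]

# e.g. scalar_indices([0.0, 0.9, 0.2, 1.0]) -> [1, 3]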
Example #4
def main():
    zernike_fn = zernike_moments

    parser = argparse.ArgumentParser()
    parser.add_argument('--debug', nargs='?', default=None, const='debug', choices=['debug', 'info', 'warning', 'error', 'critical']) 
    parser.add_argument('vtk_file', nargs='?', default=None)
    parser.add_argument('-o', '--order', type=int, default=3)
    parser.add_argument('-p', '--profile', nargs='?', default=None, const='stdout')
    parser.add_argument('-t', '--timecall', default=False, action='store_true')
    parser.add_argument('-v', '--validate', default=False, action='store_true')
    ns = parser.parse_args()

    if ns.debug is not None:
        logging.basicConfig(level=getattr(logging, ns.debug.upper()))

    if ns.profile is not None:
        filename = ns.profile
        if ns.profile == 'stdout':
            filename = None
        zernike_fn = profilehooks.profile(zernike_fn, immediate=False, filename=filename)

    if ns.timecall:
        zernike_fn = profilehooks.timecall(zernike_fn)

    if ns.vtk_file is not None:
        points, indices, lines, faces, depths, scalar_names, npoints, \
            input_vtk = read_vtk(ns.vtk_file)
        print(len(faces), len(points))
        X = zernike_fn(points, faces, order=ns.order, scale_input=True)
        if ns.validate:
            Y = zernike_fn(points, faces, order=ns.order, scale_input=True, pl_cls=MultiprocPipeline)
            assert np.allclose(X, Y)
    else:
        example1()
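
For readers without profilehooks installed, the timecall wrapping above can
be imitated with the standard library alone. A minimal sketch of the idea
(not the profilehooks API):

import functools
import time

def timecall_sketch(fn):
    """Print the wall-clock time of each call to fn."""
    @functools.wraps(fn)
    def wrapper(*args, **kwargs):
        t0 = time.time()
        result = fn(*args, **kwargs)
        print('{0}: {1:.3f} s'.format(fn.__name__, time.time() - t0))
        return result
    return wrapper

# zernike_fn = timecall_sketch(zernike_fn)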
Example #5
def decimate_file(input_vtk,
                  reduction=0.5,
                  smooth_steps=100,
                  save_vtk=True,
                  output_vtk=''):
    """
    Decimate vtk triangular mesh file with vtk.vtkDecimatePro.

    Parameters
    ----------
    input_vtk : string
        input vtk file with triangular surface mesh
    reduction : float
        fraction of mesh faces to remove
    smooth_steps : integer
        number of smoothing steps after decimation (0 for no smoothing)
    save_vtk : Boolean
        output decimated vtk file?
    output_vtk : string
        output decimated vtk file name

    Returns
    -------
    output_vtk : string
        output decimated vtk file

    Examples
    --------
    >>> import os
    >>> from mindboggle.guts.mesh import decimate_file
    >>> from mindboggle.mio.plots import plot_surfaces
    >>> path = os.environ['MINDBOGGLE_DATA']
    >>> input_vtk = os.path.join(path, 'arno', 'labels', 'label22.vtk')
    >>> #input_vtk='/drop/MB/data/arno/labels/lh.labels.DKT31.manual.vtk'
    >>> save_vtk = True
    >>> output_vtk = ''
    >>> reduction = 0.5
    >>> smooth_steps = 0
    >>> decimate_file(input_vtk, reduction, smooth_steps, save_vtk, output_vtk)
    >>> # View:
    >>> plot_surfaces('decimated.vtk') # doctest: +SKIP

    """
    from mindboggle.mio.vtks import read_vtk
    from mindboggle.guts.mesh import decimate

    if not save_vtk:
        raise NotImplementedError()

    # Read VTK surface mesh file:
    points, indices, lines, faces, scalars, scalar_names, npoints, \
            input_vtk = read_vtk(input_vtk)

    # Decimate vtk triangular mesh with vtk.vtkDecimatePro
    points, faces, scalars, output_vtk = decimate(points, faces, reduction,
                                                  smooth_steps, scalars,
                                                  save_vtk, output_vtk)
    return output_vtk
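
Note that reduction is the fraction of faces to remove, not a target size.
A small arithmetic helper (ours, purely illustrative) makes the conversion
from a target face count explicit:

def reduction_for_target(n_faces, target_faces):
    """Fraction of faces vtkDecimatePro should remove to reach target_faces."""
    if target_faces >= n_faces:
        return 0.0
    return 1.0 - float(target_faces) / n_faces

# e.g. reduction_for_target(10000, 5000) -> 0.5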
Example #6
def spectrum_from_file(vtk_file, spectrum_size=10, exclude_labels=[-1],
                       normalization=None, area_file='', verbose=False):
    """
    Compute Laplace-Beltrami spectrum of a 3D shape in a VTK file.

    Parameters
    ----------
    vtk_file : string
        the input vtk file
    spectrum_size : integer
        number of eigenvalues to be computed (the length of the spectrum)
    exclude_labels : list of integers
        labels to be excluded
    normalization : string
        the method used to normalize eigenvalues ('area' or None)
        if "area", use area of the 2D structure as in Reuter et al. 2006
    area_file :  string
        name of VTK file with surface area scalar values
    verbose : bool
        print statements?

    Returns
    -------
    spectrum : list of floats
        first spectrum_size of Laplace-Beltrami spectrum

    Examples
    --------
    >>> # Spectrum for entire left hemisphere of Twins-2-1:
    >>> import numpy as np
    >>> from mindboggle.shapes.laplace_beltrami import spectrum_from_file
    >>> from mindboggle.shapes.laplace_beltrami import spectrum_per_label
    >>> from mindboggle.mio.fetch_data import prep_tests
    >>> urls, fetch_data = prep_tests()
    >>> vtk_file = fetch_data(urls['left_freesurfer_labels'], '', '.vtk')
    >>> spectrum = spectrum_from_file(vtk_file, spectrum_size=6)
    >>> print(np.array_str(np.array(spectrum[1::]),
    ...                    precision=5, suppress_small=True))
    [ 0.00013  0.00027  0.00032  0.00047  0.00058]

    """
    from mindboggle.mio.vtks import read_vtk, read_scalars
    from mindboggle.shapes.laplace_beltrami import spectrum_of_largest

    points, indices, lines, faces, scalars, scalar_names, npoints, \
            input_vtk = read_vtk(vtk_file)

    # Area file:
    if area_file:
        areas, u1 = read_scalars(area_file)
    else:
        areas = None

    spectrum = spectrum_of_largest(points, faces, spectrum_size,
                                   exclude_labels, normalization, areas,
                                   verbose)

    return spectrum
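
The normalization='area' option refers to scaling by surface area as in
Reuter et al. 2006: Laplace-Beltrami eigenvalues shrink as a surface grows,
so multiplying by the area makes spectra of differently sized shapes
comparable. A sketch of that scaling (mindboggle's exact convention may
differ):

def normalize_spectrum_by_area(spectrum, area):
    """Scale each eigenvalue by the total surface area (Reuter-style)."""
    return [eigenvalue * area for eigenvalue in spectrum]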
Example #7
File: mesh.py  Project: nicholsn/mindboggle
def decimate_file(input_vtk, reduction=0.5, smooth_steps=100,
                  save_vtk=True, output_vtk=''):
    """
    Decimate vtk triangular mesh file with vtk.vtkDecimatePro.

    Parameters
    ----------
    input_vtk : string
        input vtk file with triangular surface mesh
    reduction : float
        fraction of mesh faces to remove
    smooth_steps : integer
        number of smoothing steps after decimation (0 for no smoothing)
    save_vtk : Boolean
        output decimated vtk file?
    output_vtk : string
        output decimated vtk file name

    Returns
    -------
    output_vtk : string
        output decimated vtk file

    Examples
    --------
    >>> import os
    >>> from mindboggle.guts.mesh import decimate_file
    >>> from mindboggle.mio.plots import plot_surfaces
    >>> path = os.environ['MINDBOGGLE_DATA']
    >>> input_vtk = os.path.join(path, 'arno', 'labels', 'label22.vtk')
    >>> #input_vtk='/drop/MB/data/arno/labels/lh.labels.DKT31.manual.vtk'
    >>> save_vtk = True
    >>> output_vtk = ''
    >>> reduction = 0.5
    >>> smooth_steps = 0
    >>> decimate_file(input_vtk, reduction, smooth_steps, save_vtk, output_vtk)
    >>> # View:
    >>> plot_surfaces('decimated.vtk') # doctest: +SKIP

    """
    from mindboggle.mio.vtks import read_vtk
    from mindboggle.guts.mesh import decimate

    if not save_vtk:
        raise NotImplementedError()

    # Read VTK surface mesh file:
    points, indices, lines, faces, scalars, scalar_names, npoints, \
            input_vtk = read_vtk(input_vtk)

    # Decimate vtk triangular mesh with vtk.vtkDecimatePro
    points, faces, scalars, output_vtk = decimate(points, faces, reduction,
                                                  smooth_steps, scalars,
                                                  save_vtk, output_vtk)
    return output_vtk
Example #8
def downsample_vtk(vtk_file, sample_rate):
    """Sample rate: number between 0 and 1."""
    from mindboggle.mio.vtks import read_vtk, write_vtk
    from mindboggle.guts.mesh import decimate_file

    if sample_rate < 0 or sample_rate > 1:
        raise ValueError('0 <= sample_rate <= 1; you input %f' % sample_rate)

    # Downsample
    decimate_file(vtk_file, reduction=1 - sample_rate, output_vtk=vtk_file, save_vtk=True, smooth_steps=0)

    # Hack: re-read and re-write the mesh so it is saved in a consistent
    # format (read_vtk's last two return values, npoints and input_vtk,
    # are not arguments to write_vtk):
    vtk_data = read_vtk(vtk_file)
    write_vtk(vtk_file, *vtk_data[:-2])
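
A hypothetical call, keeping roughly 30% of the mesh (sample_rate is the
fraction kept, so the decimation reduction becomes 0.7); the file name is
illustrative:

downsample_vtk('lh.pial.vtk', sample_rate=0.3)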
Example #9
def spectrum_per_label(vtk_file, spectrum_size=10, exclude_labels=[-1],
                       normalization='area', area_file='',
                       largest_segment=True):
    """
    Compute Laplace-Beltrami spectrum per labeled region in a file.

    Parameters
    ----------
    vtk_file : string
        name of VTK surface mesh file containing index scalars (labels)
    spectrum_size : integer
        number of eigenvalues to be computed (the length of the spectrum)
    exclude_labels : list of integers
        labels to be excluded
    normalization : string
        the method used to normalize eigenvalues ('area' or None)
        if "area", use area of the 2D structure as in Reuter et al. 2006
    area_file :  string (optional)
        name of VTK file with surface area scalar values
    largest_segment :  Boolean
        compute spectrum only for largest segment with a given label?

    Returns
    -------
    spectrum_lists : list of lists
        first eigenvalues for each label's Laplace-Beltrami spectrum
    label_list : list of integers
        list of unique labels for which spectra are obtained

    Examples
    --------
    >>> # Uncomment "if label==22:" below to run example:
    >>> # Spectrum for Twins-2-1 left postcentral (22) pial surface:
    >>> import os
    >>> from mindboggle.shapes.laplace_beltrami import spectrum_per_label
    >>> path = os.environ['MINDBOGGLE_DATA']
    >>> vtk_file = os.path.join(path, 'arno', 'labels', 'lh.labels.DKT31.manual.vtk')
    >>> area_file = os.path.join(path, 'arno', 'shapes', 'lh.pial.area.vtk')
    >>> spectrum_size = 6
    >>> exclude_labels = [0]  #[-1]
    >>> largest_segment = True
    >>> spectrum_per_label(vtk_file, spectrum_size, exclude_labels, None,
    ...                    area_file, largest_segment)
    ([[6.3469513010430304e-18,
       0.0005178862383467463,
       0.0017434911095630772,
       0.003667561767487686,
       0.005429017880363784,
       0.006309346984678924]],
     [22])

    """
    from mindboggle.mio.vtks import read_vtk, read_scalars
    from mindboggle.guts.mesh import remove_faces, reindex_faces_points
    from mindboggle.shapes.laplace_beltrami import fem_laplacian,\
        spectrum_of_largest

    # Read VTK surface mesh file:
    faces, u1, u2, points, u4, labels, u5, u6 = read_vtk(vtk_file)

    # Area file:
    if area_file:
        areas, u1 = read_scalars(area_file)
    else:
        areas = None

    # Loop through labeled regions:
    # Collect the unique labels, preserving encounter order:
    ulabels = []
    for x in labels:
        if x not in ulabels and x not in exclude_labels:
            ulabels.append(int(x))
    label_list = []
    spectrum_lists = []
    for label in ulabels:
        #if label == 22:
        #    print("DEBUG: COMPUTE FOR ONLY ONE LABEL")

        # Determine the indices per label:
        Ilabel = [i for i, x in enumerate(labels) if x == label]
        print('{0} vertices for label {1}'.format(len(Ilabel), label))

        # Remove background faces:
        pick_faces = remove_faces(faces, Ilabel)
        pick_faces, pick_points, o1 = reindex_faces_points(pick_faces, points)

        # Compute Laplace-Beltrami spectrum for the label:
        if largest_segment:
            exclude_labels_inner = [-1]
            spectrum = spectrum_of_largest(pick_points, pick_faces,
                                           spectrum_size,
                                           exclude_labels_inner,
                                           normalization, areas)
        else:
            spectrum = fem_laplacian(pick_points, pick_faces,
                                     spectrum_size, normalization)

        # Append to a list of lists of spectra:
        spectrum_lists.append(spectrum)
        label_list.append(label)

    return spectrum_lists, label_list
Example #10
def spectrum_per_label(vtk_file, spectrum_size=10, exclude_labels=[-1],
                       normalization='area', area_file='',
                       largest_segment=True, verbose=False):
    """
    Compute Laplace-Beltrami spectrum per labeled region in a file.

    Parameters
    ----------
    vtk_file : string
        name of VTK surface mesh file containing index scalars (labels)
    spectrum_size : integer
        number of eigenvalues to be computed (the length of the spectrum)
    exclude_labels : list of integers
        labels to be excluded
    normalization : string
        the method used to normalize eigenvalues ('area' or None)
        if "area", use area of the 2D structure as in Reuter et al. 2006
    area_file :  string (optional)
        name of VTK file with surface area scalar values
    largest_segment :  bool
        compute spectrum only for largest segment with a given label?
    verbose : bool
        print statements?

    Returns
    -------
    spectrum_lists : list of lists
        first eigenvalues for each label's Laplace-Beltrami spectrum
    label_list : list of integers
        list of unique labels for which spectra are obtained

    Examples
    --------
    >>> # Uncomment "if label==22:" below to run example:
    >>> # Spectrum for Twins-2-1 left postcentral (22) pial surface:
    >>> import numpy as np
    >>> from mindboggle.shapes.laplace_beltrami import spectrum_per_label
    >>> from mindboggle.mio.fetch_data import prep_tests
    >>> urls, fetch_data = prep_tests()
    >>> vtk_file = fetch_data(urls['left_freesurfer_labels'], '', '.vtk')
    >>> area_file = fetch_data(urls['left_area'], '', '.vtk')
    >>> spectrum_size = 6
    >>> exclude_labels = [0]  #[-1]
    >>> largest_segment = True
    >>> verbose = False
    >>> spectrum_lists, label_list = spectrum_per_label(vtk_file,
    ...     spectrum_size, exclude_labels, None, area_file, largest_segment,
    ...     verbose)
    >>> print(np.array_str(np.array(spectrum_lists[0][1::]),
    ...                    precision=5, suppress_small=True))
    [ 0.00054  0.00244  0.00291  0.00456  0.00575]
    >>> label_list[0:10]
    [1029, 1005, 1011, 1021, 1008, 1025, 999, 1013, 1007, 1022]

    """
    from mindboggle.mio.vtks import read_vtk, read_scalars
    from mindboggle.guts.mesh import keep_faces, reindex_faces_points
    from mindboggle.shapes.laplace_beltrami import fem_laplacian,\
        spectrum_of_largest

    # Read VTK surface mesh file:
    points, indices, lines, faces, labels, scalar_names, npoints, \
        input_vtk = read_vtk(vtk_file)

    # Area file:
    if area_file:
        areas, u1 = read_scalars(area_file)
    else:
        areas = None

    # Loop through labeled regions:
    # Collect the unique labels, preserving encounter order:
    ulabels = []
    for x in labels:
        if x not in ulabels and x not in exclude_labels:
            ulabels.append(int(x))
    label_list = []
    spectrum_lists = []
    for label in ulabels:
        #if label == 22:
        #    print("DEBUG: COMPUTE FOR ONLY ONE LABEL")

        # Determine the indices per label:
        Ilabel = [i for i, x in enumerate(labels) if x == label]
        if verbose:
            print('{0} vertices for label {1}'.format(len(Ilabel), label))

        # Remove background faces:
        pick_faces = keep_faces(faces, Ilabel)
        pick_faces, pick_points, o1 = reindex_faces_points(pick_faces, points)

        # Compute Laplace-Beltrami spectrum for the label:
        if largest_segment:
            exclude_labels_inner = [-1]
            spectrum = spectrum_of_largest(pick_points, pick_faces,
                                           spectrum_size,
                                           exclude_labels_inner,
                                           normalization, areas, verbose)
        else:
            spectrum = fem_laplacian(pick_points, pick_faces, spectrum_size,
                                     normalization, verbose)

        # Append to a list of lists of spectra:
        spectrum_lists.append(spectrum)
        label_list.append(label)

    return spectrum_lists, label_list
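
As the doctests above do with spectrum[1::], downstream comparisons usually
drop the first, near-zero eigenvalue. A minimal sketch of comparing two
labels' spectra that way (the Euclidean distance is our choice, not
mindboggle's):

import numpy as np

def spectrum_distance(spectrum_a, spectrum_b):
    """Distance between two spectra, skipping the first eigenvalue."""
    a = np.asarray(spectrum_a[1:])
    b = np.asarray(spectrum_b[1:])
    return float(np.linalg.norm(a - b))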
Example #11
File: sulci.py  Project: nipy/mindboggle
def extract_sulci(
    labels_file,
    folds_or_file,
    hemi,
    min_boundary=1,
    sulcus_names=[],
    save_file=False,
    output_file="",
    background_value=-1,
    verbose=False,
):
    """
    Identify sulci from folds in a brain surface according to a labeling
    protocol that includes a list of label pairs defining each sulcus.

    Since folds are defined as deep, connected areas of a surface, and since
    folds may be connected to each other in ways that differ across brains,
    there usually does not exist a one-to-one mapping between folds of one
    brain and those of another.  To address the correspondence problem then,
    we need to find just those portions of the folds that correspond across
    brains. To accomplish this, Mindboggle segments folds into sulci, which
    do have a one-to-one correspondence across non-pathological brains.
    Mindboggle defines a sulcus as a folded portion of cortex whose opposing
    banks are labeled with one or more sulcus label pairs in the DKT labeling
    protocol, where each label pair is unique to one sulcus and represents
    a boundary between two adjacent gyri, and each vertex has one gyrus label.

    This function assigns vertices in a fold to a sulcus in one of two cases.
    In the first case, vertices whose labels are in only one label pair in
    the fold are assigned to the label pair’s sulcus if they are connected
    through similarly labeled vertices to the boundary between the two labels.
    In the second case, the segment_regions function propagates labels from
    label borders to vertices whose labels are in multiple label pairs in the
    fold.

    Steps for each fold ::

        1. Remove fold if it has fewer than two labels.
        2. Remove fold if its labels do not contain a sulcus label pair.
        3. Find vertices with labels that are in only one of the fold's
           label boundary pairs. Assign the vertices the sulcus with the label
           pair if they are connected to the label boundary for that pair.
        4. If there are remaining vertices, segment into sets of vertices
           connected to label boundaries, and assign a unique ID to each set.

    Parameters
    ----------
    labels_file : string
        file name for surface mesh VTK containing labels for all vertices
    folds_or_file : numpy array, list or string
        fold number for each vertex / name of VTK file containing fold scalars
    hemi : string
        hemisphere abbreviation in {'lh', 'rh'} for sulcus labels
    min_boundary : integer
        minimum number of vertices for a sulcus label boundary segment
    sulcus_names : list of strings
        names of sulci
    save_file : bool
        save output VTK file?
    output_file : string
        name of output file in VTK format
    background_value : integer or float
        background value
    verbose : bool
        print statements?

    Returns
    -------
    sulci : list of integers
        sulcus numbers for all vertices (-1 for non-sulcus vertices)
    n_sulci : integers
        number of sulci
    sulci_file : string
        output VTK file with sulcus numbers (-1 for non-sulcus vertices)

    Examples
    --------
    >>> # Example 1: Extract sulcus from a fold with one sulcus label pair:
    >>> import numpy as np
    >>> from mindboggle.features.sulci import extract_sulci
    >>> from mindboggle.mio.vtks import read_scalars
    >>> from mindboggle.mio.fetch_data import prep_tests
    >>> urls, fetch_data = prep_tests()
    >>> # Load labels, folds, neighbor lists, and sulcus names and label pairs
    >>> labels_file = fetch_data(urls['left_freesurfer_labels'], '', '.vtk')
    >>> folds_file = fetch_data(urls['left_folds'], '', '.vtk')
    >>> folds_or_file, name = read_scalars(folds_file, True, True)
    >>> save_file = True
    >>> output_file = 'extract_sulci_fold4_1sulcus.vtk'
    >>> background_value = -1
    >>> # Limit number of folds to speed up the test:
    >>> limit_folds = True
    >>> if limit_folds:
    ...     fold_numbers = [4] #[4, 6]
    ...     i0 = [i for i,x in enumerate(folds_or_file) if x not in fold_numbers]
    ...     folds_or_file[i0] = background_value
    >>> hemi = 'lh'
    >>> min_boundary = 10
    >>> sulcus_names = []
    >>> verbose = False
    >>> sulci, n_sulci, sulci_file = extract_sulci(labels_file, folds_or_file,
    ...     hemi, min_boundary, sulcus_names, save_file, output_file,
    ...     background_value, verbose)
    >>> n_sulci  # 23 # (if not limit_folds)
    1
    >>> lens = [len([x for x in sulci if x==y])
    ...         for y in np.unique(sulci) if y != -1]
    >>> lens[0:10]  # [6358, 3288, 7612, 5205, 4414, 6251, 3493, 2566, 4436, 739] # (if not limit_folds)
    [1151]

    View result without background (skip test):

    >>> from mindboggle.mio.plots import plot_surfaces # doctest: +SKIP
    >>> from mindboggle.mio.vtks import rewrite_scalars # doctest: +SKIP
    >>> output = 'extract_sulci_fold4_1sulcus_no_background.vtk'
    >>> rewrite_scalars(sulci_file, output, sulci,
    ...                 'sulci', sulci) # doctest: +SKIP
    >>> plot_surfaces(output) # doctest: +SKIP

    Example 2:  Extract sulcus from a fold with multiple sulcus label pairs:

    >>> folds_or_file, name = read_scalars(folds_file, True, True)
    >>> output_file = 'extract_sulci_fold7_2sulci.vtk'
    >>> # Limit number of folds to speed up the test:
    >>> limit_folds = True
    >>> if limit_folds:
    ...     fold_numbers = [7] #[4, 6]
    ...     i0 = [i for i,x in enumerate(folds_or_file) if x not in fold_numbers]
    ...     folds_or_file[i0] = background_value
    >>> sulci, n_sulci, sulci_file = extract_sulci(labels_file, folds_or_file,
    ...     hemi, min_boundary, sulcus_names, save_file, output_file,
    ...     background_value, verbose)
    >>> n_sulci  # 23 # (if not limit_folds)
    2
    >>> lens = [len([x for x in sulci if x==y])
    ...         for y in np.unique(sulci) if y != -1]
    >>> lens[0:10]  # [6358, 3288, 7612, 5205, 4414, 6251, 3493, 2566, 4436, 739] # (if not limit_folds)
    [369, 93]

    View result without background (skip test):

    >>> from mindboggle.mio.plots import plot_surfaces # doctest: +SKIP
    >>> from mindboggle.mio.vtks import rewrite_scalars # doctest: +SKIP
    >>> output = 'extract_sulci_fold7_2sulci_no_background.vtk'
    >>> rewrite_scalars(sulci_file, output, sulci,
    ...                 'sulci', sulci) # doctest: +SKIP
    >>> plot_surfaces(output) # doctest: +SKIP

    """
    import os
    from time import time
    import numpy as np

    from mindboggle.mio.vtks import read_scalars, read_vtk, rewrite_scalars
    from mindboggle.guts.mesh import find_neighbors
    from mindboggle.guts.segment import extract_borders, propagate, segment_regions
    from mindboggle.mio.labels import DKTprotocol

    # Load fold numbers if folds_or_file is a string:
    if isinstance(folds_or_file, str):
        folds, name = read_scalars(folds_or_file)
    elif isinstance(folds_or_file, list):
        folds = folds_or_file
    elif isinstance(folds_or_file, np.ndarray):
        folds = folds_or_file.tolist()

    dkt = DKTprotocol()

    if hemi == "lh":
        pair_lists = dkt.left_sulcus_label_pair_lists
    elif hemi == "rh":
        pair_lists = dkt.right_sulcus_label_pair_lists
    else:
        raise IOError("Warning: hemisphere not properly specified ('lh' or 'rh').")

    # Load points, faces, and neighbors:
    points, indices, lines, faces, labels, scalar_names, npoints, \
        input_vtk = read_vtk(labels_file)
    neighbor_lists = find_neighbors(faces, npoints)

    # Array of sulcus IDs for fold vertices, initialized with the
    # background value (-1 by default). Since we do not touch gyral
    # vertices, vertices whose labels are not in the label list, or
    # vertices having only one label, their sulcus IDs will remain
    # at the background value:
    sulci = background_value * np.ones(npoints)

    # ------------------------------------------------------------------------
    # Loop through folds
    # ------------------------------------------------------------------------
    fold_numbers = [int(x) for x in np.unique(folds) if x != background_value]
    n_folds = len(fold_numbers)
    if verbose:
        print("Extract sulci from {0} folds...".format(n_folds))
    t0 = time()
    for n_fold in fold_numbers:
        fold_indices = [i for i, x in enumerate(folds) if x == n_fold]
        len_fold = len(fold_indices)

        # List the labels in this fold:
        fold_labels = [labels[x] for x in fold_indices]
        unique_fold_labels = [int(x) for x in np.unique(fold_labels) if x != background_value]

        # --------------------------------------------------------------------
        # NO MATCH -- fold has fewer than two labels
        # --------------------------------------------------------------------
        if verbose and len(unique_fold_labels) < 2:
            # Ignore: sulci already initialized with -1 values:
            if not unique_fold_labels:
                print("  Fold {0} ({1} vertices): " "NO MATCH -- fold has no labels".format(n_fold, len_fold))
            else:
                print(
                    "  Fold {0} ({1} vertices): "
                    "NO MATCH -- fold has only one label ({2})".format(n_fold, len_fold, unique_fold_labels[0])
                )
            # Ignore: sulci already initialized with -1 values

        else:
            # Find all label boundary pairs within the fold:
            indices_fold_pairs, fold_pairs, unique_fold_pairs = extract_borders(
                fold_indices, labels, neighbor_lists, ignore_values=[], return_label_pairs=True
            )

            # Find fold label pairs in the protocol (pairs are already sorted):
            fold_pairs_in_protocol = [x for x in unique_fold_pairs if x in dkt.unique_sulcus_label_pairs]

            if verbose and unique_fold_labels:
                print(
                    "  Fold {0} labels: {1} ({2} vertices)".format(
                        n_fold, ", ".join([str(x) for x in unique_fold_labels]), len_fold
                    )
                )
            # ----------------------------------------------------------------
            # NO MATCH -- fold has no sulcus label pair
            # ----------------------------------------------------------------
            if verbose and not fold_pairs_in_protocol:
                print("  Fold {0}: NO MATCH -- fold has no sulcus label pair".format(n_fold, len_fold))

            # ----------------------------------------------------------------
            # Possible matches
            # ----------------------------------------------------------------
            else:
                if verbose:
                    print(
                        "  Fold {0} label pairs in protocol: {1}".format(
                            n_fold, ", ".join([str(x) for x in fold_pairs_in_protocol])
                        )
                    )

                # Labels in the protocol (includes repeats across label pairs):
                labels_in_pairs = [x for lst in fold_pairs_in_protocol for x in lst]

                # Split the labels into those that appear in exactly one
                # sulcus label pair versus in several pairs:
                unique_labels = []
                nonunique_labels = []
                for label in np.unique(labels_in_pairs):
                    if len([x for x in labels_in_pairs if x == label]) == 1:
                        unique_labels.append(label)
                    else:
                        nonunique_labels.append(label)

                # ------------------------------------------------------------
                # Vertices whose labels are in only one sulcus label pair
                # ------------------------------------------------------------
                # Find vertices with a label that is in only one of the fold's
                # label pairs (the other label in the pair can exist in other
                # pairs). Assign the vertices the sulcus with the label pair
                # if they are connected to the label boundary for that pair.
                # ------------------------------------------------------------
                if unique_labels:

                    for pair in fold_pairs_in_protocol:

                        # If one or both labels in label pair is/are unique:
                        unique_labels_in_pair = [x for x in pair if x in unique_labels]
                        n_unique = len(unique_labels_in_pair)
                        if n_unique:

                            ID = None
                            for i, pair_list in enumerate(pair_lists):
                                if not isinstance(pair_list, list):
                                    pair_list = [pair_list]
                                if pair in pair_list:
                                    ID = i
                                    break
                            if ID is not None:  # ID may be 0, so test None
                                # Seeds from label boundary vertices
                                # (fold_pairs and pair already sorted):
                                indices_pair = [x for i, x in enumerate(indices_fold_pairs) if fold_pairs[i] == pair]

                                # Vertices with unique label(s) in pair:
                                indices_unique_labels = [
                                    fold_indices[i] for i, x in enumerate(fold_labels) if x in unique_labels_in_pair
                                ]

                                # Propagate sulcus ID from seeds to vertices
                                # with "unique" labels (only exist in one
                                # label pair in a fold); propagation ensures
                                # that sulci consist of contiguous vertices
                                # for each label boundary:
                                sulci2 = segment_regions(
                                    indices_unique_labels,
                                    neighbor_lists,
                                    min_region_size=1,
                                    seed_lists=[indices_pair],
                                    keep_seeding=False,
                                    spread_within_labels=True,
                                    labels=labels,
                                    label_lists=[],
                                    values=[],
                                    max_steps="",
                                    background_value=background_value,
                                    verbose=False,
                                )

                                sulci[sulci2 != background_value] = ID

                                # Print statement:
                                if verbose:
                                    if n_unique == 1:
                                        ps1 = "One label"
                                    else:
                                        ps1 = "Both labels"
                                    if len(sulcus_names):
                                        ps2 = sulcus_names[ID]
                                    else:
                                        ps2 = ""
                                    print(
                                        "    {0} unique to one fold pair: "
                                        "{1} {2}".format(ps1, ps2, unique_labels_in_pair)
                                    )

                # ------------------------------------------------------------
                # Vertex labels shared by multiple label pairs
                # ------------------------------------------------------------
                # Propagate labels from label borders to vertices with labels
                # that are shared by multiple label pairs in the fold.
                # ------------------------------------------------------------
                if len(nonunique_labels):
                    # For each label shared by different label pairs:
                    for label in nonunique_labels:
                        # Print statement:
                        if verbose:
                            print("    Propagate sulcus borders with label {0}".format(int(label)))

                        # Construct seeds from label boundary vertices:
                        seeds = background_value * np.ones(npoints)

                        for ID, pair_list in enumerate(pair_lists):
                            if not isinstance(pair_list, list):
                                pair_list = [pair_list]
                            label_pairs = [x for x in pair_list if label in x]
                            for label_pair in label_pairs:
                                indices_pair = [
                                    x
                                    for i, x in enumerate(indices_fold_pairs)
                                    if np.sort(fold_pairs[i]).tolist() == label_pair
                                ]
                                if indices_pair:

                                    # Do not include short boundary segments:
                                    if min_boundary > 1:
                                        indices_pair2 = []
                                        seeds2 = segment_regions(
                                            indices_pair,
                                            neighbor_lists,
                                            1,
                                            [],
                                            False,
                                            False,
                                            [],
                                            [],
                                            [],
                                            "",
                                            background_value,
                                            verbose,
                                        )

                                        useeds2 = [x for x in np.unique(seeds2) if x != background_value]
                                        for seed2 in useeds2:
                                            iseed2 = [i for i, x in enumerate(seeds2) if x == seed2]
                                            if len(iseed2) >= min_boundary:
                                                indices_pair2.extend(iseed2)
                                            elif verbose:
                                                if len(iseed2) == 1:
                                                    print(
                                                        "    Remove "
                                                        "assignment "
                                                        "of ID {0} from "
                                                        "1 vertex".format(seed2)
                                                    )
                                                else:
                                                    print(
                                                        "    Remove "
                                                        "assignment "
                                                        "of ID {0} from "
                                                        "{1} vertices".format(seed2, len(iseed2))
                                                    )
                                        indices_pair = indices_pair2

                                    # Assign sulcus IDs to seeds:
                                    seeds[indices_pair] = ID

                        # Identify vertices with the label:
                        indices_label = [fold_indices[i] for i, x in enumerate(fold_labels) if x == label]
                        if len(indices_label):

                            # Propagate sulcus ID from seeds to vertices
                            # with a given shared label:
                            seg_vs_prop = False
                            if seg_vs_prop:
                                indices_seeds = []
                                for seed in [x for x in np.unique(seeds) if x != background_value]:
                                    indices_seeds.append([i for i, x in enumerate(seeds) if x == seed])

                                sulci2 = segment_regions(
                                    indices_label,
                                    neighbor_lists,
                                    50,
                                    indices_seeds,
                                    False,
                                    True,
                                    labels,
                                    [],
                                    [],
                                    "",
                                    background_value,
                                    verbose,
                                )
                            else:
                                label_array = background_value * np.ones(npoints)
                                label_array[indices_label] = 1
                                sulci2 = propagate(
                                    points,
                                    faces,
                                    label_array,
                                    seeds,
                                    sulci,
                                    max_iters=10000,
                                    tol=0.001,
                                    sigma=5,
                                    background_value=background_value,
                                    verbose=verbose,
                                )
                            sulci[sulci2 != background_value] = sulci2[sulci2 != background_value]

    sulcus_numbers = [int(x) for x in np.unique(sulci) if x != background_value]
    n_sulci = len(sulcus_numbers)

    # ------------------------------------------------------------------------
    # Print statements
    # ------------------------------------------------------------------------
    if verbose:
        if n_sulci == 1:
            sulcus_str = "sulcus"
        else:
            sulcus_str = "sulci"
        if n_folds == 1:
            folds_str = "fold"
        else:
            folds_str = "folds"
        print("Extracted {0} {1} from {2} {3} ({4:.1f}s):".format(n_sulci, sulcus_str, n_folds, folds_str, time() - t0))
        if sulcus_names:
            for sulcus_number in sulcus_numbers:
                print("  {0}: {1}".format(sulcus_number, sulcus_names[sulcus_number]))
        elif sulcus_numbers:
            print("  " + ", ".join([str(x) for x in sulcus_numbers]))

        unresolved = [i for i in range(len(pair_lists)) if i not in sulcus_numbers]
        if len(unresolved) == 1:
            print("The following sulcus is unaccounted for:")
        else:
            print("The following {0} sulci are unaccounted for:".format(len(unresolved)))
        if sulcus_names:
            for sulcus_number in unresolved:
                print("  {0}: {1}".format(sulcus_number, sulcus_names[sulcus_number]))
        else:
            print("  " + ", ".join([str(x) for x in unresolved]))

    # ------------------------------------------------------------------------
    # Return sulci, number of sulci, and file name
    # ------------------------------------------------------------------------
    sulci = [int(x) for x in sulci]

    sulci_file = os.path.join(os.getcwd(), "sulci.vtk")
    rewrite_scalars(labels_file, sulci_file, sulci, "sulci", [], background_value)

    if not os.path.exists(sulci_file):
        raise IOError(sulci_file + " not found")

    return sulci, n_sulci, sulci_file
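
The pair-matching logic above reduces to one test: a mesh edge lies on a
sulcus border when its two endpoint labels, sorted, form one of the
protocol's label pairs. A toy illustration (the pair list below is only an
example; 1022/1024 are FreeSurfer's left postcentral/precentral labels):

def edge_on_sulcus_border(label_a, label_b, sulcus_label_pairs):
    """True if the sorted endpoint labels form a protocol label pair."""
    return sorted([label_a, label_b]) in sulcus_label_pairs

# The central sulcus separates the precentral and postcentral gyri:
# edge_on_sulcus_border(1024, 1022, [[1022, 1024]]) -> True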
Example #12
def write_vertex_measures(output_table, labels_or_file, sulci=[], fundi=[],
        affine_transform_files=[], inverse_booleans=[],
        transform_format='itk',
        area_file='', mean_curvature_file='', travel_depth_file='',
        geodesic_depth_file='', freesurfer_thickness_file='',
        freesurfer_curvature_file='', freesurfer_sulc_file=''):
    """
    Make a table of shape values per vertex.

    Note ::
        This function is tailored for Mindboggle outputs.

    Parameters
    ----------
    output_table : string
        output file (full path)
    labels_or_file : list or string
        label number for each vertex or name of VTK file with index scalars
    sulci :  list of integers
        indices to sulci, one per vertex, with -1 indicating no sulcus
    fundi :  list of integers
        indices to fundi, one per vertex, with -1 indicating no fundus
    affine_transform_files : list of strings
        affine transform files to standard space
    inverse_booleans : list of zeros and ones
        for each transform, 1 to take the inverse, else 0
    transform_format : string
        format for transform file
        Ex: 'txt' for text, 'itk' for ITK, and 'mat' for Matlab format
    area_file :  string
        name of VTK file with surface area scalar values
    mean_curvature_file :  string
        name of VTK file with mean curvature scalar values
    travel_depth_file :  string
        name of VTK file with travel depth scalar values
    geodesic_depth_file :  string
        name of VTK file with geodesic depth scalar values
    freesurfer_thickness_file :  string
        name of VTK file with FreeSurfer thickness scalar values
    freesurfer_curvature_file :  string
        name of VTK file with FreeSurfer curvature (curv) scalar values
    freesurfer_sulc_file :  string
        name of VTK file with FreeSurfer convexity (sulc) scalar values

    Returns
    -------
    output_table : string
        table file name for vertex shape values

    Examples
    --------
    >>> import os
    >>> from mindboggle.mio.vtks import read_scalars
    >>> from mindboggle.mio.tables import write_vertex_measures
    >>> #
    >>> output_table = ''  # or e.g. 'vertex_shapes.csv'
    >>> path = os.environ['MINDBOGGLE_DATA']
    >>> labels_or_file = os.path.join(path, 'arno', 'labels', 'lh.labels.DKT25.manual.vtk')
    >>> sulci_file = os.path.join(path, 'arno', 'features', 'sulci.vtk')
    >>> fundi_file = os.path.join(path, 'arno', 'features', 'fundi.vtk')
    >>> sulci, name = read_scalars(sulci_file)
    >>> fundi, name = read_scalars(fundi_file)
    >>> affine_transform_files = [os.path.join(path, 'arno', 'mri',
    ...     't1weighted_brain.MNI152Affine.txt')]
    >>> inverse_booleans = [1]
    >>> transform_format = 'itk'
    >>> swap_xy = True
    >>> area_file = os.path.join(path, 'arno', 'shapes', 'lh.pial.area.vtk')
    >>> mean_curvature_file = os.path.join(path, 'arno', 'shapes', 'lh.pial.mean_curvature.vtk')
    >>> travel_depth_file = os.path.join(path, 'arno', 'shapes', 'lh.pial.travel_depth.vtk')
    >>> geodesic_depth_file = os.path.join(path, 'arno', 'shapes', 'lh.pial.geodesic_depth.vtk')
    >>> freesurfer_thickness_file = ''
    >>> freesurfer_curvature_file = ''
    >>> freesurfer_sulc_file = ''
    >>> #
    >>> write_vertex_measures(output_table, labels_or_file, sulci, fundi,
    ...     affine_transform_files, inverse_booleans, transform_format, area_file,
    ...     mean_curvature_file, travel_depth_file, geodesic_depth_file,
    ...     freesurfer_thickness_file, freesurfer_curvature_file, freesurfer_sulc_file)

    """
    import os
    import numpy as np
    import pandas as pd

    from mindboggle.mio.vtks import read_scalars, read_vtk, \
        apply_affine_transforms

    # Make sure inputs are lists:
    if isinstance(labels_or_file, np.ndarray):
        labels = [int(x) for x in labels_or_file]
    elif isinstance(labels_or_file, list):
        labels = labels_or_file
    elif isinstance(labels_or_file, str):
        labels, name = read_scalars(labels_or_file)
    if isinstance(sulci, np.ndarray):
        sulci = [int(x) for x in sulci]
    if isinstance(fundi, np.ndarray):
        fundi = [int(x) for x in fundi]

    if not labels and not sulci and not fundi:
        import sys
        sys.exit('No feature data to tabulate in write_vertex_measures().')

    # Feature names and corresponding feature lists:
    feature_names = ['label ID', 'sulcus ID', 'fundus ID']
    feature_lists = [labels, sulci, fundi]

    # Shape names corresponding to shape files below:
    shape_names = ['area', 'travel depth', 'geodesic depth',
                   'mean curvature', 'freesurfer curvature',
                   'freesurfer thickness', 'freesurfer convexity (sulc)']

    # Load shape files as a list of numpy arrays of per-vertex shape values:
    shape_files = [area_file, travel_depth_file, geodesic_depth_file,
                   mean_curvature_file, freesurfer_curvature_file,
                   freesurfer_thickness_file, freesurfer_sulc_file]

    # Append columns of per-vertex scalar values:
    columns = []
    column_names = []
    for ifeature, values in enumerate(feature_lists):
        if values:
            columns.append(values)
            column_names.append(feature_names[ifeature])

    first_pass = True
    for ishape, shape_file in enumerate(shape_files):
        if os.path.exists(shape_file):
            if first_pass:

                # Append x,y,z position per vertex to columns:
                points, indices, lines, faces, scalars, scalar_names, \
                    npoints, input_vtk = read_vtk(shape_file)
                xyz_positions = np.asarray(points)
                for ixyz, xyz in enumerate(['x','y','z']):
                    column_names.append('position: {0}'.format(xyz))
                    columns.append(xyz_positions[:, ixyz].tolist())
                first_pass = False

                # Append standard space x,y,z position to columns:
                if affine_transform_files and transform_format:
                    affine_points, \
                        foo1 = apply_affine_transforms(affine_transform_files,
                                    inverse_booleans, transform_format,
                                    points, vtk_file_stem='')
                    xyz_std_positions = affine_points
                    for ixyz, xyz in enumerate(['x','y','z']):
                        column_names.append('position in standard space:'
                                            ' {0}'.format(xyz))
                        columns.append(xyz_std_positions[:, ixyz].tolist())
            else:
                scalars, name = read_scalars(shape_file)
            if len(scalars):
                columns.append(scalars)
                column_names.append(shape_names[ishape])

    # Prepend with column of indices and write table
    if not output_table:
        output_table = os.path.join(os.getcwd(), 'vertices.csv')

    df = pd.DataFrame(np.transpose(columns), columns=column_names)
    df.to_csv(output_table, index=False)

    if not os.path.exists(output_table):
        raise IOError(output_table + " not found")

    return output_table
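
The table-assembly pattern at the end of write_vertex_measures -- parallel
per-vertex lists transposed into a pandas DataFrame and written as CSV --
in isolation, with made-up data:

import numpy as np
import pandas as pd

columns = [[1, 1, 2], [0.5, 0.7, 0.9]]   # e.g. label ID and area per vertex
column_names = ['label ID', 'area']
df = pd.DataFrame(np.transpose(columns), columns=column_names)
df.to_csv('vertices.csv', index=False)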
Example #13
def evaluate_deep_features(features_file, labels_file, sulci_file='', hemi='',
                           excludeIDs=[-1], output_vtk_name='', verbose=True):
    """
    Evaluate deep surface features by computing the minimum distance from each
    label border vertex to all of the feature vertices in the same sulcus,
    and from each feature vertex to all of the label border vertices in the
    same sulcus.  The label borders run along the deepest parts of sulci
    and correspond to fundi in the DKT cortical labeling protocol.

    Parameters
    ----------
    features_file : string
        VTK surface file with feature numbers for vertex scalars
    labels_file : string
        VTK surface file with label numbers for vertex scalars
    sulci_file : string
        VTK surface file with sulcus numbers for vertex scalars
    hemi : string
        hemisphere ('lh' or 'rh'), used to select the sulcus label pair lists
    excludeIDs : list of integers
        feature/sulcus/label IDs to exclude (background set to -1)
    output_vtk_name : string
        if not empty, output a VTK file beginning with output_vtk_name that
        contains a surface with mean distances as scalars
    verbose : Boolean
        print mean distances to standard output?

    Returns
    -------
    feature_to_border_mean_distances : numpy array [number of features x 1]
        mean distance from each feature to sulcus label border
    feature_to_border_sd_distances : numpy array [number of features x 1]
        standard deviations of feature-to-border distances
    feature_to_border_distances_vtk : string
        VTK surface file containing feature-to-border distances
    border_to_feature_mean_distances : numpy array [number of features x 1]
        mean distances from each sulcus label border to feature
    border_to_feature_sd_distances : numpy array [number of features x 1]
        standard deviations of border-to-feature distances
    border_to_feature_distances_vtk : string
        VTK surface file containing border-to-feature distances

    """
    import os
    import sys
    import numpy as np
    from mindboggle.mio.vtks import read_vtk, read_scalars, write_vtk
    from mindboggle.guts.mesh import find_neighbors, remove_faces
    from mindboggle.guts.segment import extract_borders
    from mindboggle.guts.compute import source_to_target_distances
    from mindboggle.mio.labels import DKTprotocol

    dkt = DKTprotocol()
    #-------------------------------------------------------------------------
    # Load labels, features, and sulci:
    #-------------------------------------------------------------------------
    faces, lines, indices, points, npoints, labels, scalar_names, \
        input_vtk = read_vtk(labels_file, True, True)
    features, name = read_scalars(features_file, True, True)
    if sulci_file:
        sulci, name = read_scalars(sulci_file, True, True)
        # List of indices to sulcus vertices:
        sulcus_indices = [i for i,x in enumerate(sulci) if x != -1]
        segmentIDs = sulci
        sulcus_faces = remove_faces(faces, sulcus_indices)
    else:
        sulcus_indices = range(len(labels))
        segmentIDs = []
        sulcus_faces = faces

    #-------------------------------------------------------------------------
    # Prepare neighbors, label pairs, border IDs, and outputs:
    #-------------------------------------------------------------------------
    # Calculate neighbor lists for all points:
    print('Find neighbors for all vertices...')
    neighbor_lists = find_neighbors(faces, npoints)

    # Find label border points in any of the sulci:
    print('Find label border points in any of the sulci...')
    border_indices, border_label_tuples, unique_border_label_tuples = \
        extract_borders(sulcus_indices, labels, neighbor_lists,
                        ignore_values=[], return_label_pairs=True)
    if not len(border_indices):
        sys.exit('There are no label border points!')

    # Initialize an array of label border IDs
    # (label border vertices that define sulci in the labeling protocol):
    print('Build an array of label border IDs...')
    label_borders = -1 * np.ones(npoints)

    if hemi == 'lh':
        nsulcus_lists = len(dkt.left_sulcus_label_pair_lists)
    else:
        nsulcus_lists = len(dkt.right_sulcus_label_pair_lists)
    feature_to_border_mean_distances = -1 * np.ones(nsulcus_lists)
    feature_to_border_sd_distances = -1 * np.ones(nsulcus_lists)
    border_to_feature_mean_distances = -1 * np.ones(nsulcus_lists)
    border_to_feature_sd_distances = -1 * np.ones(nsulcus_lists)
    feature_to_border_distances_vtk = ''
    border_to_feature_distances_vtk = ''

    #-------------------------------------------------------------------------
    # Loop through sulci:
    #-------------------------------------------------------------------------
    # For each list of sorted label pairs (corresponding to a sulcus):
    for isulcus, label_pairs in enumerate(dkt.sulcus_label_pair_lists):

        # Keep the border points with label pair labels:
        label_pair_border_indices = [x for i,x in enumerate(border_indices)
                          if np.unique(border_label_tuples[i]).tolist()
                          in label_pairs]

        # Store the points as sulcus IDs in the border IDs array:
        if label_pair_border_indices:
            label_borders[label_pair_border_indices] = isulcus

    if len(np.unique(label_borders)) > 1:

        #---------------------------------------------------------------------
        # Construct a feature-to-border distance matrix and VTK file:
        #---------------------------------------------------------------------
        # Construct a distance matrix:
        print('Construct a feature-to-border distance matrix...')
        sourceIDs = features
        targetIDs = label_borders
        distances, distance_matrix = source_to_target_distances(
            sourceIDs, targetIDs, points, segmentIDs, excludeIDs)

        # Compute mean distances for each feature:
        nfeatures = min(np.shape(distance_matrix)[1], nsulcus_lists)
        for ifeature in range(nfeatures):
            feature_distances = [x for x in distance_matrix[:, ifeature]
                                 if x != -1]
            feature_to_border_mean_distances[ifeature] = \
                np.mean(feature_distances)
            feature_to_border_sd_distances[ifeature] = \
                np.std(feature_distances)

        if verbose:
            print('Feature-to-border mean distances:')
            print(feature_to_border_mean_distances)
            print('Feature-to-border standard deviations of distances:')
            print(feature_to_border_sd_distances)

        # Write resulting feature-label border distances to VTK file:
        if output_vtk_name:
            feature_to_border_distances_vtk = os.path.join(os.getcwd(),
                output_vtk_name + '_feature_to_border_mean_distances.vtk')
            print('Write feature-to-border distances to {0}...'.
                  format(feature_to_border_distances_vtk))
            write_vtk(feature_to_border_distances_vtk, points,
                      [], [], sulcus_faces, [distances],
                      ['feature-to-border_distances'], 'float')

        #---------------------------------------------------------------------
        # Construct a border-to-feature distance matrix and VTK file:
        #---------------------------------------------------------------------
        # Construct a distance matrix:
        print('Construct a border-to-feature distance matrix...')
        sourceIDs = label_borders
        targetIDs = features
        distances, distance_matrix = source_to_target_distances(
            sourceIDs, targetIDs, points, segmentIDs, excludeIDs)

        # Compute mean distances for each feature:
        nfeatures = min(np.shape(distance_matrix)[1], nsulcus_lists)
        for ifeature in range(nfeatures):
            border_distances = [x for x in distance_matrix[:, ifeature]
                                if x != -1]
            border_to_feature_mean_distances[ifeature] = \
                np.mean(border_distances)
            border_to_feature_sd_distances[ifeature] = \
                np.std(border_distances)

        if verbose:
            print('border-to-feature mean distances:')
            print(border_to_feature_mean_distances)
            print('border-to-feature standard deviations of distances:')
            print(border_to_feature_sd_distances)

        # Write resulting feature-label border distances to VTK file:
        if output_vtk_name:
            border_to_feature_distances_vtk = os.path.join(os.getcwd(),
                output_vtk_name + '_border_to_feature_mean_distances.vtk')
            print('Write border-to-feature distances to {0}...'.
                  format(border_to_feature_distances_vtk))
            write_vtk(border_to_feature_distances_vtk, points,
                      [], [], sulcus_faces, [distances],
                      ['border-to-feature_distances'], 'float')

    #-------------------------------------------------------------------------
    # Return outputs:
    #-------------------------------------------------------------------------
    return feature_to_border_mean_distances, feature_to_border_sd_distances,\
           feature_to_border_distances_vtk,\
           border_to_feature_mean_distances, border_to_feature_sd_distances,\
           border_to_feature_distances_vtk
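
# Usage note: a minimal standalone sketch (toy data, not the Mindboggle API)
# of the summary step above, where a distance matrix marks unfilled entries
# with -1 and each column is reduced to a mean and standard deviation.
import numpy as np

distance_matrix = np.array([[0.5, -1.0],
                            [1.5, 2.0],
                            [-1.0, 4.0]])
for ifeature in range(distance_matrix.shape[1]):
    feature_distances = [x for x in distance_matrix[:, ifeature] if x != -1]
    print(ifeature, np.mean(feature_distances), np.std(feature_distances))
# prints: 0 1.0 0.5
#         1 3.0 1.0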
Example #14
def write_shape_stats(labels_or_file=[],
                      sulci=[],
                      fundi=[],
                      affine_transform_files=[],
                      inverse_booleans=[],
                      transform_format='itk',
                      area_file='',
                      normalize_by_area=False,
                      mean_curvature_file='',
                      travel_depth_file='',
                      geodesic_depth_file='',
                      freesurfer_thickness_file='',
                      freesurfer_curvature_file='',
                      freesurfer_sulc_file='',
                      labels_spectra=[],
                      labels_spectra_IDs=[],
                      sulci_spectra=[],
                      sulci_spectra_IDs=[],
                      labels_zernike=[],
                      labels_zernike_IDs=[],
                      sulci_zernike=[],
                      sulci_zernike_IDs=[],
                      exclude_labels=[-1],
                      verbose=False):
    """
    Make tables of shape statistics per label, sulcus, and/or fundus.

    A single feature such as a gyrus, sulcus, or fundus can contain thousands
    of vertices, so for per-vertex shape measures it makes sense to
    characterize a feature's collective shape as a distribution of values.
    Mindboggle's stats_per_label function generates tables of summary
    statistics for these distributions, alongside the shape measures
    computed on whole cortical features.

    Note ::
        This function is tailored for Mindboggle outputs.

    Parameters
    ----------
    labels_or_file : list or string
        label number for each vertex or name of VTK file with index scalars
    sulci : list of integers
        indices to sulci, one per vertex, with -1 indicating no sulcus
    fundi : list of integers
        indices to fundi, one per vertex, with -1 indicating no fundus
    affine_transform_files : list of strings
        affine transform files to standard space
    inverse_booleans : list of zeros and ones
        for each transform, 1 to take the inverse, else 0
    transform_format : string
        format for transform file
        Ex: 'txt' for text, 'itk' for ITK, and 'mat' for Matlab format
    area_file :  string
        name of VTK file with surface area scalar values
    normalize_by_area : bool
        normalize all shape measures by area of label/feature? (UNTESTED)
    mean_curvature_file :  string
        name of VTK file with mean curvature scalar values
    travel_depth_file :  string
        name of VTK file with travel depth scalar values
    geodesic_depth_file :  string
        name of VTK file with geodesic depth scalar values
    freesurfer_thickness_file :  string
        name of VTK file with FreeSurfer thickness scalar values
    freesurfer_curvature_file :  string
        name of VTK file with FreeSurfer curvature (curv) scalar values
    freesurfer_sulc_file :  string
        name of VTK file with FreeSurfer convexity (sulc) scalar values
    labels_spectra : list of lists of floats
        Laplace-Beltrami spectra for each labeled region
    labels_spectra_IDs : list of integers
        unique labels for labels_spectra
    sulci_spectra : list of lists of floats
        Laplace-Beltrami spectra for each sulcus
    sulci_spectra_IDs : list of integers
        unique sulcus IDs for sulci_spectra
    labels_zernike : list of lists of floats
        Zernike moments for each labeled region
    labels_zernike_IDs : list of integers
        unique labels for labels_zernike
    sulci_zernike : list of lists of floats
        Zernike moments for each sulcus
    sulci_zernike_IDs : list of integers
        unique sulcus IDs for sulci_zernike
    exclude_labels : list of integers
        indices to be excluded (in addition to -1)
    verbose : bool
        print statements?

    Returns
    -------
    label_table :  string
        output table filename for label shapes
    sulcus_table :  string
        output table filename for sulcus shapes
    fundus_table :  string
        output table filename for fundus shapes

    Examples
    --------
    >>> from mindboggle.mio.tables import write_shape_stats
    >>> from mindboggle.mio.vtks import read_scalars
    >>> from mindboggle.mio.fetch_data import prep_tests
    >>> urls, fetch_data = prep_tests()
    >>> label_file = fetch_data(urls['left_freesurfer_labels'], '', '.vtk')
    >>> sulci_file = fetch_data(urls['left_sulci'], '', '.vtk')
    >>> fundi_file = fetch_data(urls['left_fundus_per_sulcus'], '', '.vtk')
    >>> mean_curvature_file = fetch_data(urls['left_mean_curvature'], '', '.vtk')
    >>> travel_depth_file = fetch_data(urls['left_travel_depth'], '', '.vtk')
    >>> geodesic_depth_file = fetch_data(urls['left_geodesic_depth'], '', '.vtk')
    >>> area_file = fetch_data(urls['left_area'], '', '.vtk')
    >>> freesurfer_thickness_file = ''
    >>> freesurfer_curvature_file = ''
    >>> freesurfer_sulc_file = ''
    >>> sulci, name = read_scalars(sulci_file)
    >>> fundi, name = read_scalars(fundi_file)
    >>> affine_transform_files = []
    >>> inverse_booleans = []
    >>> transform_format = 'itk'
    >>> normalize_by_area = False
    >>> labels, name = read_scalars(label_file)
    >>> labels_spectra = []
    >>> labels_spectra_IDs = []
    >>> sulci_spectra = []
    >>> sulci_spectra_IDs = []
    >>> labels_zernike = []
    >>> labels_zernike_IDs = []
    >>> sulci_zernike = []
    >>> sulci_zernike_IDs = []
    >>> exclude_labels = [-1]
    >>> verbose = False
    >>> label_table, sulcus_table, fundus_table = write_shape_stats(label_file,
    ...     sulci, fundi, affine_transform_files, inverse_booleans,
    ...     transform_format, area_file, normalize_by_area,
    ...     mean_curvature_file, travel_depth_file, geodesic_depth_file,
    ...     freesurfer_thickness_file, freesurfer_curvature_file,
    ...     freesurfer_sulc_file, labels_spectra, labels_spectra_IDs,
    ...     sulci_spectra, sulci_spectra_IDs, labels_zernike,
    ...     labels_zernike_IDs, sulci_zernike, sulci_zernike_IDs,
    ...     exclude_labels, verbose)

    """
    import os
    import numpy as np
    import pandas as pd

    from mindboggle.guts.compute import stats_per_label
    from mindboggle.guts.compute import means_per_label
    from mindboggle.guts.compute import sum_per_label
    from mindboggle.mio.vtks import read_scalars, read_vtk
    from mindboggle.mio.vtks import apply_affine_transforms
    from mindboggle.mio.labels import DKTprotocol

    dkt = DKTprotocol()

    # Make sure inputs are lists:
    if isinstance(labels_or_file, np.ndarray):
        labels = [int(x) for x in labels_or_file]
    elif isinstance(labels_or_file, list):
        labels = labels_or_file
    elif isinstance(labels_or_file, str):
        labels, name = read_scalars(labels_or_file)
    if isinstance(sulci, np.ndarray):
        sulci = [int(x) for x in sulci]
    if isinstance(fundi, np.ndarray):
        fundi = [int(x) for x in fundi]

    if not labels and not sulci and not fundi:
        raise IOError('No feature data to tabulate in write_shape_stats().')

    # Store all columns of spectral components (0),
    # or start from higher-frequency components (>=1):
    spectrum_start = 1

    # ------------------------------------------------------------------------
    # Feature lists, shape names, and shape files:
    # ------------------------------------------------------------------------
    # Feature lists:
    feature_lists = [labels, sulci, fundi]
    feature_names = ['label', 'sulcus', 'fundus']
    spectra_lists = [labels_spectra, sulci_spectra]
    spectra_ID_lists = [labels_spectra_IDs, sulci_spectra_IDs]
    zernike_lists = [labels_zernike, sulci_zernike]
    zernike_ID_lists = [labels_zernike_IDs, sulci_zernike_IDs]
    table_names = [
        'label_shapes.csv', 'sulcus_shapes.csv', 'fundus_shapes.csv'
    ]

    # Shape names corresponding to shape files below:
    shape_names = [
        'area', 'travel depth', 'geodesic depth', 'mean curvature',
        'freesurfer curvature', 'freesurfer thickness',
        'freesurfer convexity (sulc)'
    ]

    # Load shape files as a list of numpy arrays of per-vertex shape values:
    shape_files = [
        area_file, travel_depth_file, geodesic_depth_file, mean_curvature_file,
        freesurfer_curvature_file, freesurfer_thickness_file,
        freesurfer_sulc_file
    ]
    shape_arrays = []
    first_pass = True
    area_array = []

    for ishape, shape_file in enumerate(shape_files):
        if os.path.exists(shape_file):
            if first_pass:
                points, indices, lines, faces, scalars_array, scalar_names, \
                    npoints, input_vtk = read_vtk(shape_file, True, True)
                points = np.array(points)
                first_pass = False
                if affine_transform_files and transform_format:
                    affine_points, \
                        foo1 = apply_affine_transforms(affine_transform_files,
                                    inverse_booleans, transform_format,
                                    points, vtk_file_stem='')
            else:
                scalars_array, name = read_scalars(shape_file, True, True)
            if scalars_array.size:
                shape_arrays.append(scalars_array)

                # Store area array:
                if ishape == 0:
                    area_array = scalars_array.copy()

    if normalize_by_area:
        use_area = area_array
    else:
        use_area = []

    # Initialize table file names:
    label_table = ''
    sulcus_table = ''
    fundus_table = ''

    # Loop through features / tables:
    for itable, feature_list in enumerate(feature_lists):
        column_names = []

        # ----------------------------------------------------------------
        # Label names:
        # ----------------------------------------------------------------
        label_title = 'name'
        if itable == 0:
            label_numbers = dkt.cerebrum_cortex_DKT31_numbers
            label_names = dkt.cerebrum_cortex_DKT31_names
        elif itable in [1, 2]:
            label_numbers = dkt.sulcus_numbers
            label_names = dkt.sulcus_names
        else:
            label_numbers = []
            label_names = []
        include_labels = label_numbers
        nlabels = len(label_numbers)

        # --------------------------------------------------------------------
        # For each feature, construct a table of average shape values:
        # --------------------------------------------------------------------
        if feature_list:
            feature_name = feature_names[itable]
            columns = []

            # ----------------------------------------------------------------
            # Loop through shape measures:
            # ----------------------------------------------------------------
            for ishape, shape_array in enumerate(shape_arrays):
                shape = shape_names[ishape]
                if verbose:
                    print('  Compute statistics on {0} {1}...'.format(
                        feature_name, shape))
                # ------------------------------------------------------------
                # Append feature areas to columns:
                # ------------------------------------------------------------
                if ishape == 0 and np.size(area_array):
                    sums, label_list = sum_per_label(shape_array, feature_list,
                                                     include_labels,
                                                     exclude_labels)
                    column_names.append(shape)
                    columns.append(sums)
                # ------------------------------------------------------------
                # Append feature shape statistics to columns:
                # ------------------------------------------------------------
                else:
                    medians, mads, means, sdevs, skews, kurts, \
                    lower_quarts, upper_quarts, \
                    label_list = stats_per_label(shape_array, feature_list,
                                        include_labels, exclude_labels,
                                        area_array, precision=1)

                    column_names.append(shape + ': median')
                    column_names.append(shape + ': MAD')
                    column_names.append(shape + ': mean')
                    column_names.append(shape + ': SD')
                    column_names.append(shape + ': skew')
                    column_names.append(shape + ': kurtosis')
                    column_names.append(shape + ': 25%')
                    column_names.append(shape + ': 75%')
                    columns.append(medians)
                    columns.append(mads)
                    columns.append(means)
                    columns.append(sdevs)
                    columns.append(skews)
                    columns.append(kurts)
                    columns.append(lower_quarts)
                    columns.append(upper_quarts)

            # ----------------------------------------------------------------
            # Mean positions in the original space:
            # ----------------------------------------------------------------
            # Compute mean position per feature:
            positions, sdevs, label_list, foo = means_per_label(
                points, feature_list, include_labels, exclude_labels, use_area)

            # Append mean x,y,z position per feature to columns:
            xyz_positions = np.asarray(positions)
            for ixyz, xyz in enumerate(['x', 'y', 'z']):
                column_names.append('mean position: {0}'.format(xyz))
                columns.append(xyz_positions[:, ixyz].tolist())

            # ----------------------------------------------------------------
            # Mean positions in standard space:
            # ----------------------------------------------------------------
            if affine_transform_files and transform_format:
                # Compute standard space mean position per feature:
                standard_positions, sdevs, label_list, \
                foo = means_per_label(affine_points,
                    feature_list, include_labels, exclude_labels, use_area)

                # Append standard space x,y,z position per feature to columns:
                xyz_std_positions = np.asarray(standard_positions)
                for ixyz, xyz in enumerate(['x', 'y', 'z']):
                    column_names.append('mean position in standard space:'
                                        ' {0}'.format(xyz))
                    columns.append(xyz_std_positions[:, ixyz].tolist())

            # ----------------------------------------------------------------
            # Laplace-Beltrami spectra:
            # ----------------------------------------------------------------
            if itable in [0, 1]:
                spectra = spectra_lists[itable]
                if spectra:
                    spectra_IDs = spectra_ID_lists[itable]

                    # Construct a matrix of spectra:
                    len_spectrum = len(spectra[0])
                    spectrum_matrix = np.zeros((nlabels, len_spectrum))
                    for ilabel, label in enumerate(include_labels):
                        if label in spectra_IDs:
                            spectrum = spectra[spectra_IDs.index(label)]
                            spectrum_matrix[ilabel, 0:len_spectrum] = spectrum

                    # Append spectral shape name and values to columns:
                    for ispec in range(spectrum_start, len_spectrum):
                        columns.append(spectrum_matrix[:, ispec].tolist())
                        column_names.append('Laplace-Beltrami spectrum:'
                                            ' component {0}'.format(ispec + 1))

            # ----------------------------------------------------------------
            # Zernike moments:
            # ----------------------------------------------------------------
            if itable in [0, 1]:
                zernike = zernike_lists[itable]
                if zernike:
                    zernike_IDs = zernike_ID_lists[itable]

                    # Construct a matrix of Zernike moments:
                    len_moments = len(zernike[0])
                    moments_matrix = np.zeros((nlabels, len_moments))
                    for ilabel, label in enumerate(include_labels):
                        if label in zernike_IDs:
                            moments = zernike[zernike_IDs.index(label)]
                            moments_matrix[ilabel, 0:len_moments] = moments

                    # Append Zernike shape name and values to columns:
                    for imoment in range(0, len_moments):
                        columns.append(moments_matrix[:, imoment].tolist())
                        column_names.append(
                            'Zernike moments: component {0}'.format(imoment +
                                                                    1))

            # ----------------------------------------------------------------
            # Write labels/IDs and values to table:
            # ----------------------------------------------------------------
            # Write labels/IDs to table:
            output_table = os.path.join(os.getcwd(), table_names[itable])

            if columns:
                df1 = pd.DataFrame({'ID': label_numbers})
                df2 = pd.DataFrame(np.transpose(columns), columns=column_names)
                df = pd.concat([df1, df2], axis=1)
                if label_names:
                    df0 = pd.DataFrame({'name': label_names})
                    df = pd.concat([df0, df], axis=1)
                df.to_csv(output_table, index=False, encoding='utf-8')

            if not os.path.exists(output_table):
                raise IOError(output_table + " not found")

            # ----------------------------------------------------------------
            # Return correct table file name:
            # ----------------------------------------------------------------
            if itable == 0:
                label_table = output_table
            elif itable == 1:
                sulcus_table = output_table
            elif itable == 2:
                fundus_table = output_table

    return label_table, sulcus_table, fundus_table
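
# Usage note: a minimal sketch of the table-assembly step above, with
# hypothetical label numbers, names, and shape columns (not Mindboggle data):
import numpy as np
import pandas as pd

label_numbers = [1002, 1003]
label_names = ['label A', 'label B']
column_names = ['area', 'travel depth: median']
columns = [[10.2, 11.5], [3.1, 2.8]]  # one list of per-label values per column

df1 = pd.DataFrame({'ID': label_numbers})
df2 = pd.DataFrame(np.transpose(columns), columns=column_names)
df = pd.concat([pd.DataFrame({'name': label_names}), df1, df2], axis=1)
df.to_csv('label_shapes.csv', index=False, encoding='utf-8')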
Example #15
def spectrum_per_label(vtk_file,
                       spectrum_size=10,
                       exclude_labels=[-1],
                       normalization='areaindex',
                       area_file='',
                       largest_segment=True,
                       verbose=False):
    """
    Compute Laplace-Beltrami spectrum per labeled region in a file.

    Parameters
    ----------
    vtk_file : string
        name of VTK surface mesh file containing index scalars (labels)
    spectrum_size : integer
        number of eigenvalues to be computed (the length of the spectrum)
    exclude_labels : list of integers
        labels to be excluded
    normalization : string
        the method used to normalize eigenvalues
        if None, no normalization is used
        if "area", use area of the 2D structure as in Reuter et al. 2006
        if "index", divide eigenvalue by index to account for linear trend
        if "areaindex", do both (default)
    area_file :  string (optional)
        name of VTK file with surface area scalar values
    largest_segment :  bool
        compute spectrum only for largest segment with a given label?
    verbose : bool
        print statements?

    Returns
    -------
    spectrum_lists : list of lists
        first eigenvalues for each label's Laplace-Beltrami spectrum
    label_list : list of integers
        list of unique labels for which spectra are obtained

    Examples
    --------
    >>> # Uncomment "if label==22:" below to run example:
    >>> # Spectrum for Twins-2-1 left postcentral (22) pial surface:
    >>> import numpy as np
    >>> from mindboggle.shapes.laplace_beltrami import spectrum_per_label
    >>> from mindboggle.mio.fetch_data import prep_tests
    >>> urls, fetch_data = prep_tests()
    >>> vtk_file = fetch_data(urls['left_freesurfer_labels'], '', '.vtk')
    >>> area_file = fetch_data(urls['left_area'], '', '.vtk')
    >>> spectrum_size = 6
    >>> exclude_labels = [0]  #[-1]
    >>> largest_segment = True
    >>> verbose = False
    >>> spectrum_lists, label_list = spectrum_per_label(vtk_file,
    ...     spectrum_size, exclude_labels, None, area_file, largest_segment,
    ...     verbose)
    >>> [float("{0:.{1}f}".format(x, 5)) for x in spectrum_lists[0]]
    [0.0, 0.00054, 0.00244, 0.00291, 0.00456, 0.00575]
    >>> label_list[0:10]
    [1029, 1005, 1011, 1021, 1008, 1025, 999, 1013, 1007, 1022]
    """
    from mindboggle.mio.vtks import read_vtk, read_scalars
    from mindboggle.guts.mesh import keep_faces, reindex_faces_points
    from mindboggle.shapes.laplace_beltrami import fem_laplacian,\
        spectrum_of_largest

    # Read VTK surface mesh file:
    points, indices, lines, faces, labels, scalar_names, npoints, \
        input_vtk = read_vtk(vtk_file)

    # Area file:
    if area_file:
        areas, u1 = read_scalars(area_file)
    else:
        areas = None

    # Loop through labeled regions:
    ulabels = []
    for x in labels:
        if x not in ulabels and x not in exclude_labels:
            ulabels.append(int(x))
    label_list = []
    spectrum_lists = []
    for label in ulabels:
        #if label == 22:
        #  print("DEBUG: COMPUTE FOR ONLY ONE LABEL")

        # Determine the indices per label:
        Ilabel = [i for i, x in enumerate(labels) if x == label]
        if verbose:
            print('{0} vertices for label {1}'.format(len(Ilabel), label))

        # Remove background faces:
        pick_faces = keep_faces(faces, Ilabel)
        pick_faces, pick_points, o1 = reindex_faces_points(pick_faces, points)

        # Compute Laplace-Beltrami spectrum for the label:
        if largest_segment:
            exclude_labels_inner = [-1]
            spectrum = spectrum_of_largest(pick_points, pick_faces,
                                           spectrum_size, exclude_labels_inner,
                                           normalization, areas, verbose)
        else:
            spectrum = fem_laplacian(pick_points, pick_faces, spectrum_size,
                                     normalization, verbose)

        # Append to a list of lists of spectra:
        spectrum_lists.append(spectrum)
        label_list.append(label)

    return spectrum_lists, label_list
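
# Usage note: a minimal sketch of the spectrum call above on a toy mesh
# (one tetrahedron), assuming mindboggle is installed; with
# largest_segment=False the per-label computation reduces to fem_laplacian():
from mindboggle.shapes.laplace_beltrami import fem_laplacian

points = [[0, 0, 0], [1, 0, 0], [0, 1, 0], [0, 0, 1]]
faces = [[0, 1, 2], [0, 1, 3], [0, 2, 3], [1, 2, 3]]
spectrum = fem_laplacian(points, faces, spectrum_size=3)  # first 3 eigenvalues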
Example #16
def realign_boundaries_to_fundus_lines(surf_file,
                                       init_label_file,
                                       fundus_lines_file,
                                       thickness_file,
                                       out_label_file=None):
    """
    Fix label boundaries to fundus lines.

    Parameters
    ----------
    surf_file : file containing the surface geometry in vtk format
    init_label_file : file containing scalars that represent the
                      initial guess at labels
    fundus_lines_file : file containing scalars representing fundus lines.
    thickness_file : file containing cortical thickness scalar data
                     (for masking out the medial wall only)
    out_label_file : if specified, the realigned labels will be written to
                     this file

    Returns
    -------
    numpy array representing the realigned label for each surface vertex.
    """

    import numpy as np
    from mindboggle.guts.segment import extract_borders
    import mindboggle.guts.graph as go
    from mindboggle.mio.vtks import read_vtk, read_scalars, write_vtk
    from mindboggle.guts.mesh import find_neighbors
    import propagate_fundus_lines

    ## read files
    points, indices, lines, faces, scalars, scalar_names, num_points, \
        input_vtk = read_vtk(surf_file, return_first=True, return_array=True)
    indices = range(num_points)

    init_labels, _ = read_scalars(init_label_file,
                                  return_first=True,
                                  return_array=True)

    fundus_lines, _ = read_scalars(fundus_lines_file,
                                   return_first=True,
                                   return_array=True)

    thickness, _ = read_scalars(thickness_file,
                                return_first=True,
                                return_array=True)

    # remove labels from vertices with zero thickness (get around
    # DKT40 annotations having the label '3' for all the Corpus
    # Callosum vertices).
    cc_inds = [x for x in indices if thickness[x] < 0.001]
    init_labels[cc_inds] = 0

    ## setup seeds from initial label boundaries
    neighbor_lists = find_neighbors(faces, num_points)

    # extract all vertices that are on a boundary between labels
    boundary_indices, label_pairs, _ = extract_borders(indices,
                                                       init_labels,
                                                       neighbor_lists,
                                                       return_label_pairs=True)

    # split boundary vertices into segments with common boundary pairs.
    boundary_segments = {}
    for boundary_index, label_pair in zip(boundary_indices, label_pairs):
        key = ((label_pair[0],
                label_pair[1]) if label_pair[0] < label_pair[1] else
               (label_pair[1], label_pair[0]))
        if key not in boundary_segments:
            boundary_segments[key] = []

        boundary_segments[key].append(boundary_index)

    boundary_matrix, boundary_matrix_keys = _build_boundary_matrix(
        boundary_segments, num_points)

    # build the affinity matrix
    affinity_matrix = go.weight_graph(np.array(points),
                                      indices,
                                      np.array(faces),
                                      sigma=10,
                                      add_to_graph=False)

    ## propagate boundaries to fundus line vertices
    learned_matrix = _propagate_labels(affinity_matrix, boundary_matrix,
                                       boundary_indices, 100, 1)

    # assign labels to fundus line vertices based on highest probability
    new_boundaries = -1 * np.ones(init_labels.shape)
    fundus_line_indices = [i for i, x in enumerate(fundus_lines) if x > 0.5]

    # tile the surface into connected components delimited by fundus lines
    closed_fundus_lines, _, _ = propagate_fundus_lines.propagate_fundus_lines(
        points, faces, fundus_line_indices, thickness)

    closed_fundus_line_indices = np.where(closed_fundus_lines > 0)[0]

    # split surface into connected components
    connected_component_faces = _remove_boundary_faces(
        points, faces, closed_fundus_line_indices)

    # label components based on most probable label assignment
    new_labels = _label_components(connected_component_faces, num_points,
                                   boundary_indices, learned_matrix,
                                   boundary_matrix_keys)

    # propagate new labels to fill holes
    label_matrix, label_map = _build_label_matrix(new_labels)
    new_learned_matrix = _propagate_labels(
        affinity_matrix, label_matrix,
        [i for i in range(num_points) if new_labels[i] >= 0], 100, 1)

    # assign most probable labels
    for idx in [i for i in range(num_points) if new_labels[i] == -1]:
        max_idx = np.argmax(new_learned_matrix[idx])
        new_labels[idx] = label_map[max_idx]

    # save
    if out_label_file is not None:
        write_vtk(out_label_file,
                  points,
                  faces=faces,
                  scalars=[int(x) for x in new_labels],
                  scalar_type='int')

    return new_labels
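
# A minimal standalone sketch of the label-propagation idea used above (toy
# inputs, not Mindboggle's _propagate_labels): iterate Y <- W.Y with the seed
# rows clamped, then take the most probable label per vertex.
import numpy as np

W = np.array([[0., 1., 0.],
              [1., 0., 1.],
              [0., 1., 0.]])           # toy vertex-to-vertex affinities
W = W / W.sum(axis=1, keepdims=True)   # row-normalize
Y0 = np.array([[1., 0.],
               [0., 0.],
               [0., 1.]])              # seed probabilities for vertices 0, 2
Y = Y0.copy()
for _ in range(100):
    Y = W.dot(Y)
    Y[[0, 2]] = Y0[[0, 2]]             # clamp the seeds each iteration
print(Y.argmax(axis=1))                # most probable label per vertex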
Example #17
def zernike_moments_per_label(vtk_file, order=10, exclude_labels=[-1],
                              scale_input=True,
                              decimate_fraction=0, decimate_smooth=25):
    """
    Compute the Zernike moments per labeled region in a file.

    Optionally decimate the input mesh.

    Parameters
    ----------
    vtk_file : string
        name of VTK surface mesh file containing index scalars (labels)
    order : integer
        number of moments to compute
    exclude_labels : list of integers
        labels to be excluded
    scale_input : Boolean
        translate and scale each object so it is bounded by a unit sphere?
        (this is the expected input to zernike_moments())
    decimate_fraction : float
        fraction of mesh faces to remove for decimation (1 for no decimation)
    decimate_smooth : integer
        number of smoothing steps for decimation

    Returns
    -------
    descriptors_lists : list of lists of floats
        Zernike descriptors per label
    label_list : list of integers
        list of unique labels for which moments are computed

    Examples
    --------
    >>> # Uncomment "if label==22:" below to run example:
    >>> # Twins-2-1 left postcentral (22) pial surface:
    >>> import os
    >>> from mindboggle.shapes.zernike.zernike import zernike_moments_per_label
    >>> path = os.path.join(os.environ['HOME'], 'mindboggled', 'OASIS-TRT-20-1')
    >>> vtk_file = os.path.join(path, 'labels', 'left_surface', 'relabeled_classifier.vtk')
    >>> order = 3
    >>> exclude_labels = [-1, 0]
    >>> scale_input = True
    >>> zernike_moments_per_label(vtk_file, order, exclude_labels, scale_input)
    ([[0.00528486237819844,
       0.009571754617699853,
       0.0033489494903015944,
       0.00875603468268444,
       0.0015879536633349918,
       0.0008080165707033097]],
     [22])

    A second example result, for a region with label value 14, alongside
    Arthur Mikhno's result (scaled by 1.0e+07) for comparison:

    ([[0.0018758013185778298,
       0.001757973693050823,
       0.002352403177686726,
       0.0032281044369938286,
       0.002215900343702539,
       0.0019646380916703856]],
     [14.0])

    Arthur Mikhno's result:
    1.0e+07 *
    0.0000
    0.0179
    0.0008
    4.2547
    0.0534
    4.4043

    """
    import numpy as np
    from mindboggle.mio.vtks import read_vtk
    from mindboggle.guts.mesh import remove_faces
    from mindboggle.shapes.zernike.zernike import zernike_moments

    min_points_faces = 4

    #-------------------------------------------------------------------------
    # Read VTK surface mesh file:
    #-------------------------------------------------------------------------
    points, indices, lines, faces, labels, scalar_names, npoints, \
        input_vtk = read_vtk(vtk_file)

    #-------------------------------------------------------------------------
    # Loop through labeled regions:
    #-------------------------------------------------------------------------
    ulabels = [x for x in np.unique(labels) if x not in exclude_labels]
    label_list = []
    descriptors_lists = []
    for label in ulabels:
        #if label == 1022:  # 22:
        #    print("DEBUG: COMPUTE FOR ONLY ONE LABEL")

        #---------------------------------------------------------------------
        # Determine the indices per label:
        #---------------------------------------------------------------------
        Ilabel = [i for i,x in enumerate(labels) if x == label]
        print('  {0} vertices for label {1}'.format(len(Ilabel), label))
        if len(Ilabel) > min_points_faces:

            #-----------------------------------------------------------------
            # Remove background faces:
            #-----------------------------------------------------------------
            pick_faces = remove_faces(faces, Ilabel)
            if len(pick_faces) > min_points_faces:

                #-------------------------------------------------------------
                # Compute Zernike moments for the label:
                #-------------------------------------------------------------
                descriptors = zernike_moments(points, pick_faces,
                                              order, scale_input,
                                              decimate_fraction,
                                              decimate_smooth)

                #-------------------------------------------------------------
                # Append to a list of lists of descriptors:
                #-------------------------------------------------------------
                descriptors_lists.append(descriptors)
                label_list.append(label)

    return descriptors_lists, label_list
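
# A minimal standalone sketch of the face-filtering step above (toy data, not
# the mindboggle.guts.mesh implementation): keep only faces whose three
# vertices all belong to the selected label's index set.
faces = [[0, 1, 2], [1, 2, 3], [2, 3, 4]]
Ilabel = {1, 2, 3}
pick_faces = [f for f in faces if all(v in Ilabel for v in f)]
print(pick_faces)  # [[1, 2, 3]]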
Example #18
def extract_fundi(folds,
                  curv_file,
                  depth_file,
                  min_separation=10,
                  erode_ratio=0.1,
                  erode_min_size=1,
                  save_file=False,
                  verbose=False):
    """
    Extract fundi from folds.

    A fundus is a branching curve that runs along the deepest and most
    highly curved portions of a fold.

    Steps ::
        1. Find fundus endpoints (outer anchors) with find_outer_anchors().
        2. Include inner anchor points.
        3. Connect anchor points using connect_points_erosion();
           inner anchors are removed if they result in endpoints.

    Parameters
    ----------
    folds : numpy array or list of integers
        fold number for each vertex
    curv_file :  string
        surface mesh file in VTK format with mean curvature values
    depth_file :  string
        surface mesh file in VTK format with rescaled depth values
    min_separation : integer
        minimum number of edges between inner/outer anchor points
    erode_ratio : float
        fraction of indices to test for removal at each iteration
        in connect_points_erosion()
    erode_min_size : integer
        minimum number of vertices when eroding in connect_points_erosion()
    save_file : bool
        save output VTK file?
    verbose : bool
        print statements?

    Returns
    -------
    fundus_per_fold : list of integers
        fundus numbers for all vertices, labeled by fold
        (-1 for non-fundus vertices)
    n_fundi_in_folds :  integer
        number of fundi
    fundus_per_fold_file : string (if save_file)
        output VTK file with fundus numbers (-1 for non-fundus vertices)

    Examples
    --------
    >>> # Extract fundus from one or more folds:
    >>> single_fold = True
    >>> from mindboggle.mio.vtks import read_scalars
    >>> from mindboggle.features.fundi import extract_fundi
    >>> from mindboggle.mio.fetch_data import prep_tests
    >>> urls, fetch_data = prep_tests()
    >>> curv_file = fetch_data(urls['left_mean_curvature'], '', '.vtk')
    >>> depth_file = fetch_data(urls['left_travel_depth'], '', '.vtk')
    >>> folds_file = fetch_data(urls['left_folds'], '', '.vtk')
    >>> folds, name = read_scalars(folds_file, True, True)
    >>> if single_fold:
    ...     fold_number = 2 #11
    ...     folds[folds != fold_number] = -1
    >>> min_separation = 10
    >>> erode_ratio = 0.10
    >>> erode_min_size = 10
    >>> save_file = True
    >>> verbose = False
    >>> o1, o2, fundus_per_fold_file = extract_fundi(folds, curv_file,
    ...     depth_file, min_separation, erode_ratio, erode_min_size,
    ...     save_file, verbose)
    >>> if single_fold:
    ...     lens = [len([x for x in o1 if x == 2])]
    ... else:
    ...     lens = [len([x for x in o1 if x == y]) for y in range(o2)]
    >>> lens[0:10]
    [115]

    View result (skip test):

    >>> from mindboggle.mio.plots import plot_surfaces
    >>> plot_surfaces(fundus_per_fold_file) # doctest: +SKIP

    """

    # Extract a skeleton to connect endpoints in a fold:
    import os
    import numpy as np
    from time import time

    from mindboggle.mio.vtks import read_scalars, read_vtk, rewrite_scalars
    from mindboggle.guts.compute import median_abs_dev
    from mindboggle.guts.paths import find_max_values
    from mindboggle.guts.mesh import find_neighbors_from_file
    from mindboggle.guts.mesh import find_complete_faces
    from mindboggle.guts.paths import find_outer_anchors
    from mindboggle.guts.paths import connect_points_erosion

    if isinstance(folds, list):
        folds = np.array(folds)

    # Load values, inner anchor threshold, and neighbors:
    points, indices, lines, faces, curvs, scalar_names, npoints, \
        input_vtk = read_vtk(curv_file, True, True)
    depths, name = read_scalars(depth_file, True, True)
    values = curvs * depths
    values0 = [x for x in values if x > 0]
    thr = np.median(values0) + 2 * median_abs_dev(values0)
    neighbor_lists = find_neighbors_from_file(curv_file)

    #-------------------------------------------------------------------------
    # Loop through folds:
    #-------------------------------------------------------------------------
    t1 = time()
    skeletons = []
    unique_fold_IDs = [x for x in np.unique(folds) if x != -1]

    if verbose:
        if len(unique_fold_IDs) == 1:
            print("Extract a fundus from 1 fold...")
        else:
            print("Extract a fundus from each of {0} folds...".format(
                len(unique_fold_IDs)))

    for fold_ID in unique_fold_IDs:
        indices_fold = [i for i, x in enumerate(folds) if x == fold_ID]
        if indices_fold:
            if verbose:
                print('  Fold {0}:'.format(int(fold_ID)))

            #-----------------------------------------------------------------
            # Find outer anchor points on the boundary of the surface region,
            # to serve as fundus endpoints:
            #-----------------------------------------------------------------
            outer_anchors, tracks = find_outer_anchors(indices_fold,
                                                       neighbor_lists, values,
                                                       depths, min_separation,
                                                       verbose)

            #-----------------------------------------------------------------
            # Find inner anchor points:
            #-----------------------------------------------------------------
            inner_anchors = find_max_values(points, values, min_separation,
                                            thr)

            #-----------------------------------------------------------------
            # Connect anchor points to create skeleton:
            #-----------------------------------------------------------------
            B = -1 * np.ones(npoints)
            B[indices_fold] = 1
            skeleton = connect_points_erosion(B,
                                              neighbor_lists,
                                              outer_anchors,
                                              inner_anchors,
                                              values,
                                              erode_ratio,
                                              erode_min_size,
                                              save_steps=[],
                                              save_vtk='',
                                              verbose=False)
            if skeleton:
                skeletons.extend(skeleton)

            #-----------------------------------------------------------------
            # Remove fundus vertices if they complete triangle faces:
            #-----------------------------------------------------------------
            Iremove = find_complete_faces(skeletons, faces)
            if Iremove:
                skeletons = list(frozenset(skeletons).difference(Iremove))

    indices_skel = [x for x in skeletons if folds[x] != -1]
    fundus_per_fold = -1 * np.ones(npoints)
    fundus_per_fold[indices_skel] = folds[indices_skel]
    n_fundi_in_folds = len([x for x in np.unique(fundus_per_fold) if x != -1])
    if n_fundi_in_folds == 1:
        sdum = 'fold fundus'
    else:
        sdum = 'fold fundi'
    if verbose:
        print('  ...Extracted {0} {1} ({2:.2f} seconds)'.format(
            n_fundi_in_folds, sdum, time() - t1))

    #-------------------------------------------------------------------------
    # Return fundi, number of fundi, and file name:
    #-------------------------------------------------------------------------
    fundus_per_fold_file = None
    if n_fundi_in_folds > 0:
        fundus_per_fold = [int(x) for x in fundus_per_fold]
        if save_file:
            fundus_per_fold_file = os.path.join(os.getcwd(),
                                                'fundus_per_fold.vtk')
            rewrite_scalars(curv_file, fundus_per_fold_file, fundus_per_fold,
                            'fundi', folds)
            if not os.path.exists(fundus_per_fold_file):
                raise IOError(fundus_per_fold_file + " not found")

    return fundus_per_fold, n_fundi_in_folds, fundus_per_fold_file
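
# A minimal sketch of the inner-anchor threshold above (hypothetical values):
# the median of the positive curvature*depth values plus twice their median
# absolute deviation.
import numpy as np

values0 = np.array([0.1, 0.2, 0.2, 0.3, 1.5])
mad = np.median(np.abs(values0 - np.median(values0)))  # median_abs_dev
thr = np.median(values0) + 2 * mad
print(thr)  # 0.4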
Example #19
def spectrum_from_file(vtk_file,
                       spectrum_size=10,
                       exclude_labels=[-1],
                       normalization=None,
                       area_file='',
                       verbose=False):
    """
    Compute Laplace-Beltrami spectrum of a 3D shape in a VTK file.

    Parameters
    ----------
    vtk_file : string
        the input vtk file
    spectrum_size : integer
        number of eigenvalues to be computed (the length of the spectrum)
    exclude_labels : list of integers
        labels to be excluded
    normalization : string
        the method used to normalize eigenvalues ('area' or None)
        if "area", use area of the 2D structure as in Reuter et al. 2006
    area_file :  string
        name of VTK file with surface area scalar values
    verbose : bool
        print statements?

    Returns
    -------
    spectrum : list of floats
        first spectrum_size of Laplace-Beltrami spectrum

    Examples
    --------
    >>> # Spectrum for entire left hemisphere of Twins-2-1:
    >>> import numpy as np
    >>> from mindboggle.shapes.laplace_beltrami import spectrum_from_file
    >>> from mindboggle.shapes.laplace_beltrami import spectrum_per_label
    >>> from mindboggle.mio.fetch_data import prep_tests
    >>> urls, fetch_data = prep_tests()
    >>> vtk_file = fetch_data(urls['left_freesurfer_labels'], '', '.vtk')
    >>> spectrum = spectrum_from_file(vtk_file, spectrum_size=6)
    >>> print(np.array_str(np.array(spectrum[1::]),
    ...                    precision=5, suppress_small=True))
    [ 0.00013  0.00027  0.00032  0.00047  0.00058]

    """
    from mindboggle.mio.vtks import read_vtk, read_scalars
    from mindboggle.shapes.laplace_beltrami import spectrum_of_largest

    points, indices, lines, faces, scalars, scalar_names, npoints, \
        input_vtk = read_vtk(vtk_file)

    # Area file:
    if area_file:
        areas, u1 = read_scalars(area_file)
    else:
        areas = None

    spectrum = spectrum_of_largest(points, faces, spectrum_size,
                                   exclude_labels, normalization, areas,
                                   verbose)

    return spectrum
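
# A minimal sketch of the "area" normalization described above, assuming the
# Reuter et al. 2006 convention of multiplying each eigenvalue by the total
# surface area (spectrum values below are hypothetical):
spectrum = [0.0, 0.00013, 0.00027, 0.00032, 0.00047, 0.00058]
total_area = 1.0e5  # hypothetical total surface area
area_normalized = [x * total_area for x in spectrum]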
Example #20
def plot_mask_surface(vtk_file, mask_file='', nonmask_value=-1,
                      masked_output='', remove_nonmask=False,
                      program='vtkviewer',
                      use_colormap=False, colormap_file=''):
    """
    Use vtkviewer or mayavi2 to visualize VTK surface mesh data.

    If a mask_file is provided, a temporary masked file is saved,
    and it is this file that is viewed.

    If using vtkviewer, can optionally provide colormap file
    or set $COLORMAP environment variable.

    Parameters
    ----------
    vtk_file : string
        name of VTK surface mesh file
    mask_file : string
        name of VTK surface mesh file to mask vtk_file vertices
    nonmask_value : integer
        nonmask (usually background) value
    masked_output : string
        temporary masked output file name
    remove_nonmask : Boolean
        remove vertices that are not in mask? (otherwise assign nonmask_value)
    program : string {'vtkviewer', 'mayavi2'}
        program to visualize VTK file
    use_colormap : Boolean
        use Paraview-style XML colormap file set by $COLORMAP env variable?
    colormap_file : string
        use colormap in given file if use_colormap==True?  if empty and
        use_colormap==True, use file set by $COLORMAP environment variable

    Examples
    --------
    >>> import os
    >>> from mindboggle.mio.plots import plot_mask_surface
    >>> path = os.environ['MINDBOGGLE_DATA']
    >>> vtk_file = os.path.join(path, 'arno', 'labels', 'lh.labels.DKT31.manual.vtk')
    >>> mask_file = os.path.join(path, 'test_one_label.vtk')
    >>> nonmask_value = 0 #-1
    >>> masked_output = ''
    >>> remove_nonmask = True
    >>> program = 'vtkviewer'
    >>> use_colormap = True
    >>> colormap_file = '' #'/software/surface_cpp_tools/colormap.xml'
    >>> plot_mask_surface(vtk_file, mask_file, nonmask_value, masked_output, remove_nonmask, program, use_colormap, colormap_file)

    """
    import os
    import numpy as np

    from mindboggle.guts.mesh import remove_faces, reindex_faces_points
    from mindboggle.guts.utilities import execute
    from mindboggle.mio.plots import plot_surfaces
    from mindboggle.mio.vtks import read_scalars, rewrite_scalars, \
                                        read_vtk, write_vtk

    #-------------------------------------------------------------------------
    # Filter mesh with non-background values from a second (same-size) mesh:
    #-------------------------------------------------------------------------
    if mask_file:
        mask, name = read_scalars(mask_file, True, True)
        if not masked_output:
            masked_output = os.path.join(os.getcwd(), 'temp.vtk')
        file_to_plot = masked_output

        #---------------------------------------------------------------------
        # Remove nonmask-valued vertices:
        #---------------------------------------------------------------------
        if remove_nonmask:
            #-----------------------------------------------------------------
            # Load VTK files:
            #-----------------------------------------------------------------
            points, indices, lines, faces, scalars, scalar_names, npoints, \
                input_vtk = read_vtk(vtk_file, True, True)
            #-----------------------------------------------------------------
            # Find mask indices, remove nonmask faces, and reindex:
            #-----------------------------------------------------------------
            Imask = [i for i,x in enumerate(mask) if x != nonmask_value]
            mask_faces = remove_faces(faces, Imask)
            mask_faces, points, \
            original_indices = reindex_faces_points(mask_faces, points)
            #-----------------------------------------------------------------
            # Write VTK file with scalar values:
            #-----------------------------------------------------------------
            if np.ndim(scalars) == 1:
                scalar_type = type(scalars[0]).__name__
            elif np.ndim(scalars) == 2:
                scalar_type = type(scalars[0][0]).__name__
            else:
                raise ValueError("Undefined scalar type!")
            write_vtk(file_to_plot, points, [], [], mask_faces,
                      scalars[original_indices].tolist(), scalar_names,
                      scalar_type=scalar_type)
        else:
            scalars, name = read_scalars(vtk_file, True, True)
            scalars[mask == nonmask_value] = nonmask_value
            rewrite_scalars(vtk_file, file_to_plot, scalars)
    else:
        file_to_plot = vtk_file

    #-------------------------------------------------------------------------
    # Display with vtkviewer.py:
    #-------------------------------------------------------------------------
    if program == 'vtkviewer':
        plot_surfaces(file_to_plot, use_colormap=use_colormap,
                      colormap_file=colormap_file)
    #-------------------------------------------------------------------------
    # Display with mayavi2:
    #-------------------------------------------------------------------------
    elif program == 'mayavi2':
        cmd = ["mayavi2", "-d", file_to_plot, "-m", "Surface", "&"]
        execute(cmd, 'os')
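
# A minimal sketch of the non-removal masking branch above (toy arrays):
# overwrite scalar values wherever the mask equals the nonmask value,
# leaving the mesh geometry intact.
import numpy as np

scalars = np.array([3.0, 5.0, 7.0])
mask = np.array([1, -1, 1])
nonmask_value = -1
scalars[mask == nonmask_value] = nonmask_value
print(scalars)  # [ 3. -1.  7.]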
Example #21
def relabel_surface(vtk_file,
                    hemi='',
                    old_labels=[],
                    new_labels=[],
                    erase_remaining=True,
                    erase_labels=[],
                    erase_value=-1,
                    output_file=''):
    """
    Relabel surface in a VTK file.

    Parameters
    ----------
    vtk_file : string
         input labeled VTK file
    hemi : string
        hemisphere ('lh' or 'rh' or '')
        if set, add 1000 to left and 2000 to right hemisphere labels;
    old_labels : list of integers
        old labels (empty list if labels drawn from vtk scalars);
        may be used in conjunction with hemi
    new_labels : list of integers
        new labels (empty list if labels drawn from vtk scalars);
        may be used in conjunction with hemi
    erase_remaining : Boolean
        set all values not in old_labels to erase_value?
    erase_labels : list of integers
        values to erase (set to erase_value)
    erase_value : integer
        set vertices with labels in erase_labels to this value
    output_file : string
        new vtk file name

    Returns
    -------
    output_file : string
        new vtk file name

    Examples
    --------
    >>> import os
    >>> from mindboggle.guts.relabel import relabel_surface
    >>> from mindboggle.mio.plots import plot_surfaces
    >>> path = os.environ['MINDBOGGLE_DATA']
    >>> vtk_file = os.path.join(path, 'arno', 'labels', 'lh.labels.DKT25.manual.vtk')
    >>> hemi = 'lh'
    >>> old_labels = [1003,1009,1030]
    >>> new_labels = [3,9,30]
    >>> erase_remaining = False
    >>> erase_labels = [0]
    >>> erase_value = -1
    >>> output_file = ''
    >>> output_file = relabel_surface(vtk_file, hemi, old_labels, new_labels,
    ...     erase_remaining, erase_labels, erase_value, output_file)

    View result (skip test):

    >>> plot_surfaces(output_file) # doctest: +SKIP

    """
    import os
    import numpy as np
    from mindboggle.mio.vtks import read_vtk, write_vtk

    # Load labeled vtk surfaces:
    points, indices, lines, faces, scalars, name, npoints, \
        input_vtk = read_vtk(vtk_file, return_first=True, return_array=True)
    new_scalars = scalars.copy()

    # Raise an error if inputs set incorrectly:
    if (new_labels and not old_labels) or \
       (hemi and hemi not in ['lh','rh']) or \
       (new_labels and len(old_labels) != len(new_labels)) or \
       (erase_remaining and not old_labels):
        raise IOError("Please check inputs for relabel_surface().")

    # Loop through unique labels in scalars:
    ulabels = np.unique(scalars)
    for label in ulabels:
        I = np.where(scalars == label)[0]

        # If label in erase_labels list, replace with erase_value:
        if label in erase_labels:
            new_scalars[I] = erase_value

        # If label in old_labels list, replace with corresponding new label,
        # and if hemi set, add 1000 or 2000 to the new label:
        elif label in old_labels and (len(old_labels) == len(new_labels)):
            new_label = new_labels[old_labels.index(label)]
            if hemi == 'lh':
                new_scalars[I] = 1000 + new_label
            elif hemi == 'rh':
                new_scalars[I] = 2000 + new_label
            else:
                new_scalars[I] = new_label

        # If labels not set then optionally add hemi value:
        elif hemi and not new_labels:
            if hemi == 'lh':
                new_scalars[I] = 1000 + label
            elif hemi == 'rh':
                new_scalars[I] = 2000 + label

        # If label unaccounted for and erase_remaining, set to erase_value:
        elif erase_remaining:
            new_scalars[I] = erase_value

    # Ensure that the new scalars are integer values:
    new_scalars = [int(x) for x in new_scalars]

    # Write output VTK file:
    if not output_file:
        output_file = os.path.join(os.getcwd(),
                                   'relabeled_' + os.path.basename(vtk_file))
    write_vtk(output_file,
              points,
              indices,
              lines,
              faces, [new_scalars], ['Labels'],
              scalar_type='int')
    if not os.path.exists(output_file):
        s = "relabel_surface() did not create " + output_file + "."
        raise (IOError(s))

    return output_file
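
# A minimal sketch of the relabeling rule above, mirroring the doctest inputs:
# map an old label to its new value, then add the hemisphere offset
# (1000 for 'lh', 2000 for 'rh').
old_labels = [1003, 1009, 1030]
new_labels = [3, 9, 30]
label, hemi = 1009, 'lh'
new_label = new_labels[old_labels.index(label)]
if hemi == 'lh':
    new_label += 1000
elif hemi == 'rh':
    new_label += 2000
print(new_label)  # 1009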
Example #22
def extract_folds(depth_file, min_vertices=10000, min_fold_size=50, 
                  do_fill_holes=False, min_hole_depth=0.001, 
                  save_file=False):
    """
    Use depth to extract folds from a triangular surface mesh.

    Steps ::
        1. Compute histogram of depth measures.
        2. Define a depth threshold and find the deepest vertices.
        3. Segment deep vertices as an initial set of folds.
        4. Remove small folds.
        5. Find and fill holes in the folds (optional).
        6. Renumber folds.

    Step 2 ::
        To extract an initial set of deep vertices from the surface mesh,
        we anticipate that there will be a rapidly decreasing distribution
        of low depth values (on the outer surface) with a long tail
        of higher depth values (in the folds), so we smooth the histogram's
        bin values, convolve to compute slopes, and find the depth value
        for the first bin with slope = 0. This is our threshold.

    Step 5 ::
        The folds could have holes in areas shallower than the depth threshold.
        Calling fill_holes() could accidentally include very shallow areas
        (in an annulus-shaped fold, for example), so we include the argument
        exclude_range to check for any values from zero to min_hole_depth;
        holes are not filled if they contain values within this range.

    Parameters
    ----------
    depth_file : string
        surface mesh file in VTK format with faces and depth scalar values
    min_vertices : integer
        minimum number of vertices required to compute a depth histogram
    min_fold_size : integer
        minimum fold size (number of vertices)
    do_fill_holes : Boolean
        fill holes in the folds?
    min_hole_depth : float
        largest non-zero depth value that will stop a hole from being filled
    save_file : Boolean
        save output VTK file?

    Returns
    -------
    folds : list of integers
        fold numbers for all vertices (-1 for non-fold vertices)
    n_folds :  int
        number of folds
    depth_threshold :  float
        threshold defining the minimum depth for vertices to be in a fold
    bins :  list of integers
        histogram bins: each is the number of vertices within a range of depth values
    bin_edges :  list of floats
        histogram bin edge values defining the bin ranges of depth values
    folds_file : string (if save_file)
        name of output VTK file with fold IDs (-1 for non-fold vertices)

    Examples
    --------
    >>> import os
    >>> import numpy as np
    >>> import pylab
    >>> from scipy.ndimage import gaussian_filter1d
    >>> from mindboggle.mio.vtks import read_scalars
    >>> from mindboggle.guts.mesh import find_neighbors_from_file
    >>> from mindboggle.mio.plots import plot_surfaces
    >>> from mindboggle.features.folds import extract_folds
    >>> path = os.environ['MINDBOGGLE_DATA']
    >>> depth_file = os.path.join(path, 'arno', 'shapes', 'lh.pial.travel_depth.vtk')
    >>> neighbor_lists = find_neighbors_from_file(depth_file)
    >>> min_vertices = 10000
    >>> min_fold_size = 50
    >>> do_fill_holes = False #True
    >>> min_hole_depth = 0.001
    >>> save_file = True
    >>> #
    >>> folds, n_folds, thr, bins, bin_edges, folds_file = extract_folds(depth_file,
    >>>     min_vertices, min_fold_size, do_fill_holes, min_hole_depth, save_file)
    >>> #
    >>> # View folds:
    >>> plot_surfaces('folds.vtk')
    >>> # Plot histogram and depth threshold:
    >>> depths, name = read_scalars(depth_file)
    >>> nbins = int(np.round(len(depths) / 100.0))
    >>> a,b,c = pylab.hist(depths, bins=nbins)
    >>> pylab.plot(thr*np.ones((100,1)), np.linspace(0, max(bins), 100), 'r.')
    >>> pylab.show()
    >>> # Plot smoothed histogram:
    >>> bins_smooth = gaussian_filter1d(bins.tolist(), 5)
    >>> pylab.plot(range(len(bins)), bins, '.', range(len(bins)), bins_smooth,'-')
    >>> pylab.show()

    """
    import os
    import sys
    import numpy as np
    from time import time
    from scipy.ndimage import gaussian_filter1d
    from mindboggle.mio.vtks import rewrite_scalars, read_vtk
    from mindboggle.guts.mesh import find_neighbors
    from mindboggle.guts.morph import fill_holes
    from mindboggle.guts.segment import segment

    print("Extract folds in surface mesh")
    t0 = time()

    #-------------------------------------------------------------------------
    # Load depth values for all vertices
    #-------------------------------------------------------------------------
    points, indices, lines, faces, depths, scalar_names, npoints, \
        input_vtk = read_vtk(depth_file, return_first=True, return_array=True)

    #-------------------------------------------------------------------------
    # Find neighbors for each vertex
    #-------------------------------------------------------------------------
    neighbor_lists = find_neighbors(faces, npoints)

    #-------------------------------------------------------------------------
    # Compute histogram of depth measures
    #-------------------------------------------------------------------------
    if npoints > min_vertices:
        nbins = int(np.round(npoints / 100.0))
    else:
        raise IOError("  Expecting at least {0} vertices to create "
                      "depth histogram".format(min_vertices))
    bins, bin_edges = np.histogram(depths, bins=nbins)

    #-------------------------------------------------------------------------
    # Anticipating that there will be a rapidly decreasing distribution
    # of low depth values (on the outer surface) with a long tail of higher
    # depth values (in the folds), smooth the bin values (Gaussian), convolve
    # to compute slopes, and find the depth for the first bin with slope = 0.
    #-------------------------------------------------------------------------
    bins_smooth = gaussian_filter1d(bins.tolist(), 5)
    window = [-1, 0, 1]
    bin_slopes = np.convolve(bins_smooth, window, mode='same') / \
                 (len(window) - 1)
    ibins0 = np.where(bin_slopes == 0)[0]
    if ibins0.size:
        depth_threshold = bin_edges[ibins0[0]]
    else:
        depth_threshold = np.median(depths)

    #-------------------------------------------------------------------------
    # Find the deepest vertices
    #-------------------------------------------------------------------------
    indices_deep = [i for i,x in enumerate(depths) if x >= depth_threshold]
    if indices_deep:

        #---------------------------------------------------------------------
        # Segment deep vertices as an initial set of folds
        #---------------------------------------------------------------------
        print("  Segment vertices deeper than {0:.2f} as folds".format(depth_threshold))
        t1 = time()
        folds = segment(indices_deep, neighbor_lists)
        # Slightly slower alternative -- fill boundaries:
        #regions = -1 * np.ones(len(points))
        #regions[indices_deep] = 1
        #folds = segment_by_filling_borders(regions, neighbor_lists)
        print('  ...Segmented folds ({0:.2f} seconds)'.format(time() - t1))

        #---------------------------------------------------------------------
        # Remove small folds
        #---------------------------------------------------------------------
        if min_fold_size > 1:
            print('  Remove folds smaller than {0}'.format(min_fold_size))
            unique_folds = [x for x in np.unique(folds) if x != -1]
            for nfold in unique_folds:
                indices_fold = [i for i,x in enumerate(folds) if x == nfold]
                if len(indices_fold) < min_fold_size:
                    folds[indices_fold] = -1

        #---------------------------------------------------------------------
        # Find and fill holes in the folds
        # Note: Surfaces surrounded by folds can be mistaken for holes,
        #       so exclude_range includes outer surface values close to zero.
        #---------------------------------------------------------------------
        if do_fill_holes:
            print("  Find and fill holes in the folds")
            folds = fill_holes(folds, neighbor_lists, values=depths,
                               exclude_range=[0, min_hole_depth])

        #---------------------------------------------------------------------
        # Renumber folds so they are sequential
        #---------------------------------------------------------------------
        renumber_folds = -1 * np.ones(len(folds))
        fold_numbers = [int(x) for x in np.unique(folds) if x != -1]
        for i_fold, n_fold in enumerate(fold_numbers):
            fold = [i for i,x in enumerate(folds) if x == n_fold]
            renumber_folds[fold] = i_fold
        folds = renumber_folds
        n_folds = len(fold_numbers)

        # Print statement
        print('  ...Extracted {0} folds ({1:.2f} seconds)'.
              format(n_folds, time() - t0))
    else:
        print('  No deep vertices')
        folds = -1 * np.ones(npoints)
        n_folds = 0

    folds = [int(x) for x in folds]

    #-------------------------------------------------------------------------
    # Return folds, number of folds, file name
    #-------------------------------------------------------------------------
    if save_file:

        folds_file = os.path.join(os.getcwd(), 'folds.vtk')
        rewrite_scalars(depth_file, folds_file, folds, 'folds', folds)

        if not os.path.exists(folds_file):
            raise(IOError(folds_file + " not found"))

    else:
        folds_file = None

    return folds, n_folds, depth_threshold, bins, bin_edges, folds_file
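
Step 6 (renumbering) simply maps the surviving fold IDs onto consecutive integers starting at zero, keeping -1 for non-fold vertices. A standalone sketch of that remapping on made-up fold IDs:

import numpy as np

folds = np.array([-1, 5, 5, 9, -1, 2, 9])    # made-up fold IDs
renumbered = -1 * np.ones(len(folds), dtype=int)
for new_id, old_id in enumerate(sorted(set(folds.tolist()) - {-1})):
    renumbered[folds == old_id] = new_id

print(renumbered.tolist())                   # [-1, 1, 1, 2, -1, 0, 2]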
Example #23
File: zernike.py  Project: liob/mindboggle
def zernike_moments_per_label(vtk_file,
                              order=10,
                              exclude_labels=[-1],
                              scale_input=True,
                              decimate_fraction=0,
                              decimate_smooth=25,
                              verbose=False):
    """
    Compute the Zernike moments per labeled region in a file.

    Optionally decimate the input mesh.

    Parameters
    ----------
    vtk_file : string
        name of VTK surface mesh file containing index scalars (labels)
    order : integer
        number of moments to compute
    exclude_labels : list of integers
        labels to be excluded
    scale_input : bool
        translate and scale each object so it is bounded by a unit sphere?
        (this is the expected input to zernike_moments())
    decimate_fraction : float
        fraction of mesh faces to remove for decimation (1 for no decimation)
    decimate_smooth : integer
        number of smoothing steps for decimation
    verbose : bool
        print statements?

    Returns
    -------
    descriptors_lists : list of lists of floats
        Zernike descriptors per label
    label_list : list of integers
        list of unique labels for which moments are computed

    Examples
    --------
    >>> # Zernike moments per label of a FreeSurfer-labeled left cortex.
    >>> # Uncomment "if label==22:" below to run example
    >>> # for left postcentral (22) pial surface:
    >>> import numpy as np
    >>> from mindboggle.shapes.zernike.zernike import zernike_moments_per_label
    >>> from mindboggle.mio.fetch_data import prep_tests
    >>> urls, fetch_data = prep_tests()
    >>> vtk_file = fetch_data(urls['left_freesurfer_labels'], '', '.vtk')
    >>> order = 3
    >>> exclude_labels = [-1]
    >>> scale_input = True
    >>> verbose = False
    >>> descriptors_lists, label_list = zernike_moments_per_label(vtk_file,
    ...     order, exclude_labels, scale_input, verbose)
    >>> label_list[0:10]
    [999, 1001, 1002, 1003, 1005, 1006, 1007, 1008, 1009, 1010]
    >>> print(np.array_str(np.array(descriptors_lists[0]),
    ...                    precision=5, suppress_small=True))
    [ 0.00587  0.01143  0.0031   0.00881  0.00107  0.00041]
    >>> print(np.array_str(np.array(descriptors_lists[1]),
    ...                    precision=5, suppress_small=True))
    [ 0.00004  0.00009  0.00003  0.00009  0.00002  0.00001]
    >>> print(np.array_str(np.array(descriptors_lists[2]),
    ...                    precision=5, suppress_small=True))
    [ 0.00144  0.00232  0.00128  0.00304  0.00084  0.00051]
    >>> print(np.array_str(np.array(descriptors_lists[3]),
    ...                    precision=5, suppress_small=True))
    [ 0.00393  0.006    0.00371  0.00852  0.00251  0.00153]
    >>> print(np.array_str(np.array(descriptors_lists[4]),
    ...                    precision=5, suppress_small=True))
    [ 0.00043  0.0003   0.00095  0.00051  0.00115  0.00116]

    """
    import numpy as np
    from mindboggle.mio.vtks import read_vtk
    from mindboggle.guts.mesh import keep_faces
    from mindboggle.shapes.zernike.zernike import zernike_moments

    min_points_faces = 4

    # ------------------------------------------------------------------------
    # Read VTK surface mesh file:
    # ------------------------------------------------------------------------
    points, indices, lines, faces, labels, scalar_names, npoints, \
            input_vtk = read_vtk(vtk_file)

    # ------------------------------------------------------------------------
    # Loop through labeled regions:
    # ------------------------------------------------------------------------
    ulabels = [x for x in np.unique(labels) if x not in exclude_labels]
    label_list = []
    descriptors_lists = []
    for label in ulabels:
        #if label == 1022:  # 22:
        #    print("DEBUG: COMPUTE FOR ONLY ONE LABEL")

        # --------------------------------------------------------------------
        # Determine the indices per label:
        # --------------------------------------------------------------------
        Ilabel = [i for i, x in enumerate(labels) if x == label]
        if verbose:
            print('  {0} vertices for label {1}'.format(len(Ilabel), label))

        if len(Ilabel) > min_points_faces:

            # ----------------------------------------------------------------
            # Remove background faces:
            # ----------------------------------------------------------------
            pick_faces = keep_faces(faces, Ilabel)
            if len(pick_faces) > min_points_faces:

                # ------------------------------------------------------------
                # Compute Zernike moments for the label:
                # ------------------------------------------------------------
                descriptors = zernike_moments(points, pick_faces, order,
                                              scale_input, decimate_fraction,
                                              decimate_smooth, verbose)

                # ------------------------------------------------------------
                # Append to a list of lists of spectra:
                # ------------------------------------------------------------
                descriptors_lists.append(descriptors)
                label_list.append(label)

    return descriptors_lists, label_list
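
keep_faces is used above to drop triangles that touch other labels. The sketch below shows the same filtering idea in plain NumPy, assuming the strict variant that keeps a triangle only if all three of its vertices carry the target label (toy mesh, not mindboggle's implementation):

import numpy as np

faces = np.array([[0, 1, 2], [2, 3, 4], [4, 5, 0]])  # toy triangles
labels = np.array([7, 7, 7, 7, 2, 2])                # per-vertex labels
target = 7

# Keep triangles whose three vertices all carry the target label:
keep = np.all(labels[faces] == target, axis=1)
print(faces[keep].tolist())                          # [[0, 1, 2]]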
Example #24
def extract_sulci(labels_file, folds_or_file, hemi, min_boundary=1,
                  sulcus_names=[]):
    """
    Identify sulci from folds in a brain surface according to a labeling
    protocol that includes a list of label pairs defining each sulcus.

    A fold is a group of connected, deep vertices.

    Steps for each fold ::

        1. Remove fold if it has fewer than two labels.
        2. Remove fold if its labels do not contain a sulcus label pair.
        3. Find vertices with labels that are in only one of the fold's
           label boundary pairs. Assign the vertices the sulcus with the label
           pair if they are connected to the label boundary for that pair.
        4. If there are remaining vertices, segment into sets of vertices
           connected to label boundaries, and assign a unique ID to each set.

    Parameters
    ----------
    labels_file : string
        file name for surface mesh VTK containing labels for all vertices
    folds_or_file : list or string
        fold number for each vertex / name of VTK file containing fold scalars
    hemi : string
        hemisphere abbreviation in {'lh', 'rh'} for sulcus labels
    min_boundary : integer
        minimum number of vertices for a sulcus label boundary segment
    sulcus_names : list of strings
        names of sulci

    Returns
    -------
    sulci : list of integers
        sulcus numbers for all vertices (-1 for non-sulcus vertices)
    n_sulci : integer
        number of sulci
    sulci_file : string
        output VTK file with sulcus numbers (-1 for non-sulcus vertices)

    Examples
    --------
    >>> import os
    >>> from mindboggle.mio.vtks import read_scalars, rewrite_scalars
    >>> from mindboggle.features.sulci import extract_sulci
    >>> from mindboggle.mio.plots import plot_surfaces
    >>> path = os.environ['MINDBOGGLE_DATA']
    >>> # Load labels, folds, neighbor lists, and sulcus names and label pairs
    >>> labels_file = os.path.join(path, 'arno', 'labels', 'relabeled_lh.DKTatlas40.gcs.vtk')
    >>> folds_file = os.path.join(path, 'arno', 'features', 'folds.vtk')
    >>> folds_or_file, name = read_scalars(folds_file)
    >>> hemi = 'lh'
    >>> min_boundary = 10
    >>> sulcus_names = []
    >>> #
    >>> sulci, n_sulci, sulci_file = extract_sulci(labels_file, folds_or_file, hemi, min_boundary, sulcus_names)
    >>> # View:
    >>> plot_surfaces('sulci.vtk')

    """
    import os
    from time import time
    import numpy as np

    from mindboggle.mio.vtks import read_scalars, read_vtk, rewrite_scalars
    from mindboggle.guts.mesh import find_neighbors
    from mindboggle.guts.segment import extract_borders, propagate, segment
    from mindboggle.mio.labels import DKTprotocol


    # Load fold numbers if folds_or_file is a string:
    if isinstance(folds_or_file, str):
        folds, name = read_scalars(folds_or_file)
    elif isinstance(folds_or_file, list):
        folds = folds_or_file

    dkt = DKTprotocol()

    if hemi == 'lh':
        pair_lists = dkt.left_sulcus_label_pair_lists
    elif hemi == 'rh':
        pair_lists = dkt.right_sulcus_label_pair_lists
    else:
        print("Warning: hemisphere not properly specified ('lh' or 'rh').")

    # Load points, faces, and neighbors:
    points, indices, lines, faces, labels, scalar_names, npoints, \
        input_vtk = read_vtk(labels_file)
    neighbor_lists = find_neighbors(faces, npoints)

    # Array of sulcus IDs for fold vertices, initialized as -1.
    # Since we do not touch gyral vertices and vertices whose labels
    # are not in the label list, or vertices having only one label,
    # their sulcus IDs will remain -1:
    sulci = -1 * np.ones(npoints)

    #-------------------------------------------------------------------------
    # Loop through folds
    #-------------------------------------------------------------------------
    fold_numbers = [int(x) for x in np.unique(folds) if x != -1]
    n_folds = len(fold_numbers)
    print("Extract sulci from {0} folds...".format(n_folds))
    t0 = time()
    for n_fold in fold_numbers:
        fold = [i for i,x in enumerate(folds) if x == n_fold]
        len_fold = len(fold)

        # List the labels in this fold:
        fold_labels = [labels[x] for x in fold]
        unique_fold_labels = [int(x) for x in np.unique(fold_labels)
                              if x != -1]

        #---------------------------------------------------------------------
        # NO MATCH -- fold has fewer than two labels
        #---------------------------------------------------------------------
        if len(unique_fold_labels) < 2:
            # Ignore: sulci already initialized with -1 values:
            if not unique_fold_labels:
                print("  Fold {0} ({1} vertices): "
                      "NO MATCH -- fold has no labels".
                      format(n_fold, len_fold))
            else:
                print("  Fold {0} ({1} vertices): "
                  "NO MATCH -- fold has only one label ({2})".
                  format(n_fold, len_fold, unique_fold_labels[0]))

        else:
            # Find all label boundary pairs within the fold:
            indices_fold_pairs, fold_pairs, unique_fold_pairs = \
                extract_borders(fold, labels, neighbor_lists,
                                ignore_values=[], return_label_pairs=True)

            # Find fold label pairs in the protocol (pairs are already sorted):
            fold_pairs_in_protocol = [x for x in unique_fold_pairs
                                      if x in dkt.unique_sulcus_label_pairs]

            if unique_fold_labels:
                print("  Fold {0} labels: {1} ({2} vertices)".format(n_fold,
                      ', '.join([str(x) for x in unique_fold_labels]),
                      len_fold))
            #-----------------------------------------------------------------
            # NO MATCH -- fold has no sulcus label pair
            #-----------------------------------------------------------------
            if not fold_pairs_in_protocol:
                print("  Fold {0}: NO MATCH -- fold has no sulcus label pair".
                      format(n_fold, len_fold))

            #-----------------------------------------------------------------
            # Possible matches
            #-----------------------------------------------------------------
            else:
                print("  Fold {0} label pairs in protocol: {1}".format(n_fold,
                      ', '.join([str(x) for x in fold_pairs_in_protocol])))

                # Labels in the protocol (includes repeats across label pairs):
                labels_in_pairs = [x for lst in fold_pairs_in_protocol
                                   for x in lst]

                # Labels that appear in one or more sulcus label boundary:
                unique_labels = []
                nonunique_labels = []
                for label in np.unique(labels_in_pairs):
                    if len([x for x in labels_in_pairs if x == label]) == 1:
                        unique_labels.append(label)
                    else:
                        nonunique_labels.append(label)

                #-------------------------------------------------------------
                # Vertices whose labels are in only one sulcus label pair
                #-------------------------------------------------------------
                # Find vertices with a label that is in only one of the fold's
                # label pairs (the other label in the pair can exist in other
                # pairs). Assign the vertices the sulcus with the label pair
                # if they are connected to the label boundary for that pair.
                #-------------------------------------------------------------
                if unique_labels:

                    for pair in fold_pairs_in_protocol:

                        # If one or both labels in label pair is/are unique:
                        unique_labels_in_pair = [x for x in pair
                                                 if x in unique_labels]
                        n_unique = len(unique_labels_in_pair)
                        if n_unique:

                            ID = None
                            for i, pair_list in enumerate(pair_lists):
                                if not isinstance(pair_list, list):
                                    pair_list = [pair_list]
                                if pair in pair_list:
                                    ID = i
                                    break
                            if ID is not None:
                                # Seeds from label boundary vertices
                                # (fold_pairs and pair already sorted):
                                indices_pair = [x for i,x
                                    in enumerate(indices_fold_pairs)
                                    if fold_pairs[i] == pair]

                                # Vertices with unique label(s) in pair:
                                indices_unique_labels = [fold[i]
                                     for i,x in enumerate(fold_labels)
                                     if x in unique_labels_in_pair]

                                # Propagate from seeds to labels in label pair:
                                sulci2 = segment(indices_unique_labels,
                                                 neighbor_lists,
                                                 min_region_size=1,
                                                 seed_lists=[indices_pair],
                                                 keep_seeding=False,
                                                 spread_within_labels=True,
                                                 labels=labels)
                                sulci[sulci2 != -1] = ID

                                # Print statement:
                                if n_unique == 1:
                                    ps1 = '1 label'
                                else:
                                    ps1 = 'Both labels'
                                if len(sulcus_names):
                                    ps2 = sulcus_names[ID]
                                else:
                                    ps2 = ''
                                print("    {0} unique to one fold pair: "
                                      "{1} {2}".
                                      format(ps1, ps2, unique_labels_in_pair))

                #-------------------------------------------------------------
                # Vertex labels shared by multiple label pairs
                #-------------------------------------------------------------
                # Propagate labels from label borders to vertices with labels
                # that are shared by multiple label pairs in the fold.
                #-------------------------------------------------------------
                if len(nonunique_labels):
                    # For each label shared by different label pairs:
                    for label in nonunique_labels:
                        # Print statement:
                        print("    Propagate sulcus borders with label {0}".
                              format(int(label)))

                        # Construct seeds from label boundary vertices:
                        seeds = -1 * np.ones(len(points))

                        for ID, pair_list in enumerate(pair_lists):
                            if not isinstance(pair_list, list):
                                pair_list = [pair_list]
                            label_pairs = [x for x in pair_list if label in x]
                            for label_pair in label_pairs:
                                indices_pair = [x for i,x
                                    in enumerate(indices_fold_pairs)
                                    if np.sort(fold_pairs[i]).
                                    tolist() == label_pair]
                                if indices_pair:

                                    # Do not include short boundary segments:
                                    if min_boundary > 1:
                                        indices_pair2 = []
                                        seeds2 = segment(indices_pair,
                                                         neighbor_lists)
                                        useeds2 = [x for x in
                                                   np.unique(seeds2)
                                                   if x != -1]
                                        for seed2 in useeds2:
                                            iseed2 = [i for i,x
                                                      in enumerate(seeds2)
                                                      if x == seed2]
                                            if len(iseed2) >= min_boundary:
                                                indices_pair2.extend(iseed2)
                                            else:
                                                if len(iseed2) == 1:
                                                    print("    Remove "
                                                          "assignment "
                                                          "of ID {0} from "
                                                          "1 vertex".
                                                          format(seed2))
                                                else:
                                                    print("    Remove "
                                                          "assignment "
                                                          "of ID {0} from "
                                                          "{1} vertices".
                                                          format(seed2,
                                                                 len(iseed2)))
                                        indices_pair = indices_pair2

                                    # Assign sulcus IDs to seeds:
                                    seeds[indices_pair] = ID

                        # Identify vertices with the label:
                        label_array = -1 * np.ones(len(points))
                        indices_label = [fold[i] for i,x
                                         in enumerate(fold_labels)
                                         if x == label]
                        if len(indices_label):
                            label_array[indices_label] = 1

                            # Propagate from seeds to vertices with label:
                            #indices_seeds = []
                            #for seed in range(int(max(seeds))+1):
                            #    indices_seeds.append([i for i,x
                            #                          in enumerate(seeds)
                            #                          if x == seed])
                            #sulci2 = segment(indices_label, neighbor_lists,
                            #                 50, indices_seeds, False, True,
                            #                 labels)
                            sulci2 = propagate(points, faces,
                                               label_array, seeds, sulci,
                                               max_iters=10000,
                                               tol=0.001, sigma=5)
                            sulci[sulci2 != -1] = sulci2[sulci2 != -1]

    #-------------------------------------------------------------------------
    # Print out assigned sulci
    #-------------------------------------------------------------------------
    sulcus_numbers = [int(x) for x in np.unique(sulci) if x != -1]
    n_sulci = len(sulcus_numbers)
    print("Extracted {0} sulci from {1} folds ({2:.1f}s):".
          format(n_sulci, n_folds, time()-t0))
    if sulcus_names:
        for sulcus_number in sulcus_numbers:
            print("  {0}: {1}".format(sulcus_number,
                                      sulcus_names[sulcus_number]))
    elif sulcus_numbers:
        print("  " + ", ".join([str(x) for x in sulcus_numbers]))

    #-------------------------------------------------------------------------
    # Print out unresolved sulci
    #-------------------------------------------------------------------------
    unresolved = [i for i in range(len(pair_lists))
                  if i not in sulcus_numbers]
    if len(unresolved) == 1:
        print("The following sulcus is unaccounted for:")
    else:
        print("The following {0} sulci are unaccounted for:".
              format(len(unresolved)))
    if sulcus_names:
        for sulcus_number in unresolved:
            print("  {0}: {1}".format(sulcus_number,
                                      sulcus_names[sulcus_number]))
    else:
        print("  " + ", ".join([str(x) for x in unresolved]))

    #-------------------------------------------------------------------------
    # Return sulci, number of sulci, and file name
    #-------------------------------------------------------------------------
    sulci = [int(x) for x in sulci]
    sulci_file = os.path.join(os.getcwd(), 'sulci.vtk')
    rewrite_scalars(labels_file, sulci_file, sulci, 'sulci', sulci)

    if not os.path.exists(sulci_file):
        raise(IOError(sulci_file + " not found"))

    return sulci, n_sulci, sulci_file
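
The decisive test in steps 1-2 above is whether any of a fold's sorted label-boundary pairs appears in the protocol's list of sulcus label pairs. A minimal sketch with made-up pairs (the protocol list here is only an illustrative stand-in for dkt.unique_sulcus_label_pairs):

# Made-up sorted label pairs found on one fold's boundaries:
fold_pairs = [[1003, 1024], [1022, 1024], [1003, 1035]]
# Illustrative stand-in for dkt.unique_sulcus_label_pairs:
protocol_pairs = [[1003, 1024], [1022, 1024]]

pairs_in_protocol = [p for p in fold_pairs if p in protocol_pairs]
print(pairs_in_protocol)   # [[1003, 1024], [1022, 1024]]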
Example #25
def find_depth_threshold(depth_file, min_vertices=10000, verbose=False):
    """
    Find depth threshold to extract folds from a triangular surface mesh.

    Steps ::
        1. Compute histogram of depth measures.
        2. Define a depth threshold and find the deepest vertices.
           To extract an initial set of deep vertices from the surface mesh,
           we anticipate that there will be a rapidly decreasing distribution
           of low depth values (on the outer surface) with a long tail
           of higher depth values (in the folds), so we smooth the histogram's
           bin values, convolve to compute slopes, and find the depth value
           for the first bin with slope = 0. This is our threshold.

    Parameters
    ----------
    depth_file : string
        surface mesh file in VTK format with faces and depth scalar values
    min_vertices : integer
        minimum number of vertices
    verbose : bool
        print statements?

    Returns
    -------
    depth_threshold :  float
        threshold defining the minimum depth for vertices to be in a fold
    bins :  list of integers
        histogram bins: each is the number of vertices within a range of depth values
    bin_edges :  list of floats
        histogram bin edge values defining the bin ranges of depth values

    Examples
    --------
    >>> import numpy as np
    >>> from mindboggle.features.folds import find_depth_threshold
    >>> from mindboggle.mio.fetch_data import prep_tests
    >>> urls, fetch_data = prep_tests()
    >>> depth_file = fetch_data(urls['left_travel_depth'], '', '.vtk')
    >>> min_vertices = 10000
    >>> verbose = False
    >>> depth_threshold, bins, bin_edges = find_depth_threshold(depth_file,
    ...     min_vertices, verbose)
    >>> float("{0:.{1}f}".format(depth_threshold, 5))
    2.36089

    View threshold histogram plots (skip test):

    >>> def vis():
    ...     import numpy as np
    ...     import pylab
    ...     from scipy.ndimage import gaussian_filter1d
    ...     from mindboggle.mio.vtks import read_scalars
    ...     # Plot histogram and depth threshold:
    ...     depths, name = read_scalars(depth_file)
    ...     nbins = int(np.round(len(depths) / 100.0))
    ...     a,b,c = pylab.hist(depths, bins=nbins)
    ...     pylab.plot(depth_threshold * np.ones((100,1)),
    ...                np.linspace(0, max(bins), 100), 'r.')
    ...     pylab.title('Histogram of depth values with threshold')
    ...     pylab.xlabel('Depth')
    ...     pylab.ylabel('Number of vertices')
    ...     pylab.show()
    ...     # Plot smoothed histogram:
    ...     bins_smooth = gaussian_filter1d(bins.tolist(), 5)
    ...     pylab.plot(list(range(len(bins))), bins, '.',
    ...                list(range(len(bins))), bins_smooth,'-')
    ...     pylab.title('Smoothed histogram of depth values')
    ...     pylab.show()
    >>> vis() # doctest: +SKIP

    """
    import numpy as np
    from scipy.ndimage import gaussian_filter1d

    from mindboggle.mio.vtks import read_vtk

    # ------------------------------------------------------------------------
    # Load depth values for all vertices:
    # ------------------------------------------------------------------------
    points, indices, lines, faces, depths, scalar_names, npoints, \
        input_vtk = read_vtk(depth_file, return_first=True, return_array=True)

    # ------------------------------------------------------------------------
    # Compute histogram of depth measures:
    # ------------------------------------------------------------------------
    if npoints > min_vertices:
        nbins = int(np.round(npoints / 100.0))
    else:
        raise IOError("  Expecting at least {0} vertices to create "
                      "depth histogram".format(min_vertices))
    bins, bin_edges = np.histogram(depths, bins=nbins)

    # ------------------------------------------------------------------------
    # Anticipating that there will be a rapidly decreasing distribution
    # of low depth values (on the outer surface) with a long tail of higher
    # depth values (in the folds), smooth the bin values (Gaussian), convolve
    # to compute slopes, and find the depth for the first bin with slope = 0.
    # ------------------------------------------------------------------------
    bins_smooth = gaussian_filter1d(bins.tolist(), 5)
    window = [-1, 0, 1]
    bin_slopes = np.convolve(bins_smooth, window, mode='same') / \
                 (len(window) - 1)
    ibins0 = np.where(bin_slopes == 0)[0]
    if ibins0.size:
        depth_threshold = bin_edges[ibins0[0]]
    else:
        depth_threshold = np.median(depths)

    # Print statement:
    if verbose:
        print('  Depth threshold: {0}'.format(depth_threshold))

    return depth_threshold, bins, bin_edges
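
The zero-slope rule above relies on the bins being smoothed as integers (gaussian_filter1d returns the same dtype as its input), which makes exact zero slopes common once the histogram flattens. A sketch on synthetic depths, with arbitrary distribution parameters standing in for a shallow outer surface and a deep fold tail:

import numpy as np
from scipy.ndimage import gaussian_filter1d

rng = np.random.default_rng(0)
# Synthetic depths: a dense shallow mode plus a long deep tail:
depths = np.concatenate([rng.exponential(0.5, 20000),
                         rng.normal(8.0, 2.0, 4000)])

bins, bin_edges = np.histogram(depths, bins=int(len(depths) / 100))
bins_smooth = gaussian_filter1d(bins.tolist(), 5)   # integer smoothing
slopes = np.convolve(bins_smooth, [-1, 0, 1], mode='same') / 2.0
ibins0 = np.where(slopes == 0)[0]
threshold = bin_edges[ibins0[0]] if ibins0.size else np.median(depths)
print(float(threshold))   # falls somewhere between the two modes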
Example #26
def plot_mask_surface(vtk_file,
                      mask_file='',
                      nonmask_value=-1,
                      masked_output='',
                      remove_nonmask=False,
                      program='vtkviewer',
                      use_colormap=False,
                      colormap_file='',
                      background_value=-1):
    """
    Use vtkviewer or mayavi2 to visualize VTK surface mesh data.

    If a mask_file is provided, a temporary masked file is saved,
    and it is this file that is viewed.

    If using vtkviewer, optionally provide colormap file
    or set $COLORMAP environment variable.

    Parameters
    ----------
    vtk_file : string
        name of VTK surface mesh file
    mask_file : string
        name of VTK surface mesh file to mask vtk_file vertices
    nonmask_value : integer
        nonmask (usually background) value
    masked_output : string
        temporary masked output file name
    remove_nonmask : bool
        remove vertices that are not in mask? (otherwise assign nonmask_value)
    program : string {'vtkviewer', 'mayavi2'}
        program to visualize VTK file
    use_colormap : bool
        use Paraview-style XML colormap file set by $COLORMAP env variable?
    colormap_file : string
        use colormap in given file if use_colormap==True?  if empty and
        use_colormap==True, use file set by $COLORMAP environment variable
    background_value : integer or float
        background value

    Examples
    --------
    >>> import os
    >>> from mindboggle.mio.plots import plot_mask_surface
    >>> from mindboggle.mio.fetch_data import prep_tests
    >>> urls, fetch_data = prep_tests()
    >>> vtk_file = fetch_data(urls['freesurfer_labels'], '', '.vtk')
    >>> mask_file = ''
    >>> nonmask_value = 0 #-1
    >>> masked_output = ''
    >>> remove_nonmask = True
    >>> program = 'vtkviewer'
    >>> use_colormap = True
    >>> colormap_file = ''
    >>> background_value = -1
    >>> plot_mask_surface(vtk_file, mask_file, nonmask_value, masked_output,
    ...     remove_nonmask, program, use_colormap, colormap_file,
    ...     background_value) # doctest: +SKIP

    """
    import os
    import numpy as np

    from mindboggle.guts.mesh import keep_faces, reindex_faces_points
    from mindboggle.guts.utilities import execute
    from mindboggle.mio.plots import plot_surfaces
    from mindboggle.mio.vtks import read_scalars, rewrite_scalars, \
                                        read_vtk, write_vtk

    # ------------------------------------------------------------------------
    # Filter mesh with non-background values from a second (same-size) mesh:
    # ------------------------------------------------------------------------
    if mask_file:
        mask, name = read_scalars(mask_file, True, True)
        if not masked_output:
            masked_output = os.path.join(os.getcwd(), 'temp.vtk')
        file_to_plot = masked_output

        # --------------------------------------------------------------------
        # Remove nonmask-valued vertices:
        # --------------------------------------------------------------------
        if remove_nonmask:
            # ----------------------------------------------------------------
            # Load VTK files:
            # ----------------------------------------------------------------
            points, indices, lines, faces, scalars, scalar_names, npoints, \
                input_vtk = read_vtk(vtk_file, True, True)
            # ----------------------------------------------------------------
            # Find mask indices, remove nonmask faces, and reindex:
            # ----------------------------------------------------------------
            Imask = [i for i, x in enumerate(mask) if x != nonmask_value]
            mask_faces = keep_faces(faces, Imask)
            mask_faces, points, \
            original_indices = reindex_faces_points(mask_faces, points)
            # ----------------------------------------------------------------
            # Write VTK file with scalar values:
            # ----------------------------------------------------------------
            if np.ndim(scalars) == 1:
                scalar_type = type(scalars[0]).__name__
            elif np.ndim(scalars) == 2:
                scalar_type = type(scalars[0][0]).__name__
            else:
                raise ValueError("Undefined scalar type!")
            write_vtk(file_to_plot,
                      points, [], [],
                      mask_faces,
                      scalars[original_indices].tolist(),
                      scalar_names,
                      scalar_type=scalar_type)
        else:
            scalars, name = read_scalars(vtk_file, True, True)
            scalars[mask == nonmask_value] = nonmask_value
            rewrite_scalars(vtk_file, file_to_plot, scalars, ['scalars'], [],
                            background_value)
    else:
        file_to_plot = vtk_file

    # ------------------------------------------------------------------------
    # Display with vtkviewer.py:
    # ------------------------------------------------------------------------
    if program == 'vtkviewer':
        plot_surfaces(file_to_plot,
                      use_colormap=use_colormap,
                      colormap_file=colormap_file)
    # ------------------------------------------------------------------------
    # Display with mayavi2:
    # ------------------------------------------------------------------------
    elif program == 'mayavi2':
        cmd = ["mayavi2", "-d", file_to_plot, "-m", "Surface", "&"]
        execute(cmd, 'os')
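
reindex_faces_points is assumed above to compact the mesh after masking: surviving vertices get renumbered 0..n-1 and faces are rewritten in the new numbering. A toy NumPy sketch of that reindexing step (not mindboggle's implementation):

import numpy as np

points = np.array([[0., 0., 0.], [1., 0., 0.],
                   [0., 1., 0.], [1., 1., 0.]])
faces = np.array([[0, 1, 2], [1, 3, 2]])
keep = np.array([0, 1, 2])                  # mask keeps vertices 0-2

old_to_new = -1 * np.ones(len(points), dtype=int)
old_to_new[keep] = np.arange(len(keep))     # old index -> new index
# Drop faces that lost a vertex, then renumber the rest:
mask_faces = faces[np.all(old_to_new[faces] >= 0, axis=1)]
mask_faces = old_to_new[mask_faces]

print(mask_faces.tolist())                  # [[0, 1, 2]]
print(points[keep].tolist())                # compacted point list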
Example #27
def write_face_vertex_averages(input_file, output_table='', area_file=''):
    """
    Make table of average vertex values per face
    (divided by face area if area_file provided).

    Parameters
    ----------
    input_file : string
        name of VTK file with scalars to average
    output_table :  string
        output table filename
    area_file :  string
        name of VTK file with surface area scalar values

    Returns
    -------
    output_table :  string
        output table filename

    Examples
    --------
    >>> from mindboggle.mio.tables import write_face_vertex_averages
    >>> from mindboggle.mio.fetch_data import prep_tests
    >>> urls, fetch_data = prep_tests()
    >>> input_file = fetch_data(urls['left_travel_depth'], '', '.vtk')
    >>> area_file = fetch_data(urls['left_area'], '', '.vtk')
    >>> output_table = ''
    >>> output_table = write_face_vertex_averages(input_file, output_table,
    ...                                           area_file)

    """
    import os
    import numpy as np
    import pandas as pd

    from mindboggle.mio.vtks import read_vtk, read_scalars

    points, indices, lines, faces, scalars, scalar_names, \
        npoints, input_vtk = read_vtk(input_file, True, True)
    if area_file:
        area_scalars, name = read_scalars(area_file, True, True)

    # --------------------------------------------------------------------
    # For each face, average vertex values:
    # --------------------------------------------------------------------
    columns = []
    for face in faces:
        values = []
        for index in face:
            if area_file:
                values.append(scalars[index] / area_scalars[index])
            else:
                values.append(scalars[index])
        columns.append(np.mean(values))

    # ----------------------------------------------------------------
    # Write to table:
    # ----------------------------------------------------------------
    if not output_table:
        output_table = os.path.join(os.getcwd(), 'average_face_values.csv')

    df = pd.DataFrame({'': columns})
    df.to_csv(output_table, index=False, encoding='utf-8')

    if not os.path.exists(output_table):
        raise IOError(output_table + " not found")

    return output_table
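
The per-face loop above can be collapsed into one vectorized step: index the scalar array with the (n_faces, 3) face array and average over the vertex axis. A sketch on toy data (the division mirrors the area_file branch):

import numpy as np

faces = np.array([[0, 1, 2], [2, 3, 0]])     # toy triangles
scalars = np.array([1.0, 2.0, 3.0, 4.0])     # per-vertex values
area = np.array([1.0, 1.0, 2.0, 4.0])        # per-vertex areas

# Average area-normalized vertex values per face:
face_means = (scalars / area)[faces].mean(axis=1)
print(face_means.tolist())                   # [1.5, 1.1666666666666667]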
Example #28
File: sulci.py  Project: tsball/mindboggle
def extract_sulci(labels_file,
                  folds_or_file,
                  hemi,
                  min_boundary=1,
                  sulcus_names=[],
                  save_file=False,
                  output_file='',
                  background_value=-1,
                  verbose=False):
    """
    Identify sulci from folds in a brain surface according to a labeling
    protocol that includes a list of label pairs defining each sulcus.

    Since folds are defined as deep, connected areas of a surface, and since
    folds may be connected to each other in ways that differ across brains,
    there usually does not exist a one-to-one mapping between folds of one
    brain and those of another.  To address the correspondence problem then,
    we need to find just those portions of the folds that correspond across
    brains. To accomplish this, Mindboggle segments folds into sulci, which
    do have a one-to-one correspondence across non-pathological brains.
    Mindboggle defines a sulcus as a folded portion of cortex whose opposing
    banks are labeled with one or more sulcus label pairs in the DKT labeling
    protocol, where each label pair is unique to one sulcus and represents
    a boundary between two adjacent gyri, and each vertex has one gyrus label.

    This function assigns vertices in a fold to a sulcus in one of two cases.
    In the first case, vertices whose labels are in only one label pair in
    the fold are assigned to the label pair’s sulcus if they are connected
    through similarly labeled vertices to the boundary between the two labels.
    In the second case, the segment_regions function propagates labels from
    label borders to vertices whose labels are in multiple label pairs in the
    fold.

    Steps for each fold ::

        1. Remove fold if it has fewer than two labels.
        2. Remove fold if its labels do not contain a sulcus label pair.
        3. Find vertices with labels that are in only one of the fold's
           label boundary pairs. Assign the vertices the sulcus with the label
           pair if they are connected to the label boundary for that pair.
        4. If there are remaining vertices, segment into sets of vertices
           connected to label boundaries, and assign a unique ID to each set.

    Parameters
    ----------
    labels_file : string
        file name for surface mesh VTK containing labels for all vertices
    folds_or_file : numpy array, list or string
        fold number for each vertex / name of VTK file containing fold scalars
    hemi : string
        hemisphere abbreviation in {'lh', 'rh'} for sulcus labels
    min_boundary : integer
        minimum number of vertices for a sulcus label boundary segment
    sulcus_names : list of strings
        names of sulci
    save_file : bool
        save output VTK file?
    output_file : string
        name of output file in VTK format
    background_value : integer or float
        background value
    verbose : bool
        print statements?

    Returns
    -------
    sulci : list of integers
        sulcus numbers for all vertices (-1 for non-sulcus vertices)
    n_sulci : integer
        number of sulci
    sulci_file : string
        output VTK file with sulcus numbers (-1 for non-sulcus vertices)

    Examples
    --------
    >>> # Example 1: Extract sulcus from a fold with one sulcus label pair:
    >>> import numpy as np
    >>> from mindboggle.features.sulci import extract_sulci
    >>> from mindboggle.mio.vtks import read_scalars
    >>> from mindboggle.mio.fetch_data import prep_tests
    >>> urls, fetch_data = prep_tests()
    >>> # Load labels, folds, neighbor lists, and sulcus names and label pairs
    >>> labels_file = fetch_data(urls['left_freesurfer_labels'], '', '.vtk')
    >>> folds_file = fetch_data(urls['left_folds'], '', '.vtk')
    >>> folds_or_file, name = read_scalars(folds_file, True, True)
    >>> save_file = True
    >>> output_file = 'extract_sulci_fold4_1sulcus.vtk'
    >>> background_value = -1
    >>> # Limit number of folds to speed up the test:
    >>> limit_folds = True
    >>> if limit_folds:
    ...     fold_numbers = [4] #[4, 6]
    ...     i0 = [i for i,x in enumerate(folds_or_file) if x not in fold_numbers]
    ...     folds_or_file[i0] = background_value
    >>> hemi = 'lh'
    >>> min_boundary = 10
    >>> sulcus_names = []
    >>> verbose = False
    >>> sulci, n_sulci, sulci_file = extract_sulci(labels_file, folds_or_file,
    ...     hemi, min_boundary, sulcus_names, save_file, output_file,
    ...     background_value, verbose)
    >>> n_sulci  # 23 # (if not limit_folds)
    1
    >>> lens = [len([x for x in sulci if x==y])
    ...         for y in np.unique(sulci) if y != -1]
    >>> lens[0:10]  # [6358, 3288, 7612, 5205, 4414, 6251, 3493, 2566, 4436, 739] # (if not limit_folds)
    [1151]

    View result without background (skip test):

    >>> from mindboggle.mio.plots import plot_surfaces # doctest: +SKIP
    >>> from mindboggle.mio.vtks import rewrite_scalars # doctest: +SKIP
    >>> output = 'extract_sulci_fold4_1sulcus_no_background.vtk'
    >>> rewrite_scalars(sulci_file, output, sulci,
    ...                 'sulci', sulci) # doctest: +SKIP
    >>> plot_surfaces(output) # doctest: +SKIP

    Example 2:  Extract sulcus from a fold with multiple sulcus label pairs:

    >>> folds_or_file, name = read_scalars(folds_file, True, True)
    >>> output_file = 'extract_sulci_fold7_2sulci.vtk'
    >>> # Limit number of folds to speed up the test:
    >>> limit_folds = True
    >>> if limit_folds:
    ...     fold_numbers = [7] #[4, 6]
    ...     i0 = [i for i,x in enumerate(folds_or_file) if x not in fold_numbers]
    ...     folds_or_file[i0] = background_value
    >>> sulci, n_sulci, sulci_file = extract_sulci(labels_file, folds_or_file,
    ...     hemi, min_boundary, sulcus_names, save_file, output_file,
    ...     background_value, verbose)
    >>> n_sulci  # 23 # (if not limit_folds)
    2
    >>> lens = [len([x for x in sulci if x==y])
    ...         for y in np.unique(sulci) if y != -1]
    >>> lens[0:10]  # [6358, 3288, 7612, 5205, 4414, 6251, 3493, 2566, 4436, 739] # (if not limit_folds)
    [369, 93]

    View result without background (skip test):

    >>> from mindboggle.mio.plots import plot_surfaces # doctest: +SKIP
    >>> from mindboggle.mio.vtks import rewrite_scalars # doctest: +SKIP
    >>> output = 'extract_sulci_fold7_2sulci_no_background.vtk'
    >>> rewrite_scalars(sulci_file, output, sulci,
    ...                 'sulci', sulci) # doctest: +SKIP
    >>> plot_surfaces(output) # doctest: +SKIP

    """
    import os
    from time import time
    import numpy as np

    from mindboggle.mio.vtks import read_scalars, read_vtk, rewrite_scalars
    from mindboggle.guts.mesh import find_neighbors
    from mindboggle.guts.segment import extract_borders, propagate, segment_regions
    from mindboggle.mio.labels import DKTprotocol

    # Load fold numbers if folds_or_file is a string:
    if isinstance(folds_or_file, str):
        folds, name = read_scalars(folds_or_file)
    elif isinstance(folds_or_file, list):
        folds = folds_or_file
    elif isinstance(folds_or_file, np.ndarray):
        folds = folds_or_file.tolist()

    dkt = DKTprotocol()

    if hemi == 'lh':
        pair_lists = dkt.left_sulcus_label_pair_lists
    elif hemi == 'rh':
        pair_lists = dkt.right_sulcus_label_pair_lists
    else:
        raise IOError(
            "Hemisphere not properly specified ('lh' or 'rh').")

    # Load points, faces, and neighbors:
    points, indices, lines, faces, labels, scalar_names, npoints, \
            input_vtk = read_vtk(labels_file)
    neighbor_lists = find_neighbors(faces, npoints)

    # Array of sulcus IDs for fold vertices, initialized as -1.
    # Since we do not touch gyral vertices and vertices whose labels
    # are not in the label list, or vertices having only one label,
    # their sulcus IDs will remain -1:
    sulci = background_value * np.ones(npoints)

    # ------------------------------------------------------------------------
    # Loop through folds
    # ------------------------------------------------------------------------
    fold_numbers = [int(x) for x in np.unique(folds) if x != background_value]
    n_folds = len(fold_numbers)
    if verbose:
        print("Extract sulci from {0} folds...".format(n_folds))
    t0 = time()
    for n_fold in fold_numbers:
        fold_indices = [i for i, x in enumerate(folds) if x == n_fold]
        len_fold = len(fold_indices)

        # List the labels in this fold:
        fold_labels = [labels[x] for x in fold_indices]
        unique_fold_labels = [
            int(x) for x in np.unique(fold_labels) if x != background_value
        ]

        # --------------------------------------------------------------------
        # NO MATCH -- fold has fewer than two labels
        # --------------------------------------------------------------------
        if len(unique_fold_labels) < 2:
            # Ignore: sulci already initialized with background values:
            if verbose:
                if not unique_fold_labels:
                    print("  Fold {0} ({1} vertices): "
                          "NO MATCH -- fold has no labels".format(
                              n_fold, len_fold))
                else:
                    print("  Fold {0} ({1} vertices): "
                          "NO MATCH -- fold has only one label ({2})".format(
                              n_fold, len_fold, unique_fold_labels[0]))

        else:
            # Find all label boundary pairs within the fold:
            indices_fold_pairs, fold_pairs, unique_fold_pairs = \
                extract_borders(fold_indices, labels, neighbor_lists,
                                ignore_values=[], return_label_pairs=True)

            # Find fold label pairs in the protocol (pairs are already sorted):
            fold_pairs_in_protocol = [
                x for x in unique_fold_pairs
                if x in dkt.unique_sulcus_label_pairs
            ]

            if verbose and unique_fold_labels:
                print("  Fold {0} labels: {1} ({2} vertices)".format(
                    n_fold, ', '.join([str(x) for x in unique_fold_labels]),
                    len_fold))
            # ----------------------------------------------------------------
            # NO MATCH -- fold has no sulcus label pair
            # ----------------------------------------------------------------
            if not fold_pairs_in_protocol:
                if verbose:
                    print("  Fold {0}: NO MATCH -- fold has no sulcus "
                          "label pair".format(n_fold))

            # ----------------------------------------------------------------
            # Possible matches
            # ----------------------------------------------------------------
            else:
                if verbose:
                    print("  Fold {0} label pairs in protocol: {1}".format(
                        n_fold,
                        ', '.join([str(x) for x in fold_pairs_in_protocol])))

                # Labels in the protocol (includes repeats across label pairs):
                labels_in_pairs = [
                    x for lst in fold_pairs_in_protocol for x in lst
                ]

                # Labels that appear in one or more sulcus label boundary:
                unique_labels = []
                nonunique_labels = []
                for label in np.unique(labels_in_pairs):
                    if len([x for x in labels_in_pairs if x == label]) == 1:
                        unique_labels.append(label)
                    else:
                        nonunique_labels.append(label)

                # ------------------------------------------------------------
                # Vertices whose labels are in only one sulcus label pair
                # ------------------------------------------------------------
                # Find vertices with a label that is in only one of the fold's
                # label pairs (the other label in the pair can exist in other
                # pairs). Assign the vertices the sulcus with the label pair
                # if they are connected to the label boundary for that pair.
                # ------------------------------------------------------------
                if unique_labels:

                    for pair in fold_pairs_in_protocol:

                        # If one or both labels in label pair is/are unique:
                        unique_labels_in_pair = [
                            x for x in pair if x in unique_labels
                        ]
                        n_unique = len(unique_labels_in_pair)
                        if n_unique:

                            ID = None
                            for i, pair_list in enumerate(pair_lists):
                                if not isinstance(pair_list, list):
                                    pair_list = [pair_list]
                                if pair in pair_list:
                                    ID = i
                                    break
                            if ID is not None:
                                # Seeds from label boundary vertices
                                # (fold_pairs and pair already sorted):
                                indices_pair = [
                                    x for i, x in enumerate(indices_fold_pairs)
                                    if fold_pairs[i] == pair
                                ]

                                # Vertices with unique label(s) in pair:
                                indices_unique_labels = [
                                    fold_indices[i]
                                    for i, x in enumerate(fold_labels)
                                    if x in unique_labels_in_pair
                                ]

                                # Propagate sulcus ID from seeds to vertices
                                # with "unique" labels (only exist in one
                                # label pair in a fold); propagation ensures
                                # that sulci consist of contiguous vertices
                                # for each label boundary:
                                sulci2 = segment_regions(
                                    indices_unique_labels,
                                    neighbor_lists,
                                    min_region_size=1,
                                    seed_lists=[indices_pair],
                                    keep_seeding=False,
                                    spread_within_labels=True,
                                    labels=labels,
                                    label_lists=[],
                                    values=[],
                                    max_steps='',
                                    background_value=background_value,
                                    verbose=False)

                                sulci[sulci2 != background_value] = ID

                                # Print statement:
                                if verbose:
                                    if n_unique == 1:
                                        ps1 = 'One label'
                                    else:
                                        ps1 = 'Both labels'
                                    if len(sulcus_names):
                                        ps2 = sulcus_names[ID]
                                    else:
                                        ps2 = ''
                                    print("    {0} unique to one fold pair: "
                                          "{1} {2}".format(
                                              ps1, ps2, unique_labels_in_pair))

                # ------------------------------------------------------------
                # Vertex labels shared by multiple label pairs
                # ------------------------------------------------------------
                # Propagate labels from label borders to vertices with labels
                # that are shared by multiple label pairs in the fold.
                # ------------------------------------------------------------
                if len(nonunique_labels):
                    # For each label shared by different label pairs:
                    for label in nonunique_labels:
                        # Print statement:
                        if verbose:
                            print(
                                "    Propagate sulcus borders with label {0}".
                                format(int(label)))

                        # Construct seeds from label boundary vertices:
                        seeds = background_value * np.ones(npoints)

                        for ID, pair_list in enumerate(pair_lists):
                            if not isinstance(pair_list, list):
                                pair_list = [pair_list]
                            label_pairs = [x for x in pair_list if label in x]
                            for label_pair in label_pairs:
                                indices_pair = [
                                    x for i, x in enumerate(indices_fold_pairs)
                                    if np.sort(fold_pairs[i]).tolist() ==
                                    label_pair
                                ]
                                if indices_pair:

                                    # Do not include short boundary segments:
                                    if min_boundary > 1:
                                        indices_pair2 = []
                                        seeds2 = segment_regions(
                                            indices_pair, neighbor_lists, 1,
                                            [], False, False, [], [], [], '',
                                            background_value, verbose)

                                        useeds2 = [
                                            x for x in np.unique(seeds2)
                                            if x != background_value
                                        ]
                                        for seed2 in useeds2:
                                            iseed2 = [
                                                i for i, x in enumerate(seeds2)
                                                if x == seed2
                                            ]
                                            if len(iseed2) >= min_boundary:
                                                indices_pair2.extend(iseed2)
                                            elif verbose:
                                                if len(iseed2) == 1:
                                                    print("    Remove "
                                                          "assignment "
                                                          "of ID {0} from "
                                                          "1 vertex".format(
                                                              seed2))
                                                else:
                                                    print(
                                                        "    Remove "
                                                        "assignment "
                                                        "of ID {0} from "
                                                        "{1} vertices".format(
                                                            seed2,
                                                            len(iseed2)))
                                        indices_pair = indices_pair2

                                    # Assign sulcus IDs to seeds:
                                    seeds[indices_pair] = ID

                        # Identify vertices with the label:
                        indices_label = [
                            fold_indices[i] for i, x in enumerate(fold_labels)
                            if x == label
                        ]
                        if len(indices_label):

                            # Propagate sulcus ID from seeds to vertices
                            # with a given shared label:
                            seg_vs_prop = False
                            if seg_vs_prop:
                                indices_seeds = []
                                for seed in [
                                        x for x in np.unique(seeds)
                                        if x != background_value
                                ]:
                                    indices_seeds.append([
                                        i for i, x in enumerate(seeds)
                                        if x == seed
                                    ])

                                sulci2 = segment_regions(
                                    indices_label, neighbor_lists, 50,
                                    indices_seeds, False, True, labels, [], [],
                                    '', background_value, verbose)
                            else:
                                label_array = background_value * \
                                              np.ones(npoints)
                                label_array[indices_label] = 1
                                sulci2 = propagate(
                                    points,
                                    faces,
                                    label_array,
                                    seeds,
                                    sulci,
                                    max_iters=10000,
                                    tol=0.001,
                                    sigma=5,
                                    background_value=background_value,
                                    verbose=verbose)
                            sulci[sulci2 != background_value] = \
                                sulci2[sulci2 != background_value]

    sulcus_numbers = [
        int(x) for x in np.unique(sulci) if x != background_value
    ]
    n_sulci = len(sulcus_numbers)

    # ------------------------------------------------------------------------
    # Print statements
    # ------------------------------------------------------------------------
    if verbose:
        if n_sulci == 1:
            sulcus_str = 'sulcus'
        else:
            sulcus_str = 'sulci'
        if n_folds == 1:
            folds_str = 'fold'
        else:
            folds_str = 'folds'
        print("Extracted {0} {1} from {2} {3} ({4:.1f}s):".format(
            n_sulci, sulcus_str, n_folds, folds_str,
            time() - t0))
        if sulcus_names:
            for sulcus_number in sulcus_numbers:
                print("  {0}: {1}".format(sulcus_number,
                                          sulcus_names[sulcus_number]))
        elif sulcus_numbers:
            print("  " + ", ".join([str(x) for x in sulcus_numbers]))

        unresolved = [
            i for i in range(len(pair_lists)) if i not in sulcus_numbers
        ]
        if unresolved:
            if len(unresolved) == 1:
                print("The following sulcus is unaccounted for:")
            else:
                print("The following {0} sulci are unaccounted for:".format(
                    len(unresolved)))
            if sulcus_names:
                for sulcus_number in unresolved:
                    print("  {0}: {1}".format(sulcus_number,
                                              sulcus_names[sulcus_number]))
            else:
                print("  " + ", ".join([str(x) for x in unresolved]))

    # ------------------------------------------------------------------------
    # Return sulci, number of sulci, and file name
    # ------------------------------------------------------------------------
    sulci = [int(x) for x in sulci]

    sulci_file = os.path.join(os.getcwd(), 'sulci.vtk')
    rewrite_scalars(labels_file, sulci_file, sulci, 'sulci', [],
                    background_value)

    if not os.path.exists(sulci_file):
        raise IOError(sulci_file + " not found")

    return sulci, n_sulci, sulci_file
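
# A minimal sketch (not mindboggle's segment_regions()) of the seed-based
# region growing used above: a sulcus ID spreads by breadth-first search from
# label-boundary seed vertices through a restricted candidate set, which is
# what keeps each assigned region contiguous. All names here are illustrative.
import numpy as np
from collections import deque

def grow_from_seeds(seed_indices, region_id, nvertices, neighbor_lists,
                    candidate_indices, background_value=-1):
    """Spread region_id from seeds through neighboring candidate vertices."""
    out = background_value * np.ones(nvertices)
    candidates = set(candidate_indices)
    queue = deque(seed_indices)
    for i in seed_indices:
        out[i] = region_id
    while queue:
        i = queue.popleft()
        for j in neighbor_lists[i]:
            if j in candidates and out[j] == background_value:
                out[j] = region_id  # reached via a neighbor: contiguous
                queue.append(j)
    return out

# Toy mesh of 5 vertices in a line; grow sulcus ID 3 from seed vertex 0
# into candidate vertices {1, 2}:
neighbors = [[1], [0, 2], [1, 3], [2, 4], [3]]
print(grow_from_seeds([0], 3, 5, neighbors, [1, 2]))
# vertices 0-2 get ID 3; vertices 3-4 stay at -1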
Example #29
0
def extract_subfolds(depth_file,
                     folds,
                     min_size=10,
                     depth_factor=0.25,
                     depth_ratio=0.1,
                     tolerance=0.01,
                     save_file=False):
    """
    Use depth to segment folds into subfolds in a triangular surface mesh.

    Note ::

        The function extract_sulci() performs about the same whether folds
        or subfolds are used as input.  The latter leads to some loss of
        small subfolds and possibly holes for small subfolds in the middle
        of other subfolds.

    Note about the watershed() function:
    The watershed() function grows segments individually from deep seed
    vertices, repeating segmentation from the resulting seeds until each
    seed's segment touches a boundary. The segment() function fills in the
    rest. Finally, segments are joined if their seeds are too close to each
    other.
    Despite these precautions, the order of seed selection in segment() could
    possibly influence the resulting borders between adjoining segments.
    [The propagate() function is slower and insensitive to depth,
     but is not biased by seed order.]

    Parameters
    ----------
    depth_file : string
        surface mesh file in VTK format with faces and depth scalar values
    folds : list of integers
        fold numbers for all vertices (-1 for non-fold vertices)
    min_size : integer
        minimum number of vertices for a subfold
    depth_factor : float
        watershed() depth_factor:
        factor to determine whether to merge two neighboring watershed catchment
        basins -- they are merged if the Euclidean distance between their basin
        seeds is less than this fraction of the maximum Euclidean distance
        between points having minimum and maximum depths
    depth_ratio : float
        watershed() depth_ratio:
        the minimum fraction of depth for a neighboring shallower
        watershed catchment basin (otherwise merged with the deeper basin)
    tolerance : float
        watershed() tolerance:
        tolerance for detecting differences in depth between vertices
    save_file : Boolean
        save output VTK file?

    Returns
    -------
    subfolds : list of integers
        fold numbers for all vertices (-1 for non-fold vertices)
    n_subfolds :  int
        number of subfolds
    subfolds_file : string (if save_file)
        name of output VTK file with fold IDs (-1 for non-fold vertices)

    Examples
    --------
    >>> import os
    >>> from mindboggle.mio.vtks import read_scalars, rewrite_scalars
    >>> from mindboggle.guts.mesh import find_neighbors_from_file
    >>> from mindboggle.features.folds import extract_subfolds
    >>> from mindboggle.mio.plots import plot_surfaces
    >>> path = os.environ['MINDBOGGLE_DATA']
    >>> depth_file = os.path.join(path, 'arno', 'shapes', 'lh.pial.travel_depth.vtk')
    >>> folds_file = os.path.join(path, 'arno', 'features', 'folds.vtk')
    >>> folds, name = read_scalars(folds_file)
    >>> min_size = 10
    >>> depth_factor = 0.5
    >>> depth_ratio = 0.1
    >>> tolerance = 0.01
    >>> #
    >>> subfolds, n_subfolds, subfolds_file = extract_subfolds(depth_file,
    ...     folds, min_size, depth_factor, depth_ratio, tolerance, True)
    >>> #
    >>> # View:
    >>> rewrite_scalars(depth_file, 'subfolds.vtk', subfolds, 'subfolds', subfolds)
    >>> plot_surfaces('subfolds.vtk')

    """
    import os
    import numpy as np
    from time import time
    from mindboggle.mio.vtks import rewrite_scalars, read_vtk
    from mindboggle.guts.mesh import find_neighbors
    from mindboggle.guts.segment import segment, propagate, watershed

    print("Segment folds into subfolds")
    t0 = time()

    #-------------------------------------------------------------------------
    # Load depth values for all vertices
    #-------------------------------------------------------------------------
    points, indices, lines, faces, depths, scalar_names, npoints, \
        input_vtk = read_vtk(depth_file, return_first=True, return_array=True)

    #-------------------------------------------------------------------------
    # Find neighbors for each vertex
    #-------------------------------------------------------------------------
    neighbor_lists = find_neighbors(faces, npoints)

    #-------------------------------------------------------------------------
    # Segment folds into "watershed basins"
    #-------------------------------------------------------------------------
    indices_folds = [i for i, x in enumerate(folds) if x != -1]
    subfolds, seed_indices = watershed(depths,
                                       points,
                                       indices_folds,
                                       neighbor_lists,
                                       min_size,
                                       depth_factor=depth_factor,
                                       depth_ratio=depth_ratio,
                                       tolerance=tolerance,
                                       regrow=True)

    # Print statement
    n_subfolds = len([x for x in np.unique(subfolds) if x != -1])
    print('  ...Extracted {0} subfolds ({1:.2f} seconds)'.format(
        n_subfolds,
        time() - t0))

    #-------------------------------------------------------------------------
    # Return subfolds, number of subfolds, file name
    #-------------------------------------------------------------------------
    if save_file:
        subfolds_file = os.path.join(os.getcwd(), 'subfolds.vtk')
        rewrite_scalars(depth_file, subfolds_file, subfolds, 'subfolds',
                        subfolds)

        if not os.path.exists(subfolds_file):
            raise IOError(subfolds_file + " not found")

    else:
        subfolds_file = None

    return subfolds, n_subfolds, subfolds_file
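
# A toy stand-in for the watershed() call above (not mindboggle's
# implementation): visit vertices from deepest to shallowest; a vertex with
# no labeled neighbor starts a new basin (a local depth maximum), otherwise
# it joins an already-labeled neighbor's basin.
import numpy as np

def toy_watershed(depths, neighbor_lists, background_value=-1):
    basins = background_value * np.ones(len(depths), dtype=int)
    next_id = 0
    for i in np.argsort(depths)[::-1]:  # deepest first
        labeled = [basins[j] for j in neighbor_lists[i]
                   if basins[j] != background_value]
        if labeled:
            basins[i] = labeled[0]  # join an existing basin
        else:
            basins[i] = next_id  # local depth maximum: new basin
            next_id += 1
    return basins

depths = np.array([0.9, 0.5, 0.1, 0.6, 0.8])  # deep spots at vertices 0 and 4
neighbors = [[1], [0, 2], [1, 3], [2, 4], [3]]
print(toy_watershed(depths, neighbors))  # [0 0 0 1 1]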
Example #30
0
def write_face_vertex_averages(input_file, output_table='', area_file=''):
    """
    Make table of average vertex values per face
    (divided by face area if area_file provided).

    Parameters
    ----------
    input_file : string
        name of VTK file with scalars to average
    output_table :  string
        output table filename
    area_file :  string
        name of VTK file with surface area scalar values

    Returns
    -------
    output_table :  string
        output table filename

    Examples
    --------
    >>> from mindboggle.mio.tables import write_face_vertex_averages
    >>> from mindboggle.mio.fetch_data import prep_tests
    >>> urls, fetch_data = prep_tests()
    >>> input_file = fetch_data(urls['left_travel_depth'])
    >>> area_file = fetch_data(urls['left_area'])
    >>> output_table = ''
    >>> output_table = write_face_vertex_averages(input_file, output_table,
    ...                                           area_file)

    """
    import os
    import numpy as np
    import pandas as pd

    from mindboggle.mio.vtks import read_vtk, read_scalars

    points, indices, lines, faces, scalars, scalar_names, \
        npoints, input_vtk = read_vtk(input_file, True, True)
    if area_file:
        area_scalars, name = read_scalars(area_file, True, True)

    #---------------------------------------------------------------------
    # For each face, average vertex values:
    #---------------------------------------------------------------------
    columns = []
    for face in faces:
        values = []
        for index in face:
            if area_file:
                values.append(scalars[index] / area_scalars[index])
            else:
                values.append(scalars[index])
        columns.append(np.mean(values))

    #-----------------------------------------------------------------
    # Write to table:
    #-----------------------------------------------------------------
    if not output_table:
        output_table = os.path.join(os.getcwd(), 'average_face_values.csv')

    df = pd.DataFrame({'': columns})
    df.to_csv(output_table, index=False)

    if not os.path.exists(output_table):
        raise IOError(output_table + " not found")

    return output_table
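
# A hedged toy check of the per-face averaging above (numbers illustrative):
# each face's value is the mean of its vertices' scalars, optionally dividing
# each vertex scalar by its area first.
import numpy as np

faces = [[0, 1, 2], [1, 2, 3]]
scalars = np.array([3.0, 6.0, 9.0, 12.0])
areas = np.array([1.0, 2.0, 3.0, 4.0])

face_means = [float(np.mean([scalars[i] for i in face])) for face in faces]
print(face_means)  # [6.0, 9.0]

face_means_per_area = [float(np.mean([scalars[i] / areas[i] for i in face]))
                       for face in faces]
print(face_means_per_area)  # [3.0, 3.0]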
Example #31
0
def write_average_face_values_per_label(input_indices_vtk,
                    input_values_vtk='', area_file='',
                    output_stem='', exclude_values=[-1], background_value=-1):
    """
    Write out a separate VTK file for each integer
    in (the first) scalar list of an input VTK file.
    Optionally write the values drawn from a second VTK file.

    Parameters
    ----------
    input_indices_vtk : string
        path of the input VTK file that contains indices as scalars
    input_values_vtk : string
        path of the input VTK file that contains values as scalars
    area_file : string
        path of the input VTK file that contains surface area scalars
    output_stem : string
        path and stem of the output VTK file
    exclude_values : list or array
        values to exclude
    background_value : integer or float
        background value in output VTK files

    Examples
    --------
    >>> import os
    >>> from mindboggle.mio.tables import write_average_face_values_per_label
    >>> path = '/homedir/mindboggled'
    >>> input_indices_vtk = os.path.join(path, 'Twins-2-1', 'labels', 'left_cortical_surface', 'freesurfer_cortex_labels.vtk')
    >>> input_values_vtk = os.path.join(path, 'Twins-2-1', 'shapes', 'left_cortical_surface', 'freesurfer_thickness.vtk')
    >>> area_file = os.path.join(path, 'Twins-2-1', 'shapes', 'left_cortical_surface', 'area.vtk')
    >>> output_stem = 'labels_thickness'
    >>> exclude_values = [-1]
    >>> background_value = -1
    >>> #
    >>> write_average_face_values_per_label(input_indices_vtk,
    ...     input_values_vtk, area_file, output_stem, exclude_values,
    ...     background_value)
    >>> #
    >>> # View:
    >>> #example_vtk = os.path.join(os.getcwd(), output_stem + '0.vtk')
    >>> #from mindboggle.mio.plots import plot_surfaces
    >>> #plot_surfaces(example_vtk)

    """
    import os
    import numpy as np
    import pandas as pd

    from mindboggle.mio.vtks import read_scalars, read_vtk, write_vtk
    from mindboggle.guts.mesh import remove_faces

    # Load VTK file:
    points, indices, lines, faces, scalars, scalar_names, npoints, \
        input_vtk = read_vtk(input_indices_vtk, True, True)
    if area_file:
        area_scalars, name = read_scalars(area_file, True, True)
    print("Explode the scalar list in {0}".
          format(os.path.basename(input_indices_vtk)))
    if input_values_vtk != input_indices_vtk:
        values, name = read_scalars(input_values_vtk, True, True)
        print("Explode the scalar list of values in {0} "
              "with the scalar list of indices in {1}".
              format(os.path.basename(input_values_vtk),
                     os.path.basename(input_indices_vtk)))
    else:
        values = np.copy(scalars)

    # Loop through unique (non-excluded) scalar values:
    unique_scalars = [int(x) for x in np.unique(scalars)
                      if x not in exclude_values]
    for scalar in unique_scalars:

        # Create array and indices for scalar value:
        select_scalars = np.copy(scalars)
        select_scalars[scalars != scalar] = background_value
        scalar_indices = [i for i, x in enumerate(select_scalars)
                          if x == scalar]

        # Keep only faces whose vertices all have this scalar value
        # (the original passed every face vertex, so no face was removed):
        new_faces = remove_faces(faces, scalar_indices)
        print("  Scalar {0}: {1} vertices".format(scalar, len(scalar_indices)))

        #---------------------------------------------------------------------
        # For each face, average vertex values:
        #---------------------------------------------------------------------
        output_table = os.path.join(os.getcwd(),
                                    output_stem+str(scalar)+'.csv')
        columns = []
        for face in new_faces:
            face_values = []
            for index in face:
                if area_file:
                    face_values.append(values[index] / area_scalars[index])
                else:
                    face_values.append(values[index])
            columns.append(np.mean(face_values))

        #-----------------------------------------------------------------
        # Write to table:
        #-----------------------------------------------------------------
        df = pd.DataFrame({'': columns})
        df.to_csv(output_table, index=False)

        # Write VTK file with scalar value:
        #output_vtk = os.path.join(os.getcwd(), output_stem + str(scalar) + '.vtk')
        #write_vtk(output_vtk, points, indices, lines, new_faces,
        #          [select_values.tolist()], [output_scalar_name])

        if not os.path.exists(output_table):
            raise IOError(output_table + " not found")
Example #32
0
def write_average_face_values_per_label(input_indices_vtk,
        input_values_vtk='', area_file='', output_stem='',
        exclude_values=[-1], background_value=-1, verbose=False):
    """
    Write out a separate VTK file for each integer
    in (the first) scalar list of an input VTK file.
    Optionally write the values drawn from a second VTK file.

    Parameters
    ----------
    input_indices_vtk : string
        path of the input VTK file that contains indices as scalars
    input_values_vtk : string
        path of the input VTK file that contains values as scalars
    area_file : string
        path of the input VTK file that contains surface area scalars
    output_stem : string
        path and stem of the output VTK file
    exclude_values : list or array
        values to exclude
    background_value : integer or float
        background value in output VTK files
    verbose : bool
        print statements?

    Examples
    --------
    >>> import os
    >>> from mindboggle.mio.tables import write_average_face_values_per_label
    >>> from mindboggle.mio.fetch_data import prep_tests
    >>> urls, fetch_data = prep_tests()
    >>> input_indices_vtk = fetch_data(urls['left_freesurfer_labels'])
    >>> input_values_vtk = fetch_data(urls['left_mean_curvature'])
    >>> area_file = fetch_data(urls['left_area'])
    >>> output_stem = 'labels_thickness'
    >>> exclude_values = [-1]
    >>> background_value = -1
    >>> verbose = False
    >>> write_average_face_values_per_label(input_indices_vtk,
    ...     input_values_vtk, area_file, output_stem, exclude_values,
    ...     background_value, verbose)

    View vtk file (skip test):

    >>> from mindboggle.mio.plots import plot_surfaces
    >>> example_vtk = os.path.join(os.getcwd(), output_stem + '0.vtk')
    >>> plot_surfaces(example_vtk) # doctest: +SKIP

    """
    import os
    import numpy as np
    import pandas as pd

    from mindboggle.mio.vtks import read_scalars, read_vtk, write_vtk
    from mindboggle.guts.mesh import keep_faces

    # Load VTK file:
    points, indices, lines, faces, scalars, scalar_names, npoints, \
        input_vtk = read_vtk(input_indices_vtk, True, True)
    if area_file:
        area_scalars, name = read_scalars(area_file, True, True)
    if verbose:
        print("Explode the scalar list in {0}".
            format(os.path.basename(input_indices_vtk)))
    if input_values_vtk != input_indices_vtk:
        values, name = read_scalars(input_values_vtk, True, True)
        if verbose:
            print("Explode the scalar list of values in {0} "
                  "with the scalar list of indices in {1}".
                  format(os.path.basename(input_values_vtk),
                         os.path.basename(input_indices_vtk)))
    else:
        values = np.copy(scalars)

    # Loop through unique (non-excluded) scalar values:
    unique_scalars = [int(x) for x in np.unique(scalars)
                      if x not in exclude_values]
    for scalar in unique_scalars:

        # Create array and indices for scalar value:
        select_scalars = np.copy(scalars)
        select_scalars[scalars != scalar] = background_value
        scalar_indices = [i for i, x in enumerate(select_scalars)
                          if x == scalar]

        # Keep only faces whose vertices all have this scalar value
        # (the original passed every face vertex, so no face was removed):
        new_faces = keep_faces(faces, scalar_indices)
        if verbose:
            print("  Scalar {0}: {1} vertices".format(scalar,
                                                      len(scalar_indices)))

        #---------------------------------------------------------------------
        # For each face, average vertex values:
        #---------------------------------------------------------------------
        output_table = os.path.join(os.getcwd(),
                                    output_stem+str(scalar)+'.csv')
        columns = []
        for face in new_faces:
            face_values = []
            for index in face:
                if area_file:
                    face_values.append(values[index] / area_scalars[index])
                else:
                    face_values.append(values[index])
            columns.append(np.mean(face_values))

        #-----------------------------------------------------------------
        # Write to table:
        #-----------------------------------------------------------------
        df = pd.DataFrame({'': columns})
        df.to_csv(output_table, index=False)
        if not os.path.exists(output_table):
            raise IOError(output_table + " not found")
Example #33
0
def spectrum_from_file(vtk_file,
                       spectrum_size=10,
                       exclude_labels=[-1],
                       normalization=None,
                       area_file=''):
    """
    Compute Laplace-Beltrami spectrum of a 3D shape in a VTK file.

    Parameters
    ----------
    vtk_file : string
        the input vtk file
    spectrum_size : integer
        number of eigenvalues to be computed (the length of the spectrum)
    exclude_labels : list of integers
        labels to be excluded
    normalization : string
        the method used to normalize eigenvalues ('area' or None)
        if "area", use area of the 2D structure as in Reuter et al. 2006
    area_file :  string
        name of VTK file with surface area scalar values

    Returns
    -------
    spectrum : list of floats
        first spectrum_size of Laplace-Beltrami spectrum

    Examples
    --------
    >>> # Spectrum for entire left hemisphere of Twins-2-1:
    >>> import os
    >>> from mindboggle.shapes.laplace_beltrami import spectrum_from_file
    >>> path = os.environ['MINDBOGGLE_DATA']
    >>> vtk_file = os.path.join(path, 'arno', 'labels', 'lh.labels.DKT25.manual.vtk')
    >>> spectrum_from_file(vtk_file, spectrum_size=6)
    [4.829758648026223e-18,
     0.00012841730024671977,
     0.0002715181572272744,
     0.00032051508471594173,
     0.000470162807048644,
     0.0005768904023010327]
    >>> # Spectrum for Twins-2-1 left postcentral pial surface (22)
    >>> # (after running explode_scalars() with reindex=True):
    >>> import os
    >>> from mindboggle.shapes.laplace_beltrami import spectrum_from_file
    >>> path = os.environ['MINDBOGGLE_DATA']
    >>> vtk_file = os.path.join(path, 'arno', 'labels', 'label22.vtk')
    >>> spectrum_from_file(vtk_file, spectrum_size=6)
    [6.3469513010430304e-18,
     0.0005178862383467463,
     0.0017434911095630772,
     0.003667561767487686,
     0.005429017880363784,
     0.006309346984678924]
    >>> # Loop through all Mindboggle-101 brains (header is a directory path):
    >>> from mindboggle.shapes.laplace_beltrami import spectrum_from_file
    >>> for hemidir in os.listdir(header):
    ...     print(hemidir)
    ...     sulci_file = os.path.join(header, hemidir, "sulci.vtk")
    ...     spectrum = spectrum_from_file(sulci_file)

    """
    from mindboggle.mio.vtks import read_vtk, read_scalars
    from mindboggle.shapes.laplace_beltrami import spectrum_of_largest

    faces, u1, u2, points, u4, u5, u6, u7 = read_vtk(vtk_file)

    # Area file:
    if area_file:
        areas, u1 = read_scalars(area_file)
    else:
        areas = None

    spectrum = spectrum_of_largest(points, faces, spectrum_size,
                                   exclude_labels, normalization, areas)

    return spectrum
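
# A sketch of the 'area' normalization mentioned above (Reuter et al. 2006):
# Laplace-Beltrami eigenvalues scale inversely with surface area, so
# multiplying each eigenvalue by the total area gives scale-invariant values.
# The spectrum and total_area below are illustrative numbers, not real output.
spectrum = [0.0, 0.00052, 0.00174]
total_area = 1000.0  # e.g., the sum of per-vertex areas from an area_file
normalized = [eigenvalue * total_area for eigenvalue in spectrum]
print(normalized)  # approximately [0.0, 0.52, 1.74]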
Example #34
0
def zernike_moments_per_label(vtk_file, order=10, exclude_labels=[-1],
                              scale_input=True, decimate_fraction=0,
                              decimate_smooth=25, verbose=False):
    """
    Compute the Zernike moments per labeled region in a file.

    Optionally decimate the input mesh.

    Parameters
    ----------
    vtk_file : string
        name of VTK surface mesh file containing index scalars (labels)
    order : integer
        number of moments to compute
    exclude_labels : list of integers
        labels to be excluded
    scale_input : bool
        translate and scale each object so it is bounded by a unit sphere?
        (this is the expected input to zernike_moments())
    decimate_fraction : float
        fraction of mesh faces to remove for decimation (1 for no decimation)
    decimate_smooth : integer
        number of smoothing steps for decimation
    verbose : bool
        print statements?

    Returns
    -------
    descriptors_lists : list of lists of floats
        Zernike descriptors per label
    label_list : list of integers
        list of unique labels for which moments are computed

    Examples
    --------
    >>> # Zernike moments per label of a FreeSurfer-labeled left cortex.
    >>> # Uncomment "if label==22:" below to run example
    >>> # for left postcentral (22) pial surface:
    >>> import numpy as np
    >>> from mindboggle.shapes.zernike.zernike import zernike_moments_per_label
    >>> from mindboggle.mio.fetch_data import prep_tests
    >>> urls, fetch_data = prep_tests()
    >>> vtk_file = fetch_data(urls['left_freesurfer_labels'])
    >>> order = 3
    >>> exclude_labels = [-1]
    >>> scale_input = True
    >>> verbose = False
    >>> descriptors_lists, label_list = zernike_moments_per_label(vtk_file,
    ...     order, exclude_labels, scale_input, verbose)
    >>> label_list[0:10]
    [999, 1001, 1002, 1003, 1005, 1006, 1007, 1008, 1009, 1010]
    >>> print(np.array_str(np.array(descriptors_lists[0]),
    ...                    precision=5, suppress_small=True))
    [ 0.00587  0.01143  0.0031   0.00881  0.00107  0.00041]
    >>> print(np.array_str(np.array(descriptors_lists[1]),
    ...                    precision=5, suppress_small=True))
    [ 0.00004  0.00009  0.00003  0.00009  0.00002  0.00001]
    >>> print(np.array_str(np.array(descriptors_lists[2]),
    ...                    precision=5, suppress_small=True))
    [ 0.00144  0.00232  0.00128  0.00304  0.00084  0.00051]
    >>> print(np.array_str(np.array(descriptors_lists[3]),
    ...                    precision=5, suppress_small=True))
    [ 0.00393  0.006    0.00371  0.00852  0.00251  0.00153]
    >>> print(np.array_str(np.array(descriptors_lists[4]),
    ...                    precision=5, suppress_small=True))
    [ 0.00043  0.0003   0.00095  0.00051  0.00115  0.00116]

    """
    import numpy as np
    from mindboggle.mio.vtks import read_vtk
    from mindboggle.guts.mesh import keep_faces
    from mindboggle.shapes.zernike.zernike import zernike_moments

    min_points_faces = 4

    #-------------------------------------------------------------------------
    # Read VTK surface mesh file:
    #-------------------------------------------------------------------------
    points, indices, lines, faces, labels, scalar_names, npoints, \
            input_vtk = read_vtk(vtk_file)

    #-------------------------------------------------------------------------
    # Loop through labeled regions:
    #-------------------------------------------------------------------------
    ulabels = [x for x in np.unique(labels) if x not in exclude_labels]
    label_list = []
    descriptors_lists = []
    for label in ulabels:
        #if label == 1022:  # 22:
        #    print("DEBUG: COMPUTE FOR ONLY ONE LABEL")

        #---------------------------------------------------------------------
        # Determine the indices per label:
        #---------------------------------------------------------------------
        Ilabel = [i for i, x in enumerate(labels) if x == label]
        if verbose:
            print('  {0} vertices for label {1}'.format(len(Ilabel), label))

        if len(Ilabel) > min_points_faces:

            #-----------------------------------------------------------------
            # Remove background faces:
            #-----------------------------------------------------------------
            pick_faces = keep_faces(faces, Ilabel)
            if len(pick_faces) > min_points_faces:

                #-------------------------------------------------------------
                # Compute Zernike moments for the label:
                #-------------------------------------------------------------
                descriptors = zernike_moments(points, pick_faces,
                                              order, scale_input,
                                              decimate_fraction,
                                              decimate_smooth, verbose)

                #-------------------------------------------------------------
                # Append to a list of lists of spectra:
                #-------------------------------------------------------------
                descriptors_lists.append(descriptors)
                label_list.append(label)

    return descriptors_lists, label_list
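
# A sketch of the scale_input convention noted above: translate the points to
# their centroid and scale so the object fits inside a unit sphere. This is
# an illustration of the idea, not the code inside zernike_moments().
import numpy as np

def scale_to_unit_sphere(points):
    points = np.asarray(points, dtype=float)
    centered = points - points.mean(axis=0)
    radius = np.linalg.norm(centered, axis=1).max()
    return centered / radius if radius > 0 else centered

pts = [[0.0, 0.0, 0.0], [2.0, 0.0, 0.0], [0.0, 2.0, 0.0]]
scaled = scale_to_unit_sphere(pts)
print(np.linalg.norm(scaled, axis=1).max())  # 1.0 (up to floating point)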
Example #36
0
def label_adjacency_matrix(label_file, ignore_values=[-1, 999], add_value=0,
                           save_table=True, output_format='csv',
                           verbose=True):
    """
    Extract surface or volume label boundaries, find unique label pairs,
    and write adjacency matrix (useful for constructing a colormap).

    Each row of the (upper triangular) adjacency matrix corresponds to an
    index to a unique label, where each column has a 1 if the label indexed
    by that column is adjacent to the label indexed by the row.

    Parameters
    ----------
    label_file : string
        path to VTK surface file or nibabel-readable volume file with labels
    ignore_values : list of integers
        labels to ignore
    add_value : integer
        value to add to labels
    save_table : Boolean
        output table file?
    output_format : string
        format of adjacency table file name (currently only 'csv')
    verbose : Boolean
        print to stdout?

    Returns
    -------
    labels : list
        label numbers
    matrix : pandas DataFrame
        adjacency matrix
    output_table : string
        adjacency table file name

    Examples
    --------
    >>> from mindboggle.mio.colors import label_adjacency_matrix
    >>> from mindboggle.mio.fetch_data import prep_tests
    >>> urls, fetch_data = prep_tests()
    >>> ignore_values = [-1, 0]
    >>> add_value = 0
    >>> save_table = False
    >>> output_format = 'csv'
    >>> verbose = False
    >>> label_file = fetch_data(urls['left_manual_labels'], '', '.vtk')
    >>> labels, matrix, output_table = label_adjacency_matrix(label_file,
    ...     ignore_values, add_value, save_table, output_format, verbose)
    >>> matrix.lookup([20,21,22,23,24,25,26,27,28,29],
    ...               [35,35,35,35,35,35,35,35,35,35])
    array([ 0.,  1.,  0.,  0.,  0.,  0.,  0.,  1.,  1.,  1.])

    >>> label_file = fetch_data(urls['freesurfer_labels'], '', '.nii.gz')
    >>> labels, matrix, output_table = label_adjacency_matrix(label_file,
    ...     ignore_values, add_value, save_table, output_format, verbose)
    >>> matrix.lookup([4,5,7,8,10,11,12,13,14,15], [4,4,4,4,4,4,4,4,4,4])
    array([ 1.,  1.,  0.,  0.,  0.,  1.,  0.,  0.,  1.,  0.])

    """
    import numpy as np
    import pandas as pd
    from nibabel import load
    from scipy import ndimage

    from mindboggle.guts.mesh import find_neighbors
    from mindboggle.guts.segment import extract_borders
    from mindboggle.mio.vtks import read_vtk

    # Use Mindboggle's extract_borders() function for surface VTK files:
    if label_file.endswith('.vtk'):
        f1,f2,f3, faces, labels, f4, npoints, f5 = read_vtk(label_file,
                                                            True, True)
        neighbor_lists = find_neighbors(faces, npoints)
        return_label_pairs = True
        indices_borders, label_pairs, f1 = extract_borders(list(range(npoints)),
            labels, neighbor_lists, ignore_values, return_label_pairs)

        output_table = 'adjacent_surface_labels.' + output_format

    # Use scipy to dilate volume files to find neighboring labels:
    elif label_file.endswith('.nii.gz'):

        L = load(label_file).get_data()
        unique_volume_labels = np.unique(L)

        label_pairs = []
        for label in unique_volume_labels:

            if label not in ignore_values:

                B = L * np.logical_xor(ndimage.binary_dilation(L==int(label)),
                                       (L==int(label)))
                neighbor_labels = np.unique(np.ravel(B))

                for neigh in neighbor_labels:
                    if neigh > 0 and neigh in unique_volume_labels:
                    #        and neigh%2==(int(label)%2):
                        label_pairs.append([int(label), int(neigh)])

        output_table = 'adjacent_volume_labels.' + output_format

    else:
        raise IOError("Use appropriate input file type.")

    # Find unique pairs (or first two of each list):
    pairs = []
    for pair in label_pairs:
        new_pair = [int(pair[0]) + add_value,
                    int(pair[1]) + add_value]
        if new_pair not in pairs:
            pairs.append(new_pair)

    # Write adjacency matrix:
    unique_labels = np.unique(pairs)
    nlabels = np.size(unique_labels)
    matrix = np.zeros((nlabels, nlabels))
    for pair in pairs:
        index1 = [i for i, x in enumerate(unique_labels) if x == pair[0]]
        index2 = [i for i, x in enumerate(unique_labels) if x == pair[1]]
        matrix[index1, index2] = 1

    df1 = pd.DataFrame({'ID': unique_labels}, index=None)
    df2 = pd.DataFrame(matrix, index=None)
    df2.columns = unique_labels
    matrix = pd.concat([df1, df2], axis=1)

    if save_table:
        if output_format == 'csv':
            matrix.to_csv(output_table, index=False)
            if verbose:
                print("Adjacency matrix saved to {0}".format(output_table))
        else:
            raise IOError("Set appropriate output file format.")
    else:
        output_table = None

    labels = list(unique_labels)

    return labels, matrix, output_table
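
# A toy illustration of the volume branch above: dilating one label's mask
# and intersecting with its complement exposes the neighboring labels.
import numpy as np
from scipy import ndimage

L = np.array([[1, 1, 2],
              [1, 3, 2],
              [3, 3, 2]])
label = 1
B = L * np.logical_xor(ndimage.binary_dilation(L == label), L == label)
print(sorted(int(x) for x in np.unique(B) if x > 0))  # [2, 3]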
Example #37
0
File: fundi.py Project: tsball/mindboggle
def extract_fundi(folds,
                  curv_file,
                  depth_file,
                  min_separation=10,
                  erode_ratio=0.1,
                  erode_min_size=1,
                  save_file=False,
                  output_file='',
                  background_value=-1,
                  verbose=False):
    """
    Extract fundi from folds.

    A fundus is a branching curve that runs along the deepest and most highly
    curved portions of a fold. This function extracts one fundus from each
    fold by finding the deepest vertices inside the fold, finding endpoints
    along the edge of the fold, and connecting the former to the latter with
    tracks that run along deep and curved paths (through vertices with high
    values of travel depth multiplied by curvature), and a final filtration
    step.

    The deepest vertices are those with values at least two median
    absolute deviations above the median (non-zero) value, with the higher
    value chosen if two of the vertices are within (a default of) 10 edges
    from each other (to reduce the number of possible fundus paths as well
    as computation time).

    To find the endpoints, the find_outer_endpoints function propagates
    multiple tracks from seed vertices at median depth in the fold through
    concentric rings toward the fold’s edge, selecting maximal values within
    each ring, and terminating at candidate endpoints. The final endpoints
    are those candidates at the end of tracks that have a high median value,
    with the higher value chosen if two candidate endpoints are within
    (a default of) 10 edges from each other (otherwise, the resulting fundi
    can have spurious branching at the fold’s edge).

    The connect_points_erosion function connects the deepest fold vertices
    to the endpoints with a skeleton of 1-vertex-thick curves by erosion.
    It erodes by iteratively removing simple topological points and endpoints
    in order of lowest to highest values, where a simple topological point
    is a vertex that when added to or removed from an object on a surface
    mesh (such as a fundus curve) does not alter the object's topology.

    Steps ::
        1. Find fundus endpoints (outer anchors) with find_outer_endpoints().
        2. Include inner anchor points.
        3. Connect anchor points using connect_points_erosion();
           inner anchors are removed if they result in endpoints.

    Note ::
        Follow this with segment_by_region() to segment fundi by sulci.

    Parameters
    ----------
    folds : numpy array or list of integers
        fold number for each vertex
    curv_file :  string
        surface mesh file in VTK format with mean curvature values
    depth_file :  string
        surface mesh file in VTK format with rescaled depth values
    min_separation : integer
        minimum number of edges between inner/outer anchor points
    erode_ratio : float
        fraction of indices to test for removal at each iteration
        in connect_points_erosion()
    erode_min_size : integer
        minimum number of vertices when eroding in connect_points_erosion()
    save_file : bool
        save output VTK file?
    output_file : string
        output VTK file
    background_value : integer or float
        background value
    verbose : bool
        print statements?

    Returns
    -------
    fundus_per_fold : list of integers
        fundus numbers for all vertices, labeled by fold
        (-1 for non-fundus vertices)
    n_fundi_in_folds :  integer
        number of fundi
    fundus_per_fold_file : string (if save_file)
        output VTK file with fundus numbers (-1 for non-fundus vertices)

    Examples
    --------
    >>> # Extract fundus from one or more folds:
    >>> import numpy as np
    >>> from mindboggle.mio.vtks import read_scalars
    >>> from mindboggle.features.fundi import extract_fundi
    >>> from mindboggle.mio.fetch_data import prep_tests
    >>> urls, fetch_data = prep_tests()
    >>> curv_file = fetch_data(urls['left_mean_curvature'], '', '.vtk')
    >>> depth_file = fetch_data(urls['left_travel_depth'], '', '.vtk')
    >>> folds_file = fetch_data(urls['left_folds'], '', '.vtk')
    >>> folds, name = read_scalars(folds_file, True, True)
    >>> # Limit number of folds to speed up the test:
    >>> limit_folds = True
    >>> if limit_folds:
    ...     fold_numbers = [4] #[4, 6]
    ...     i0 = [i for i,x in enumerate(folds) if x not in fold_numbers]
    ...     folds[i0] = -1
    >>> min_separation = 10
    >>> erode_ratio = 0.10
    >>> erode_min_size = 10
    >>> save_file = True
    >>> output_file = 'extract_fundi_fold4.vtk'
    >>> background_value = -1
    >>> verbose = False
    >>> o1, o2, fundus_per_fold_file = extract_fundi(folds, curv_file,
    ...     depth_file, min_separation, erode_ratio, erode_min_size,
    ...     save_file, output_file, background_value, verbose)
    >>> lens = [len([x for x in o1 if x == y])
    ...         for y in np.unique(o1) if y != background_value]
    >>> lens[0:10] # [66, 2914, 100, 363, 73, 331, 59, 30, 1, 14] # (if not limit_folds)
    [73]

    View result without background (skip test):

    >>> from mindboggle.mio.plots import plot_surfaces # doctest: +SKIP
    >>> from mindboggle.mio.vtks import rewrite_scalars # doctest: +SKIP
    >>> rewrite_scalars(fundus_per_fold_file,
    ...                 'extract_fundi_fold4_no_background.vtk', o1,
    ...                 'fundus_per_fold', folds) # doctest: +SKIP
    >>> plot_surfaces('extract_fundi_fold4_no_background.vtk') # doctest: +SKIP

    """

    # Extract a skeleton to connect endpoints in a fold:
    import os
    import numpy as np
    from time import time

    from mindboggle.mio.vtks import read_scalars, read_vtk, rewrite_scalars
    from mindboggle.guts.compute import median_abs_dev
    from mindboggle.guts.paths import find_max_values
    from mindboggle.guts.mesh import find_neighbors_from_file
    #from mindboggle.guts.mesh import find_complete_faces
    from mindboggle.guts.paths import find_outer_endpoints
    from mindboggle.guts.paths import connect_points_erosion

    if isinstance(folds, list):
        folds = np.array(folds)

    # Load values, inner anchor threshold, and neighbors:
    if os.path.isfile(curv_file):
        points, indices, lines, faces, curvs, scalar_names, npoints, \
            input_vtk = read_vtk(curv_file, True, True)
    else:
        raise IOError("{0} doesn't exist!".format(curv_file))
    if os.path.isfile(depth_file):
        depths, name = read_scalars(depth_file, True, True)
    else:
        raise IOError("{0} doesn't exist!".format(depth_file))
    values = curvs * depths
    values0 = [x for x in values if x > 0]
    thr = np.median(values0) + 2 * median_abs_dev(values0)
    neighbor_lists = find_neighbors_from_file(curv_file)

    # ------------------------------------------------------------------------
    # Loop through folds:
    # ------------------------------------------------------------------------
    t1 = time()
    skeletons = []
    unique_fold_IDs = [x for x in np.unique(folds) if x != background_value]

    if verbose:
        if len(unique_fold_IDs) == 1:
            print("Extract a fundus from 1 fold...")
        else:
            print("Extract a fundus from each of {0} folds...".format(
                len(unique_fold_IDs)))

    for fold_ID in unique_fold_IDs:
        indices_fold = [i for i, x in enumerate(folds) if x == fold_ID]
        if indices_fold:
            if verbose:
                print('  Fold {0}:'.format(int(fold_ID)))

            # ----------------------------------------------------------------
            # Find outer anchor points on the boundary of the surface region,
            # to serve as fundus endpoints:
            # ----------------------------------------------------------------
            outer_anchors, tracks = find_outer_endpoints(
                indices_fold, neighbor_lists, values, depths, min_separation,
                background_value, verbose)

            # ----------------------------------------------------------------
            # Find inner anchor points:
            # ----------------------------------------------------------------
            inner_anchors = find_max_values(points, values, min_separation,
                                            thr)

            # ----------------------------------------------------------------
            # Connect anchor points to create skeleton:
            # ----------------------------------------------------------------
            B = background_value * np.ones(npoints)
            B[indices_fold] = 1
            skeleton = connect_points_erosion(B, neighbor_lists, outer_anchors,
                                              inner_anchors, values,
                                              erode_ratio, erode_min_size, [],
                                              '', background_value, verbose)
            if skeleton:
                skeletons.extend(skeleton)

            ## ---------------------------------------------------------------
            ## Remove fundus vertices if they make complete triangle faces:
            ## ---------------------------------------------------------------
            #Iremove = find_complete_faces(skeletons, faces)
            #if Iremove:
            #    skeletons = list(frozenset(skeletons).difference(Iremove))

    indices_skel = [x for x in skeletons if folds[x] != background_value]
    fundus_per_fold = background_value * np.ones(npoints)
    fundus_per_fold[indices_skel] = folds[indices_skel]
    n_fundi_in_folds = len(
        [x for x in np.unique(fundus_per_fold) if x != background_value])
    if n_fundi_in_folds == 1:
        sdum = 'fold fundus'
    else:
        sdum = 'fold fundi'
    if verbose:
        print('  ...Extracted {0} {1}; {2} total ({3:.2f} seconds)'.format(
            n_fundi_in_folds, sdum, n_fundi_in_folds,
            time() - t1))

    # ------------------------------------------------------------------------
    # Return fundi, number of fundi, and file name:
    # ------------------------------------------------------------------------
    fundus_per_fold_file = None
    if n_fundi_in_folds > 0:
        fundus_per_fold = [int(x) for x in fundus_per_fold]
        if save_file:
            if output_file:
                fundus_per_fold_file = output_file
            else:
                fundus_per_fold_file = os.path.join(os.getcwd(),
                                                    'fundus_per_fold.vtk')
            rewrite_scalars(curv_file, fundus_per_fold_file, fundus_per_fold,
                            'fundi', [], background_value)
            if not os.path.exists(fundus_per_fold_file):
                raise IOError(fundus_per_fold_file + " not found")

    return fundus_per_fold, n_fundi_in_folds, fundus_per_fold_file
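
# A sketch of the 'deepest vertices' threshold described above: the median of
# the positive values plus two median absolute deviations. np.median is used
# here as a plain stand-in for mindboggle's median_abs_dev().
import numpy as np

values = np.array([0.0, 1.0, 2.0, 3.0, 4.0, 10.0])
positive = values[values > 0]  # [1, 2, 3, 4, 10]
mad = np.median(np.abs(positive - np.median(positive)))  # 1.0
threshold = np.median(positive) + 2 * mad  # 3.0 + 2.0 = 5.0
print(np.where(values > threshold)[0])  # [5]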
Example #38
0
File: fundi.py Project: liob/mindboggle
def extract_fundi(folds, curv_file, depth_file, min_separation=10,
                  erode_ratio=0.1, erode_min_size=1, save_file=False,
                  output_file='', background_value=-1, verbose=False):
    """
    Extract fundi from folds.

    A fundus is a branching curve that runs along the deepest and most highly
    curved portions of a fold. This function extracts one fundus from each
    fold by finding the deepest vertices inside the fold, finding endpoints
    along the edge of the fold, and connecting the former to the latter with
    tracks that run along deep and curved paths (through vertices with high
    values of travel depth multiplied by curvature), and a final filtration
    step.

    The deepest vertices are those with values at least two median
    absolute deviations above the median (non-zero) value, with the higher
    value chosen if two of the vertices are within (a default of) 10 edges
    of each other (to reduce the number of possible fundus paths as well
    as computation time). A NumPy sketch of this threshold appears after
    the function.

    To find the endpoints, the find_outer_endpoints function propagates
    multiple tracks from seed vertices at median depth in the fold through
    concentric rings toward the fold’s edge, selecting maximal values within
    each ring, and terminating at candidate endpoints. The final endpoints
    are those candidates at the end of tracks that have a high median value,
    with the higher value chosen if two candidate endpoints are within
    (a default of) 10 edges from each other (otherwise, the resulting fundi
    can have spurious branching at the fold’s edge).

    The connect_points_erosion function connects the deepest fold vertices
    to the endpoints with a skeleton of 1-vertex-thick curves by erosion.
    It erodes by iteratively removing simple topological points and endpoints
    in order of lowest to highest values, where a simple topological point
    is a vertex that when added to or removed from an object on a surface
    mesh (such as a fundus curve) does not alter the object's topology.

    Steps ::
        1. Find fundus endpoints (outer anchors) with find_outer_endpoints().
        2. Include inner anchor points.
        3. Connect anchor points using connect_points_erosion();
           inner anchors are removed if they result in endpoints.

    Note ::
        Follow this with segment_by_region() to segment fundi by sulci.

    Parameters
    ----------
    folds : numpy array or list of integers
        fold number for each vertex
    curv_file :  string
        surface mesh file in VTK format with mean curvature values
    depth_file :  string
        surface mesh file in VTK format with rescaled depth values
    min_separation : integer
        minimum number of edges between inner/outer anchor points
    erode_ratio : float
        fraction of indices to test for removal at each iteration
        in connect_points_erosion()
    erode_min_size : integer
        minimum number of vertices when eroding in connect_points_erosion()
    save_file : bool
        save output VTK file?
    output_file : string
        output VTK file
    background_value : integer or float
        background value
    verbose : bool
        print statements?

    Returns
    -------
    fundus_per_fold : list of integers
        fundus numbers for all vertices, labeled by fold
        (-1 for non-fundus vertices)
    n_fundi_in_folds :  integer
        number of fundi
    fundus_per_fold_file : string (if save_file)
        output VTK file with fundus numbers (-1 for non-fundus vertices)

    Examples
    --------
    >>> # Extract fundus from one or more folds:
    >>> import numpy as np
    >>> from mindboggle.mio.vtks import read_scalars
    >>> from mindboggle.features.fundi import extract_fundi
    >>> from mindboggle.mio.fetch_data import prep_tests
    >>> urls, fetch_data = prep_tests()
    >>> curv_file = fetch_data(urls['left_mean_curvature'], '', '.vtk')
    >>> depth_file = fetch_data(urls['left_travel_depth'], '', '.vtk')
    >>> folds_file = fetch_data(urls['left_folds'], '', '.vtk')
    >>> folds, name = read_scalars(folds_file, True, True)
    >>> # Limit number of folds to speed up the test:
    >>> limit_folds = True
    >>> if limit_folds:
    ...     fold_numbers = [4] #[4, 6]
    ...     i0 = [i for i,x in enumerate(folds) if x not in fold_numbers]
    ...     folds[i0] = -1
    >>> min_separation = 10
    >>> erode_ratio = 0.10
    >>> erode_min_size = 10
    >>> save_file = True
    >>> output_file = 'extract_fundi_fold4.vtk'
    >>> background_value = -1
    >>> verbose = False
    >>> o1, o2, fundus_per_fold_file = extract_fundi(folds, curv_file,
    ...     depth_file, min_separation, erode_ratio, erode_min_size,
    ...     save_file, output_file, background_value, verbose)
    >>> lens = [len([x for x in o1 if x == y])
    ...         for y in np.unique(o1) if y != background_value]
    >>> lens[0:10] # [66, 2914, 100, 363, 73, 331, 59, 30, 1, 14] # (if not limit_folds)
    [73]

    View result without background (skip test):

    >>> from mindboggle.mio.plots import plot_surfaces # doctest: +SKIP
    >>> from mindboggle.mio.vtks import rewrite_scalars # doctest: +SKIP
    >>> rewrite_scalars(fundus_per_fold_file,
    ...                 'extract_fundi_fold4_no_background.vtk', o1,
    ...                 'fundus_per_fold', folds) # doctest: +SKIP
    >>> plot_surfaces('extract_fundi_fold4_no_background.vtk') # doctest: +SKIP

    """

    # Extract a skeleton to connect endpoints in a fold:
    import os
    import numpy as np
    from time import time

    from mindboggle.mio.vtks import read_scalars, read_vtk, rewrite_scalars
    from mindboggle.guts.compute import median_abs_dev
    from mindboggle.guts.paths import find_max_values
    from mindboggle.guts.mesh import find_neighbors_from_file
    from mindboggle.guts.mesh import find_complete_faces
    from mindboggle.guts.paths import find_outer_endpoints
    from mindboggle.guts.paths import connect_points_erosion

    if isinstance(folds, list):
        folds = np.array(folds)

    # Load values, inner anchor threshold, and neighbors:
    if os.path.isfile(curv_file):
        points, indices, lines, faces, curvs, scalar_names, npoints, \
            input_vtk = read_vtk(curv_file, True, True)
    else:
        raise IOError("{0} doesn't exist!".format(curv_file))
    if os.path.isfile(depth_file):
        depths, name = read_scalars(depth_file, True, True)
    else:
        raise IOError("{0} doesn't exist!".format(depth_file))
    values = curvs * depths
    values0 = [x for x in values if x > 0]
    thr = np.median(values0) + 2 * median_abs_dev(values0)
    neighbor_lists = find_neighbors_from_file(curv_file)

    # ------------------------------------------------------------------------
    # Loop through folds:
    # ------------------------------------------------------------------------
    t1 = time()
    skeletons = []
    unique_fold_IDs = [x for x in np.unique(folds) if x != background_value]

    if verbose:
        if len(unique_fold_IDs) == 1:
            print("Extract a fundus from 1 fold...")
        else:
            print("Extract a fundus from each of {0} folds...".
                  format(len(unique_fold_IDs)))

    for fold_ID in unique_fold_IDs:
        indices_fold = [i for i,x in enumerate(folds) if x == fold_ID]
        if indices_fold:
            if verbose:
                print('  Fold {0}:'.format(int(fold_ID)))

            # ----------------------------------------------------------------
            # Find outer anchor points on the boundary of the surface region,
            # to serve as fundus endpoints:
            # ----------------------------------------------------------------
            outer_anchors, tracks = find_outer_endpoints(indices_fold,
                neighbor_lists, values, depths, min_separation,
                background_value, verbose)

            # ----------------------------------------------------------------
            # Find inner anchor points:
            # ----------------------------------------------------------------
            inner_anchors = find_max_values(points, values, min_separation,
                                            thr)

            # ----------------------------------------------------------------
            # Connect anchor points to create skeleton:
            # ----------------------------------------------------------------
            B = background_value * np.ones(npoints)
            B[indices_fold] = 1
            skeleton = connect_points_erosion(B, neighbor_lists,
                outer_anchors, inner_anchors, values, erode_ratio,
                erode_min_size, [], '', background_value, verbose)
            if skeleton:
                skeletons.extend(skeleton)

            ## ---------------------------------------------------------------
            ## Remove fundus vertices if they make complete triangle faces:
            ## ---------------------------------------------------------------
            #Iremove = find_complete_faces(skeletons, faces)
            #if Iremove:
            #    skeletons = list(frozenset(skeletons).difference(Iremove))

    indices_skel = [x for x in skeletons if folds[x] != background_value]
    fundus_per_fold = background_value * np.ones(npoints)
    fundus_per_fold[indices_skel] = folds[indices_skel]
    n_fundi_in_folds = len([x for x in np.unique(fundus_per_fold)
                             if x != background_value])
    if n_fundi_in_folds == 1:
        sdum = 'fold fundus'
    else:
        sdum = 'fold fundi'
    if verbose:
        print('  ...Extracted {0} {1}; {2} total ({3:.2f} seconds)'.
              format(n_fundi_in_folds, sdum, n_fundi_in_folds, time() - t1))

    # ------------------------------------------------------------------------
    # Return fundi, number of fundi, and file name:
    # ------------------------------------------------------------------------
    fundus_per_fold_file = None
    if n_fundi_in_folds > 0:
        fundus_per_fold = [int(x) for x in fundus_per_fold]
        if save_file:
            if output_file:
                fundus_per_fold_file = output_file
            else:
                fundus_per_fold_file = os.path.join(os.getcwd(),
                                                    'fundus_per_fold.vtk')
            rewrite_scalars(curv_file, fundus_per_fold_file, fundus_per_fold,
                            'fundi', [], background_value)
            if not os.path.exists(fundus_per_fold_file):
                raise IOError(fundus_per_fold_file + " not found")

    return fundus_per_fold, n_fundi_in_folds, fundus_per_fold_file
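The inner-anchor threshold described in the docstring (median of the positive curvature-times-depth values plus two median absolute deviations) is easy to sketch in plain NumPy. The helper below is illustrative only, a stand-in for what mindboggle computes with guts.compute.median_abs_dev:

import numpy as np

def mad_threshold(values, n_mads=2.0):
    # Sketch of the extract_fundi() inner-anchor threshold:
    # median of the positive values plus n_mads median absolute deviations.
    positives = np.asarray([x for x in values if x > 0])
    med = np.median(positives)
    mad = np.median(np.abs(positives - med))
    return med + n_mads * mad

# Vertices whose curvature * depth exceeds the threshold become
# candidate inner anchors:
values = np.array([0.0, 0.2, 0.3, 0.4, 2.5, 3.0])
candidates = np.where(values > mad_threshold(values))[0]  # -> [4, 5]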
Example #39
def extract_subfolds(depth_file, folds, min_size=10, depth_factor=0.25,
                     depth_ratio=0.1, tolerance=0.01, save_file=False):
    """
    Use depth to segment folds into subfolds in a triangular surface mesh.

    Note ::

        The function extract_sulci() performs about the same whether folds
        or subfolds are used as input.  The latter leads to some loss of
        small subfolds and possibly holes for small subfolds in the middle
        of other subfolds.

    Note about the watershed() function:
    The watershed() function grows segments outward from deep seed
    vertices, repeating segmentation from the resulting seeds until each
    seed's segment touches a boundary; the segment() function fills in the
    rest. Finally, segments are joined if their seeds are too close to
    each other (a sketch of this merge test appears after the function).
    Despite these precautions, the order of seed selection in segment()
    could possibly influence the resulting borders between adjoining
    segments. [The propagate() function is slower and insensitive to
    depth, but is not biased by seed order.]

    Parameters
    ----------
    depth_file : string
        surface mesh file in VTK format with faces and depth scalar values
    folds : list of integers
        fold numbers for all vertices (-1 for non-fold vertices)
    min_size : integer
        minimum number of vertices for a subfold
    depth_factor : float
        watershed() depth_factor:
        factor to determine whether to merge two neighboring watershed catchment
        basins -- they are merged if the Euclidean distance between their basin
        seeds is less than this fraction of the maximum Euclidean distance
        between points having minimum and maximum depths
    depth_ratio : float
        watershed() depth_ratio:
        the minimum fraction of depth for a neighboring shallower
        watershed catchment basin (otherwise merged with the deeper basin)
    tolerance : float
        watershed() tolerance:
        tolerance for detecting differences in depth between vertices
    save_file : Boolean
        save output VTK file?

    Returns
    -------
    subfolds : list of integers
        fold numbers for all vertices (-1 for non-fold vertices)
    n_subfolds :  int
        number of subfolds
    subfolds_file : string (if save_file)
        name of output VTK file with fold IDs (-1 for non-fold vertices)

    Examples
    --------
    >>> import os
    >>> from mindboggle.mio.vtks import read_scalars, rewrite_scalars
    >>> from mindboggle.guts.mesh import find_neighbors_from_file
    >>> from mindboggle.features.folds import extract_subfolds
    >>> from mindboggle.mio.plots import plot_surfaces
    >>> path = os.environ['MINDBOGGLE_DATA']
    >>> depth_file = os.path.join(path, 'arno', 'shapes', 'lh.pial.travel_depth.vtk')
    >>> folds_file = os.path.join(path, 'arno', 'features', 'folds.vtk')
    >>> folds, name = read_scalars(folds_file)
    >>> min_size = 10
    >>> depth_factor = 0.5
    >>> depth_ratio = 0.1
    >>> tolerance = 0.01
    >>> #
    >>> subfolds, n_subfolds, subfolds_file = extract_subfolds(depth_file,
    ...     folds, min_size, depth_factor, depth_ratio, tolerance, True)
    >>> #
    >>> # View:
    >>> rewrite_scalars(depth_file, 'subfolds.vtk', subfolds, 'subfolds', subfolds)
    >>> plot_surfaces('subfolds.vtk')

    """
    import os
    import numpy as np
    from time import time
    from mindboggle.mio.vtks import rewrite_scalars, read_vtk
    from mindboggle.guts.mesh import find_neighbors
    from mindboggle.guts.segment import segment, propagate, watershed

    print("Segment folds into subfolds")
    t0 = time()

    #-------------------------------------------------------------------------
    # Load depth values for all vertices
    #-------------------------------------------------------------------------
    faces, lines, indices, points, npoints, depths, \
        name, input_vtk = read_vtk(depth_file, return_first=True,
                                   return_array=True)

    #-------------------------------------------------------------------------
    # Find neighbors for each vertex
    #-------------------------------------------------------------------------
    neighbor_lists = find_neighbors(faces, npoints)

    #-------------------------------------------------------------------------
    # Segment folds into "watershed basins"
    #-------------------------------------------------------------------------
    indices_folds = [i for i,x in enumerate(folds) if x != -1]
    subfolds, seed_indices = watershed(depths, points, indices_folds,
                                 neighbor_lists, min_size,
                                 depth_factor=depth_factor,
                                 depth_ratio=depth_ratio,
                                 tolerance=tolerance, regrow=True)

    # Print statement
    n_subfolds = len([x for x in np.unique(subfolds) if x != -1])
    print('  ...Extracted {0} subfolds ({1:.2f} seconds)'.
          format(n_subfolds, time() - t0))

    #-------------------------------------------------------------------------
    # Return subfolds, number of subfolds, file name
    #-------------------------------------------------------------------------
    if save_file:
        subfolds_file = os.path.join(os.getcwd(), 'subfolds.vtk')
        rewrite_scalars(depth_file, subfolds_file,
                        subfolds, 'subfolds', subfolds)

        if not os.path.exists(subfolds_file):
            raise IOError(subfolds_file + " not found")

    else:
        subfolds_file = None

    return subfolds, n_subfolds, subfolds_file
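The depth_factor merge test described in the note above can be isolated in a few lines. This is a hedged sketch of the criterion only (the function and argument names are hypothetical, not watershed() internals): two basins merge when their seed points are closer than depth_factor times the distance between the mesh's shallowest and deepest points.

import numpy as np

def should_merge_basins(seed_a, seed_b, points, depths, depth_factor=0.25):
    # Distance between the minimum-depth and maximum-depth points:
    points = np.asarray(points, dtype=float)
    depths = np.asarray(depths, dtype=float)
    max_span = np.linalg.norm(points[np.argmax(depths)] -
                              points[np.argmin(depths)])
    # Merge if the basin seeds are closer than depth_factor * max_span:
    seed_distance = np.linalg.norm(points[seed_a] - points[seed_b])
    return seed_distance < depth_factor * max_span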
Example #40
def evaluate_deep_features(features_file,
                           labels_file,
                           sulci_file='',
                           hemi='',
                           excludeIDs=[-1],
                           output_vtk_name='',
                           verbose=True):
    """
    Evaluate deep surface features by computing the minimum distance from each
    label border vertex to all of the feature vertices in the same sulcus,
    and from each feature vertex to all of the label border vertices in the
    same sulcus.  The label borders run along the deepest parts of sulci
    and correspond to fundi in the DKT cortical labeling protocol.  (A
    simplified sketch of the distance computation appears after the
    function.)

    Parameters
    ----------
    features_file : string
        VTK surface file with feature numbers for vertex scalars
    labels_file : string
        VTK surface file with label numbers for vertex scalars
    sulci_file : string
        VTK surface file with sulcus numbers for vertex scalars
    hemi : string
        hemisphere ('lh' or 'rh'), used to choose sulcus label pair lists
    excludeIDs : list of integers
        feature/sulcus/label IDs to exclude (background set to -1)
    output_vtk_name : string
        if not empty, output a VTK file beginning with output_vtk_name that
        contains a surface with mean distances as scalars
    verbose : Boolean
        print mean distances to standard output?

    Returns
    -------
    feature_to_border_mean_distances : numpy array [number of features x 1]
        mean distance from each feature to sulcus label border
    feature_to_border_sd_distances : numpy array [number of features x 1]
        standard deviations of feature-to-border distances
    feature_to_border_distances_vtk : string
        VTK surface file containing feature-to-border distances
    border_to_feature_mean_distances : numpy array [number of features x 1]
        mean distances from each sulcus label border to feature
    border_to_feature_sd_distances : numpy array [number of features x 1]
        standard deviations of border-to-feature distances
    border_to_feature_distances_vtk : string
        VTK surface file containing border-to-feature distances

    """
    import os
    import sys
    import numpy as np
    from mindboggle.mio.vtks import read_vtk, read_scalars, write_vtk
    from mindboggle.guts.mesh import find_neighbors, remove_faces
    from mindboggle.guts.segment import extract_borders
    from mindboggle.guts.compute import source_to_target_distances
    from mindboggle.mio.labels import DKTprotocol

    dkt = DKTprotocol()
    #-------------------------------------------------------------------------
    # Load labels, features, and sulci:
    #-------------------------------------------------------------------------
    faces, lines, indices, points, npoints, labels, scalar_names, \
        input_vtk = read_vtk(labels_file, True, True)
    features, name = read_scalars(features_file, True, True)
    if sulci_file:
        sulci, name = read_scalars(sulci_file, True, True)
        # List of indices to sulcus vertices:
        sulcus_indices = [i for i, x in enumerate(sulci) if x != -1]
        segmentIDs = sulci
        sulcus_faces = remove_faces(faces, sulcus_indices)
    else:
        sulcus_indices = range(len(labels))
        segmentIDs = []
        sulcus_faces = faces

    #-------------------------------------------------------------------------
    # Prepare neighbors, label pairs, border IDs, and outputs:
    #-------------------------------------------------------------------------
    # Calculate neighbor lists for all points:
    print('Find neighbors for all vertices...')
    neighbor_lists = find_neighbors(faces, npoints)

    # Find label border points in any of the sulci:
    print('Find label border points in any of the sulci...')
    border_indices, border_label_tuples, unique_border_label_tuples = \
        extract_borders(sulcus_indices, labels, neighbor_lists,
                        ignore_values=[], return_label_pairs=True)
    if not len(border_indices):
        sys.exit('There are no label border points!')

    # Initialize an array of label border IDs
    # (label border vertices that define sulci in the labeling protocol):
    print('Build an array of label border IDs...')
    label_borders = -1 * np.ones(npoints)

    if hemi == 'lh':
        nsulcus_lists = len(dkt.left_sulcus_label_pair_lists)
    else:
        nsulcus_lists = len(dkt.right_sulcus_label_pair_lists)
    feature_to_border_mean_distances = -1 * np.ones(nsulcus_lists)
    feature_to_border_sd_distances = -1 * np.ones(nsulcus_lists)
    border_to_feature_mean_distances = -1 * np.ones(nsulcus_lists)
    border_to_feature_sd_distances = -1 * np.ones(nsulcus_lists)
    feature_to_border_distances_vtk = ''
    border_to_feature_distances_vtk = ''

    #-------------------------------------------------------------------------
    # Loop through sulci:
    #-------------------------------------------------------------------------
    # For each list of sorted label pairs (corresponding to a sulcus):
    for isulcus, label_pairs in enumerate(dkt.sulcus_label_pair_lists):

        # Keep the border points with label pair labels:
        label_pair_border_indices = [
            x for i, x in enumerate(border_indices)
            if np.unique(border_label_tuples[i]).tolist() in label_pairs
        ]

        # Store the points as sulcus IDs in the border IDs array:
        if label_pair_border_indices:
            label_borders[label_pair_border_indices] = isulcus

    if len(np.unique(label_borders)) > 1:

        #---------------------------------------------------------------------
        # Construct a feature-to-border distance matrix and VTK file:
        #---------------------------------------------------------------------
        # Construct a distance matrix:
        print('Construct a feature-to-border distance matrix...')
        sourceIDs = features
        targetIDs = label_borders
        distances, distance_matrix = source_to_target_distances(
            sourceIDs, targetIDs, points, segmentIDs, excludeIDs)

        # Compute mean distances for each feature:
        nfeatures = min(np.shape(distance_matrix)[1], nsulcus_lists)
        for ifeature in range(nfeatures):
            feature_distances = [
                x for x in distance_matrix[:, ifeature] if x != -1
            ]
            feature_to_border_mean_distances[ifeature] = \
                np.mean(feature_distances)
            feature_to_border_sd_distances[ifeature] = \
                np.std(feature_distances)

        if verbose:
            print('Feature-to-border mean distances:')
            print(feature_to_border_mean_distances)
            print('Feature-to-border standard deviations of distances:')
            print(feature_to_border_sd_distances)

        # Write resulting feature-label border distances to VTK file:
        if output_vtk_name:
            feature_to_border_distances_vtk = os.path.join(
                os.getcwd(),
                output_vtk_name + '_feature_to_border_mean_distances.vtk')
            print('Write feature-to-border distances to {0}...'.format(
                feature_to_border_distances_vtk))
            write_vtk(feature_to_border_distances_vtk, points, [], [],
                      sulcus_faces, [distances],
                      ['feature-to-border_distances'], 'float')

        #---------------------------------------------------------------------
        # Construct a border-to-feature distance matrix and VTK file:
        #---------------------------------------------------------------------
        # Construct a distance matrix:
        print('Construct a border-to-feature distance matrix...')
        sourceIDs = label_borders
        targetIDs = features
        distances, distance_matrix = source_to_target_distances(
            sourceIDs, targetIDs, points, segmentIDs, excludeIDs)

        # Compute mean distances for each feature:
        nfeatures = min(np.shape(distance_matrix)[1], nsulcus_lists)
        for ifeature in range(nfeatures):
            border_distances = [
                x for x in distance_matrix[:, ifeature] if x != -1
            ]
            border_to_feature_mean_distances[ifeature] = \
                np.mean(border_distances)
            border_to_feature_sd_distances[ifeature] = \
                np.std(border_distances)

        if verbose:
            print('border-to-feature mean distances:')
            print(border_to_feature_mean_distances)
            print('border-to-feature standard deviations of distances:')
            print(border_to_feature_sd_distances)

        # Write resulting feature-label border distances to VTK file:
        if output_vtk_name:
            border_to_feature_distances_vtk = os.path.join(
                os.getcwd(),
                output_vtk_name + '_border_to_feature_mean_distances.vtk')
            print('Write border-to-feature distances to {0}...'.format(
                border_to_feature_distances_vtk))
            write_vtk(border_to_feature_distances_vtk, points, [], [],
                      sulcus_faces, [distances],
                      ['border-to-feature_distances'], 'float')

    #-------------------------------------------------------------------------
    # Return outputs:
    #-------------------------------------------------------------------------
    return feature_to_border_mean_distances, feature_to_border_sd_distances,\
           feature_to_border_distances_vtk,\
           border_to_feature_mean_distances, border_to_feature_sd_distances,\
           border_to_feature_distances_vtk
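As a simplified stand-in for one direction of source_to_target_distances(), the minimum distance from each feature vertex to a set of border vertices can be computed with a k-d tree. Using scipy here is an assumption; mindboggle's own implementation may differ, and it additionally restricts matches to the same segment (sulcus).

import numpy as np
from scipy.spatial import cKDTree

def min_distances(source_points, target_points):
    # For each source point, the Euclidean distance to its nearest
    # target point:
    tree = cKDTree(np.asarray(target_points))
    distances, _ = tree.query(np.asarray(source_points))
    return distances

# The per-feature summary statistics reported above would then be:
# np.mean(distances), np.std(distances)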
Example #41
def spectrum_from_file(vtk_file,
                       spectrum_size=10,
                       exclude_labels=[-1],
                       normalization="areaindex",
                       area_file='',
                       verbose=False):
    """
    Compute Laplace-Beltrami spectrum of a 3D shape in a VTK file.

    Parameters
    ----------
    vtk_file : string
        the input vtk file
    spectrum_size : integer
        number of eigenvalues to be computed (the length of the spectrum)
    exclude_labels : list of integers
        labels to be excluded
    normalization : string
        the method used to normalize eigenvalues
        if None, no normalization is used
        if "area", use area of the 2D structure as in Reuter et al. 2006
        if "index", divide eigenvalue by index to account for linear trend
        if "areaindex", do both (default)
        (a sketch of these options appears after the function)
    area_file :  string
        name of VTK file with surface area scalar values
    verbose : bool
        print statements?

    Returns
    -------
    spectrum : list of floats
        first spectrum_size eigenvalues of the Laplace-Beltrami spectrum

    Examples
    --------
    >>> # Spectrum for entire left hemisphere of Twins-2-1:
    >>> import numpy as np
    >>> from mindboggle.shapes.laplace_beltrami import spectrum_from_file
    >>> from mindboggle.shapes.laplace_beltrami import spectrum_per_label
    >>> from mindboggle.mio.fetch_data import prep_tests
    >>> urls, fetch_data = prep_tests()
    >>> vtk_file = fetch_data(urls['left_freesurfer_labels'], '', '.vtk')
    >>> spectrum = spectrum_from_file(vtk_file, spectrum_size=6,
    ...     exclude_labels=[-1], normalization=None, area_file="", verbose=False)
    >>> [float("{0:.{1}f}".format(x, 5)) for x in spectrum[1::]]
    [0.00013, 0.00027, 0.00032, 0.00047, 0.00058]
    >>> spectrum = spectrum_from_file(vtk_file, spectrum_size=6,
    ...     exclude_labels=[-1], normalization="areaindex", area_file="",
    ...     verbose=False)
    >>> [float("{0:.{1}f}".format(x, 5)) for x in spectrum[1::]]
    [14.12801, 14.93573, 11.75397, 12.93141, 12.69348]
    """
    from mindboggle.mio.vtks import read_vtk, read_scalars
    from mindboggle.shapes.laplace_beltrami import spectrum_of_largest

    points, indices, lines, faces, scalars, scalar_names, npoints, \
            input_vtk = read_vtk(vtk_file)

    # Area file:
    if area_file:
        areas, u1 = read_scalars(area_file)
    else:
        areas = None

    spectrum = spectrum_of_largest(points, faces, spectrum_size,
                                   exclude_labels, normalization, areas,
                                   verbose)

    return spectrum
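The normalization options can be sketched as two composable operations, assuming the total surface area is available. This is a sketch only; the exact formulas in mindboggle.shapes.laplace_beltrami may differ in detail:

import numpy as np

def normalize_spectrum(spectrum, total_area, normalization="areaindex"):
    # "area": multiply eigenvalues by surface area for scale invariance
    # (after Reuter et al. 2006); "index": divide the i-th eigenvalue by i
    # to flatten the roughly linear growth of the spectrum.
    spectrum = np.asarray(spectrum, dtype=float)
    if normalization in ("area", "areaindex"):
        spectrum = spectrum * total_area
    if normalization in ("index", "areaindex"):
        spectrum = spectrum / np.arange(1, len(spectrum) + 1)
    return spectrum.tolist()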
Example #42
def write_vertex_measures(output_table,
                          labels_or_file,
                          sulci=[],
                          fundi=[],
                          affine_transform_files=[],
                          inverse_booleans=[],
                          transform_format='itk',
                          area_file='',
                          mean_curvature_file='',
                          travel_depth_file='',
                          geodesic_depth_file='',
                          freesurfer_thickness_file='',
                          freesurfer_curvature_file='',
                          freesurfer_sulc_file=''):
    """
    Make a table of shape values per vertex.

    Note ::
        This function is tailored for Mindboggle outputs.

    Parameters
    ----------
    output_table : string
        output file (full path)
    labels_or_file : list or string
        label number for each vertex or name of VTK file with index scalars
    sulci :  list of integers
        indices to sulci, one per vertex, with -1 indicating no sulcus
    fundi :  list of integers
        indices to fundi, one per vertex, with -1 indicating no fundus
    affine_transform_files : list of strings
        affine transform files to standard space
    inverse_booleans : list of zeros and ones
        for each transform, 1 to take the inverse, else 0
    transform_format : string
        format for transform file
        Ex: 'txt' for text, 'itk' for ITK, and 'mat' for Matlab format
    area_file :  string
        name of VTK file with surface area scalar values
    mean_curvature_file :  string
        name of VTK file with mean curvature scalar values
    travel_depth_file :  string
        name of VTK file with travel depth scalar values
    geodesic_depth_file :  string
        name of VTK file with geodesic depth scalar values
    freesurfer_thickness_file :  string
        name of VTK file with FreeSurfer thickness scalar values
    freesurfer_curvature_file :  string
        name of VTK file with FreeSurfer curvature (curv) scalar values
    freesurfer_sulc_file :  string
        name of VTK file with FreeSurfer convexity (sulc) scalar values

    Returns
    -------
    output_table : table file name for vertex shape values

    Examples
    --------
    >>> import os
    >>> from mindboggle.mio.vtks import read_scalars
    >>> from mindboggle.mio.tables import write_vertex_measures
    >>> #
    >>> output_table = ''  # 'vertex_shapes.csv'
    >>> path = os.environ['MINDBOGGLE_DATA']
    >>> labels_or_file = os.path.join(path, 'arno', 'labels', 'lh.labels.DKT25.manual.vtk')
    >>> sulci_file = os.path.join(path, 'arno', 'features', 'sulci.vtk')
    >>> fundi_file = os.path.join(path, 'arno', 'features', 'fundi.vtk')
    >>> sulci, name = read_scalars(sulci_file)
    >>> fundi, name = read_scalars(fundi_file)
    >>> affine_transform_files = [os.path.join(path, 'arno', 'mri',
    ...     't1weighted_brain.MNI152Affine.txt')]
    >>> inverse_booleans = [1]
    >>> transform_format = 'itk'
    >>> swap_xy = True
    >>> area_file = os.path.join(path, 'arno', 'shapes', 'lh.pial.area.vtk')
    >>> mean_curvature_file = os.path.join(path, 'arno', 'shapes', 'lh.pial.mean_curvature.vtk')
    >>> travel_depth_file = os.path.join(path, 'arno', 'shapes', 'lh.pial.travel_depth.vtk')
    >>> geodesic_depth_file = os.path.join(path, 'arno', 'shapes', 'lh.pial.geodesic_depth.vtk')
    >>> freesurfer_thickness_file = ''
    >>> freesurfer_curvature_file = ''
    >>> freesurfer_sulc_file = ''
    >>> #
    >>> write_vertex_measures(output_table, labels_or_file, sulci, fundi,
    ...     affine_transform_files, inverse_booleans, transform_format, area_file,
    ...     mean_curvature_file, travel_depth_file, geodesic_depth_file,
    ...     freesurfer_thickness_file, freesurfer_curvature_file, freesurfer_sulc_file)

    """
    import os
    import numpy as np
    import pandas as pd

    from mindboggle.mio.vtks import read_scalars, read_vtk, \
        apply_affine_transforms

    # Make sure inputs are lists:
    if isinstance(labels_or_file, np.ndarray):
        labels = [int(x) for x in labels_or_file]
    elif isinstance(labels_or_file, list):
        labels = labels_or_file
    elif isinstance(labels_or_file, str):
        labels, name = read_scalars(labels_or_file)
    if isinstance(sulci, np.ndarray):
        sulci = [int(x) for x in sulci]
    if isinstance(fundi, np.ndarray):
        fundi = [int(x) for x in fundi]

    if not labels and not sulci and not fundi:
        import sys
        sys.exit('No feature data to tabulate in write_vertex_measures().')

    # Feature names and corresponding feature lists:
    feature_names = ['label ID', 'sulcus ID', 'fundus ID']
    feature_lists = [labels, sulci, fundi]

    # Shape names corresponding to shape files below:
    shape_names = [
        'area', 'travel depth', 'geodesic depth', 'mean curvature',
        'freesurfer curvature', 'freesurfer thickness',
        'freesurfer convexity (sulc)'
    ]

    # Load shape files as a list of numpy arrays of per-vertex shape values:
    shape_files = [
        area_file, travel_depth_file, geodesic_depth_file, mean_curvature_file,
        freesurfer_curvature_file, freesurfer_thickness_file,
        freesurfer_sulc_file
    ]

    # Append columns of per-vertex scalar values:
    columns = []
    column_names = []
    for ifeature, values in enumerate(feature_lists):
        if values:
            columns.append(values)
            column_names.append(feature_names[ifeature])

    first_pass = True
    for ishape, shape_file in enumerate(shape_files):
        if os.path.exists(shape_file):
            if first_pass:

                # Append x,y,z position per vertex to columns:
                u1, u2, u3, points, u4, scalars, u5, u6 = read_vtk(shape_file)
                xyz_positions = np.asarray(points)
                for ixyz, xyz in enumerate(['x', 'y', 'z']):
                    column_names.append('position: {0}'.format(xyz))
                    columns.append(xyz_positions[:, ixyz].tolist())
                first_pass = False

                # Append standard space x,y,z position to columns:
                if affine_transform_files and transform_format:
                    affine_points, \
                        foo1 = apply_affine_transforms(affine_transform_files,
                                    inverse_booleans, transform_format,
                                    points, vtk_file_stem='')
                    xyz_std_positions = affine_points
                    for ixyz, xyz in enumerate(['x', 'y', 'z']):
                        column_names.append('position in standard space:'
                                            ' {0}'.format(xyz))
                        columns.append(xyz_std_positions[:, ixyz].tolist())
            else:
                scalars, name = read_scalars(shape_file)
            if len(scalars):
                columns.append(scalars)
                column_names.append(shape_names[ishape])

    # Write the columns to a table:
    if not output_table:
        output_table = os.path.join(os.getcwd(), 'vertices.csv')

    df = pd.DataFrame(np.transpose(columns), columns=column_names)
    df.to_csv(output_table, index=False)

    if not os.path.exists(output_table):
        raise IOError(output_table + " not found")

    return output_table
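The "position in standard space" columns reduce to applying an affine transform in homogeneous coordinates. A generic sketch, independent of the transform_format handling inside apply_affine_transforms():

import numpy as np

def transform_points(points, affine):
    # Apply a 4x4 affine matrix to an (n, 3) array of vertex positions:
    points = np.asarray(points, dtype=float)
    homogeneous = np.hstack([points, np.ones((len(points), 1))])
    return (homogeneous @ np.asarray(affine).T)[:, :3]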
Example #43
def zernike_moments_per_label(vtk_file,
                              order=10,
                              exclude_labels=[-1],
                              scale_input=True,
                              decimate_fraction=0,
                              decimate_smooth=25):
    """
    Compute the Zernike moments per labeled region in a file.

    Optionally decimate the input mesh.

    Parameters
    ----------
    vtk_file : string
        name of VTK surface mesh file containing index scalars (labels)
    order : integer
        number of moments to compute
    exclude_labels : list of integers
        labels to be excluded
    scale_input : Boolean
        translate and scale each object so it is bounded by a unit sphere?
        (this is the expected input to zernike_moments(); a sketch of this
        scaling appears after the function)
    decimate_fraction : float
        fraction of mesh faces to remove for decimation (0 for no decimation)
    decimate_smooth : integer
        number of smoothing steps for decimation

    Returns
    -------
    descriptors_lists : list of lists of floats
        Zernike descriptors per label
    label_list : list of integers
        list of unique labels for which moments are computed

    Examples
    --------
    >>> # Uncomment "if label==22:" below to run example:
    >>> # Twins-2-1 left postcentral (22) pial surface:
    >>> import os
    >>> from mindboggle.shapes.zernike.zernike import zernike_moments_per_label
    >>> path = os.path.join(os.environ['HOME'], 'mindboggled', 'OASIS-TRT-20-1')
    >>> vtk_file = os.path.join(path, 'labels', 'left_surface', 'relabeled_classifier.vtk')
    >>> order = 3
    >>> exclude_labels = [-1, 0]
    >>> scale_input = True
    >>> zernike_moments_per_label(vtk_file, order, exclude_labels, scale_input)
    ([[0.00528486237819844,
       0.009571754617699853,
       0.0033489494903015944,
       0.00875603468268444,
       0.0015879536633349918,
       0.0008080165707033097]],
     [22])

    A separate run for a single region (label 14.0) returned:

    ([[0.0018758013185778298,
       0.001757973693050823,
       0.002352403177686726,
       0.0032281044369938286,
       0.002215900343702539,
       0.0019646380916703856]],
     [14.0])

    Arthur Mikhno's result:
    1.0e+07 *
    0.0000
    0.0179
    0.0008
    4.2547
    0.0534
    4.4043

    """
    import numpy as np
    from mindboggle.mio.vtks import read_vtk
    from mindboggle.guts.mesh import remove_faces
    from mindboggle.shapes.zernike.zernike import zernike_moments

    min_points_faces = 4

    #-------------------------------------------------------------------------
    # Read VTK surface mesh file:
    #-------------------------------------------------------------------------
    faces, u1, u2, points, u3, labels, u4, u5 = read_vtk(vtk_file)

    #-------------------------------------------------------------------------
    # Loop through labeled regions:
    #-------------------------------------------------------------------------
    ulabels = [x for x in np.unique(labels) if x not in exclude_labels]
    label_list = []
    descriptors_lists = []
    for label in ulabels:
        #if label == 1022:  # 22:
        #    print("DEBUG: COMPUTE FOR ONLY ONE LABEL")

        #---------------------------------------------------------------------
        # Determine the indices per label:
        #---------------------------------------------------------------------
        Ilabel = [i for i, x in enumerate(labels) if x == label]
        print('  {0} vertices for label {1}'.format(len(Ilabel), label))
        if len(Ilabel) > min_points_faces:

            #-----------------------------------------------------------------
            # Remove background faces:
            #-----------------------------------------------------------------
            pick_faces = remove_faces(faces, Ilabel)
            if len(pick_faces) > min_points_faces:

                #-------------------------------------------------------------
                # Compute Zernike moments for the label:
                #-------------------------------------------------------------
                descriptors = zernike_moments(points, pick_faces, order,
                                              scale_input, decimate_fraction,
                                              decimate_smooth)

                #-------------------------------------------------------------
                # Append to a list of lists of spectra:
                #-------------------------------------------------------------
                descriptors_lists.append(descriptors)
                label_list.append(label)

    return descriptors_lists, label_list
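The scale_input step noted in the docstring amounts to centering each object and shrinking it into a unit sphere. A minimal sketch under that description, not zernike_moments() itself:

import numpy as np

def scale_to_unit_sphere(points):
    # Translate the centroid to the origin, then scale so that every
    # vertex lies within a unit sphere:
    points = np.asarray(points, dtype=float)
    centered = points - points.mean(axis=0)
    radius = np.linalg.norm(centered, axis=1).max()
    return centered / radius if radius > 0 else centered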
Example #44
def write_face_vertex_averages(input_file, output_table='', area_file=''):
    """
    Make a table of average vertex values per face (each vertex value
    divided by its vertex area if area_file is provided). A vectorized
    sketch of this averaging appears after the function.

    Parameters
    ----------
    input_file : string
        name of VTK file with scalars to average
    area_file :  string
        name of VTK file with surface area scalar values
    output_table :  string
        output table filename

    Returns
    -------
    output_table :  string
        output table filename

    Examples
    --------
    >>> import os
    >>> from mindboggle.mio.tables import write_face_vertex_averages
    >>> path = '/homedir/mindboggled'
    >>> input_file = os.path.join(path, 'Twins-2-1', 'shapes', 'left_cortical_surface', 'freesurfer_thickness.vtk')
    >>> area_file = os.path.join(path, 'Twins-2-1', 'shapes', 'left_cortical_surface', 'area.vtk')
    >>> output_table = ''
    >>> #
    >>> write_face_vertex_averages(input_file, output_table, area_file)

    """
    import os
    import numpy as np
    import pandas as pd

    from mindboggle.mio.vtks import read_vtk, read_scalars

    faces, lines, indices, points, npoints, scalars, name, \
        input_vtk = read_vtk(input_file, True, True)
    if area_file:
        area_scalars, name = read_scalars(area_file, True, True)

    #---------------------------------------------------------------------
    # For each face, average vertex values:
    #---------------------------------------------------------------------
    columns = []
    for face in faces:
        values = []
        for index in face:
            if area_file:
                values.append(scalars[index] / area_scalars[index])
            else:
                values.append(scalars[index])
        columns.append(np.mean(values))

    #-----------------------------------------------------------------
    # Write to table:
    #-----------------------------------------------------------------
    if not output_table:
        output_table = os.path.join(os.getcwd(), 'average_face_values.csv')

    df = pd.DataFrame({'': columns})
    df.to_csv(output_table, index=False)

    if not os.path.exists(output_table):
        raise IOError(output_table + " not found")

    return output_table
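The per-face loop above can also be written as vectorized NumPy, assuming faces arrives as an (n, 3) integer array (as read_vtk returns with return_array=True). A sketch, not the project's code:

import numpy as np

def face_vertex_averages(faces, scalars, area_scalars=None):
    # Average the (optionally area-normalized) vertex scalars over each
    # triangle's three vertices:
    faces = np.asarray(faces)
    scalars = np.asarray(scalars, dtype=float)
    if area_scalars is not None:
        scalars = scalars / np.asarray(area_scalars, dtype=float)
    return scalars[faces].mean(axis=1)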
Example #45
def extract_folds(depth_file, depth_threshold=2, min_fold_size=50,
                  save_file=False, output_file='', background_value=-1,
                  verbose=False):
    """
    Use depth threshold to extract folds from a triangular surface mesh.

    A fold is a group of connected, deep vertices. To extract folds,
    a depth threshold is used to segment deep vertices of the surface mesh.
    We have observed in the histograms of travel depth measures of cortical
    surfaces that there is a rapidly decreasing distribution of low depth
    values (corresponding to the outer surface, or gyral crowns) with a
    long tail of higher depth values (corresponding to the folds).

    The find_depth_threshold function therefore computes a histogram of
    travel depth measures, smooths the histogram's bin values, convolves
    to compute slopes, and finds the depth value for the first bin with
    zero slope. The extract_folds function uses this depth value, segments
    deep vertices, and removes extremely small folds (empirically set at 50
    vertices or fewer out of a total mesh size of over 100,000 vertices).
    A rough sketch of the threshold-finding step appears after the
    function.

    Steps ::
        1. Segment deep vertices as an initial set of folds.
        2. Remove small folds.
        3. Renumber folds.

    Note ::
        Removed option: Find and fill holes in the folds:
        Folds could have holes in areas shallower than the depth threshold.
        Calling fill_holes() could accidentally include very shallow areas
        (in an annulus-shaped fold, for example).
        However, we could include the argument exclude_range to check for
        any values from zero to min_hole_depth; holes would not be filled
        if they were to contain values within this range.

    Parameters
    ----------
    depth_file : string
        surface mesh file in VTK format with faces and depth scalar values
    depth_threshold :  float
        threshold defining the minimum depth for vertices to be in a fold
    min_fold_size : integer
        minimum fold size (number of vertices)
    save_file : bool
        save output VTK file?
    output_file : string
        name of output file in VTK format
    background_value : integer or float
        background value
    verbose : bool
        print statements?

    Returns
    -------
    folds : list of integers
        fold numbers for all vertices (-1 for non-fold vertices)
    n_folds :  int
        number of folds
    folds_file : string (if save_file)
        name of output VTK file with fold IDs (-1 for non-fold vertices)

    Examples
    --------
    >>> from mindboggle.features.folds import extract_folds
    >>> from mindboggle.mio.fetch_data import prep_tests
    >>> urls, fetch_data = prep_tests()
    >>> depth_file = fetch_data(urls['left_travel_depth'], '', '.vtk')
    >>> depth_threshold = 2.36089
    >>> min_fold_size = 50
    >>> save_file = True
    >>> output_file = 'extract_folds.vtk'
    >>> background_value = -1
    >>> verbose = False
    >>> folds, n_folds, folds_file = extract_folds(depth_file,
    ...     depth_threshold, min_fold_size, save_file, output_file,
    ...     background_value, verbose)
    >>> n_folds
    33
    >>> lens = [len([x for x in folds if x == y]) for y in range(n_folds)]
    >>> lens[0:10]
    [726, 67241, 2750, 5799, 1151, 6360, 1001, 505, 228, 198]

    View folds (skip test):

    >>> from mindboggle.mio.plots import plot_surfaces # doctest: +SKIP
    >>> plot_surfaces('extract_folds.vtk') # doctest: +SKIP

    View folds without background (skip test):

    >>> from mindboggle.mio.plots import plot_surfaces # doctest: +SKIP
    >>> from mindboggle.mio.vtks import rewrite_scalars # doctest: +SKIP
    >>> rewrite_scalars(depth_file, 'extract_folds_no_background.vtk', folds,
    ...     'just_folds', folds, -1) # doctest: +SKIP
    >>> plot_surfaces('extract_folds_no_background.vtk') # doctest: +SKIP

    """
    import os
    import numpy as np
    from time import time

    from mindboggle.mio.vtks import rewrite_scalars, read_vtk
    from mindboggle.guts.mesh import find_neighbors
    from mindboggle.guts.segment import segment_regions

    if verbose:
        print("Extract folds in surface mesh")
        t0 = time()

    # ------------------------------------------------------------------------
    # Load depth values for all vertices
    # ------------------------------------------------------------------------
    points, indices, lines, faces, depths, scalar_names, npoints, \
        input_vtk = read_vtk(depth_file, return_first=True, return_array=True)

    # ------------------------------------------------------------------------
    # Find the deepest vertices
    # ------------------------------------------------------------------------
    indices_deep = [i for i,x in enumerate(depths) if x >= depth_threshold]
    if indices_deep:

        # --------------------------------------------------------------------
        # Find neighbors for each vertex
        # --------------------------------------------------------------------
        neighbor_lists = find_neighbors(faces, npoints)

        # --------------------------------------------------------------------
        # Segment deep vertices as an initial set of folds
        # --------------------------------------------------------------------
        if verbose:
            print("  Segment vertices deeper than {0:.2f} as folds".format(depth_threshold))
            t1 = time()
        folds = segment_regions(indices_deep, neighbor_lists, 1, [], False,
                                False, [], [], [], '', background_value, False)
        if verbose:
            print('  ...Segmented folds ({0:.2f} seconds)'.format(time() - t1))

        # --------------------------------------------------------------------
        # Remove small folds
        # --------------------------------------------------------------------
        if min_fold_size > 1:
            if verbose:
                print('  Remove folds smaller than {0}'.format(min_fold_size))
            unique_folds = [x for x in np.unique(folds)
                            if x != background_value]
            for nfold in unique_folds:
                indices_fold = [i for i,x in enumerate(folds) if x == nfold]
                if len(indices_fold) < min_fold_size:
                    folds[indices_fold] = background_value

        # --------------------------------------------------------------------
        # Find and fill holes in the folds
        # Note: Surfaces surrounded by folds can be mistaken for holes,
        #       so exclude_range includes outer surface values close to zero.
        # --------------------------------------------------------------------
        # folds = fill_holes(folds, neighbor_lists, values=depths,
        #                    exclude_range=[0, min_hole_depth])

        # --------------------------------------------------------------------
        # Renumber folds so they are sequential.
        # NOTE: All vertices are included (-1 for non-fold vertices).
        # --------------------------------------------------------------------
        renumber_folds = background_value * np.ones(npoints)
        fold_numbers = [x for x in np.unique(folds) if x != background_value]
        for i_fold, n_fold in enumerate(fold_numbers):
            fold_indices = [i for i,x in enumerate(folds) if x == n_fold]
            renumber_folds[fold_indices] = i_fold
        folds = renumber_folds
        folds = [int(x) for x in folds]
        n_folds = len(fold_numbers)

        # Print statement
        if verbose:
            print('  ...Extracted {0} folds ({1:.2f} seconds)'.
                  format(n_folds, time() - t0))
    else:
        if verbose:
            print('  No deep vertices')
        folds = [int(background_value)] * npoints
        n_folds = 0

    # ------------------------------------------------------------------------
    # Return folds, number of folds, file name
    # ------------------------------------------------------------------------
    if save_file:

        if output_file:
            folds_file = output_file
        else:
            folds_file = os.path.join(os.getcwd(), 'folds.vtk')
        rewrite_scalars(depth_file, folds_file, folds, 'folds', [],
                        background_value)

        if not os.path.exists(folds_file):
            raise IOError(folds_file + " not found")

    else:
        folds_file = None

    return folds, n_folds, folds_file
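The find_depth_threshold() step described in the docstring can be roughly approximated as follows; the bin count and smoothing window here are illustrative, not mindboggle's values:

import numpy as np

def depth_threshold_sketch(depths, bins=100, smooth=5):
    # Histogram the depths, smooth the bin counts, compute discrete
    # slopes, and return the depth of the first bin whose slope is
    # no longer negative:
    counts, edges = np.histogram(depths, bins=bins)
    smoothed = np.convolve(counts, np.ones(smooth) / smooth, mode='same')
    slopes = np.diff(smoothed)
    flat_bins = np.where(slopes >= 0)[0]
    ibin = flat_bins[0] if len(flat_bins) else len(slopes)
    return edges[ibin]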
Example #46
def write_average_face_values_per_label(input_indices_vtk,
                                        input_values_vtk='',
                                        area_file='',
                                        output_stem='',
                                        exclude_values=[-1],
                                        background_value=-1):
    """
    Write out a separate VTK file for each integer
    in (the first) scalar list of an input VTK file.
    Optionally write the values drawn from a second VTK file.

    Parameters
    ----------
    input_indices_vtk : string
        path of the input VTK file that contains indices as scalars
    input_values_vtk : string
        path of the input VTK file that contains values as scalars
    area_file : string
        name of VTK file with surface area scalar values
    output_stem : string
        path and stem of the output VTK file
    exclude_values : list or array
        values to exclude
    background_value : integer or float
        background value in output VTK files

    Examples
    --------
    >>> import os
    >>> from mindboggle.mio.tables import write_average_face_values_per_label
    >>> path = '/homedir/mindboggled'
    >>> input_indices_vtk = os.path.join(path, 'Twins-2-1', 'labels', 'left_cortical_surface', 'freesurfer_cortex_labels.vtk')
    >>> input_values_vtk = os.path.join(path, 'Twins-2-1', 'shapes', 'left_cortical_surface', 'freesurfer_thickness.vtk')
    >>> area_file = os.path.join(path, 'Twins-2-1', 'shapes', 'left_cortical_surface', 'area.vtk')
    >>> output_stem = 'labels_thickness'
    >>> exclude_values = [-1]
    >>> background_value = -1
    >>> #
    >>> write_average_face_values_per_label(input_indices_vtk,
    ...     input_values_vtk, area_file, output_stem, exclude_values, background_value)
    >>> #
    >>> # View:
    >>> #example_vtk = os.path.join(os.getcwd(), output_stem + '0.vtk')
    >>> #from mindboggle.mio.plots import plot_surfaces
    >>> #plot_surfaces(example_vtk)

    """
    import os
    import numpy as np
    import pandas as pd

    from mindboggle.mio.vtks import read_scalars, read_vtk, write_vtk
    from mindboggle.guts.mesh import remove_faces

    # Load VTK file:
    faces, lines, indices, points, npoints, scalars, scalar_names, \
        foo1 = read_vtk(input_indices_vtk, True, True)
    if area_file:
        area_scalars, name = read_scalars(area_file, True, True)
    print("Explode the scalar list in {0}".format(
        os.path.basename(input_indices_vtk)))
    if input_values_vtk != input_indices_vtk:
        values, name = read_scalars(input_values_vtk, True, True)
        print("Explode the scalar list of values in {0} "
              "with the scalar list of indices in {1}".format(
                  os.path.basename(input_values_vtk),
                  os.path.basename(input_indices_vtk)))
    else:
        values = np.copy(scalars)

    # Loop through unique (non-excluded) scalar values:
    unique_scalars = [
        int(x) for x in np.unique(scalars) if x not in exclude_values
    ]
    for scalar in unique_scalars:

        # Create array and indices for scalar value:
        select_scalars = np.copy(scalars)
        select_scalars[scalars != scalar] = background_value
        scalar_indices = [
            i for i, x in enumerate(select_scalars) if x == scalar
        ]
        print("  Scalar {0}: {1} vertices".format(scalar, len(scalar_indices)))

        # Keep only the faces whose vertices all have this scalar value:
        new_faces = remove_faces(faces, scalar_indices)

        #---------------------------------------------------------------------
        # For each face, average vertex values:
        #---------------------------------------------------------------------
        output_table = os.path.join(os.getcwd(),
                                    output_stem + str(scalar) + '.csv')
        columns = []
        for face in new_faces:
            face_values = []
            for index in face:
                if area_file:
                    face_values.append(values[index] / area_scalars[index])
                else:
                    face_values.append(values[index])
            columns.append(np.mean(face_values))

        #-----------------------------------------------------------------
        # Write to table:
        #-----------------------------------------------------------------
        df = pd.DataFrame({'': columns})
        df.to_csv(output_table, index=False)

        # Write VTK file with scalar value:
        #output_vtk = os.path.join(os.getcwd(), output_stem + str(scalar) + '.vtk')
        #write_vtk(output_vtk, points, indices, lines, new_faces,
        #          [select_values.tolist()], [output_scalar_name])

        if not os.path.exists(output_table):
            raise IOError(output_table + " not found")
Example #47
def write_vertex_measures(output_table,
                          labels_or_file,
                          sulci=[],
                          fundi=[],
                          affine_transform_files=[],
                          inverse_booleans=[],
                          transform_format='itk',
                          area_file='',
                          mean_curvature_file='',
                          travel_depth_file='',
                          geodesic_depth_file='',
                          freesurfer_thickness_file='',
                          freesurfer_curvature_file='',
                          freesurfer_sulc_file=''):
    """
    Make a table of shape values per vertex.

    Note ::
        This function is tailored for Mindboggle outputs.

    Parameters
    ----------
    output_table : string
        output file (full path)
    labels_or_file : list or string
        label number for each vertex or name of VTK file with index scalars
    sulci :  list of integers
        indices to sulci, one per vertex, with -1 indicating no sulcus
    fundi :  list of integers
        indices to fundi, one per vertex, with -1 indicating no fundus
    affine_transform_files : list of strings
        affine transform files to standard space
    inverse_booleans : list of zeros and ones
        for each transform, 1 to take the inverse, else 0
    transform_format : string
        format for transform file
        Ex: 'txt' for text, 'itk' for ITK, and 'mat' for Matlab format
    area_file :  string
        name of VTK file with surface area scalar values
    mean_curvature_file :  string
        name of VTK file with mean curvature scalar values
    travel_depth_file :  string
        name of VTK file with travel depth scalar values
    geodesic_depth_file :  string
        name of VTK file with geodesic depth scalar values
    freesurfer_thickness_file :  string
        name of VTK file with FreeSurfer thickness scalar values
    freesurfer_curvature_file :  string
        name of VTK file with FreeSurfer curvature (curv) scalar values
    freesurfer_sulc_file :  string
        name of VTK file with FreeSurfer convexity (sulc) scalar values

    Returns
    -------
    output_table : table file name for vertex shape values

    Examples
    --------
    >>> import os
    >>> from mindboggle.mio.vtks import read_scalars
    >>> from mindboggle.mio.tables import write_vertex_measures
    >>> output_table = ''  # 'vertex_shapes.csv'
    >>> from mindboggle.mio.fetch_data import prep_tests
    >>> urls, fetch_data = prep_tests()
    >>> labels_or_file = fetch_data(urls['left_freesurfer_labels'], '', '.vtk')
    >>> sulci_file = fetch_data(urls['left_sulci'], '', '.vtk')
    >>> fundi_file = fetch_data(urls['left_fundus_per_sulcus'], '', '.vtk')
    >>> mean_curvature_file = fetch_data(urls['left_mean_curvature'], '', '.vtk')
    >>> travel_depth_file = fetch_data(urls['left_travel_depth'], '', '.vtk')
    >>> geodesic_depth_file = fetch_data(urls['left_geodesic_depth'], '', '.vtk')
    >>> area_file = fetch_data(urls['left_area'], '', '.vtk')
    >>> freesurfer_thickness_file = fetch_data(urls['left_freesurfer_thickness'], '', '.vtk')
    >>> freesurfer_curvature_file = fetch_data(urls['left_freesurfer_curvature'], '', '.vtk')
    >>> freesurfer_sulc_file = fetch_data(urls['left_freesurfer_sulc'], '', '.vtk')
    >>> sulci, name = read_scalars(sulci_file)
    >>> if fundi_file:
    ...     fundi, name = read_scalars(fundi_file)
    ... else:
    ...     fundi = []
    >>> affine_transform_file = fetch_data(urls['affine_mni_transform'], '', '.txt')
    >>> inverse_booleans = [1]
    >>> transform_format = 'itk'
    >>> swap_xy = True
    >>> affine_rename = affine_transform_file + '.txt'
    >>> os.rename(affine_transform_file, affine_rename)
    >>> os.rename(labels_or_file, labels_or_file + '.vtk')
    >>> os.rename(area_file, area_file + '.vtk')
    >>> os.rename(mean_curvature_file, mean_curvature_file + '.vtk')
    >>> os.rename(travel_depth_file, travel_depth_file + '.vtk')
    >>> os.rename(geodesic_depth_file, geodesic_depth_file + '.vtk')
    >>> os.rename(freesurfer_thickness_file, freesurfer_thickness_file + '.vtk')
    >>> os.rename(freesurfer_curvature_file, freesurfer_curvature_file + '.vtk')
    >>> os.rename(freesurfer_sulc_file, freesurfer_sulc_file + '.vtk')
    >>> labels_or_file = labels_or_file + '.vtk'
    >>> area_file = area_file + '.vtk'
    >>> mean_curvature_file = mean_curvature_file + '.vtk'
    >>> travel_depth_file = travel_depth_file + '.vtk'
    >>> geodesic_depth_file = geodesic_depth_file + '.vtk'
    >>> freesurfer_thickness_file = freesurfer_thickness_file + '.vtk'
    >>> freesurfer_curvature_file = freesurfer_curvature_file + '.vtk'
    >>> freesurfer_sulc_file = freesurfer_sulc_file + '.vtk'
    >>> affine_transform_files = [] # [affine_rename] # requires ANTs to test
    >>> output_table = write_vertex_measures(output_table, labels_or_file,
    ...     sulci, fundi, affine_transform_files, inverse_booleans,
    ...     transform_format, area_file, mean_curvature_file,
    ...     travel_depth_file, geodesic_depth_file, freesurfer_thickness_file,
    ...     freesurfer_curvature_file, freesurfer_sulc_file)

    """
    import os
    import numpy as np
    import pandas as pd

    from mindboggle.mio.vtks import read_scalars, read_vtk, \
        apply_affine_transforms

    # Make sure inputs are lists:
    if isinstance(labels_or_file, np.ndarray):
        labels = [int(x) for x in labels_or_file]
    elif isinstance(labels_or_file, list):
        labels = labels_or_file
    elif isinstance(labels_or_file, str):
        labels, name = read_scalars(labels_or_file)
    if isinstance(sulci, np.ndarray):
        sulci = [int(x) for x in sulci]
    if isinstance(fundi, np.ndarray):
        fundi = [int(x) for x in fundi]

    if not labels and not sulci and not fundi:
        raise IOError(
            'No feature data to tabulate in write_vertex_measures().')

    # Feature names and corresponding feature lists:
    feature_names = ['label ID', 'sulcus ID', 'fundus ID']
    feature_lists = [labels, sulci, fundi]

    # Shape names corresponding to shape files below:
    shape_names = [
        'area', 'travel depth', 'geodesic depth', 'mean curvature',
        'freesurfer curvature', 'freesurfer thickness',
        'freesurfer convexity (sulc)'
    ]

    # Shape files whose per-vertex scalars are loaded in the loop below:
    shape_files = [
        area_file, travel_depth_file, geodesic_depth_file, mean_curvature_file,
        freesurfer_curvature_file, freesurfer_thickness_file,
        freesurfer_sulc_file
    ]

    # Append columns of per-vertex scalar values:
    columns = []
    column_names = []
    for ifeature, values in enumerate(feature_lists):
        if values:
            columns.append(values)
            column_names.append(feature_names[ifeature])

    first_pass = True
    for ishape, shape_file in enumerate(shape_files):
        if os.path.exists(shape_file):
            if first_pass:

                # Append x,y,z position per vertex to columns:
                points, indices, lines, faces, scalars, scalar_names, \
                    npoints, input_vtk = read_vtk(shape_file)
                xyz_positions = np.asarray(points)
                for ixyz, xyz in enumerate(['x', 'y', 'z']):
                    column_names.append('position: {0}'.format(xyz))
                    columns.append(xyz_positions[:, ixyz].tolist())
                first_pass = False

                # Append standard space x,y,z position to columns:
                if affine_transform_files and transform_format:
                    affine_points, \
                        foo1 = apply_affine_transforms(affine_transform_files,
                                    inverse_booleans, transform_format,
                                    points, vtk_file_stem='')
                    xyz_std_positions = affine_points
                    for ixyz, xyz in enumerate(['x', 'y', 'z']):
                        column_names.append('position in standard space:'
                                            ' {0}'.format(xyz))
                        columns.append(xyz_std_positions[:, ixyz].tolist())
            else:
                scalars, name = read_scalars(shape_file)
            if len(scalars):
                columns.append(scalars)
                column_names.append(shape_names[ishape])

    # Write the table (with a default file name if none is given):
    if not output_table:
        output_table = os.path.join(os.getcwd(), 'vertices.csv')

    df = pd.DataFrame(np.transpose(columns), columns=column_names)
    df.to_csv(output_table, index=False, encoding='utf-8')

    if not os.path.exists(output_table):
        raise IOError(output_table + " not found")

    return output_table
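
write_vertex_measures() builds its table by collecting equal-length per-vertex lists in `columns` and their headers in `column_names`. A toy, self-contained sketch of that pattern (the data here are invented, not Mindboggle output):

import numpy as np
import pandas as pd

# Every column must have exactly one entry per vertex:
columns = [[1, 1, 2],        # label ID
           [0.1, 0.2, 0.3],  # area
           [2.5, 2.7, 2.9]]  # travel depth
column_names = ['label ID', 'area', 'travel depth']

df = pd.DataFrame(np.transpose(columns), columns=column_names)
df.to_csv('vertices.csv', index=False, encoding='utf-8')

Note that np.transpose() requires equal-length columns and coerces them to a common dtype (floats here), so a feature list of the wrong length fails at this step rather than producing a misaligned table.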
Example #48
def spectrum_from_file(vtk_file, spectrum_size=10, exclude_labels=[-1],
                       normalization="areaindex", area_file='', verbose=False):
    """
    Compute Laplace-Beltrami spectrum of a 3D shape in a VTK file.

    Parameters
    ----------
    vtk_file : string
        the input vtk file
    spectrum_size : integer
        number of eigenvalues to be computed (the length of the spectrum)
    exclude_labels : list of integers
        labels to be excluded
    normalization : string
        the method used to normalize eigenvalues
        if None, no normalization is used
        if "area", use area of the 2D structure as in Reuter et al. 2006
        if "index", divide eigenvalue by index to account for linear trend
        if "areaindex", do both (default)
    area_file :  string
        name of VTK file with surface area scalar values
    verbose : bool
        print statements?

    Returns
    -------
    spectrum : list of floats
        first spectrum_size eigenvalues of the Laplace-Beltrami spectrum

    Examples
    --------
    >>> # Spectrum for entire left hemisphere of Twins-2-1:
    >>> import numpy as np
    >>> from mindboggle.shapes.laplace_beltrami import spectrum_from_file
    >>> from mindboggle.shapes.laplace_beltrami import spectrum_per_label
    >>> from mindboggle.mio.fetch_data import prep_tests
    >>> urls, fetch_data = prep_tests()
    >>> vtk_file = fetch_data(urls['left_freesurfer_labels'], '', '.vtk')
    >>> spectrum = spectrum_from_file(vtk_file, spectrum_size=6,
    ...     exclude_labels=[-1], normalization=None, area_file="", verbose=False)
    >>> [float("{0:.{1}f}".format(x, 5)) for x in spectrum[1::]]
    [0.00013, 0.00027, 0.00032, 0.00047, 0.00058]
    >>> spectrum = spectrum_from_file(vtk_file, spectrum_size=6,
    ...     exclude_labels=[-1], normalization="areaindex", area_file="",
    ...     verbose=False)
    >>> [float("{0:.{1}f}".format(x, 5)) for x in spectrum[1::]]
    [14.12801, 14.93573, 11.75397, 12.93141, 12.69348]
    """
    from mindboggle.mio.vtks import read_vtk, read_scalars
    from mindboggle.shapes.laplace_beltrami import spectrum_of_largest

    points, indices, lines, faces, scalars, scalar_names, npoints, \
            input_vtk = read_vtk(vtk_file)

    # Area file:
    if area_file:
        areas, u1 = read_scalars(area_file)
    else:
        areas = None

    spectrum = spectrum_of_largest(points, faces, spectrum_size,
                                   exclude_labels, normalization, areas,
                                   verbose)

    return spectrum
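
A sketch of the eigenvalue normalizations the docstring describes, under the assumption (not verified against Mindboggle's spectrum_of_largest) that "area" multiplies each eigenvalue by the total surface area, restoring the scale invariance described in Reuter et al. 2006, and "index" divides the i-th eigenvalue by i:

def normalize_spectrum(spectrum, total_area=None, method="areaindex"):
    """Normalize a Laplace-Beltrami spectrum (sketch, see note above).

    The first eigenvalue (index 0, ~0 for a closed surface) is left
    untouched by the index normalization.
    """
    out = list(spectrum)
    if method in ("area", "areaindex") and total_area:
        # Eigenvalues scale as 1/area, so multiplying by the area
        # removes the dependence on object size (assumed reading):
        out = [x * total_area for x in out]
    if method in ("index", "areaindex"):
        # Divide by index to flatten the roughly linear growth:
        out = [x / i if i > 0 else x for i, x in enumerate(out)]
    return out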
Example #49
def write_average_face_values_per_label(input_indices_vtk,
                                        input_values_vtk='',
                                        area_file='',
                                        output_stem='',
                                        exclude_values=[-1],
                                        background_value=-1,
                                        verbose=False):
    """
    Write out a separate csv table file for each integer
    in (the first) scalar list of an input VTK file.
    Optionally write the values drawn from a second VTK file.

    Parameters
    ----------
    input_indices_vtk : string
        path of the input VTK file that contains indices as scalars
    input_values_vtk : string
        path of the input VTK file that contains values as scalars
    output_stem : string
        path and stem of the output VTK file
    exclude_values : list or array
        values to exclude
    background_value : integer or float
        background value in output VTK files
    area_file :  string
        name of VTK file with surface area scalar values
    verbose : bool
        print statements?

    Examples
    --------
    >>> import os
    >>> from mindboggle.mio.tables import write_average_face_values_per_label
    >>> from mindboggle.mio.fetch_data import prep_tests
    >>> urls, fetch_data = prep_tests()
    >>> input_indices_vtk = fetch_data(urls['left_freesurfer_labels'], '', '.vtk')
    >>> input_values_vtk = fetch_data(urls['left_mean_curvature'], '', '.vtk')
    >>> area_file = fetch_data(urls['left_area'], '', '.vtk')
    >>> output_stem = 'labels_thickness'
    >>> exclude_values = [-1]
    >>> background_value = -1
    >>> verbose = False
    >>> write_average_face_values_per_label(input_indices_vtk,
    ...     input_values_vtk, area_file, output_stem, exclude_values,
    ...     background_value, verbose)

    View vtk file (skip test):

    >>> from mindboggle.mio.plots import plot_surfaces
    >>> example_vtk = os.path.join(os.getcwd(), output_stem + '0.vtk')
    >>> plot_surfaces(example_vtk) # doctest: +SKIP

    """
    import os
    import numpy as np
    import pandas as pd

    from mindboggle.mio.vtks import read_scalars, read_vtk, write_vtk
    from mindboggle.guts.mesh import keep_faces

    # Load VTK file:
    points, indices, lines, faces, scalars, scalar_names, npoints, \
        input_vtk = read_vtk(input_indices_vtk, True, True)
    if area_file:
        area_scalars, name = read_scalars(area_file, True, True)
    if verbose:
        print("Explode the scalar list in {0}".format(
            os.path.basename(input_indices_vtk)))
    if input_values_vtk != input_indices_vtk:
        # Load the optional values scalars:
        values, name = read_scalars(input_values_vtk, True, True)
        if verbose:
            print("Explode the scalar list of values in {0} "
                  "with the scalar list of indices in {1}".format(
                      os.path.basename(input_values_vtk),
                      os.path.basename(input_indices_vtk)))
    else:
        values = np.copy(scalars)

    # Loop through unique (non-excluded) scalar values:
    unique_scalars = [
        int(x) for x in np.unique(scalars) if x not in exclude_values
    ]
    for scalar in unique_scalars:

        keep_indices = [x for sublst in faces for x in sublst]
        new_faces = keep_faces(faces, keep_indices)

        # Create array and indices for scalar value:
        select_scalars = np.copy(scalars)
        select_scalars[scalars != scalar] = background_value
        scalar_indices = [
            i for i, x in enumerate(select_scalars) if x == scalar
        ]
        if verbose:
            print("  Scalar {0}: {1} vertices".format(scalar,
                                                      len(scalar_indices)))

        # --------------------------------------------------------------------
        # For each face, average vertex values:
        # --------------------------------------------------------------------
        output_table = os.path.join(os.getcwd(),
                                    output_stem + str(scalar) + '.csv')
        columns = []
        for face in new_faces:
            values = []
            for index in face:
                if area_file:
                    values.append(scalars[index] / area_scalars[index])
                else:
                    values.append(scalars[index])
            columns.append(np.mean(values))

        # ----------------------------------------------------------------
        # Write to table:
        # ----------------------------------------------------------------
        df = pd.DataFrame({'': columns})
        df.to_csv(output_table, index=False, encoding='utf-8')
        if not os.path.exists(output_table):
            raise IOError(output_table + " not found")
Example #50
def extract_fundi(folds, curv_file, depth_file, min_separation=10,
                  erode_ratio=0.1, erode_min_size=1, save_file=False):
    """
    Extract fundi from folds.

    A fundus is a branching curve that runs along the deepest and most
    highly curved portions of a fold.

    Steps ::
        1. Find fundus endpoints (outer anchors) with find_outer_anchors().
        2. Include inner anchor points.
        3. Connect anchor points using connect_points_erosion();
           inner anchors are removed if they result in endpoints.

    Parameters
    ----------
    folds : numpy array or list of integers
        fold number for each vertex
    curv_file :  string
        surface mesh file in VTK format with mean curvature values
    depth_file :  string
        surface mesh file in VTK format with rescaled depth values
    min_separation : integer
        minimum number of edges between inner/outer anchor points
    erode_ratio : float
        fraction of indices to test for removal at each iteration
        in connect_points_erosion()
    erode_min_size : integer
        minimum number of vertices required when eroding
        in connect_points_erosion()
    save_file : Boolean
        save output VTK file?

    Returns
    -------
    fundus_per_fold : list of integers
        fundus numbers for all vertices, labeled by fold
        (-1 for non-fundus vertices)
    n_fundi_in_folds :  integer
        number of fundi
    fundus_per_fold_file : string (if save_file)
        output VTK file with fundus numbers (-1 for non-fundus vertices)

    Examples
    --------
    >>> # Extract fundus from one or more folds:
    >>> single_fold = True
    >>> import os
    >>> from mindboggle.mio.vtks import read_scalars
    >>> from mindboggle.features.fundi import extract_fundi
    >>> from mindboggle.mio.plots import plot_surfaces
    >>> path = os.environ['MINDBOGGLE_DATA']
    >>> curv_file = os.path.join(path, 'arno', 'shapes', 'lh.pial.mean_curvature.vtk')
    >>> depth_file = os.path.join(path, 'arno', 'shapes', 'travel_depth_rescaled.vtk')
    >>> folds_file = os.path.join(path, 'arno', 'features', 'folds.vtk')
    >>> folds, name = read_scalars(folds_file, True, True)
    >>> if single_fold:
    ...     fold_number = 2 #11
    ...     folds[folds != fold_number] = -1
    >>> min_separation = 10
    >>> erode_ratio = 0.10
    >>> erode_min_size = 10
    >>> save_file = True
    >>> o1, o2, fundus_per_fold_file = extract_fundi(folds, curv_file,
    ...     depth_file, min_separation, erode_ratio, erode_min_size, save_file)
    >>> #
    >>> # View:
    >>> plot_surfaces(fundus_per_fold_file)

    """

    # Extract a skeleton to connect endpoints in a fold:
    import os
    import numpy as np
    from time import time

    from mindboggle.mio.vtks import read_scalars, read_vtk, rewrite_scalars
    from mindboggle.guts.compute import median_abs_dev
    from mindboggle.guts.paths import find_max_values
    from mindboggle.guts.mesh import find_neighbors_from_file, find_complete_faces
    from mindboggle.guts.paths import find_outer_anchors, connect_points_erosion

    if isinstance(folds, list):
        folds = np.array(folds)

    # Load values, inner anchor threshold, and neighbors:
    points, indices, lines, faces, curvs, scalar_names, npoints, \
        input_vtk = read_vtk(curv_file, True, True)
    depths, name = read_scalars(depth_file, True, True)
    values = curvs * depths
    values0 = [x for x in values if x > 0]
    thr = np.median(values0) + 2 * median_abs_dev(values0)
    neighbor_lists = find_neighbors_from_file(curv_file)

    #-------------------------------------------------------------------------
    # Loop through folds:
    #-------------------------------------------------------------------------
    t1 = time()
    skeletons = []
    unique_fold_IDs = [x for x in np.unique(folds) if x != -1]

    if len(unique_fold_IDs) == 1:
        print("Extract a fundus from 1 fold...")
    else:
        print("Extract a fundus from each of {0} folds...".
              format(len(unique_fold_IDs)))

    for fold_ID in unique_fold_IDs:
        indices_fold = [i for i,x in enumerate(folds) if x == fold_ID]
        if indices_fold:
            print('  Fold {0}:'.format(int(fold_ID)))

            #-----------------------------------------------------------------
            # Find outer anchor points on the boundary of the surface region,
            # to serve as fundus endpoints:
            #-----------------------------------------------------------------
            outer_anchors, tracks = find_outer_anchors(indices_fold,
                neighbor_lists, values, depths, min_separation)

            #-----------------------------------------------------------------
            # Find inner anchor points:
            #-----------------------------------------------------------------
            inner_anchors = find_max_values(points, values, min_separation, thr)

            #-----------------------------------------------------------------
            # Connect anchor points to create skeleton:
            #-----------------------------------------------------------------
            B = -1 * np.ones(npoints)
            B[indices_fold] = 1
            skeleton = connect_points_erosion(B, neighbor_lists,
                outer_anchors, inner_anchors, values,
                erode_ratio, erode_min_size, save_steps=[], save_vtk='')
            if skeleton:
                skeletons.extend(skeleton)

            #-----------------------------------------------------------------
            # Remove fundus vertices if they complete triangle faces:
            #-----------------------------------------------------------------
            Iremove = find_complete_faces(skeletons, faces)
            if Iremove:
                skeletons = list(frozenset(skeletons).difference(Iremove))

    indices_skel = [x for x in skeletons if folds[x] != -1]
    fundus_per_fold = -1 * np.ones(npoints)
    fundus_per_fold[indices_skel] = folds[indices_skel]
    n_fundi_in_folds = len([x for x in np.unique(fundus_per_fold)
                             if x != -1])
    if n_fundi_in_folds == 1:
        sdum = 'fold fundus'
    else:
        sdum = 'fold fundi'
    print('  ...Extracted {0} {1}; {2} total ({3:.2f} seconds)'.
          format(n_fundi_in_folds, sdum, n_fundi_in_folds, time() - t1))

    #-------------------------------------------------------------------------
    # Return fundi, number of fundi, and file name:
    #-------------------------------------------------------------------------
    fundus_per_fold_file = None
    if n_fundi_in_folds > 0:
        fundus_per_fold = [int(x) for x in fundus_per_fold]
        if save_file:
            fundus_per_fold_file = os.path.join(os.getcwd(),
                                                'fundus_per_fold.vtk')
            rewrite_scalars(curv_file, fundus_per_fold_file, fundus_per_fold,
                            'fundi', folds)
            if not os.path.exists(fundus_per_fold_file):
                raise IOError(fundus_per_fold_file + " not found")

    return fundus_per_fold, n_fundi_in_folds, fundus_per_fold_file
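
The inner-anchor threshold above is the median plus twice the median absolute deviation (MAD) of the positive curvature-times-depth values, a robust analogue of mean + 2·SD. A self-contained sketch of that idea (Mindboggle's median_abs_dev may differ, e.g. by a consistency scale factor):

import numpy as np

def mad_threshold(values, n_mads=2.0):
    """Return median + n_mads * MAD of `values`.

    Plain MAD = median(|x - median(x)|); treat this as a sketch of
    the thresholding idea, not of mindboggle.guts.compute.
    """
    values = np.asarray(values, dtype=float)
    med = np.median(values)
    mad = np.median(np.abs(values - med))
    return med + n_mads * mad

# Usage mirroring the code above:
# thr = mad_threshold([x for x in values if x > 0])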
Example #51
def extract_folds(depth_file,
                  min_vertices=10000,
                  min_fold_size=50,
                  do_fill_holes=False,
                  min_hole_depth=0.001,
                  save_file=False):
    """
    Use depth to extract folds from a triangular surface mesh.

    Steps ::
        1. Compute histogram of depth measures.
        2. Define a depth threshold and find the deepest vertices.
        3. Segment deep vertices as an initial set of folds.
        4. Remove small folds.
        5. Find and fill holes in the folds (optional).
        6. Renumber folds.

    Step 2 ::
        To extract an initial set of deep vertices from the surface mesh,
        we anticipate that there will be a rapidly decreasing distribution
        of low depth values (on the outer surface) with a long tail
        of higher depth values (in the folds), so we smooth the histogram's
        bin values, convolve to compute slopes, and find the depth value
        for the first bin with slope = 0. This is our threshold.

    Step 5 ::
        The folds could have holes in areas shallower than the depth threshold.
        Calling fill_holes() could accidentally include very shallow areas
        (in an annulus-shaped fold, for example), so we include the argument
        exclude_range to check for any values from zero to min_hole_depth;
        holes are not filled if they contain values within this range.

    Parameters
    ----------
    depth_file : string
        surface mesh file in VTK format with faces and depth scalar values
    min_vertices : integer
        minimum number of vertices (needed to build the depth histogram)
    min_fold_size : integer
        minimum fold size (number of vertices)
    do_fill_holes : Boolean
        fill holes in the folds?
    min_hole_depth : float
        largest non-zero depth value that will stop a hole from being filled
    save_file : Boolean
        save output VTK file?

    Returns
    -------
    folds : list of integers
        fold numbers for all vertices (-1 for non-fold vertices)
    n_folds :  int
        number of folds
    depth_threshold :  float
        threshold defining the minimum depth for vertices to be in a fold
    bins :  list of integers
        histogram bins: each is the number of vertices within a range of depth values
    bin_edges :  list of floats
        histogram bin edge values defining the bin ranges of depth values
    folds_file : string (if save_file)
        name of output VTK file with fold IDs (-1 for non-fold vertices)

    Examples
    --------
    >>> import os
    >>> import numpy as np
    >>> import pylab
    >>> from scipy.ndimage import gaussian_filter1d
    >>> from mindboggle.mio.vtks import read_scalars
    >>> from mindboggle.guts.mesh import find_neighbors_from_file
    >>> from mindboggle.mio.plots import plot_surfaces
    >>> from mindboggle.features.folds import extract_folds
    >>> path = os.environ['MINDBOGGLE_DATA']
    >>> depth_file = 'travel_depth.vtk' #os.path.join(path, 'arno', 'shapes', 'lh.pial.travel_depth.vtk')
    >>> neighbor_lists = find_neighbors_from_file(depth_file)
    >>> min_vertices = 10000
    >>> min_fold_size = 50
    >>> do_fill_holes = False #True
    >>> min_hole_depth = 0.001
    >>> save_file = True
    >>> #
    >>> folds, n_folds, thr, bins, bin_edges, folds_file = extract_folds(depth_file,
    ...     min_vertices, min_fold_size, do_fill_holes, min_hole_depth, save_file)
    >>> #
    >>> # View folds:
    >>> plot_surfaces('folds.vtk')
    >>> # Plot histogram and depth threshold:
    >>> depths, name = read_scalars(depth_file)
    >>> nbins = int(np.round(len(depths) / 100.0))
    >>> a,b,c = pylab.hist(depths, bins=nbins)
    >>> pylab.plot(thr*np.ones((100,1)), np.linspace(0, max(bins), 100), 'r.')
    >>> pylab.show()
    >>> # Plot smoothed histogram:
    >>> bins_smooth = gaussian_filter1d(bins.tolist(), 5)
    >>> pylab.plot(range(len(bins)), bins, '.', range(len(bins)), bins_smooth,'-')
    >>> pylab.show()

    """
    import os
    import sys
    import numpy as np
    from time import time
    from scipy.ndimage import gaussian_filter1d
    from mindboggle.mio.vtks import rewrite_scalars, read_vtk
    from mindboggle.guts.mesh import find_neighbors
    from mindboggle.guts.morph import fill_holes
    from mindboggle.guts.segment import segment

    print("Extract folds in surface mesh")
    t0 = time()

    #-------------------------------------------------------------------------
    # Load depth values for all vertices
    #-------------------------------------------------------------------------
    points, indices, lines, faces, depths, scalar_names, npoints, \
        input_vtk = read_vtk(depth_file, return_first=True, return_array=True)

    #-------------------------------------------------------------------------
    # Find neighbors for each vertex
    #-------------------------------------------------------------------------
    neighbor_lists = find_neighbors(faces, npoints)

    #-------------------------------------------------------------------------
    # Compute histogram of depth measures
    #-------------------------------------------------------------------------
    if npoints > min_vertices:
        nbins = int(np.round(npoints / 100.0))
    else:
        raise IOError("  Expecting at least {0} vertices to create "
                      "depth histogram".format(min_vertices))
    bins, bin_edges = np.histogram(depths, bins=nbins)

    #-------------------------------------------------------------------------
    # Anticipating that there will be a rapidly decreasing distribution
    # of low depth values (on the outer surface) with a long tail of higher
    # depth values (in the folds), smooth the bin values (Gaussian), convolve
    # to compute slopes, and find the depth for the first bin with slope = 0.
    #-------------------------------------------------------------------------
    bins_smooth = gaussian_filter1d(bins.tolist(), 5)
    window = [-1, 0, 1]
    bin_slopes = np.convolve(bins_smooth, window,
                             mode='same') / (len(window) - 1)
    ibins0 = np.where(bin_slopes == 0)[0]
    if ibins0.size:
        depth_threshold = bin_edges[ibins0[0]]
    else:
        depth_threshold = np.median(depths)

    #-------------------------------------------------------------------------
    # Find the deepest vertices
    #-------------------------------------------------------------------------
    indices_deep = [i for i, x in enumerate(depths) if x >= depth_threshold]
    if indices_deep:

        #---------------------------------------------------------------------
        # Segment deep vertices as an initial set of folds
        #---------------------------------------------------------------------
        print("  Segment vertices deeper than {0:.2f} as folds".format(
            depth_threshold))
        t1 = time()
        folds = segment(indices_deep, neighbor_lists)
        # Slightly slower alternative -- fill boundaries:
        #regions = -1 * np.ones(len(points))
        #regions[indices_deep] = 1
        #folds = segment_by_filling_borders(regions, neighbor_lists)
        print('  ...Segmented folds ({0:.2f} seconds)'.format(time() - t1))

        #---------------------------------------------------------------------
        # Remove small folds
        #---------------------------------------------------------------------
        if min_fold_size > 1:
            print('  Remove folds smaller than {0}'.format(min_fold_size))
            unique_folds = [x for x in np.unique(folds) if x != -1]
            for nfold in unique_folds:
                indices_fold = [i for i, x in enumerate(folds) if x == nfold]
                if len(indices_fold) < min_fold_size:
                    folds[indices_fold] = -1

        #---------------------------------------------------------------------
        # Find and fill holes in the folds
        # Note: Surfaces surrounded by folds can be mistaken for holes,
        #       so exclude_range includes outer surface values close to zero.
        #---------------------------------------------------------------------
        if do_fill_holes:
            print("  Find and fill holes in the folds")
            folds = fill_holes(folds,
                               neighbor_lists,
                               values=depths,
                               exclude_range=[0, min_hole_depth])

        #---------------------------------------------------------------------
        # Renumber folds so they are sequential
        #---------------------------------------------------------------------
        renumber_folds = -1 * np.ones(len(folds))
        fold_numbers = [int(x) for x in np.unique(folds) if x != -1]
        for i_fold, n_fold in enumerate(fold_numbers):
            fold = [i for i, x in enumerate(folds) if x == n_fold]
            renumber_folds[fold] = i_fold
        folds = renumber_folds
        n_folds = len(fold_numbers)

        # Print statement
        print('  ...Extracted {0} folds ({1:.2f} seconds)'.format(
            n_folds,
            time() - t0))
    else:
        print('  No deep vertices')
        folds = -1 * np.ones(npoints)
        n_folds = 0

    folds = [int(x) for x in folds]

    #-------------------------------------------------------------------------
    # Return folds, number of folds, file name
    #-------------------------------------------------------------------------
    if save_file:

        folds_file = os.path.join(os.getcwd(), 'folds.vtk')
        rewrite_scalars(depth_file, folds_file, folds, 'folds', folds)

        if not os.path.exists(folds_file):
            raise IOError(folds_file + " not found")

    else:
        folds_file = None

    return folds, n_folds, depth_threshold, bins, bin_edges, folds_file
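
The renumbering step near the end of extract_folds() makes fold IDs sequential while leaving background vertices at -1. The same operation as a compact NumPy sketch (illustrative, not Mindboggle code):

import numpy as np

def renumber_sequential(folds, background=-1):
    """Renumber region IDs to 0..n-1; background vertices stay put."""
    folds = np.asarray(folds)
    out = np.full(folds.shape, background, dtype=int)
    fold_ids = np.unique(folds[folds != background])
    for new_id, old_id in enumerate(fold_ids):
        out[folds == old_id] = new_id
    return out.tolist(), len(fold_ids)

# e.g. renumber_sequential([-1, 3, 3, 7]) == ([-1, 0, 0, 1], 2)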
Example #52
def find_depth_threshold(depth_file, min_vertices=10000, verbose=False):
    """
    Find depth threshold to extract folds from a triangular surface mesh.

    Steps ::
        1. Compute histogram of depth measures.
        2. Define a depth threshold and find the deepest vertices.
           To extract an initial set of deep vertices from the surface mesh,
           we anticipate that there will be a rapidly decreasing distribution
           of low depth values (on the outer surface) with a long tail
           of higher depth values (in the folds), so we smooth the histogram's
           bin values, convolve to compute slopes, and find the depth value
           for the first bin with slope = 0. This is our threshold.

    Parameters
    ----------
    depth_file : string
        surface mesh file in VTK format with faces and depth scalar values
    min_vertices : integer
        minimum number of vertices
    verbose : bool
        print statements?

    Returns
    -------
    depth_threshold :  float
        threshold defining the minimum depth for vertices to be in a fold
    bins :  list of integers
        histogram bins: each is the number of vertices within a range of depth values
    bin_edges :  list of floats
        histogram bin edge values defining the bin ranges of depth values

    Examples
    --------
    >>> import numpy as np
    >>> from mindboggle.features.folds import find_depth_threshold
    >>> from mindboggle.mio.fetch_data import prep_tests
    >>> urls, fetch_data = prep_tests()
    >>> depth_file = fetch_data(urls['left_travel_depth'], '', '.vtk')
    >>> min_vertices = 10000
    >>> verbose = False
    >>> depth_threshold, bins, bin_edges = find_depth_threshold(depth_file,
    ...     min_vertices, verbose)
    >>> float(np.array_str(np.array([depth_threshold]),
    ...     precision=5).strip('[').strip(']').strip())
    2.36089

    View threshold histogram plots (skip test):

    >>> def vis():
    ...     import numpy as np
    ...     import pylab
    ...     from scipy.ndimage.filters import gaussian_filter1d
    ...     from mindboggle.mio.vtks import read_scalars
    ...     # Plot histogram and depth threshold:
    ...     depths, name = read_scalars(depth_file)
    ...     nbins = int(np.round(len(depths) / 100.0))
    ...     a,b,c = pylab.hist(depths, bins=nbins)
    ...     pylab.plot(depth_threshold * np.ones((100,1)),
    ...                np.linspace(0, max(bins), 100), 'r.')
    ...     pylab.title('Histogram of depth values with threshold')
    ...     pylab.xlabel('Depth')
    ...     pylab.ylabel('Number of vertices')
    ...     pylab.show()
    ...     # Plot smoothed histogram:
    ...     bins_smooth = gaussian_filter1d(bins.tolist(), 5)
    ...     pylab.plot(list(range(len(bins))), bins, '.',
    ...                list(range(len(bins))), bins_smooth,'-')
    ...     pylab.title('Smoothed histogram of depth values')
    ...     pylab.show()
    >>> vis() # doctest: +SKIP

    """
    import numpy as np
    from scipy.ndimage import gaussian_filter1d

    from mindboggle.mio.vtks import read_vtk

    # ------------------------------------------------------------------------
    # Load depth values for all vertices:
    # ------------------------------------------------------------------------
    points, indices, lines, faces, depths, scalar_names, npoints, \
        input_vtk = read_vtk(depth_file, return_first=True, return_array=True)

    # ------------------------------------------------------------------------
    # Compute histogram of depth measures:
    # ------------------------------------------------------------------------
    if npoints > min_vertices:
        nbins = int(np.round(npoints / 100.0))
    else:
        raise IOError("  Expecting at least {0} vertices to create "
                      "depth histogram".format(min_vertices))
    bins, bin_edges = np.histogram(depths, bins=nbins)

    # ------------------------------------------------------------------------
    # Anticipating that there will be a rapidly decreasing distribution
    # of low depth values (on the outer surface) with a long tail of higher
    # depth values (in the folds), smooth the bin values (Gaussian), convolve
    # to compute slopes, and find the depth for the first bin with slope = 0.
    # ------------------------------------------------------------------------
    bins_smooth = gaussian_filter1d(bins.tolist(), 5)
    window = [-1, 0, 1]
    bin_slopes = np.convolve(bins_smooth, window, mode='same') / \
                 (len(window) - 1)
    ibins0 = np.where(bin_slopes == 0)[0]
    if ibins0.size:
        depth_threshold = bin_edges[ibins0[0]]
    else:
        depth_threshold = np.median(depths)

    # Print statement:
    if verbose:
        print('  Depth threshold: {0}'.format(depth_threshold))

    return depth_threshold, bins, bin_edges
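
To make the threshold heuristic concrete, here is a self-contained run on synthetic depth data following the same steps: histogram, Gaussian smoothing, central-difference slopes via the [-1, 0, 1] window, then the left edge of the first zero-slope bin. The depth distribution below is an invented stand-in for travel depth:

import numpy as np
from scipy.ndimage import gaussian_filter1d

rng = np.random.default_rng(0)
# Sharp peak of shallow vertices plus a long tail of deep ones:
depths = np.concatenate([np.abs(rng.normal(0.0, 0.5, 90000)),
                         rng.uniform(2.0, 20.0, 30000)])

nbins = int(np.round(len(depths) / 100.0))
bins, bin_edges = np.histogram(depths, bins=nbins)

bins_smooth = gaussian_filter1d(bins.astype(float), 5)
window = [-1, 0, 1]
bin_slopes = np.convolve(bins_smooth, window, mode='same') / (len(window) - 1)

# Exactly-zero slopes need not occur for every histogram, hence the
# median fallback used by find_depth_threshold():
ibins0 = np.where(bin_slopes == 0)[0]
depth_threshold = bin_edges[ibins0[0]] if ibins0.size else np.median(depths)
print('depth threshold:', depth_threshold)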
Example #53
def write_shape_stats(labels_or_file=[], sulci=[], fundi=[],
        affine_transform_files=[], inverse_booleans=[], transform_format='itk',
        area_file='', normalize_by_area=False, mean_curvature_file='',
        travel_depth_file='', geodesic_depth_file='',
        freesurfer_thickness_file='', freesurfer_curvature_file='',
        freesurfer_sulc_file='',
        labels_spectra=[], labels_spectra_IDs=[],
        sulci_spectra=[], sulci_spectra_IDs=[],
        labels_zernike=[], labels_zernike_IDs=[],
        sulci_zernike=[], sulci_zernike_IDs=[],
        exclude_labels=[-1]):
    """
    Make tables of shape statistics per label, sulcus, and/or fundus.

    Note ::
        This function is tailored for Mindboggle outputs.

    Parameters
    ----------
    labels_or_file : list or string
        label number for each vertex or name of VTK file with index scalars
    sulci :  list of integers
        indices to sulci, one per vertex, with -1 indicating no sulcus
    fundi :  list of integers
        indices to fundi, one per vertex, with -1 indicating no fundus
    affine_transform_files : list of strings
        affine transform files to standard space
    inverse_booleans : list of zeros and ones
        for each transform, 1 to take the inverse, else 0
    transform_format : string
        format for transform file
        Ex: 'txt' for text, 'itk' for ITK, and 'mat' for Matlab format
    area_file :  string
        name of VTK file with surface area scalar values
    normalize_by_area : Boolean
        normalize all shape measures by area of label/feature? (UNTESTED)
    mean_curvature_file :  string
        name of VTK file with mean curvature scalar values
    travel_depth_file :  string
        name of VTK file with travel depth scalar values
    geodesic_depth_file :  string
        name of VTK file with geodesic depth scalar values
    freesurfer_thickness_file :  string
        name of VTK file with FreeSurfer thickness scalar values
    freesurfer_curvature_file :  string
        name of VTK file with FreeSurfer curvature (curv) scalar values
    freesurfer_sulc_file :  string
        name of VTK file with FreeSurfer convexity (sulc) scalar values
    labels_spectra : list of lists of floats
        Laplace-Beltrami spectra for each labeled region
    labels_spectra_IDs : list of integers
        unique labels for labels_spectra
    sulci_spectra : list of lists of floats
        Laplace-Beltrami spectra for each sulcus
    sulci_spectra_IDs : list of integers
        unique sulcus IDs for sulci_spectra
    labels_zernike : list of lists of floats
        Zernike moments for each labeled region
    labels_zernike_IDs : list of integers
        unique labels for labels_zernike
    sulci_zernike : list of lists of floats
        Zernike moments for each sulcus
    sulci_zernike_IDs : list of integers
        unique sulcus IDs for sulci_zernike
    exclude_labels : list of integers
        indices to be excluded (in addition to -1)

    Returns
    -------
    label_table :  string
        output table filename for label shapes
    sulcus_table :  string
        output table filename for sulcus shapes
    fundus_table :  string
        output table filename for fundus shapes

    Examples
    --------
    >>> import os
    >>> from mindboggle.mio.vtks import read_scalars
    >>> from mindboggle.mio.tables import write_shape_stats
    >>> path = '/homedir/mindboggled/Twins-2-1'
    >>> labels_or_file = os.path.join(path, 'labels', 'left_cortical_surface', 'freesurfer_cortex_labels.vtk')
    >>> sulci_file = os.path.join(path, 'features', 'left_cortical_surface', 'sulci.vtk')
    >>> fundi_file = os.path.join(path, 'features', 'left_cortical_surface', 'fundus_per_sulcus.vtk')
    >>> sulci, name = read_scalars(sulci_file)
    >>> fundi, name = read_scalars(fundi_file)
    >>> affine_transform_files = [] #os.path.join(path, 'arno', 'mri', 't1weighted_brain.MNI152Affine.txt')
    >>> inverse_booleans = []
    >>> #transform_format = 'mat'
    >>> transform_format = 'itk'
    >>> area_file = os.path.join(path, 'shapes', 'left_cortical_surface', 'area.vtk')
    >>> normalize_by_area = False
    >>> mean_curvature_file = os.path.join(path, 'shapes', 'left_cortical_surface', 'mean_curvature.vtk')
    >>> travel_depth_file = os.path.join(path, 'shapes', 'left_cortical_surface', 'travel_depth.vtk')
    >>> geodesic_depth_file = os.path.join(path, 'shapes', 'left_cortical_surface', 'geodesic_depth.vtk')
    >>> freesurfer_thickness_file = ''
    >>> freesurfer_curvature_file = ''
    >>> freesurfer_sulc_file = ''
    >>> #
    >>> labels, name = read_scalars(labels_or_file)
    >>> labels_spectra = []
    >>> labels_spectra_IDs = []
    >>> sulci_spectra = []
    >>> sulci_spectra_IDs = []
    >>> labels_zernike = []
    >>> labels_zernike_IDs = []
    >>> sulci_zernike = []
    >>> sulci_zernike_IDs = []
    >>> exclude_labels = [-1]
    >>> #
    >>> write_shape_stats(labels_or_file, sulci, fundi,
    ...     affine_transform_files, inverse_booleans, transform_format,
    ...     area_file, normalize_by_area,
    ...     mean_curvature_file, travel_depth_file, geodesic_depth_file,
    ...     freesurfer_thickness_file, freesurfer_curvature_file,
    ...     freesurfer_sulc_file,
    ...     labels_spectra, labels_spectra_IDs,
    ...     sulci_spectra, sulci_spectra_IDs,
    ...     labels_zernike, labels_zernike_IDs,
    ...     sulci_zernike, sulci_zernike_IDs,
    ...     exclude_labels)

    """
    import os
    import numpy as np
    import pandas as pd

    from mindboggle.guts.compute import means_per_label, stats_per_label, \
        sum_per_label
    from mindboggle.mio.vtks import read_scalars, read_vtk, \
        apply_affine_transforms
    from mindboggle.mio.labels import DKTprotocol

    dkt = DKTprotocol()

    # Make sure inputs are lists:
    if isinstance(labels_or_file, np.ndarray):
        labels = [int(x) for x in labels_or_file]
    elif isinstance(labels_or_file, list):
        labels = labels_or_file
    elif isinstance(labels_or_file, str):
        labels, name = read_scalars(labels_or_file)
    if isinstance(sulci, np.ndarray):
        sulci = [int(x) for x in sulci]
    if isinstance(fundi, np.ndarray):
        fundi = [int(x) for x in fundi]

    if not labels and not sulci and not fundi:
        import sys
        sys.exit('No feature data to tabulate in write_shape_stats().')

    spectrum_start = 1  # Store all columns of spectral components (0),
                        # or start from higher frequency components (>=1)

    #-------------------------------------------------------------------------
    # Feature lists, shape names, and shape files:
    #-------------------------------------------------------------------------
    # Feature lists:
    feature_lists = [labels, sulci, fundi]
    feature_names = ['label', 'sulcus', 'fundus']
    spectra_lists = [labels_spectra, sulci_spectra]
    spectra_ID_lists = [labels_spectra_IDs, sulci_spectra_IDs]
    zernike_lists = [labels_zernike, sulci_zernike]
    zernike_ID_lists = [labels_zernike_IDs, sulci_zernike_IDs]
    table_names = ['label_shapes.csv', 'sulcus_shapes.csv',
                   'fundus_shapes.csv']

    # Shape names corresponding to shape files below:
    shape_names = ['area', 'travel depth', 'geodesic depth',
                   'mean curvature', 'freesurfer curvature',
                   'freesurfer thickness', 'freesurfer convexity (sulc)']

    # Load shape files as a list of numpy arrays of per-vertex shape values:
    shape_files = [area_file, travel_depth_file, geodesic_depth_file,
                   mean_curvature_file, freesurfer_curvature_file,
                   freesurfer_thickness_file, freesurfer_sulc_file]
    shape_arrays = []
    first_pass = True
    area_array = []

    for ishape, shape_file in enumerate(shape_files):
        if os.path.exists(shape_file):
            if first_pass:
                points, indices, lines, faces, scalars_array, scalar_names, \
                    npoints, input_vtk = read_vtk(shape_file, True, True)
                points = np.array(points)
                first_pass = False
                if affine_transform_files and transform_format:
                    affine_points, \
                        foo1 = apply_affine_transforms(affine_transform_files,
                                    inverse_booleans, transform_format,
                                    points, vtk_file_stem='')
            else:
                scalars_array, name = read_scalars(shape_file, True, True)
            if scalars_array.size:
                shape_arrays.append(scalars_array)

                # Store area array:
                if ishape == 0:
                    area_array = scalars_array.copy()

    if normalize_by_area:
        use_area = area_array
    else:
        use_area = []

    # Initialize table file names:
    label_table = ''
    sulcus_table = ''
    fundus_table = ''

    # Loop through features / tables:
    for itable, feature_list in enumerate(feature_lists):
        column_names = []

        #-----------------------------------------------------------------
        # Label names:
        #-----------------------------------------------------------------
        label_title = 'name'
        if itable == 0:
            label_numbers = dkt.cerebrum_cortex_DKT31_numbers
            label_names = dkt.cerebrum_cortex_DKT31_names
        elif itable in [1, 2]:
            label_numbers = dkt.sulcus_numbers
            label_names = dkt.sulcus_names
        else:
            label_numbers = []
            label_names = []
        include_labels = label_numbers
        nlabels = len(label_numbers)

        #---------------------------------------------------------------------
        # For each feature, construct a table of average shape values:
        #---------------------------------------------------------------------
        if feature_list:
            feature_name = feature_names[itable]
            columns = []

            #-----------------------------------------------------------------
            # Loop through shape measures:
            #-----------------------------------------------------------------
            for ishape, shape_array in enumerate(shape_arrays):
                shape = shape_names[ishape]
                print('  Compute statistics on {0} {1}...'.
                      format(feature_name, shape))
                #-------------------------------------------------------------
                # Append feature areas to columns:
                #-------------------------------------------------------------
                if ishape == 0 and np.size(area_array):
                    sums, label_list = sum_per_label(shape_array,
                        feature_list, include_labels, exclude_labels)
                    column_names.append(shape)
                    columns.append(sums)
                #-------------------------------------------------------------
                # Append feature shape statistics to columns:
                #-------------------------------------------------------------
                else:
                    medians, mads, means, sdevs, skews, kurts, \
                    lower_quarts, upper_quarts, \
                    label_list = stats_per_label(shape_array, feature_list,
                                        include_labels, exclude_labels,
                                        area_array, precision=1)

                    column_names.append(shape + ': median')
                    column_names.append(shape + ': MAD')
                    column_names.append(shape + ': mean')
                    column_names.append(shape + ': SD')
                    column_names.append(shape + ': skew')
                    column_names.append(shape + ': kurtosis')
                    column_names.append(shape + ': 25%')
                    column_names.append(shape + ': 75%')
                    columns.append(medians)
                    columns.append(mads)
                    columns.append(means)
                    columns.append(sdevs)
                    columns.append(skews)
                    columns.append(kurts)
                    columns.append(lower_quarts)
                    columns.append(upper_quarts)

            #-----------------------------------------------------------------
            # Mean positions in the original space:
            #-----------------------------------------------------------------
            # Compute mean position per feature:
            positions, sdevs, label_list, foo = means_per_label(points,
                feature_list, include_labels, exclude_labels, use_area)

            # Append mean x,y,z position per feature to columns:
            xyz_positions = np.asarray(positions)
            for ixyz, xyz in enumerate(['x','y','z']):
                column_names.append('mean position: {0}'.format(xyz))
                columns.append(xyz_positions[:, ixyz].tolist())

            #-----------------------------------------------------------------
            # Mean positions in standard space:
            #-----------------------------------------------------------------
            if affine_transform_files and transform_format:
                # Compute standard space mean position per feature:
                standard_positions, sdevs, label_list, \
                foo = means_per_label(affine_points,
                    feature_list, include_labels, exclude_labels, use_area)

                # Append standard space x,y,z position per feature to columns:
                xyz_std_positions = np.asarray(standard_positions)
                for ixyz, xyz in enumerate(['x','y','z']):
                    column_names.append('mean position in standard space:'
                                        ' {0}'.format(xyz))
                    columns.append(xyz_std_positions[:, ixyz].tolist())

            #-----------------------------------------------------------------
            # Laplace-Beltrami spectra:
            #-----------------------------------------------------------------
            if itable in [0, 1]:
                spectra = spectra_lists[itable]
                if spectra:
                    spectra_IDs = spectra_ID_lists[itable]

                    # Construct a matrix of spectra:
                    len_spectrum = len(spectra[0])
                    spectrum_matrix = np.zeros((nlabels, len_spectrum))
                    for ilabel, label in enumerate(include_labels):
                        if label in spectra_IDs:
                            spectrum = spectra[spectra_IDs.index(label)]
                            spectrum_matrix[ilabel, 0:len_spectrum] = spectrum

                    # Append spectral shape name and values to columns:
                    for ispec in range(spectrum_start, len_spectrum):
                        columns.append(spectrum_matrix[:, ispec].tolist())
                        column_names.append('Laplace-Beltrami spectrum:'
                                            ' component {0}'.format(ispec+1))

            #-----------------------------------------------------------------
            # Zernike moments:
            #-----------------------------------------------------------------
            if itable in [0, 1]:
                zernike = zernike_lists[itable]
                if zernike:
                    zernike_IDs = zernike_ID_lists[itable]

                    # Construct a matrix of Zernike moments:
                    len_moments = len(zernike[0])
                    moments_matrix = np.zeros((nlabels, len_moments))
                    for ilabel, label in enumerate(include_labels):
                        if label in zernike_IDs:
                            moments = zernike[zernike_IDs.index(label)]
                            moments_matrix[ilabel, 0:len_moments] = moments

                    # Append Zernike shape name and values to columns:
                    for imoment in range(0, len_moments):
                        columns.append(moments_matrix[:, imoment].tolist())
                        column_names.append('Zernike moments: component {0}'.
                                            format(imoment+1))

            #-----------------------------------------------------------------
            # Write labels/IDs and values to table:
            #-----------------------------------------------------------------
            # Write labels/IDs to table:
            output_table = os.path.join(os.getcwd(), table_names[itable])

            if columns:
                df1 = pd.DataFrame({'ID': label_numbers})
                df2 = pd.DataFrame(np.transpose(columns),
                                   columns = column_names)
                df = pd.concat([df1, df2], axis=1)
                if label_names:
                    df0 = pd.DataFrame({'name': label_names})
                    df = pd.concat([df0, df], axis=1)
                df.to_csv(output_table, index=False)

            if not os.path.exists(output_table):
                raise IOError(output_table + " not found")

            #-----------------------------------------------------------------
            # Return correct table file name:
            #-----------------------------------------------------------------
            if itable == 0:
                label_table = output_table
            elif itable == 1:
                sulcus_table = output_table
            elif itable == 2:
                fundus_table = output_table

    return label_table, sulcus_table, fundus_table
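
stats_per_label() supplies the eight summary statistics appended per shape measure above. A minimal unweighted sketch of such a summary using pandas and scipy (summarize_per_label is a hypothetical helper; Mindboggle's version can additionally weight by vertex area):

import numpy as np
import pandas as pd
from scipy import stats

def summarize_per_label(values, labels, exclude=(-1,)):
    """Median, MAD, mean, SD, skew, kurtosis, and quartiles per label."""
    values = np.asarray(values, dtype=float)
    labels = np.asarray(labels)
    rows = {}
    for label in np.unique(labels):
        if label in exclude:
            continue
        v = values[labels == label]
        med = np.median(v)
        rows[int(label)] = {
            'median': med,
            'MAD': np.median(np.abs(v - med)),
            'mean': v.mean(),
            'SD': v.std(),
            'skew': stats.skew(v),
            'kurtosis': stats.kurtosis(v),
            '25%': np.percentile(v, 25),
            '75%': np.percentile(v, 75),
        }
    return pd.DataFrame.from_dict(rows, orient='index')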
Example #54
def extract_folds(depth_file,
                  depth_threshold=2,
                  min_fold_size=50,
                  save_file=False,
                  output_file='',
                  background_value=-1,
                  verbose=False):
    """
    Use depth threshold to extract folds from a triangular surface mesh.

    A fold is a group of connected, deep vertices. To extract folds,
    a depth threshold is used to segment deep vertices of the surface mesh.
    We have observed in the histograms of travel depth measures of cortical
    surfaces that there is a rapidly decreasing distribution of low depth
    values (corresponding to the outer surface, or gyral crowns) with a
    long tail of higher depth values (corresponding to the folds).

    The find_depth_threshold function therefore computes a histogram of
    travel depth measures, smooths the histogram's bin values, convolves
    to compute slopes, and finds the depth value for the first bin with
    zero slope. The extract_folds function uses this depth value, segments
    deep vertices, and removes extremely small folds (empirically set at 50
    vertices or fewer out of a total mesh size of over 100,000 vertices).

    Steps ::
        1. Segment deep vertices as an initial set of folds.
        2. Remove small folds.
        3. Renumber folds.

    Note ::
        Removed option: Find and fill holes in the folds:
        Folds could have holes in areas shallower than the depth threshold.
        Calling fill_holes() could accidentally include very shallow areas
        (in an annulus-shaped fold, for example).
        However, we could include the argument exclude_range to check for
        any values from zero to min_hole_depth; holes would not be filled
        if they were to contain values within this range.

    Parameters
    ----------
    depth_file : string
        surface mesh file in VTK format with faces and depth scalar values
    depth_threshold :  float
        threshold defining the minimum depth for vertices to be in a fold
    min_fold_size : integer
        minimum fold size (number of vertices)
    save_file : bool
        save output VTK file?
    output_file : string
        name of output file in VTK format
    background_value : integer or float
        background value
    verbose : bool
        print statements?

    Returns
    -------
    folds : list of integers
        fold numbers for all vertices (-1 for non-fold vertices)
    n_folds :  int
        number of folds
    folds_file : string (if save_file)
        name of output VTK file with fold IDs (-1 for non-fold vertices)

    Examples
    --------
    >>> from mindboggle.features.folds import extract_folds
    >>> from mindboggle.mio.fetch_data import prep_tests
    >>> urls, fetch_data = prep_tests()
    >>> depth_file = fetch_data(urls['left_travel_depth'], '', '.vtk')
    >>> depth_threshold = 2.36089
    >>> min_fold_size = 50
    >>> save_file = True
    >>> output_file = 'extract_folds.vtk'
    >>> background_value = -1
    >>> verbose = False
    >>> folds, n_folds, folds_file = extract_folds(depth_file,
    ...     depth_threshold, min_fold_size, save_file, output_file,
    ...     background_value, verbose)
    >>> n_folds
    33
    >>> lens = [len([x for x in folds if x == y]) for y in range(n_folds)]
    >>> lens[0:10]
    [726, 67241, 2750, 5799, 1151, 6360, 1001, 505, 228, 198]

    View folds (skip test):

    >>> from mindboggle.mio.plots import plot_surfaces # doctest: +SKIP
    >>> plot_surfaces('extract_folds.vtk') # doctest: +SKIP

    View folds without background (skip test):

    >>> from mindboggle.mio.plots import plot_surfaces # doctest: +SKIP
    >>> from mindboggle.mio.vtks import rewrite_scalars # doctest: +SKIP
    >>> rewrite_scalars(depth_file, 'extract_folds_no_background.vtk', folds,
    ...     'just_folds', folds, -1) # doctest: +SKIP
    >>> plot_surfaces('extract_folds_no_background.vtk') # doctest: +SKIP

    """
    import os
    import numpy as np
    from time import time

    from mindboggle.mio.vtks import rewrite_scalars, read_vtk
    from mindboggle.guts.mesh import find_neighbors
    from mindboggle.guts.segment import segment_regions

    if verbose:
        print("Extract folds in surface mesh")
        t0 = time()

    # ------------------------------------------------------------------------
    # Load depth values for all vertices
    # ------------------------------------------------------------------------
    points, indices, lines, faces, depths, scalar_names, npoints, \
        input_vtk = read_vtk(depth_file, return_first=True, return_array=True)

    # ------------------------------------------------------------------------
    # Find the deepest vertices
    # ------------------------------------------------------------------------
    indices_deep = [i for i, x in enumerate(depths) if x >= depth_threshold]
    if indices_deep:

        # --------------------------------------------------------------------
        # Find neighbors for each vertex
        # --------------------------------------------------------------------
        neighbor_lists = find_neighbors(faces, npoints)

        # --------------------------------------------------------------------
        # Segment deep vertices as an initial set of folds
        # --------------------------------------------------------------------
        if verbose:
            print("  Segment vertices deeper than {0:.2f} as folds".format(
                depth_threshold))
            t1 = time()
        folds = segment_regions(indices_deep, neighbor_lists, 1, [], False,
                                False, [], [], [], '', background_value, False)
        if verbose:
            print('  ...Segmented folds ({0:.2f} seconds)'.format(time() - t1))

        # --------------------------------------------------------------------
        # Remove small folds
        # --------------------------------------------------------------------
        if min_fold_size > 1:
            if verbose:
                print('  Remove folds smaller than {0}'.format(min_fold_size))
            unique_folds = [
                x for x in np.unique(folds) if x != background_value
            ]
            for nfold in unique_folds:
                indices_fold = [i for i, x in enumerate(folds) if x == nfold]
                if len(indices_fold) < min_fold_size:
                    folds[indices_fold] = background_value

        # --------------------------------------------------------------------
        # Find and fill holes in the folds
        # Note: Surfaces surrounded by folds can be mistaken for holes,
        #       so exclude_range includes outer surface values close to zero.
        # --------------------------------------------------------------------
        # folds = fill_holes(folds, neighbor_lists, values=depths,
        #                    exclude_range=[0, min_hole_depth])

        # --------------------------------------------------------------------
        # Renumber folds so they are sequential.
        # NOTE: All vertices are included (-1 for non-fold vertices).
        # --------------------------------------------------------------------
        renumber_folds = background_value * np.ones(npoints)
        fold_numbers = [x for x in np.unique(folds) if x != background_value]
        for i_fold, n_fold in enumerate(fold_numbers):
            fold_indices = [i for i, x in enumerate(folds) if x == n_fold]
            renumber_folds[fold_indices] = i_fold
        folds = renumber_folds
        folds = [int(x) for x in folds]
        # len() is safe even when every fold was removed as too small:
        n_folds = len(fold_numbers)

        # Report how many folds were extracted:
        if verbose:
            print('  ...Extracted {0} folds ({1:.2f} seconds)'.format(
                n_folds,
                time() - t0))
    else:
        # No vertices exceed the depth threshold: define all-background
        # outputs so the return statement below has values to return.
        folds = [int(background_value)] * npoints
        n_folds = 0
        if verbose:
            print('  No deep vertices')

    # ------------------------------------------------------------------------
    # Return folds, number of folds, file name
    # ------------------------------------------------------------------------
    if save_file:

        if output_file:
            folds_file = output_file
        else:
            folds_file = os.path.join(os.getcwd(), 'folds.vtk')
        rewrite_scalars(depth_file, folds_file, folds, 'folds', [],
                        background_value)

        if not os.path.exists(folds_file):
            raise IOError(folds_file + " not found")

    else:
        folds_file = None

    return folds, n_folds, folds_file
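
A minimal sketch of the depth-threshold heuristic described in the
docstring above (histogram, smoothing, slopes, first zero-slope bin).
This is an illustration, not mindboggle's find_depth_threshold; the bin
count and smoothing width are assumed values.

import numpy as np

def sketch_find_depth_threshold(depths, nbins=100, smooth=5):
    """Sketch: depth at the first bin where the histogram stops falling."""
    counts, edges = np.histogram(depths, bins=nbins)
    # Smooth bin counts with a simple moving average:
    smoothed = np.convolve(counts, np.ones(smooth) / smooth, mode='same')
    # Slopes between adjacent smoothed bins:
    slopes = np.diff(smoothed)
    # First bin whose slope is no longer negative (end of the rapidly
    # decreasing low-depth distribution):
    flat = np.where(slopes >= 0)[0]
    return edges[flat[0] + 1] if len(flat) else edges[-1]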
Example #55
def write_face_vertex_averages(input_file, output_table='', area_file=''):
    """
    Make a table of average vertex values per face (with each vertex
    value divided by its area scalar value if area_file is provided).

    Parameters
    ----------
    input_file : string
        name of VTK file with scalars to average
    output_table :  string
        output table filename
    area_file :  string
        name of VTK file with surface area scalar values

    Returns
    -------
    output_table :  string
        output table filename

    Examples
    --------
    >>> import os
    >>> from mindboggle.mio.tables import write_face_vertex_averages
    >>> path = '/homedir/mindboggled'
    >>> input_file = os.path.join(path, 'Twins-2-1', 'shapes', 'left_cortical_surface', 'freesurfer_thickness.vtk')
    >>> area_file = os.path.join(path, 'Twins-2-1', 'shapes', 'left_cortical_surface', 'area.vtk')
    >>> output_table = ''
    >>> #
    >>> write_face_vertex_averages(input_file, output_table, area_file)

    """
    import os
    import numpy as np
    import pandas as pd

    from mindboggle.mio.vtks import read_vtk, read_scalars

    points, indices, lines, faces, scalars, scalar_names, \
        npoints, input_vtk = read_vtk(input_file, True, True)
    if area_file:
        area_scalars, name = read_scalars(area_file, True, True)

    #---------------------------------------------------------------------
    # For each face, average vertex values (a vectorized NumPy equivalent
    # is sketched after this function):
    #---------------------------------------------------------------------
    columns = []
    for face in faces:
        values = []
        for index in face:
            if area_file:
                values.append(scalars[index] / area_scalars[index])
            else:
                values.append(scalars[index])
        columns.append(np.mean(values))

    #-----------------------------------------------------------------
    # Write to table:
    #-----------------------------------------------------------------
    if not output_table:
        output_table = os.path.join(os.getcwd(), 'average_face_values.csv')

    df = pd.DataFrame({'': columns})
    df.to_csv(output_table, index=False)

    if not os.path.exists(output_table):
        raise IOError(output_table + " not found")

    return output_table
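
The per-face loop above can be vectorized with NumPy indexing. A minimal
sketch, assuming every face has exactly three vertices and that faces
converts cleanly to an integer array; this mirrors, rather than replaces,
the function's behavior:

import numpy as np

def sketch_face_vertex_averages(scalars, faces, area_scalars=None):
    values = np.asarray(scalars, dtype=float)
    if area_scalars is not None:
        # Divide each vertex value by its area scalar, as above:
        values = values / np.asarray(area_scalars, dtype=float)
    # Index per-vertex values by face and average each face's vertices:
    return values[np.asarray(faces, dtype=int)].mean(axis=1)

On large meshes this replaces a Python-level loop over every face with a
single array operation.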
Example #56
def spectrum_per_label(vtk_file,
                       spectrum_size=10,
                       exclude_labels=[-1],
                       normalization='area',
                       area_file='',
                       largest_segment=True):
    """
    Compute Laplace-Beltrami spectrum per labeled region in a file.

    Parameters
    ----------
    vtk_file : string
        name of VTK surface mesh file containing index scalars (labels)
    spectrum_size : integer
        number of eigenvalues to be computed (the length of the spectrum)
    exclude_labels : list of integers
        labels to be excluded
    normalization : string
        the method used to normalize eigenvalues ('area' or None);
        if "area", scale by the area of the 2D structure as in
        Reuter et al. 2006 (sketched after the function below)
    area_file :  string (optional)
        name of VTK file with surface area scalar values
    largest_segment :  bool
        compute spectrum only for largest segment with a given label?

    Returns
    -------
    spectrum_lists : list of lists
        first eigenvalues for each label's Laplace-Beltrami spectrum
    label_list : list of integers
        list of unique labels for which spectra are obtained

    Examples
    --------
    >>> # Uncomment "if label==22:" below to run example:
    >>> # Spectrum for Twins-2-1 left postcentral (22) pial surface:
    >>> import os
    >>> from mindboggle.shapes.laplace_beltrami import spectrum_per_label
    >>> path = os.environ['MINDBOGGLE_DATA']
    >>> vtk_file = os.path.join(path, 'arno', 'labels', 'lh.labels.DKT31.manual.vtk')
    >>> area_file = os.path.join(path, 'arno', 'shapes', 'lh.pial.area.vtk')
    >>> spectrum_size = 6
    >>> exclude_labels = [0]  #[-1]
    >>> largest_segment = True
    >>> spectrum_per_label(vtk_file, spectrum_size, exclude_labels, None,
    ...                    area_file, largest_segment)
    ([[6.3469513010430304e-18,
       0.0005178862383467463,
       0.0017434911095630772,
       0.003667561767487686,
       0.005429017880363784,
       0.006309346984678924]],
     [22])

    """
    from mindboggle.mio.vtks import read_vtk, read_scalars
    from mindboggle.guts.mesh import remove_faces, reindex_faces_points
    from mindboggle.shapes.laplace_beltrami import fem_laplacian,\
        spectrum_of_largest

    # Read VTK surface mesh file:
    faces, u1, u2, points, u4, labels, u5, u6 = read_vtk(vtk_file)

    # Area file:
    if area_file:
        areas, u1 = read_scalars(area_file)
    else:
        areas = None

    # Loop through labeled regions:
    # Collect unique labels in order of appearance, skipping excluded ones:
    ulabels = []
    for x in labels:
        if x not in ulabels and x not in exclude_labels:
            ulabels.append(int(x))
    label_list = []
    spectrum_lists = []
    for label in ulabels:
        #if label == 22:
        #  print("DEBUG: COMPUTE FOR ONLY ONE LABEL")

        # Determine the indices per label:
        Ilabel = [i for i, x in enumerate(labels) if x == label]
        print('{0} vertices for label {1}'.format(len(Ilabel), label))

        # Remove background faces:
        pick_faces = remove_faces(faces, Ilabel)
        pick_faces, pick_points, o1 = reindex_faces_points(pick_faces, points)

        # Compute Laplace-Beltrami spectrum for the label:
        if largest_segment:
            exclude_labels_inner = [-1]
            spectrum = spectrum_of_largest(pick_points, pick_faces,
                                           spectrum_size, exclude_labels_inner,
                                           normalization, areas)
        else:
            spectrum = fem_laplacian(pick_points, pick_faces, spectrum_size,
                                     normalization)

        # Append to a list of lists of spectra:
        spectrum_lists.append(spectrum)
        label_list.append(label)

    return spectrum_lists, label_list
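
A hedged sketch of the 'area' normalization named above (Reuter et al.
2006): Laplace-Beltrami eigenvalues of a 2-D surface scale inversely with
its area, so multiplying the spectrum by total surface area makes spectra
comparable across differently sized regions. This illustrates the idea
and is not fem_laplacian's internal code; it assumes triangular faces.

import numpy as np

def sketch_area_normalize(spectrum, points, faces):
    pts = np.asarray(points, dtype=float)
    tri = pts[np.asarray(faces, dtype=int)]     # (n_faces, 3, 3) corners
    # Triangle areas from the cross product of two edge vectors:
    cross = np.cross(tri[:, 1] - tri[:, 0], tri[:, 2] - tri[:, 0])
    total_area = 0.5 * np.linalg.norm(cross, axis=1).sum()
    return [x * total_area for x in spectrum]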
Example #57
def relabel_surface(vtk_file, hemi='', old_labels=[], new_labels=[],
                    erase_remaining=True, erase_labels=[], erase_value=-1,
                    output_file=''):
    """
    Relabel surface in a VTK file.

    Parameters
    ----------
    vtk_file : string
         input labeled VTK file
    hemi : string
        hemisphere ('lh' or 'rh' or '');
        if set, add 1000 to left and 2000 to right hemisphere labels
        (a toy illustration follows the function below)
    old_labels : list of integers
        old labels (empty list if labels drawn from vtk scalars);
        may be used in conjunction with hemi
    new_labels : list of integers
        new labels (empty list if labels drawn from vtk scalars);
        may be used in conjunction with hemi
    erase_remaining : bool
        set all values not in old_labels to erase_value?
    erase_labels : list of integers
        values to erase (set to erase_value)
    erase_value : integer
        set vertices with labels in erase_labels to this value
    output_file : string
        new vtk file name

    Returns
    -------
    output_file : string
        new vtk file name

    Examples
    --------
    >>> import numpy as np
    >>> from mindboggle.guts.relabel import relabel_surface
    >>> from mindboggle.mio.vtks import read_scalars
    >>> from mindboggle.mio.fetch_data import prep_tests
    >>> urls, fetch_data = prep_tests()
    >>> vtk_file = fetch_data(urls['left_freesurfer_labels'])
    >>> hemi = 'lh'
    >>> old_labels = [1003,1009,1030]
    >>> new_labels = [0,500,1000]
    >>> erase_remaining = True
    >>> erase_labels = [0]
    >>> erase_value = -1
    >>> output_file = ''
    >>> output_file = relabel_surface(vtk_file, hemi, old_labels, new_labels,
    ...     erase_remaining, erase_labels, erase_value, output_file)
    >>> labels, name = read_scalars(output_file, True, True)
    >>> np.unique(labels)
    array([  -1, 1000, 1500, 2000])

    View relabeled surface file (skip test):

    >>> from mindboggle.mio.plots import plot_surfaces # doctest: +SKIP
    >>> plot_surfaces(output_file) # doctest: +SKIP

    """
    import os
    import numpy as np
    from mindboggle.mio.vtks import read_vtk, write_vtk

    # Load labeled vtk surfaces:
    points, indices, lines, faces, scalars, scalar_names, npoints, \
        input_vtk = read_vtk(vtk_file, return_first=True, return_array=True)
    # Copy explicitly: slicing a NumPy array returns a view, and edits to
    # a view would leak back into scalars during the comparisons below.
    new_scalars = np.copy(scalars)

    # Raise an error if inputs set incorrectly:
    if (new_labels and not old_labels) or \
       (hemi and hemi not in ['lh','rh']) or \
       (new_labels and len(old_labels) != len(new_labels)) or \
       (erase_remaining and not old_labels):
        raise IOError("Please check inputs for relabel_surface().")

    # Loop through unique labels in scalars:
    ulabels = np.unique(scalars)
    for label in ulabels:
        I = np.where(scalars == label)[0]

        # If label in erase_labels list, replace with erase_value:
        if label in erase_labels:
            new_scalars[I] = erase_value

        # If label in old_labels list, replace with corresponding new label,
        # and if hemi set, add 1000 or 2000 to the new label:
        elif label in old_labels and (len(old_labels) == len(new_labels)):
            new_label = new_labels[old_labels.index(label)]
            if hemi == 'lh':
                new_scalars[I] = 1000 + new_label
            elif hemi == 'rh':
                new_scalars[I] = 2000 + new_label
            else:
                new_scalars[I] = new_label

        # If labels not set then optionally add hemi value:
        elif hemi and not new_labels:
            if hemi == 'lh':
                new_scalars[I] = 1000 + label
            elif hemi == 'rh':
                new_scalars[I] = 2000 + label

        # If label unaccounted for and erase_remaining, set to erase_value:
        elif erase_remaining:
            new_scalars[I] = erase_value

    # Ensure that the new scalars are integer values:
    new_scalars = [int(x) for x in new_scalars]

    # Write output VTK file:
    if not output_file:
        output_file = os.path.join(os.getcwd(),
                                   'relabeled_' + os.path.basename(vtk_file))
    write_vtk(output_file, points, indices, lines, faces,
              [new_scalars], ['Labels'], scalar_type='int')
    if not os.path.exists(output_file):
        raise IOError("relabel_surface() did not create " + output_file + ".")

    return output_file
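
A toy illustration of the relabeling rules above (pure Python, no VTK
I/O), using the docstring example's inputs. The 1000/2000 offsets follow
the FreeSurfer convention of numbering left cortical labels from 1000 and
right cortical labels from 2000:

old_labels = [1003, 1009, 1030]
new_labels = [0, 500, 1000]
hemi = 'lh'
offset = 1000 if hemi == 'lh' else 2000
mapping = {old: offset + new for old, new in zip(old_labels, new_labels)}
print(mapping)   # {1003: 1000, 1009: 1500, 1030: 2000}
# With erase_remaining=True, every other label is set to erase_value (-1),
# which is why the docstring example yields array([-1, 1000, 1500, 2000]).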
Example #58
def realign_boundaries_to_fundus_lines(
    surf_file, init_label_file, fundus_lines_file, thickness_file,
    out_label_file=None):
    """
    Fix label boundaries to fundus lines.

    Parameters
    ----------
    surf_file : file containing the surface geometry in vtk format
    init_label_file : file containing scalars that represent the
                      initial guess at labels
    fundus_lines_file : file containing scalars representing fundus lines
    thickness_file : file containing cortical thickness scalar data
                     (for masking out the medial wall only)
    out_label_file : if specified, the realigned labels will be written
                     to this file

    Returns
    -------
    numpy array representing the realigned label for each surface vertex.
    """

    import numpy as np
    from mindboggle.guts.segment import extract_borders
    import mindboggle.guts.graph as go
    from mindboggle.mio.vtks import read_vtk, read_scalars, write_vtk
    from mindboggle.guts.mesh import find_neighbors
    import propagate_fundus_lines

    ## read files (read_vtk returns points, indices, lines, faces, scalars,
    ## scalar names, npoints, input file, as in the calls elsewhere here)
    points, _, _, faces, _, _, num_points, _ = read_vtk(
        surf_file, return_first=True, return_array=True)
    indices = list(range(num_points))

    init_labels, _ = read_scalars(init_label_file,
                                  return_first=True, return_array=True)

    fundus_lines, _ = read_scalars(fundus_lines_file,
                                   return_first=True, return_array=True)

    thickness, _ = read_scalars(thickness_file,
                             return_first=True, return_array=True)

    # remove labels from vertices with zero thickness (works around DKT40
    # annotations assigning the label '3' to all corpus callosum vertices)
    cc_inds = [x for x in indices if thickness[x] < 0.001]
    init_labels[cc_inds] = 0

    ## setup seeds from initial label boundaries
    neighbor_lists = find_neighbors(faces, num_points)

    # extract all vertices that are on a boundary between labels
    boundary_indices, label_pairs, _ = extract_borders(
        indices, init_labels, neighbor_lists,
        return_label_pairs=True)

    # split boundary vertices into segments with common boundary pairs.
    boundary_segments = {}
    for boundary_index, label_pair in zip(boundary_indices, label_pairs):
        key = ((label_pair[0], label_pair[1]) if label_pair[0] < label_pair[1]
               else (label_pair[1], label_pair[0]))
        if key not in boundary_segments:
            boundary_segments[key] = []

        boundary_segments[key].append(boundary_index)

    boundary_matrix, boundary_matrix_keys = _build_boundary_matrix(
        boundary_segments, num_points)

    # build the affinity matrix
    affinity_matrix = go.weight_graph(
        np.array(points), indices, np.array(faces), sigma=10,
        add_to_graph=False)

    ## propagate boundaries to fundus line vertices (a sketch of this
    ## label-propagation scheme follows the function)
    learned_matrix = _propagate_labels(
        affinity_matrix, boundary_matrix, boundary_indices, 100, 1)

    # assign labels to fundus line vertices based on highest probability
    fundus_line_indices = [i for i, x in enumerate(fundus_lines) if x > 0.5]

    # tile the surface into connected components delimited by fundus lines
    closed_fundus_lines, _, _ = propagate_fundus_lines.propagate_fundus_lines(
        points, faces, fundus_line_indices, thickness)

    closed_fundus_line_indices = np.where(closed_fundus_lines > 0)[0]

    # split surface into connected components
    connected_component_faces = _remove_boundary_faces(
        points, faces, closed_fundus_line_indices)

    # label components based on most probable label assignment
    new_labels = _label_components(
        connected_component_faces, num_points, boundary_indices, learned_matrix,
        boundary_matrix_keys)

    # propagate new labels to fill holes
    label_matrix, label_map = _build_label_matrix(new_labels)
    new_learned_matrix = _propagate_labels(
        affinity_matrix, label_matrix,
        [i for i in range(num_points) if new_labels[i] >= 0], 100, 1)

    # assign most probable labels
    for idx in [i for i in range(num_points) if new_labels[i] == -1]:
        max_idx = np.argmax(new_learned_matrix[idx])
        new_labels[idx] = label_map[max_idx]

    # save
    if out_label_file is not None:
        write_vtk(out_label_file, points, faces=faces,
                  scalars=[int(x) for x in new_labels], scalar_type='int')

    return new_labels
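
The _propagate_labels calls above are internal helpers, but the scheme
they resemble is standard semi-supervised label propagation: iterate a
row-normalized affinity matrix and clamp the seed rows after each pass.
A dense-matrix sketch under that assumption (the trailing 100 and 1
arguments above are taken to be iteration/weight parameters, and the real
affinity matrix from go.weight_graph may well be sparse):

import numpy as np

def sketch_propagate_labels(affinity, label_matrix, seed_indices,
                            max_iters=100, tol=1e-3):
    # Row-normalize the affinity matrix into transition probabilities:
    row_sums = np.maximum(affinity.sum(axis=1, keepdims=True), 1e-12)
    P = affinity / row_sums
    Y = np.asarray(label_matrix, dtype=float).copy()
    seeds = np.asarray(seed_indices)
    for _ in range(max_iters):
        Y_new = P @ Y
        Y_new[seeds] = label_matrix[seeds]   # clamp the seed labels
        if np.abs(Y_new - Y).max() < tol:
            return Y_new
        Y = Y_new
    return Y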