Code example #1
def propagate_fundus_lines(surf_file, fundus_lines_file, thickness_file):
    """Propagate fundus lines to tile the surface.

    Parameters
    ----------
    surf_file: file containing the surface geometry in vtk format
    fundus_lines_file: file containing scalars representing fundus lines
    thickness_file: file containing cortical thickness scalar data
    (for masking out the medial wall only)

    Returns
    -------
    scalars indicating whether each vertex is part of the closed
    fundus lines or not
    """
    from mindboggle.utils.io_vtk import read_vtk, read_scalars

    faces, _, _, points, num_points, _, _, _ = read_vtk(
        surf_file, return_first=True, return_array=True)

    fundus_lines, _ = read_scalars(fundus_lines_file)
    fundus_line_indices = [i for i, x in enumerate(fundus_lines) if x > 0.5]

    thickness, _ = read_scalars(thickness_file,
                                return_first=True, return_array=True)

    # As excerpted, this call would recurse on this same wrapper; in the
    # source it is expected to resolve to a lower-level helper of the same
    # name that operates on points/faces/indices rather than on file names.
    return propagate_fundus_lines(
        points, faces, fundus_line_indices, thickness)
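
A minimal usage sketch, assuming the MINDBOGGLE_DATA layout used by the other
examples (the surf path is a hypothetical placeholder, and the function is
assumed importable from whichever module defines it above):

    import os

    path = os.environ['MINDBOGGLE_DATA']
    surf_file = os.path.join(path, 'arno', 'surf', 'lh.pial.vtk')  # hypothetical
    fundus_lines_file = os.path.join(path, 'arno', 'features', 'fundi.vtk')
    thickness_file = os.path.join(path, 'arno', 'shapes', 'lh.thickness.vtk')
    closed_lines = propagate_fundus_lines(surf_file, fundus_lines_file,
                                          thickness_file)
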
Code example #2
File: plots.py  Project: jsalva/mindboggle
def histogram_of_vtk_scalars(vtk_file, nbins=100):
    """
    Plot histogram of VTK surface mesh scalar values.

    Parameters
    ----------
    vtk_file : string
        name of VTK file with scalar values to plot
    nbins : integer
        number of histogram bins

    Examples
    --------
    >>> import os
    >>> from mindboggle.utils.plots import histogram_of_vtk_scalars
    >>> path = os.environ['MINDBOGGLE_DATA']
    >>> vtk_file = os.path.join(path, 'arno', 'shapes', 'lh.pial.mean_curvature.vtk')
    >>> histogram_of_vtk_scalars(vtk_file, nbins=500)

    """
    import matplotlib.pyplot as plt
    from mindboggle.utils.io_vtk import read_scalars

    # Load values:
    values, name = read_scalars(vtk_file)

    # Histogram:
    fig = plt.figure()
    ax = fig.add_subplot(1,1,1)
    # 'normed' was removed in matplotlib 3.1; 'density' replaces it:
    ax.hist(values, nbins, density=False, facecolor='gray', alpha=0.5)
    plt.show()
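
For headless or batch runs, the same histogram can be saved to disk rather
than shown; a small variant sketch (the output file name is arbitrary):

    import matplotlib
    matplotlib.use('Agg')  # non-interactive backend; set before pyplot import
    import matplotlib.pyplot as plt
    from mindboggle.utils.io_vtk import read_scalars

    values, name = read_scalars('lh.pial.mean_curvature.vtk')
    fig, ax = plt.subplots()
    ax.hist(values, 100, density=False, facecolor='gray', alpha=0.5)
    fig.savefig('histogram.png')
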
Code example #3
def plot_scalar_histogram(vtk_file, nbins=100):
    """
    Plot histogram of VTK surface mesh scalar values.

    Parameters
    ----------
    vtk_file : string
        name of VTK file
    nbins : integer
        number of histogram bins

    Examples
    --------
    >>> import os
    >>> from mindboggle.utils.plots import plot_scalar_histogram
    >>> path = os.environ['MINDBOGGLE_DATA']
    >>> vtk_file = os.path.join(path, 'arno', 'shapes', 'lh.pial.mean_curvature.vtk')
    >>> plot_scalar_histogram(vtk_file, nbins=500)

    """
    import matplotlib.pyplot as plt
    from mindboggle.utils.io_vtk import read_scalars

    # Load values:
    values, name = read_scalars(vtk_file)

    # Histogram:
    fig = plt.figure()
    ax = fig.add_subplot(111)
    # 'normed' was removed in matplotlib 3.1; 'density' replaces it:
    ax.hist(values, nbins, density=False, facecolor='gray', alpha=0.5)
    plt.show()
Code example #4
File: plots.py  Project: TankThinkLabs/mindboggle
def plot_vtk(vtk_file, mask_file='', masked_output=''):
    """
    Use mayavi2 to visualize VTK surface mesh data.

    Parameters
    ----------
    vtk_file : string
        name of VTK surface mesh file
    mask_file : string
        name of optional VTK file whose scalars (values > -1) mask the mesh
    masked_output : string
        name of optional masked output VTK file (default: 'temp.vtk')

    Examples
    --------
    >>> import os
    >>> from mindboggle.utils.plots import plot_vtk
    >>> path = os.environ['MINDBOGGLE_DATA']
    >>> vtk_file = os.path.join(path, 'arno', 'shapes', 'lh.pial.mean_curvature.vtk')
    >>> mask_file = os.path.join(path, 'arno', 'features', 'folds.vtk')
    >>> masked_output = ''
    >>> plot_vtk(vtk_file, mask_file, masked_output)

    """
    import os
#    import subprocess

    # Filter mesh with the non -1 values from a second (same-size) mesh:
    if mask_file:

        from mindboggle.utils.io_vtk import read_scalars, rewrite_scalars

        scalars, name = read_scalars(vtk_file)
        mask, name = read_scalars(mask_file)
        if not masked_output:
            masked_output = 'temp.vtk'
        rewrite_scalars(vtk_file, masked_output, scalars, 'masked', mask)

        cmd = ["mayavi2", "-d", masked_output, "-m", "Surface"]

    else:

        cmd = ["mayavi2", "-d", vtk_file, "-m", "Surface"]

# Note: subprocess won't allow me to put the command in the background:
#    p = subprocess.Popen(cmd)
#    p.communicate()
    cmd = ' '.join(cmd) + ' &'
    print(cmd)
    os.system(cmd)
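
As an aside, subprocess can launch the viewer without blocking: Popen returns
immediately, and it is only the optional communicate() call that waits for
the child process. A sketch of the non-blocking launch (untested against this
codebase):

    import subprocess

    # Popen returns immediately; skip communicate() to leave it running:
    subprocess.Popen(['mayavi2', '-d', 'lh.pial.mean_curvature.vtk',
                      '-m', 'Surface'])
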
Code example #5
def plot_vtk(vtk_file, mask_file='', masked_output=''):
    """
    Use mayavi2 to visualize VTK surface mesh data.

    Parameters
    ----------
    vtk_file : string
        name of VTK surface mesh file
    mask_file : string
        name of optional VTK file whose scalars (values > -1) mask the mesh
    masked_output : string
        name of optional masked output VTK file (default: 'temp.vtk')

    Examples
    --------
    >>> import os
    >>> from mindboggle.utils.plots import plot_vtk
    >>> path = os.environ['MINDBOGGLE_DATA']
    >>> vtk_file = os.path.join(path, 'arno', 'shapes', 'lh.pial.mean_curvature.vtk')
    >>> mask_file = os.path.join(path, 'arno', 'features', 'folds.vtk')
    >>> masked_output = ''
    >>> plot_vtk(vtk_file, mask_file, masked_output)

    """
    from mindboggle.utils.utils import execute

    # Filter mesh with the non -1 values from a second (same-size) mesh:
    if mask_file:

        from mindboggle.utils.io_vtk import read_scalars, rewrite_scalars

        scalars, name = read_scalars(vtk_file)
        mask, name = read_scalars(mask_file)
        if not masked_output:
            masked_output = 'temp.vtk'
        rewrite_scalars(vtk_file, masked_output, scalars, 'masked', mask)

        cmd = ["mayavi2", "-d", masked_output, "-m", "Surface"]

    else:

        cmd = ["mayavi2", "-d", vtk_file, "-m", "Surface"]
    # Append the single '&' token (extend would iterate over a string):
    cmd.append('&')
    execute(cmd, 'os')
Code example #6
def extract_sulci(labels_file,
                  folds_or_file,
                  hemi,
                  min_boundary=1,
                  sulcus_names=[]):
    """
    Identify sulci from folds in a brain surface according to a labeling
    protocol that includes a list of label pairs defining each sulcus.

    A fold is a group of connected, deep vertices.

    Steps for each fold ::

        1. Remove fold if it has fewer than two labels.
        2. Remove fold if its labels do not contain a sulcus label pair.
        3. Find vertices with labels that are in only one of the fold's
           label boundary pairs. Assign the vertices the sulcus with the label
           pair if they are connected to the label boundary for that pair.
        4. If there are remaining vertices, segment into sets of vertices
           connected to label boundaries, and assign a unique ID to each set.

    Parameters
    ----------
    labels_file : string
        file name for surface mesh VTK containing labels for all vertices
    folds_or_file : list or string
        fold number for each vertex / name of VTK file containing fold scalars
    hemi : string
        hemisphere abbreviation in {'lh', 'rh'} for sulcus labels
    min_boundary : integer
        minimum number of vertices for a sulcus label boundary segment
    sulcus_names : list of strings
        names of sulci

    Returns
    -------
    sulci : list of integers
        sulcus numbers for all vertices (-1 for non-sulcus vertices)
    n_sulci : integer
        number of sulci
    sulci_file : string
        output VTK file with sulcus numbers (-1 for non-sulcus vertices)

    Examples
    --------
    >>> import os
    >>> from mindboggle.utils.io_vtk import read_scalars, rewrite_scalars
    >>> from mindboggle.features.sulci import extract_sulci
    >>> from mindboggle.utils.plots import plot_surfaces
    >>> path = os.environ['MINDBOGGLE_DATA']
    >>> # Load labels, folds, neighbor lists, and sulcus names and label pairs
    >>> labels_file = os.path.join(path, 'arno', 'labels', 'relabeled_lh.DKTatlas40.gcs.vtk')
    >>> folds_file = os.path.join(path, 'arno', 'features', 'folds.vtk')
    >>> folds_or_file, name = read_scalars(folds_file)
    >>> hemi = 'lh'
    >>> min_boundary = 10
    >>> sulcus_names = []
    >>> #
    >>> sulci, n_sulci, sulci_file = extract_sulci(labels_file, folds_or_file, hemi, min_boundary, sulcus_names)
    >>> # View:
    >>> plot_surfaces('sulci.vtk')

    """
    import os
    from time import time
    import numpy as np

    from mindboggle.utils.io_vtk import read_scalars, read_vtk, rewrite_scalars
    from mindboggle.utils.mesh import find_neighbors
    from mindboggle.utils.segment import extract_borders, propagate, segment
    from mindboggle.LABELS import DKTprotocol

    # Load fold numbers if folds_or_file is a string:
    if isinstance(folds_or_file, str):
        folds, name = read_scalars(folds_or_file)
    elif isinstance(folds_or_file, list):
        folds = folds_or_file

    dkt = DKTprotocol()

    if hemi == 'lh':
        pair_lists = dkt.left_sulcus_label_pair_lists
    elif hemi == 'rh':
        pair_lists = dkt.right_sulcus_label_pair_lists
    else:
        # Without a valid hemisphere, pair_lists would be undefined below:
        raise ValueError("hemisphere not properly specified ('lh' or 'rh').")

    # Load points, faces, and neighbors:
    faces, o1, o2, points, npoints, labels, o3, o4 = read_vtk(labels_file)
    neighbor_lists = find_neighbors(faces, npoints)

    # Array of sulcus IDs for fold vertices, initialized as -1.
    # Since we do not touch gyral vertices and vertices whose labels
    # are not in the label list, or vertices having only one label,
    # their sulcus IDs will remain -1:
    sulci = -1 * np.ones(npoints)

    #-------------------------------------------------------------------------
    # Loop through folds
    #-------------------------------------------------------------------------
    fold_numbers = [int(x) for x in np.unique(folds) if x != -1]
    n_folds = len(fold_numbers)
    print("Extract sulci from {0} folds...".format(n_folds))
    t0 = time()
    for n_fold in fold_numbers:
        fold = [i for i, x in enumerate(folds) if x == n_fold]
        len_fold = len(fold)

        # List the labels in this fold:
        fold_labels = [labels[x] for x in fold]
        unique_fold_labels = [
            int(x) for x in np.unique(fold_labels) if x != -1
        ]

        #---------------------------------------------------------------------
        # NO MATCH -- fold has fewer than two labels
        #---------------------------------------------------------------------
        if len(unique_fold_labels) < 2:
            # Ignore: sulci already initialized with -1 values:
            if not unique_fold_labels:
                print("  Fold {0} ({1} vertices): "
                      "NO MATCH -- fold has no labels".format(
                          n_fold, len_fold))
            else:
                print("  Fold {0} ({1} vertices): "
                      "NO MATCH -- fold has only one label ({2})".format(
                          n_fold, len_fold, unique_fold_labels[0]))
            # Ignore: sulci already initialized with -1 values

        else:
            # Find all label boundary pairs within the fold:
            indices_fold_pairs, fold_pairs, unique_fold_pairs = \
                extract_borders(fold, labels, neighbor_lists,
                                ignore_values=[], return_label_pairs=True)

            # Find fold label pairs in the protocol (pairs are already sorted):
            fold_pairs_in_protocol = [
                x for x in unique_fold_pairs
                if x in dkt.unique_sulcus_label_pairs
            ]

            if unique_fold_labels:
                print("  Fold {0} labels: {1} ({2} vertices)".format(
                    n_fold, ', '.join([str(x) for x in unique_fold_labels]),
                    len_fold))
            #-----------------------------------------------------------------
            # NO MATCH -- fold has no sulcus label pair
            #-----------------------------------------------------------------
            if not fold_pairs_in_protocol:
                print("  Fold {0}: NO MATCH -- fold has no sulcus label pair".
                      format(n_fold, len_fold))

            #-----------------------------------------------------------------
            # Possible matches
            #-----------------------------------------------------------------
            else:
                print("  Fold {0} label pairs in protocol: {1}".format(
                    n_fold,
                    ', '.join([str(x) for x in fold_pairs_in_protocol])))

                # Labels in the protocol (includes repeats across label pairs):
                labels_in_pairs = [
                    x for lst in fold_pairs_in_protocol for x in lst
                ]

                # Split labels into those in exactly one vs. multiple pairs:
                unique_labels = []
                nonunique_labels = []
                for label in np.unique(labels_in_pairs):
                    if len([x for x in labels_in_pairs if x == label]) == 1:
                        unique_labels.append(label)
                    else:
                        nonunique_labels.append(label)

                #-------------------------------------------------------------
                # Vertices whose labels are in only one sulcus label pair
                #-------------------------------------------------------------
                # Find vertices with a label that is in only one of the fold's
                # label pairs (the other label in the pair can exist in other
                # pairs). Assign the vertices the sulcus with the label pair
                # if they are connected to the label boundary for that pair.
                #-------------------------------------------------------------
                if unique_labels:

                    for pair in fold_pairs_in_protocol:

                        # If one or both labels in label pair is/are unique:
                        unique_labels_in_pair = [
                            x for x in pair if x in unique_labels
                        ]
                        n_unique = len(unique_labels_in_pair)
                        if n_unique:

                            ID = None
                            for i, pair_list in enumerate(pair_lists):
                                if not isinstance(pair_list, list):
                                    pair_list = [pair_list]
                                if pair in pair_list:
                                    ID = i
                                    break
                            # ID may be 0 (a valid sulcus index), so test None:
                            if ID is not None:
                                # Seeds from label boundary vertices
                                # (fold_pairs and pair already sorted):
                                indices_pair = [
                                    x for i, x in enumerate(indices_fold_pairs)
                                    if fold_pairs[i] == pair
                                ]

                                # Vertices with unique label(s) in pair:
                                indices_unique_labels = [
                                    fold[i] for i, x in enumerate(fold_labels)
                                    if x in unique_labels_in_pair
                                ]

                                # Propagate from seeds to labels in label pair:
                                sulci2 = segment(indices_unique_labels,
                                                 neighbor_lists,
                                                 min_region_size=1,
                                                 seed_lists=[indices_pair],
                                                 keep_seeding=False,
                                                 spread_within_labels=True,
                                                 labels=labels)
                                sulci[sulci2 != -1] = ID

                                # Print statement:
                                if n_unique == 1:
                                    ps1 = '1 label'
                                else:
                                    ps1 = 'Both labels'
                                if len(sulcus_names):
                                    ps2 = sulcus_names[ID]
                                else:
                                    ps2 = ''
                                print("    {0} unique to one fold pair: "
                                      "{1} {2}".format(ps1, ps2,
                                                       unique_labels_in_pair))

                #-------------------------------------------------------------
                # Vertex labels shared by multiple label pairs
                #-------------------------------------------------------------
                # Propagate labels from label borders to vertices with labels
                # that are shared by multiple label pairs in the fold.
                #-------------------------------------------------------------
                if len(nonunique_labels):
                    # For each label shared by different label pairs:
                    for label in nonunique_labels:
                        # Print statement:
                        print("    Propagate sulcus borders with label {0}".
                              format(int(label)))

                        # Construct seeds from label boundary vertices:
                        seeds = -1 * np.ones(len(points))

                        for ID, pair_list in enumerate(pair_lists):
                            if not isinstance(pair_list, list):
                                pair_list = [pair_list]
                            label_pairs = [x for x in pair_list if label in x]
                            for label_pair in label_pairs:
                                indices_pair = [
                                    x for i, x in enumerate(indices_fold_pairs)
                                    if np.sort(fold_pairs[i]).tolist() ==
                                    label_pair
                                ]
                                if indices_pair:

                                    # Do not include short boundary segments:
                                    if min_boundary > 1:
                                        indices_pair2 = []
                                        seeds2 = segment(
                                            indices_pair, neighbor_lists)
                                        useeds2 = [
                                            x for x in np.unique(seeds2)
                                            if x != -1
                                        ]
                                        for seed2 in useeds2:
                                            iseed2 = [
                                                i for i, x in enumerate(seeds2)
                                                if x == seed2
                                            ]
                                            if len(iseed2) >= min_boundary:
                                                indices_pair2.extend(iseed2)
                                            else:
                                                if len(iseed2) == 1:
                                                    print("    Remove "
                                                          "assignment "
                                                          "of ID {0} from "
                                                          "1 vertex".format(
                                                              seed2))
                                                else:
                                                    print(
                                                        "    Remove "
                                                        "assignment "
                                                        "of ID {0} from "
                                                        "{1} vertices".format(
                                                            seed2,
                                                            len(iseed2)))
                                        indices_pair = indices_pair2

                                    # Assign sulcus IDs to seeds:
                                    seeds[indices_pair] = ID

                        # Identify vertices with the label:
                        label_array = -1 * np.ones(len(points))
                        indices_label = [
                            fold[i] for i, x in enumerate(fold_labels)
                            if x == label
                        ]
                        if len(indices_label):
                            label_array[indices_label] = 1

                            # Propagate from seeds to vertices with label:
                            #indices_seeds = []
                            #for seed in range(int(max(seeds))+1):
                            #    indices_seeds.append([i for i,x
                            #                          in enumerate(seeds)
                            #                          if x == seed])
                            #sulci2 = segment(indices_label, neighbor_lists,
                            #                 50, indices_seeds, False, True,
                            #                 labels)
                            sulci2 = propagate(points,
                                               faces,
                                               label_array,
                                               seeds,
                                               sulci,
                                               max_iters=10000,
                                               tol=0.001,
                                               sigma=5)
                            sulci[sulci2 != -1] = sulci2[sulci2 != -1]

    #-------------------------------------------------------------------------
    # Print out assigned sulci
    #-------------------------------------------------------------------------
    sulcus_numbers = [int(x) for x in np.unique(sulci) if x != -1]
    n_sulci = len(sulcus_numbers)
    print("Extracted {0} sulci from {1} folds ({2:.1f}s):".format(
        n_sulci, n_folds,
        time() - t0))
    if sulcus_names:
        for sulcus_number in sulcus_numbers:
            print("  {0}: {1}".format(sulcus_number,
                                      sulcus_names[sulcus_number]))
    elif sulcus_numbers:
        print("  " + ", ".join([str(x) for x in sulcus_numbers]))

    #-------------------------------------------------------------------------
    # Print out unresolved sulci
    #-------------------------------------------------------------------------
    unresolved = [i for i in range(len(pair_lists)) if i not in sulcus_numbers]
    if len(unresolved) == 1:
        print("The following sulcus is unaccounted for:")
    else:
        print("The following {0} sulci are unaccounted for:".format(
            len(unresolved)))
    if sulcus_names:
        for sulcus_number in unresolved:
            print("  {0}: {1}".format(sulcus_number,
                                      sulcus_names[sulcus_number]))
    else:
        print("  " + ", ".join([str(x) for x in unresolved]))

    #-------------------------------------------------------------------------
    # Return sulci, number of sulci, and file name
    #-------------------------------------------------------------------------
    sulci = [int(x) for x in sulci]
    sulci_file = os.path.join(os.getcwd(), 'sulci.vtk')
    rewrite_scalars(labels_file, sulci_file, sulci, 'sulci', sulci)

    if not os.path.exists(sulci_file):
        raise IOError(sulci_file + " not found")

    return sulci, n_sulci, sulci_file
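
Step 2 of the procedure above reduces to a membership test of each fold's
sorted boundary label pairs against the protocol's pair list; a toy
illustration with invented label values:

    # Hypothetical protocol pairs and fold boundary pairs (both sorted):
    unique_sulcus_label_pairs = [[12, 28], [3, 18]]
    unique_fold_pairs = [[12, 28], [28, 35]]

    fold_pairs_in_protocol = [pair for pair in unique_fold_pairs
                              if pair in unique_sulcus_label_pairs]
    print(fold_pairs_in_protocol)  # [[12, 28]]
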
Code example #7
File: io_table.py  Project: TankThinkLabs/mindboggle
def write_face_vertex_averages(input_file, area_file="", delimiter=","):
    """
    Make table of average vertex values per face.

    Parameters
    ----------
    input_file : string
        name of VTK file with scalars to average
    area_file :  string
        name of VTK file with surface area scalar values
    delimiter : string
        delimiter between columns, such as ','

    Returns
    -------
    output_table :  string
        output table filename

    Examples
    --------
    >>> import os
    >>> from mindboggle.utils.io_vtk import read_scalars
    >>> from mindboggle.utils.io_table import write_face_vertex_averages
    >>> path = os.environ['MINDBOGGLE_DATA']
    >>> #input_file = os.path.join(path, 'arno', 'shapes', 'lh.pial.mean_curvature.vtk')
    >>> #input_file = os.path.join(path, 'arno', 'shapes', 'lh.pial.travel_depth.vtk')
    >>> input_file = os.path.join(path, 'arno', 'shapes', 'lh.thickness.vtk')
    >>> area_file = os.path.join(path, 'arno', 'shapes', 'lh.pial.area.vtk')
    >>> delimiter = ','
    >>> #
    >>> write_face_vertex_averages(input_file, area_file, delimiter)

    """
    import os
    import numpy as np

    from mindboggle.utils.io_vtk import read_vtk, read_scalars
    from mindboggle.utils.io_table import write_columns

    faces, lines, indices, points, npoints, scalars, name, input_vtk = read_vtk(input_file, True, True)
    if area_file:
        area_scalars, name = read_scalars(area_file, True, True)

    # ---------------------------------------------------------------------
    # For each face, average vertex values:
    # ---------------------------------------------------------------------
    output_table = os.path.join(os.getcwd(), "average_face_values.csv")
    columns = []
    for face in faces:
        values = []
        for index in face:
            if area_file:
                # Normalize each vertex value by its per-vertex area scalar:
                values.append(scalars[index] / area_scalars[index])
            else:
                values.append(scalars[index])
        columns.append(np.mean(values))

    # -----------------------------------------------------------------
    # Write to table:
    # -----------------------------------------------------------------
    write_columns(columns, "", output_table, delimiter, quote=False)

    return output_table
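
Stripped of file I/O, the per-face averaging above is just a mean over each
triangle's three vertex values; a self-contained numpy sketch:

    import numpy as np

    scalars = np.array([0.5, 1.0, 2.0, 4.0])  # one value per vertex
    faces = np.array([[0, 1, 2], [1, 2, 3]])  # triangles as vertex index triples
    face_means = scalars[faces].mean(axis=1)  # approx. [1.167, 2.333]
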
Code example #8
File: io_table.py  Project: TankThinkLabs/mindboggle
def write_shape_stats(
    labels_or_file,
    sulci=[],
    fundi=[],
    affine_transform_file="",
    transform_format="itk",
    area_file="",
    mean_curvature_file="",
    travel_depth_file="",
    geodesic_depth_file="",
    convexity_file="",
    thickness_file="",
    labels_spectra=[],
    labels_spectra_IDs=[],
    sulci_spectra=[],
    sulci_spectra_IDs=[],
    exclude_labels=[-1],
    delimiter=",",
):
    """
    Make tables of shape statistics per label, fundus, and/or sulcus.

    Parameters
    ----------
    labels_or_file : list or string
        label number for each vertex or name of VTK file with index scalars
    sulci :  list of integers
        indices to sulci, one per vertex, with -1 indicating no sulcus
    fundi :  list of integers
        indices to fundi, one per vertex, with -1 indicating no fundus
    affine_transform_file : string
        affine transform file to standard space
    transform_format : string
        format for transform file
        Ex: 'txt' for text, 'itk' for ITK, and 'mat' for Matlab format
    area_file :  string
        name of VTK file with surface area scalar values
    mean_curvature_file :  string
        name of VTK file with mean curvature scalar values
    travel_depth_file :  string
        name of VTK file with travel depth scalar values
    geodesic_depth_file :  string
        name of VTK file with geodesic depth scalar values
    convexity_file :  string
        name of VTK file with convexity scalar values
    thickness_file :  string
        name of VTK file with thickness scalar values
    labels_spectra : list of lists of floats
        Laplace-Beltrami spectra for labeled regions
    labels_spectra_IDs : list of integers
        unique ID numbers (labels) for labels_spectra
    sulci_spectra : list of lists of floats
        Laplace-Beltrami spectra for sulci
    sulci_spectra_IDs : list of integers
        unique ID numbers (labels) for sulci_spectra
    exclude_labels : list of integers
        indices to be excluded (in addition to -1)
    delimiter : string
        delimiter between columns, such as ','

    Returns
    -------
    label_table :  string
        output table filename for label shapes
    sulcus_table :  string
        output table filename for sulcus shapes
    fundus_table :  string
        output table filename for fundus shapes

    Examples
    --------
    >>> import os
    >>> from mindboggle.utils.io_vtk import read_scalars
    >>> from mindboggle.utils.io_table import write_shape_stats
    >>> path = os.environ['MINDBOGGLE_DATA']
    >>> labels_or_file = os.path.join(path, 'arno', 'labels', 'lh.labels.DKT25.manual.vtk')
    >>> sulci_file = os.path.join(path, 'arno', 'features', 'sulci.vtk')
    >>> fundi_file = os.path.join(path, 'arno', 'features', 'fundi.vtk')
    >>> sulci, name = read_scalars(sulci_file)
    >>> fundi, name = read_scalars(fundi_file)
    >>> affine_transform_file = os.path.join(path, 'arno', 'mri',
    >>> #    'affine_to_template.mat')
    >>>     't1weighted_brain.MNI152Affine.txt')
    >>> #transform_format = 'mat'
    >>> transform_format = 'itk'
    >>> area_file = os.path.join(path, 'arno', 'shapes', 'lh.pial.area.vtk')
    >>> mean_curvature_file = os.path.join(path, 'arno', 'shapes', 'lh.pial.mean_curvature.vtk')
    >>> travel_depth_file = os.path.join(path, 'arno', 'shapes', 'lh.pial.travel_depth.vtk')
    >>> geodesic_depth_file = os.path.join(path, 'arno', 'shapes', 'lh.pial.geodesic_depth.vtk')
    >>> convexity_file = ''
    >>> thickness_file = ''
    >>> delimiter = ','
    >>> #
    >>> import numpy as np
    >>> labels, name = read_scalars(labels_or_file)
    >>> labels_spectra = [[1,2,3] for x in labels]
    >>> labels_spectra_IDs = np.unique(labels).tolist()
    >>> sulci_spectra = [[1,2,3] for x in sulci]
    >>> sulci_spectra_IDs = np.unique(sulci).tolist()
    >>> exclude_labels = [-1]
    >>> #
    >>> write_shape_stats(labels_or_file, sulci, fundi,
    >>>     affine_transform_file, transform_format, area_file,
    >>>     mean_curvature_file, travel_depth_file, geodesic_depth_file,
    >>>     convexity_file, thickness_file, labels_spectra,
    >>>     labels_spectra_IDs, sulci_spectra,
    >>>     sulci_spectra_IDs, exclude_labels, delimiter)

    """
    import os
    import numpy as np
    from mindboggle.shapes.measure import means_per_label, stats_per_label, sum_per_label
    from mindboggle.utils.io_vtk import read_scalars, read_vtk, apply_affine_transform
    from mindboggle.utils.io_table import write_columns

    # Make sure inputs are lists:
    if isinstance(labels_or_file, np.ndarray):
        labels = labels_or_file.tolist()
    elif isinstance(labels_or_file, list):
        labels = labels_or_file
    elif isinstance(labels_or_file, str):
        labels, name = read_scalars(labels_or_file)
    if isinstance(sulci, np.ndarray):
        sulci = sulci.tolist()
    if isinstance(fundi, np.ndarray):
        fundi = fundi.tolist()

    # -------------------------------------------------------------------------
    # Feature lists, shape names, and shape files:
    # -------------------------------------------------------------------------
    # Feature lists:
    feature_lists = [labels, sulci, fundi]
    feature_names = ["label", "sulcus", "fundus"]
    spectra_lists = [labels_spectra, sulci_spectra]
    spectra_ID_lists = [labels_spectra_IDs, sulci_spectra_IDs]
    spectra_names = ["label spectrum", "sulcus spectrum"]
    table_names = ["label_shapes.csv", "sulcus_shapes.csv", "fundus_shapes.csv"]

    # Shape names corresponding to shape files below:
    shape_names = ["area", "mean curvature", "travel depth", "geodesic depth", "convexity", "thickness"]

    # Load shape files as a list of numpy arrays of per-vertex shape values:
    shape_files = [
        area_file,
        mean_curvature_file,
        travel_depth_file,
        geodesic_depth_file,
        convexity_file,
        thickness_file,
    ]
    shape_arrays = []
    column_names = []
    first_pass = True
    area_array = []
    for ishape, shape_file in enumerate(shape_files):
        if os.path.exists(shape_file):
            if first_pass:
                faces, lines, indices, points, npoints, scalars_array, name, input_vtk = read_vtk(
                    shape_file, True, True
                )
                points = np.array(points)
                first_pass = False
                if affine_transform_file:
                    affine_points, foo1 = apply_affine_transform(
                        affine_transform_file, points, transform_format, save_file=False
                    )
                    affine_points = np.array(affine_points)
            else:
                scalars_array, name = read_scalars(shape_file, True, True)
            if scalars_array.size:
                shape_arrays.append(scalars_array)

                # Store area array:
                if ishape == 0:
                    area_array = scalars_array.copy()

    # Initialize table file names:
    sulcus_table = None
    fundus_table = None

    # Loop through features / tables:
    for itable, feature_list in enumerate(feature_lists):
        table_column_names = []

        # ---------------------------------------------------------------------
        # For each feature, construct a table of average shape values:
        # ---------------------------------------------------------------------
        table_file = os.path.join(os.getcwd(), table_names[itable])
        if feature_list:
            feature_name = feature_names[itable]
            columns = []

            # -----------------------------------------------------------------
            # Mean positions in the original space:
            # -----------------------------------------------------------------
            # Compute mean position per feature:
            positions, sdevs, label_list, foo = means_per_label(points, feature_list, exclude_labels, area_array)

            # Append mean position per feature to columns:
            table_column_names.append("mean position")
            columns.append(positions)

            # -----------------------------------------------------------------
            # Mean positions in standard space:
            # -----------------------------------------------------------------
            if affine_transform_file:
                # Compute standard space mean position per feature:
                standard_positions, sdevs, label_list, foo = means_per_label(
                    affine_points, feature_list, exclude_labels, area_array
                )

                # Append standard space mean position per feature to columns:
                table_column_names.append("mean position in standard space")
                columns.append(standard_positions)

            # -----------------------------------------------------------------
            # Loop through shape measures:
            # -----------------------------------------------------------------
            table_column_names.extend(column_names[:])
            for ishape, shape_array in enumerate(shape_arrays):
                shape_name = shape_names[ishape]
                print("  Compute statistics on {0} {1}".format(feature_name, shape_name))

                # Append shape names and values per feature to columns:
                pr = feature_name + ": " + shape_name + ": "
                if np.size(area_array):
                    po = " (weighted)"
                else:
                    po = ""
                # -------------------------------------------------------------
                # Append total feature areas to columns:
                # -------------------------------------------------------------
                if ishape == 0 and np.size(area_array):
                    sums, label_list = sum_per_label(shape_array, feature_list, exclude_labels)
                    table_column_names.append(pr + "total")
                    columns.append(sums)
                # -------------------------------------------------------------
                # Append feature shape statistics to columns:
                # -------------------------------------------------------------
                else:
                    medians, mads, means, sdevs, skews, kurts, lower_quarts, upper_quarts, label_list = stats_per_label(
                        shape_array, feature_list, exclude_labels, area_array, precision=1
                    )

                    table_column_names.append(pr + "median" + po)
                    table_column_names.append(pr + "median absolute deviation" + po)
                    table_column_names.append(pr + "mean" + po)
                    table_column_names.append(pr + "standard deviation" + po)
                    table_column_names.append(pr + "skew" + po)
                    table_column_names.append(pr + "kurtosis" + po)
                    table_column_names.append(pr + "lower quartile" + po)
                    table_column_names.append(pr + "upper quartile" + po)
                    columns.append(medians)
                    columns.append(mads)
                    columns.append(means)
                    columns.append(sdevs)
                    columns.append(skews)
                    columns.append(kurts)
                    columns.append(lower_quarts)
                    columns.append(upper_quarts)

            # -----------------------------------------------------------------
            # Laplace-Beltrami spectra:
            # -----------------------------------------------------------------
            if itable in [0, 1]:
                spectra = spectra_lists[itable]
                spectra_name = spectra_names[itable]
                spectra_IDs = spectra_ID_lists[itable]

                # Order spectra into a list:
                spectrum_list = []
                for label in label_list:
                    if label in spectra_IDs:
                        spectrum = spectra[spectra_IDs.index(label)]
                        spectrum_list.append(spectrum)
                    else:
                        spectrum_list.append("")

                # Append spectral shape name and values to relevant columns:
                columns.append(spectrum_list)
                table_column_names.append(spectra_name)

            # -----------------------------------------------------------------
            # Write labels/IDs and values to table:
            # -----------------------------------------------------------------
            # Write labels/IDs to table:
            write_columns(label_list, feature_name, table_file, delimiter)

            # Append columns of shape values to table:
            if columns:
                write_columns(columns, table_column_names, table_file, delimiter, quote=True, input_table=table_file)
        else:
            # Write something to table:
            write_columns([], "", table_file, delimiter)

        # ---------------------------------------------------------------------
        # Return correct table file name:
        # ---------------------------------------------------------------------
        if itable == 0:
            label_table = table_file
        elif itable == 1:
            sulcus_table = table_file
        elif itable == 2:
            fundus_table = table_file

    return label_table, sulcus_table, fundus_table
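
The area weighting used throughout this table is an ordinary weighted mean;
a one-label numeric sketch of the formula (not the library's means_per_label):

    import numpy as np

    values = np.array([2.0, 4.0, 6.0])  # shape values at three vertices
    areas = np.array([1.0, 1.0, 2.0])   # per-vertex surface areas as weights
    weighted_mean = np.sum(values * areas) / np.sum(areas)  # 4.5
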
Code example #9
File: io_vtk.py  Project: ccraddock/mindboggle
def explode_scalars(input_indices_vtk, input_values_vtk='', output_stem='',
                    exclude_values=[-1], background_value=-1,
                    output_scalar_name='scalars',
                    remove_background_faces=True, reindex=True):
    """
    Write out a separate VTK file for each integer (not in exclude_values)
    in (the first) scalar list of an input VTK file.
    Optionally write the values drawn from a second VTK file,
    remove background values, and reindex indices.

    Parameters
    ----------
    input_indices_vtk : string
        path of the input VTK file that contains indices as scalars
        (assumes that the scalars are a list of floats or integers)
    input_values_vtk : string
        path of the input VTK file that contains values as scalars
    output_stem : string
        path and stem of the output VTK file
    exclude_values : list or array
        values to exclude
    background_value : integer or float
        background value in output VTK files
    output_scalar_name : string
        name for the output scalar field
    remove_background_faces : Boolean
        remove all faces whose three vertices do not all share a given index?
    reindex : Boolean
        reindex all indices in faces?

    Examples
    --------
    >>> # Example 1:  explode sulci with thickness values
    >>> import os
    >>> from mindboggle.utils.io_vtk import explode_scalars
    >>> from mindboggle.utils.plots import plot_surfaces
    >>> path = os.environ['MINDBOGGLE_DATA']
    >>> input_indices_vtk = os.path.join(path, 'arno', 'features', 'sulci.vtk')
    >>> input_values_vtk = os.path.join(path, 'arno', 'shapes', 'lh.pial.travel_depth.vtk')
    >>> output_stem = 'sulci_depth'
    >>> #
    >>> explode_scalars(input_indices_vtk, input_values_vtk, output_stem)
    >>> #
    >>> # View:
    >>> example_vtk = os.path.join(os.getcwd(), output_stem + '0.vtk')
    >>> plot_surfaces(example_vtk)
    >>> #
    >>> # Example 2:  explode labels
    >>> import os
    >>> from mindboggle.utils.io_vtk import explode_scalars
    >>> from mindboggle.utils.plots import plot_surfaces
    >>> path = os.environ['MINDBOGGLE_DATA']
    >>> input_values_vtk = os.path.join(path, 'arno', 'labels',
    >>>                                 'lh.labels.DKT25.manual.vtk')
    >>> input_indices_vtk = input_values_vtk
    >>> output_stem = 'label'
    >>> exclude_values = [-1]
    >>> background_value = -1,
    >>> output_scalar_name = 'scalars'
    >>> remove_background_faces = True
    >>> reindex = True
    >>> #
    >>> explode_scalars(input_indices_vtk, input_values_vtk, output_stem,
    >>>                 exclude_values, background_value,
    >>>                 output_scalar_name, remove_background_faces, reindex)
    >>> # View:
    >>> example_vtk = os.path.join(os.getcwd(), output_stem + '2.vtk')
    >>> plot_surfaces(example_vtk)

    """
    import os
    import numpy as np
    from mindboggle.utils.io_vtk import read_scalars, read_vtk, write_vtk
    from mindboggle.utils.mesh import reindex_faces_points, remove_faces

    # Load VTK file:
    faces, lines, indices, points, npoints, scalars, scalar_names, \
        foo1 = read_vtk(input_indices_vtk, True, True)
    print("Explode the scalar list in {0}".
          format(os.path.basename(input_indices_vtk)))
    if input_values_vtk != input_indices_vtk:
        values, name = read_scalars(input_values_vtk, True, True)
        print("Explode the scalar list of values in {0} "
              "with the scalar list of indices in {1}".
              format(os.path.basename(input_values_vtk),
                     os.path.basename(input_indices_vtk)))
    else:
        values = np.copy(scalars)

    # Loop through unique (non-excluded) scalar values:
    unique_scalars = np.unique(scalars)
    if all(unique_scalars==np.round(unique_scalars)):
        unique_scalars = [int(x) for x in unique_scalars
                          if x not in exclude_values]
    else:
        unique_scalars = [x for x in unique_scalars
                          if x not in exclude_values]

    for scalar in unique_scalars:

        # Remove background (keep only faces with the scalar):
        if remove_background_faces:
            scalar_indices = [i for i,x in enumerate(scalars) if x == scalar]
            scalar_faces = remove_faces(faces, scalar_indices)
        else:
            scalar_faces = faces

        # Reindex:
        if reindex:
            scalar_faces, select_points, \
            o1 = reindex_faces_points(scalar_faces, points)
        else:
            select_points = points

        # Create array and indices for scalar value:
        if reindex:
            len_indices = len(select_points)
            select_values = scalar * np.ones(len_indices)
        else:
            select_values = np.copy(values)
            select_values[scalars != scalar] = background_value
            len_indices = len([i for i,x in enumerate(select_values)
                               if x != background_value])

        print("  Scalar {0}: {1} vertices".format(scalar, len_indices))

        # Write VTK file with scalar values (list of values):
        if np.ndim(select_values) == 1:
            scalar_type = type(select_values[0]).__name__
        elif np.ndim(select_values) == 2:
            scalar_type = type(select_values[0][0]).__name__
        else:
            print("Undefined scalar type!")
        output_vtk = os.path.join(os.getcwd(),
                                  output_stem + str(scalar) + '.vtk')
        write_vtk(output_vtk, select_points, indices, lines, scalar_faces,
                  select_values.tolist(), output_scalar_name,
                  scalar_type=scalar_type)
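
The reindexing step amounts to remapping the surviving vertex indices onto a
dense 0..n-1 range; a compact sketch of that mapping, independent of
reindex_faces_points:

    import numpy as np

    faces = np.array([[2, 5, 7], [5, 7, 9]])  # faces left after removal
    kept = np.unique(faces)                   # surviving vertex indices
    new_index = {old: new for new, old in enumerate(kept)}
    reindexed = np.vectorize(new_index.get)(faces)  # [[0 1 2], [1 2 3]]
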
Code example #10
def extract_borders_2nd_surface(labels_file, mask_file="", values_file=""):
    """
    Extract borders (between labels) on a surface.
    Options: Mask out values; extract border values on a second surface.

    Parameters
    ----------
    labels_file : string
        file name for surface mesh with labels
    mask_file : string
        file name for surface mesh with mask (>-1) values
    values_file : string
        file name for surface mesh with values to extract along borders

    Returns
    -------
    border_file : string
        file name for surface mesh with label borders (-1 background values)
    border_values : numpy array
        values for all vertices (-1 for vertices not along label borders)

    Examples
    --------
    >>> # Extract depth values along label borders in sulci (mask):
    >>> import os
    >>> from mindboggle.labels.labels import extract_borders_2nd_surface
    >>> from mindboggle.utils.plots import plot_vtk
    >>> path = os.environ['MINDBOGGLE_DATA']
    >>> labels_file = os.path.join(path, 'arno', 'labels', 'lh.labels.DKT25.manual.vtk')
    >>> mask_file = os.path.join(path, 'arno', 'features', 'sulci.vtk')
    >>> values_file = os.path.join(path, 'arno', 'shapes', 'lh.pial.travel_depth.vtk')
    >>> #
    >>> border_file, border_values = extract_borders_2nd_surface(labels_file, mask_file, values_file)
    >>> #
    >>> plot_vtk(border_file)

    """
    import os
    import numpy as np
    from mindboggle.utils.io_vtk import read_scalars, read_vtk, rewrite_scalars
    from mindboggle.utils.mesh import find_neighbors
    from mindboggle.labels.labels import extract_borders

    # Load labeled surface file
    faces, foo1, foo2, foo3, npoints, labels, foo4, foo5 = read_vtk(labels_file, return_first=True, return_array=True)

    # Detect borders
    neighbor_lists = find_neighbors(faces, npoints)
    indices_borders, foo1, foo2 = extract_borders(range(npoints), labels, neighbor_lists)

    # Filter values with label borders
    border_values = -1 * np.ones(npoints)
    if values_file:
        values, name = read_scalars(values_file, return_first=True, return_array=True)
        border_values[indices_borders] = values[indices_borders]
    else:
        border_values[indices_borders] = 1

    # Mask values (for mask >-1)
    if mask_file:
        mask_values, name = read_scalars(mask_file)
    else:
        mask_values = []

    # Write out label boundary vtk file
    border_file = os.path.join(os.getcwd(), "borders_" + os.path.basename(labels_file))
    rewrite_scalars(labels_file, border_file, border_values, "label_borders_in_mask", mask_values)

    if not os.path.exists(border_file):
        raise IOError(border_file + " not found")

    return border_file, border_values
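
The border test itself is local: a vertex lies on a label border if any mesh
neighbor carries a different label. A toy sketch with an invented adjacency
list (not the library's extract_borders):

    labels = [1, 1, 2, 2]
    neighbor_lists = [[1], [0, 2], [1, 3], [2]]  # hypothetical mesh adjacency
    borders = [i for i, nbrs in enumerate(neighbor_lists)
               if any(labels[n] != labels[i] for n in nbrs)]
    print(borders)  # [1, 2]
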
Code example #11
def compute_likelihood(trained_file,
                       depth_file,
                       curvature_file,
                       folds,
                       save_file=False):
    """
    Compute likelihoods based on input values, folds, and estimated parameters.

    Compute likelihood values for a given VTK surface mesh file, after training
    on distributions of depth and curvature values from multiple files.

    Parameters
    ----------
    trained_file : pickle compressed file
        contains the following dictionaries containing lists of floats
        (estimates of depth or curvature means, sigmas, and weights
         trained on fold vertices either on or off sulcus label borders)
        depth_border, curv_border, depth_nonborder, curv_nonborder
    depth_file : string
        VTK surface mesh file with depth values in [0,1] for all vertices
    curvature_file : string
        VTK surface mesh file with curvature values in [-1,1] for all vertices
    folds : list of integers
        fold number for all vertices (-1 for non-fold vertices)
    save_file : Boolean
        save output VTK file?

    Returns
    -------
    likelihoods : list of floats
        likelihood values for all vertices (0 for non-fold vertices)
    likelihoods_file : string (if save_file)
        name of output VTK file with likelihood scalars
        (-1 for non-fold vertices)

    Examples
    --------
    >>> import os
    >>> from mindboggle.utils.io_vtk import read_scalars, rewrite_scalars
    >>> from mindboggle.shapes.likelihood import compute_likelihood
    >>> from mindboggle.utils.plots import plot_surfaces
    >>> path = os.environ['MINDBOGGLE_DATA']
    >>> trained_file = os.path.join(path, 'atlases', 'depth_curv_border_nonborder_parameters.pkl')
    >>> #depth_file = os.path.join(path, 'arno', 'shapes', 'travel_depth_rescaled.vtk')
    >>> depth_file = os.path.join(path, 'arno', 'shapes', 'lh.pial.travel_depth.vtk')
    >>> curvature_file = os.path.join(path, 'arno', 'shapes', 'lh.pial.mean_curvature.vtk')
    >>> folds_file = os.path.join(path, 'arno', 'features', 'folds.vtk')
    >>> folds, name = read_scalars(folds_file)
    >>> save_file = True
    >>> #
    >>> compute_likelihood(trained_file, depth_file, curvature_file, folds, save_file)
    >>> # View:
    >>> plot_surfaces('likelihoods.vtk', folds_file)

    """
    import os
    import numpy as np
    from math import pi
    try:
        import cPickle as pickle  # Python 2
    except ImportError:
        import pickle  # Python 3

    from mindboggle.utils.io_vtk import read_scalars, rewrite_scalars

    # Initialize variables:
    tiny = 0.000000001
    L = np.zeros(len(folds))
    probs_border = np.zeros(len(folds))
    probs_nonborder = np.zeros(len(folds))

    # Load estimated depth and curvature distribution parameters:
    with open(trained_file, 'rb') as f:  # binary mode for pickle data
        depth_border, curv_border, depth_nonborder, curv_nonborder = \
            pickle.load(f)

    # Load depths, curvatures:
    depths, name = read_scalars(depth_file, True, True)
    curvatures, name = read_scalars(curvature_file, True, True)

    # Prep for below:
    n = 2
    twopiexp = (2 * pi)**(n / 2)
    border_sigmas = depth_border['sigmas'] * curv_border['sigmas']
    nonborder_sigmas = depth_nonborder['sigmas'] * curv_nonborder['sigmas']
    norm_border = 1 / (twopiexp * border_sigmas + tiny)
    norm_nonborder = 1 / (twopiexp * nonborder_sigmas + tiny)
    I = [i for i, x in enumerate(folds) if x != -1]

    N = depth_border['sigmas'].shape[0]
    for j in range(N):

        # Border:
        expB = depth_border['weights'][j] * \
            ((depths[I]-depth_border['means'][j])**2) / \
            depth_border['sigmas'][j]**2
        expB += curv_border['weights'][j] * \
            ((curvatures[I]-curv_border['means'][j])**2) / \
            curv_border['sigmas'][j]**2
        expB = -expB / 2
        probs_border[I] = probs_border[I] + norm_border[j] * np.exp(expB)

        # Non-border:
        expNB = depth_nonborder['weights'][j] * \
            ((depths[I]-depth_nonborder['means'][j])**2) / \
            depth_nonborder['sigmas'][j]**2
        expNB += curv_nonborder['weights'][j] * \
            ((curvatures[I]-curv_nonborder['means'][j])**2) / \
            curv_nonborder['sigmas'][j]**2
        expNB = -expNB / 2
        probs_nonborder[I] = probs_nonborder[I] + \
            norm_nonborder[j] * np.exp(expNB)

    likelihoods = probs_border / (probs_nonborder + probs_border + tiny)
    likelihoods = likelihoods.tolist()

    #-------------------------------------------------------------------------
    # Return likelihoods and output file name
    #-------------------------------------------------------------------------
    if save_file:

        likelihoods_file = os.path.join(os.getcwd(), 'likelihoods.vtk')
        rewrite_scalars(depth_file, likelihoods_file, likelihoods,
                        'likelihoods', likelihoods)
        if not os.path.exists(likelihoods_file):
            raise IOError(likelihoods_file + " not found")

    else:
        likelihoods_file = None

    return likelihoods, likelihoods_file
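
The final normalization is a two-model ratio, so each likelihood lands in
[0, 1]; a one-vertex numeric sketch:

    tiny = 1e-9
    prob_border, prob_nonborder = 0.6, 0.2
    likelihood = prob_border / (prob_nonborder + prob_border + tiny)  # ~0.75
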
Code example #12
File: measure.py  Project: TankThinkLabs/mindboggle
def rescale_by_label(input_vtk, labels_or_file, combine_all_labels=False,
                     nedges=10, p=99, set_max_to_1=True, save_file=False,
                     output_filestring='rescaled_scalars'):
    """
    Rescale scalars for each label (such as depth values within each fold).

    Default is to normalize the scalar values of a VTK file by
    a percentile value in each vertex's surface mesh for each label.

    Parameters
    ----------
    input_vtk : string
        name of VTK file with a scalar value for each vertex
    labels_or_file : list or string
        label number for each vertex or name of VTK file with index scalars
    combine_all_labels : Boolean
        combine all labels (scalars not equal to -1) as one label?
    nedges : integer
        number of edges from vertex, defining the size of its neighborhood
        (accepted but not used in this excerpt)
    p : float in range of [0,100]
        percentile used to rescale each scalar
        (accepted but not used in this excerpt)
    set_max_to_1 : Boolean
        set all rescaled values greater than 1 to 1.0?
    save_file : Boolean
        save output VTK file?
    output_filestring : string (if save_file)
        name of output file

    Returns
    -------
    rescaled_scalars : list of floats
        scalar values rescaled for each label, for label numbers not equal to -1
    rescaled_scalars_file : string (if save_file)
        name of output VTK file with rescaled scalar values for each label

    Examples
    --------
    >>> # Rescale depths by neighborhood within each label:
    >>> import os
    >>> from mindboggle.shapes.measure import rescale_by_label
    >>> from mindboggle.utils.io_vtk import read_scalars, rewrite_scalars
    >>> from mindboggle.utils.plots import plot_vtk
    >>> path = os.environ['MINDBOGGLE_DATA']
    >>> input_vtk = os.path.join(path, 'arno', 'shapes', 'lh.pial.travel_depth.vtk')
    >>> labels_or_file = os.path.join(path, 'arno', 'features', 'subfolds.vtk')
    >>> combine_all_labels = False
    >>> nedges = 10
    >>> p = 99
    >>> set_max_to_1 = True
    >>> save_file = True
    >>> output_filestring = 'rescaled_scalars'
    >>> #
    >>> rescaled_scalars, rescaled_scalars_file = rescale_by_label(input_vtk,
    >>>     labels_or_file, combine_all_labels, nedges, p,
    >>>     set_max_to_1, save_file, output_filestring)
    >>> #
    >>> # View rescaled scalar values per fold:
    >>> folds_file = os.path.join(path, 'arno', 'features', 'folds.vtk')
    >>> folds, name = read_scalars(folds_file)
    >>> #
    >>> rewrite_scalars(rescaled_scalars_file, rescaled_scalars_file,
    >>>                 rescaled_scalars, 'rescaled_depths', folds)
    >>> plot_vtk(rescaled_scalars_file)

    """
    import os
    import numpy as np
    from mindboggle.utils.io_vtk import read_scalars, rewrite_scalars

    # Load scalars:
    scalars, name = read_scalars(input_vtk, True, True)
    print("  Rescaling scalar values within each label...")

    # Load label numbers:
    if isinstance(labels_or_file, str):
        labels, name = read_scalars(labels_or_file, True, True)
    elif isinstance(labels_or_file, list):
        labels = labels_or_file
    unique_labels = np.unique(labels)
    unique_labels = [x for x in unique_labels if x >= 0]

    # Loop through labels (this excerpt rescales by each label's maximum;
    # the percentile/neighborhood options above are not exercised here):
    for label in unique_labels:
        #print("  Rescaling scalar values within label {0} of {1} labels...".format(
        #    int(label), len(unique_labels)))
        indices = [i for i,x in enumerate(labels) if x == label]
        if indices:

            # Rescale by the maximum label scalar value:
            scalars[indices] = scalars[indices] / np.max(scalars[indices])

    rescaled_scalars = scalars.tolist()

    #---------------------------------------------------------------------------
    # Return rescaled scalars and file name
    #---------------------------------------------------------------------------
    if save_file:

        rescaled_scalars_file = os.path.join(os.getcwd(), output_filestring + '.vtk')
        rewrite_scalars(input_vtk, rescaled_scalars_file,
                        rescaled_scalars, 'rescaled_scalars', labels)

    else:
        rescaled_scalars_file = None

    return rescaled_scalars, rescaled_scalars_file
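
# The core operation in rescale_by_label() above is per-label max-
# normalization: within each label's vertices, every scalar is divided by
# that label's maximum.  A self-contained NumPy sketch of just that step,
# on toy data (an illustration, not Mindboggle code):
import numpy as np

toy_scalars = np.array([2.0, 4.0, 1.0, 3.0, 6.0])
toy_labels = np.array([1, 1, 2, 2, 2])
for label in np.unique(toy_labels):
    idx = np.where(toy_labels == label)[0]
    max_value = np.max(toy_scalars[idx])
    if max_value != 0:  # guard against division by zero
        toy_scalars[idx] = toy_scalars[idx] / max_value
print(toy_scalars)  # [0.5, 1.0, 0.1667, 0.5, 1.0]
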
Code example #13
def evaluate_deep_features(features_file, labels_file, sulci_file='', hemi='',
                           excludeIDs=[-1], output_vtk_name='', verbose=True):
    """
    Evaluate deep surface features by computing the minimum distance from each
    label boundary vertex to all of the feature vertices in the same sulcus,
    and from each feature vertex to all of the label boundary vertices in the
    same sulcus.  The label boundaries run along the deepest parts of sulci
    and correspond to fundi in the DKT cortical labeling protocol.

    Parameters
    ----------
    features_file : string
        VTK surface file with feature numbers for vertex scalars
    labels_file : string
        VTK surface file with label numbers for vertex scalars
    sulci_file : string
        VTK surface file with sulcus numbers for vertex scalars
    hemi : string
        hemisphere ('lh' or 'rh'); used to select the sulcus label pair lists
    excludeIDs : list of integers
        feature/sulcus/label IDs to exclude (background set to -1)
    output_vtk_name : string
        if not empty, output a VTK file beginning with output_vtk_name that
        contains a surface with mean distances as scalars
    verbose : Boolean
        print mean distances to standard output?

    Returns
    -------
    feature_to_fundus_mean_distances : numpy array [number of features x 1]
        mean distance from each feature to sulcus label boundary ("fundus")
    feature_to_fundus_sd_distances : numpy array [number of features x 1]
        standard deviations of feature-to-fundus distances
    feature_to_fundus_mean_distances_vtk : string
        VTK surface file containing feature_to_fundus_mean_distances
    fundus_to_feature_mean_distances : numpy array [number of features x 1]
        mean distances from each sulcus label boundary ("fundus") to feature
    fundus_to_feature_sd_distances : numpy array [number of features x 1]
        standard deviations of fundus-to-feature distances
    fundus_to_feature_mean_distances_vtk : string
        VTK surface file containing fundus_to_feature_mean_distances

    """
    import os
    import sys
    import numpy as np
    from mindboggle.utils.io_vtk import read_vtk, read_scalars, write_vtk
    from mindboggle.utils.mesh import find_neighbors, remove_faces
    from mindboggle.utils.segment import extract_borders
    from mindboggle.utils.compute import source_to_target_distances
    from mindboggle.LABELS import DKTprotocol

    dkt = DKTprotocol()
    #-------------------------------------------------------------------------
    # Load labels, features, and sulci:
    #-------------------------------------------------------------------------
    faces, lines, indices, points, npoints, labels, scalar_names, \
        input_vtk = read_vtk(labels_file, True, True)
    features, name = read_scalars(features_file, True, True)
    if sulci_file:
        sulci, name = read_scalars(sulci_file, True, True)
        # List of indices to sulcus vertices:
        sulcus_indices = [i for i,x in enumerate(sulci) if x != -1]
        segmentIDs = sulci
        sulcus_faces = remove_faces(faces, sulcus_indices)
    else:
        sulcus_indices = range(len(labels))
        segmentIDs = []
        sulcus_faces = faces

    #-------------------------------------------------------------------------
    # Prepare neighbors, label pairs, fundus IDs, and outputs:
    #-------------------------------------------------------------------------
    # Calculate neighbor lists for all points:
    print('Find neighbors to all vertices...')
    neighbor_lists = find_neighbors(faces, npoints)

    # Find label boundary points in any of the sulci:
    print('Find label boundary points in any of the sulci...')
    border_indices, border_label_tuples, unique_border_label_tuples = \
        extract_borders(sulcus_indices, labels, neighbor_lists,
                        ignore_values=[], return_label_pairs=True)
    if not len(border_indices):
        sys.exit('There are no label boundary points!')

    # Initialize an array of label boundaries fundus IDs
    # (label boundary vertices that define sulci in the labeling protocol):
    print('Build an array of label boundary fundus IDs...')
    label_boundary_fundi = -1 * np.ones(npoints)

    if hemi == 'lh':
        nsulcus_lists = len(dkt.left_sulcus_label_pair_lists)
    else:
        nsulcus_lists = len(dkt.right_sulcus_label_pair_lists)
    feature_to_fundus_mean_distances = -1 * np.ones(nsulcus_lists)
    feature_to_fundus_sd_distances = -1 * np.ones(nsulcus_lists)
    fundus_to_feature_mean_distances = -1 * np.ones(nsulcus_lists)
    fundus_to_feature_sd_distances = -1 * np.ones(nsulcus_lists)
    feature_to_fundus_mean_distances_vtk = ''
    fundus_to_feature_mean_distances_vtk = ''

    #-------------------------------------------------------------------------
    # Loop through sulci:
    #-------------------------------------------------------------------------
    # For each list of sorted label pairs (corresponding to a sulcus):
    for isulcus, label_pairs in enumerate(dkt.sulcus_label_pair_lists):

        # Keep the boundary points with label pair labels:
        fundus_indices = [x for i,x in enumerate(border_indices)
                          if np.unique(border_label_tuples[i]).tolist()
                          in label_pairs]

        # Store the points as sulcus IDs in the fundus IDs array:
        if fundus_indices:
            label_boundary_fundi[fundus_indices] = isulcus

    if len(np.unique(label_boundary_fundi)) > 1:

        #---------------------------------------------------------------------
        # Construct a feature-to-fundus distance matrix and VTK file:
        #---------------------------------------------------------------------
        # Construct a distance matrix:
        print('Construct a feature-to-fundus distance matrix...')
        sourceIDs = features
        targetIDs = label_boundary_fundi
        distances, distance_matrix = source_to_target_distances(
            sourceIDs, targetIDs, points, segmentIDs, excludeIDs)

        # Compute mean distances for each feature:
        nfeatures = min(np.shape(distance_matrix)[1], nsulcus_lists)
        for ifeature in range(nfeatures):
            feature_distances = [x for x in distance_matrix[:, ifeature]
                                 if x != -1]
            feature_to_fundus_mean_distances[ifeature] = \
                np.mean(feature_distances)
            feature_to_fundus_sd_distances[ifeature] = \
                np.std(feature_distances)

        if verbose:
            print('Feature-to-fundus mean distances:')
            print(feature_to_fundus_mean_distances)
            print('Feature-to-fundus standard deviations of distances:')
            print(feature_to_fundus_sd_distances)

        # Write resulting feature-label boundary distances to VTK file:
        if output_vtk_name:
            feature_to_fundus_mean_distances_vtk = os.path.join(os.getcwd(),
                output_vtk_name + '_feature_to_fundus_mean_distances.vtk')
            print('Write feature-to-fundus distances to {0}...'.
                  format(feature_to_fundus_mean_distances_vtk))
            write_vtk(feature_to_fundus_mean_distances_vtk, points,
                      [], [], sulcus_faces, [distances],
                      ['feature-to-fundus_distances'], 'float')

        #---------------------------------------------------------------------
        # Construct a fundus-to-feature distance matrix and VTK file:
        #---------------------------------------------------------------------
        # Construct a distance matrix:
        print('Construct a fundus-to-feature distance matrix...')
        sourceIDs = label_boundary_fundi
        targetIDs = features
        distances, distance_matrix = source_to_target_distances(
            sourceIDs, targetIDs, points, segmentIDs, excludeIDs)

        # Compute mean distances for each feature:
        nfeatures = min(np.shape(distance_matrix)[1], nsulcus_lists)
        for ifeature in range(nfeatures):
            fundus_distances = [x for x in distance_matrix[:, ifeature]
                                if x != -1]
            fundus_to_feature_mean_distances[ifeature] = \
                np.mean(fundus_distances)
            fundus_to_feature_sd_distances[ifeature] = \
                np.std(fundus_distances)

        if verbose:
            print('Fundus-to-feature mean distances:')
            print(fundus_to_feature_mean_distances)
            print('Fundus-to-feature standard deviations of distances:')
            print(fundus_to_feature_sd_distances)

        # Write resulting feature-label boundary distances to VTK file:
        if output_vtk_name:
            fundus_to_feature_mean_distances_vtk = os.path.join(os.getcwd(),
                output_vtk_name + '_fundus_to_feature_mean_distances.vtk')
            print('Write fundus-to-feature distances to {0}...'.
                  format(fundus_to_feature_mean_distances_vtk))
            write_vtk(fundus_to_feature_mean_distances_vtk, points,
                      [], [], sulcus_faces, [distances],
                      ['fundus-to-feature_distances'], 'float')

    #-------------------------------------------------------------------------
    # Return outputs:
    #-------------------------------------------------------------------------
    return feature_to_fundus_mean_distances, feature_to_fundus_sd_distances,\
           feature_to_fundus_mean_distances_vtk,\
           fundus_to_feature_mean_distances, fundus_to_feature_sd_distances,\
           fundus_to_feature_mean_distances_vtk
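
# evaluate_deep_features() above has no Examples section, so here is a
# minimal, hypothetical usage sketch.  The file paths follow the
# MINDBOGGLE_DATA layout used in the other examples and are assumptions,
# not verified inputs:
import os
path = os.environ['MINDBOGGLE_DATA']
features_file = os.path.join(path, 'arno', 'features', 'fundi.vtk')
labels_file = os.path.join(path, 'arno', 'labels', 'lh.labels.DKT25.manual.vtk')
sulci_file = os.path.join(path, 'arno', 'features', 'sulci.vtk')
(f2f_means, f2f_sds, f2f_vtk,
 s2f_means, s2f_sds, s2f_vtk) = evaluate_deep_features(
    features_file, labels_file, sulci_file, hemi='lh',
    excludeIDs=[-1], output_vtk_name='arno_lh', verbose=True)
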
Code example #14
File: io_table.py  Project: ccraddock/mindboggle
def write_vertex_measures(output_table, labels_or_file, sulci=[], fundi=[],
        affine_transform_file='', transform_format='itk',
        area_file='', mean_curvature_file='', travel_depth_file='',
        geodesic_depth_file='', freesurfer_convexity_file='',
        freesurfer_thickness_file='', delimiter=','):
    """
    Make a table of shape values per vertex.

    Note ::
        This function is tailored for Mindboggle outputs.

    Parameters
    ----------
    output_table : string
        output file (full path)
    labels_or_file : list or string
        label number for each vertex or name of VTK file with index scalars
    sulci :  list of integers
        indices to sulci, one per vertex, with -1 indicating no sulcus
    fundi :  list of integers
        indices to fundi, one per vertex, with -1 indicating no fundus
    affine_transform_file : string
        affine transform file to standard space
    transform_format : string
        format for transform file
        Ex: 'txt' for text, 'itk' for ITK, and 'mat' for Matlab format
    area_file :  string
        name of VTK file with surface area scalar values
    mean_curvature_file :  string
        name of VTK file with mean curvature scalar values
    travel_depth_file :  string
        name of VTK file with travel depth scalar values
    geodesic_depth_file :  string
        name of VTK file with geodesic depth scalar values
    freesurfer_convexity_file :  string
        name of VTK file with FreeSurfer convexity scalar values
    freesurfer_thickness_file :  string
        name of VTK file with FreeSurfer thickness scalar values
    delimiter : string
        delimiter between columns, such as ','

    Returns
    -------
    output_table : table file name for vertex shape values

    Examples
    --------
    >>> import os
    >>> from mindboggle.utils.io_vtk import read_scalars
    >>> from mindboggle.utils.io_table import write_vertex_measures
    >>> #
    >>> output_table = ''  # vertex_shapes.csv
    >>> path = os.environ['MINDBOGGLE_DATA']
    >>> labels_or_file = os.path.join(path, 'arno', 'labels', 'lh.labels.DKT25.manual.vtk')
    >>> sulci_file = os.path.join(path, 'arno', 'features', 'sulci.vtk')
    >>> fundi_file = os.path.join(path, 'arno', 'features', 'fundi.vtk')
    >>> sulci, name = read_scalars(sulci_file)
    >>> fundi, name = read_scalars(fundi_file)
    >>> affine_transform_file = os.path.join(path, 'arno', 'mri',
    >>>     't1weighted_brain.MNI152Affine.txt')
    >>> transform_format = 'itk'
    >>> area_file = os.path.join(path, 'arno', 'shapes', 'lh.pial.area.vtk')
    >>> mean_curvature_file = os.path.join(path, 'arno', 'shapes', 'lh.pial.mean_curvature.vtk')
    >>> travel_depth_file = os.path.join(path, 'arno', 'shapes', 'lh.pial.travel_depth.vtk')
    >>> geodesic_depth_file = os.path.join(path, 'arno', 'shapes', 'lh.pial.geodesic_depth.vtk')
    >>> freesurfer_convexity_file = ''
    >>> freesurfer_thickness_file = ''
    >>> delimiter = ','
    >>> #
    >>> write_vertex_measures(output_table, labels_or_file, sulci, fundi,
    >>>     affine_transform_file, transform_format, area_file,
    >>>     mean_curvature_file, travel_depth_file, geodesic_depth_file,
    >>>     freesurfer_convexity_file, freesurfer_thickness_file, delimiter)

    """
    import os
    import numpy as np
    from mindboggle.utils.io_vtk import read_scalars, read_vtk, \
        apply_affine_transform
    from mindboggle.utils.io_table import write_columns

    # Make sure inputs are lists:
    if isinstance(labels_or_file, np.ndarray):
        labels = [int(x) for x in labels_or_file]
    elif isinstance(labels_or_file, list):
        labels = labels_or_file
    elif isinstance(labels_or_file, str):
        labels, name = read_scalars(labels_or_file)
    if isinstance(sulci, np.ndarray):
        sulci = [int(x) for x in sulci]
    if isinstance(fundi, np.ndarray):
        fundi = [int(x) for x in fundi]

    if not labels and not sulci and not fundi:
        import sys
        sys.exit('No feature data to tabulate in write_vertex_measures().')

    # Feature names and corresponding feature lists:
    feature_names = ['Label', 'Sulcus', 'Fundus']
    feature_lists = [labels, sulci, fundi]

    # Shape names corresponding to shape files below:
    shape_names = ['area', 'mean curvature', 'travel depth', 'geodesic depth',
                   'FreeSurfer convexity', 'FreeSurfer thickness']

    # Load shape files as a list of numpy arrays of per-vertex shape values:
    shape_files = [area_file, mean_curvature_file, travel_depth_file,
                   geodesic_depth_file, freesurfer_convexity_file,
                   freesurfer_thickness_file]

    # Append columns of per-vertex scalar values:
    columns = []
    column_names = []
    for ifeature, values in enumerate(feature_lists):
        if values:
            columns.append(values)
            column_names.append(feature_names[ifeature])

    first_pass = True
    for ishape, shape_file in enumerate(shape_files):
        if os.path.exists(shape_file):
            if first_pass:
                u1, u2, u3, points, u4, scalars, u5, u6 = read_vtk(shape_file)
                columns.append(points)
                column_names.append('coordinates')
                first_pass = False
                if affine_transform_file and transform_format:
                    affine_points, \
                        foo1 = apply_affine_transform(affine_transform_file,
                                    points, transform_format,
                                    vtk_file_stem='')
                    columns.append(affine_points)
                    column_names.append('coordinates in standard space')
            else:
                scalars, name = read_scalars(shape_file)
            if len(scalars):
                columns.append(scalars)
                column_names.append(shape_names[ishape])

    # Prepend with column of indices and write table
    if not output_table:
        output_table = os.path.join(os.getcwd(), 'vertices.csv')
    write_columns(range(len(columns[0])), 'Index', delimiter, quote=True,
                  input_table='', output_table=output_table)
    write_columns(columns, column_names, delimiter, quote=True,
                  input_table=output_table, output_table=output_table)

    if not os.path.exists(output_table):
        raise(IOError(output_table + " not found"))

    return output_table
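
# A quick sanity check for the table written by write_vertex_measures():
# read the header and first data row back with the standard csv module.
# This is a usage sketch assuming the default 'vertices.csv' output name
# and the ',' delimiter:
import csv
with open('vertices.csv', 'r') as f:
    reader = csv.reader(f)
    header = next(reader)
    first_row = next(reader)
for column_name, value in zip(header, first_row):
    print('{0}: {1}'.format(column_name, value))
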
Code example #15
File: io_table.py  Project: ccraddock/mindboggle
def write_shape_stats(labels_or_file=[], sulci=[], fundi=[],
        affine_transform_file='', transform_format='itk',
        area_file='', normalize_by_area=True, mean_curvature_file='',
        travel_depth_file='', geodesic_depth_file='',
        freesurfer_convexity_file='', freesurfer_thickness_file='',
        labels_spectra=[], labels_spectra_IDs=[],
        sulci_spectra=[], sulci_spectra_IDs=[],
        labels_zernike=[], labels_zernike_IDs=[],
        sulci_zernike=[], sulci_zernike_IDs=[],
        exclude_labels=[-1], delimiter=','):
    """
    Make tables of shape statistics per label, sulcus, and/or fundus.

    Note ::
        This function is tailored for Mindboggle outputs.

    Parameters
    ----------
    labels_or_file : list or string
        label number for each vertex or name of VTK file with index scalars
    sulci :  list of integers
        indices to sulci, one per vertex, with -1 indicating no sulcus
    fundi :  list of integers
        indices to fundi, one per vertex, with -1 indicating no fundus
    affine_transform_file : string
        affine transform file to standard space
    transform_format : string
        format for transform file
        Ex: 'txt' for text, 'itk' for ITK, and 'mat' for Matlab format
    area_file :  string
        name of VTK file with surface area scalar values
    normalize_by_area : Boolean
        normalize all shape measures by area of label/feature?
    mean_curvature_file :  string
        name of VTK file with mean curvature scalar values
    travel_depth_file :  string
        name of VTK file with travel depth scalar values
    geodesic_depth_file :  string
        name of VTK file with geodesic depth scalar values
    freesurfer_convexity_file :  string
        name of VTK file with FreeSurfer convexity scalar values
    freesurfer_thickness_file :  string
        name of VTK file with FreeSurfer thickness scalar values
    labels_spectra : list of lists of floats
        Laplace-Beltrami spectra for labeled regions
    labels_spectra_IDs : list of integers
        unique ID numbers (labels) for labels_spectra
    sulci_spectra : list of lists of floats
        Laplace-Beltrami spectra for sulci
    sulci_spectra_IDs : list of integers
        unique ID numbers (labels) for sulci_spectra
    labels_zernike : list of lists of floats
        Zernike moments for labeled regions
    labels_zernike_IDs : list of integers
        unique ID numbers (labels) for labels_zernike
    sulci_zernike : list of lists of floats
        Zernike moments for sulci
    sulci_zernike_IDs : list of integers
        unique ID numbers (labels) for sulci_zernike
    exclude_labels : list of integers
        indices to be excluded (in addition to -1)
    delimiter : string
        delimiter between columns, such as ','

    Returns
    -------
    label_table :  string
        output table filename for label shapes
    sulcus_table :  string
        output table filename for sulcus shapes
    fundus_table :  string
        output table filename for fundus shapes

    Examples
    --------
    >>> import os
    >>> from mindboggle.utils.io_vtk import read_scalars
    >>> from mindboggle.utils.io_table import write_shape_stats
    >>> path = os.environ['MINDBOGGLE_DATA']
    >>> labels_or_file = os.path.join(path, 'arno', 'labels', 'lh.labels.DKT25.manual.vtk')
    >>> sulci_file = os.path.join(path, 'arno', 'features', 'sulci.vtk')
    >>> fundi_file = os.path.join(path, 'arno', 'features', 'fundi.vtk')
    >>> sulci, name = read_scalars(sulci_file)
    >>> fundi, name = read_scalars(fundi_file)
    >>> affine_transform_file = os.path.join(path, 'arno', 'mri', 't1weighted_brain.MNI152Affine.txt')
    >>> #transform_format = 'mat'
    >>> transform_format = 'itk'
    >>> area_file = os.path.join(path, 'arno', 'shapes', 'lh.pial.area.vtk')
    >>> normalize_by_area = True
    >>> mean_curvature_file = os.path.join(path, 'arno', 'shapes', 'lh.pial.mean_curvature.vtk')
    >>> travel_depth_file = os.path.join(path, 'arno', 'shapes', 'lh.pial.travel_depth.vtk')
    >>> geodesic_depth_file = os.path.join(path, 'arno', 'shapes', 'lh.pial.geodesic_depth.vtk')
    >>> freesurfer_convexity_file = ''
    >>> freesurfer_thickness_file = ''
    >>> delimiter = ','
    >>> #
    >>> labels, name = read_scalars(labels_or_file)
    >>> labels_spectra = []
    >>> labels_spectra_IDs = []
    >>> sulci_spectra = []
    >>> sulci_spectra_IDs = []
    >>> labels_zernike = []
    >>> labels_zernike_IDs = []
    >>> sulci_zernike = []
    >>> sulci_zernike_IDs = []
    >>> exclude_labels = [-1]
    >>> #
    >>> write_shape_stats(labels_or_file, sulci, fundi,
    >>>     affine_transform_file, transform_format, area_file, normalize_by_area,
    >>>     mean_curvature_file, travel_depth_file, geodesic_depth_file,
    >>>     freesurfer_convexity_file, freesurfer_thickness_file,
    >>>     labels_spectra, labels_spectra_IDs,
    >>>     sulci_spectra, sulci_spectra_IDs,
    >>>     labels_zernike, labels_zernike_IDs,
    >>>     sulci_zernike, sulci_zernike_IDs,
    >>>     exclude_labels, delimiter)

    """
    import os
    import numpy as np

    from mindboggle.utils.compute import means_per_label, stats_per_label, \
        sum_per_label
    from mindboggle.utils.io_vtk import read_scalars, read_vtk, \
        apply_affine_transform
    from mindboggle.utils.io_table import write_columns
    from mindboggle.LABELS import DKTprotocol

    dkt = DKTprotocol()

    # Make sure inputs are lists:
    if isinstance(labels_or_file, np.ndarray):
        labels = [int(x) for x in labels_or_file]
    elif isinstance(labels_or_file, list):
        labels = labels_or_file
    elif isinstance(labels_or_file, str):
        labels, name = read_scalars(labels_or_file)
    if isinstance(sulci, np.ndarray):
        sulci = [int(x) for x in sulci]
    if isinstance(fundi, np.ndarray):
        fundi = [int(x) for x in fundi]

    if not labels and not sulci and not fundi:
        import sys
        sys.exit('No feature data to tabulate in write_shape_stats().')

    #-------------------------------------------------------------------------
    # Feature lists, shape names, and shape files:
    #-------------------------------------------------------------------------
    # Feature lists:
    feature_lists = [labels, sulci, fundi]
    feature_names = ['Label', 'Sulcus', 'Fundus']
    spectra_lists = [labels_spectra, sulci_spectra]
    spectra_ID_lists = [labels_spectra_IDs, sulci_spectra_IDs]
    zernike_lists = [labels_zernike, sulci_zernike]
    zernike_ID_lists = [labels_zernike_IDs, sulci_zernike_IDs]
    table_names = ['label_shapes.csv', 'sulcus_shapes.csv',
                   'fundus_shapes.csv']

    # Shape names corresponding to shape files below:
    shape_names = ['area', 'mean curvature', 'travel depth', 'geodesic depth',
                   'FreeSurfer convexity', 'FreeSurfer thickness']

    # Load shape files as a list of numpy arrays of per-vertex shape values:
    shape_files = [area_file, mean_curvature_file, travel_depth_file,
                   geodesic_depth_file, freesurfer_convexity_file,
                   freesurfer_thickness_file]
    shape_arrays = []
    first_pass = True
    area_array = []

    for ishape, shape_file in enumerate(shape_files):
        if os.path.exists(shape_file):
            if first_pass:
                faces, lines, indices, points, npoints, scalars_array, name, \
                    input_vtk = read_vtk(shape_file, True, True)
                points = np.array(points)
                first_pass = False
                if affine_transform_file and transform_format:
                    affine_points, \
                        foo1 = apply_affine_transform(affine_transform_file,
                                    points, transform_format,
                                    vtk_file_stem='')
                    affine_points = np.array(affine_points)
            else:
                scalars_array, name = read_scalars(shape_file, True, True)
            if scalars_array.size:
                shape_arrays.append(scalars_array)

                # Store area array:
                if ishape == 0:
                    area_array = scalars_array.copy()

    if normalize_by_area:
        use_area = area_array
    else:
        use_area = []

    # Initialize table file names:
    label_table = ''
    sulcus_table = ''
    fundus_table = ''

    # Loop through features / tables:
    for itable, feature_list in enumerate(feature_lists):
        column_names = []

        #---------------------------------------------------------------------
        # For each feature, construct a table of average shape values:
        #---------------------------------------------------------------------
        if feature_list:
            feature_name = feature_names[itable]
            columns = []

            #-----------------------------------------------------------------
            # Loop through shape measures:
            #-----------------------------------------------------------------
            for ishape, shape_array in enumerate(shape_arrays):
                shape = shape_names[ishape]
                print('  Compute statistics on {0} {1}...'.
                      format(feature_name, shape))
                #-------------------------------------------------------------
                # Append feature areas to columns:
                #-------------------------------------------------------------
                if ishape == 0 and np.size(area_array):
                    sums, label_list = sum_per_label(shape_array,
                                            feature_list, exclude_labels)
                    column_names.append(shape)
                    columns.append(sums)
                #-------------------------------------------------------------
                # Append feature shape statistics to columns:
                #-------------------------------------------------------------
                else:
                    medians, mads, means, sdevs, skews, kurts, \
                    lower_quarts, upper_quarts, \
                    label_list = stats_per_label(shape_array,
                        feature_list, exclude_labels, area_array, precision=1)

                    column_names.append(shape + ': median')
                    column_names.append(shape + ': MAD')
                    column_names.append(shape + ': mean')
                    column_names.append(shape + ': SD')
                    column_names.append(shape + ': skew')
                    column_names.append(shape + ': kurtosis')
                    column_names.append(shape + ': 25%')
                    column_names.append(shape + ': 75%')
                    columns.append(medians)
                    columns.append(mads)
                    columns.append(means)
                    columns.append(sdevs)
                    columns.append(skews)
                    columns.append(kurts)
                    columns.append(lower_quarts)
                    columns.append(upper_quarts)

            #-----------------------------------------------------------------
            # Mean positions in the original space:
            #-----------------------------------------------------------------
            # Compute mean position per feature:
            positions, sdevs, label_list, foo = means_per_label(points,
                feature_list, exclude_labels, use_area)

            # Append mean position per feature to columns:
            column_names.append('mean position')
            columns.append(positions)

            #-----------------------------------------------------------------
            # Mean positions in standard space:
            #-----------------------------------------------------------------
            if affine_transform_file and transform_format:
                # Compute standard space mean position per feature:
                standard_positions, sdevs, label_list, \
                foo = means_per_label(affine_points,
                    feature_list, exclude_labels, use_area)

                # Append standard space mean position per feature to columns:
                column_names.append('mean position in standard space')
                columns.append(standard_positions)

            #-----------------------------------------------------------------
            # Label names:
            #-----------------------------------------------------------------
            if itable == 0:
                label_numbers = dkt.label_numbers
                label_names = dkt.label_names
                name_list = []
                for label in label_list:
                    name_list.append(label_names[label_numbers.index(label)])

            #-----------------------------------------------------------------
            # Laplace-Beltrami spectra:
            #-----------------------------------------------------------------
            if itable in [0, 1]:
                spectra = spectra_lists[itable]
                if spectra:
                    spectra_IDs = spectra_ID_lists[itable]

                    # Order spectra into a list:
                    spectrum_list = []
                    for label in label_list:
                        if label in spectra_IDs:
                            spectrum = spectra[spectra_IDs.index(label)]
                            spectrum_list.append(spectrum)
                        else:
                            spectrum_list.append('')

                    # Append spectral shape name and values to relevant columns:
                    columns.append(spectrum_list)
                    column_names.append('Laplace-Beltrami spectra')

            #-----------------------------------------------------------------
            # Zernike moments:
            #-----------------------------------------------------------------
            if itable in [0, 1]:
                zernike = zernike_lists[itable]
                if zernike:
                    zernike_IDs = zernike_ID_lists[itable]

                    # Order zernike into a list:
                    spectrum_list = []
                    for label in label_list:
                        if label in zernike_IDs:
                            spectrum = zernike[zernike_IDs.index(label)]
                            spectrum_list.append(spectrum)
                        else:
                            spectrum_list.append('')

                    # Append Zernike shape name and values to relevant columns:
                    columns.append(spectrum_list)
                    column_names.append('Zernike moments')

            #-----------------------------------------------------------------
            # Write labels/IDs and values to table:
            #-----------------------------------------------------------------
            # Write labels/IDs to table:
            output_table = os.path.join(os.getcwd(), table_names[itable])
            output_table = write_columns(label_list, feature_name, delimiter,
                                         quote=True, input_table='',
                                         output_table=output_table)
            if itable == 0:
                write_columns(name_list, 'Label name', delimiter,
                              quote=True, input_table=output_table,
                              output_table=output_table)

            # Append columns of shape values to table:
            if columns:
                write_columns(columns, column_names, delimiter,
                              quote=True, input_table=output_table,
                              output_table=output_table)

            if not os.path.exists(output_table):
                raise(IOError(output_table + " not found"))

            #-----------------------------------------------------------------
            # Return correct table file name:
            #-----------------------------------------------------------------
            if itable == 0:
                label_table = output_table
            elif itable == 1:
                sulcus_table = output_table
            elif itable == 2:
                fundus_table = output_table

    return label_table, sulcus_table, fundus_table
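
# write_shape_stats() gets its per-label columns (median, MAD, mean, SD,
# skew, kurtosis, 25%/75%) from stats_per_label().  The sketch below shows
# how those statistics can be computed for one label's shape values with
# NumPy/SciPy; it illustrates the quantities only and is not Mindboggle's
# implementation (which also supports area weighting):
import numpy as np
from scipy import stats

values = np.array([2.1, 2.7, 3.0, 3.3, 4.8])  # toy shape values, one label
median = np.median(values)
mad = np.median(np.abs(values - median))  # median absolute deviation
mean, sd = np.mean(values), np.std(values)
skew, kurtosis = stats.skew(values), stats.kurtosis(values)
lower_quart, upper_quart = np.percentile(values, [25, 75])
print(median, mad, mean, sd, skew, kurtosis, lower_quart, upper_quart)
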
Code example #16
def zernike_moments_per_label(vtk_file, order=20, exclude_labels=[-1],
                              area_file="", largest_segment=True):
    """
    Compute the Zernike moments per labeled region in a file.

    Parameters
    ----------
    vtk_file : string
        name of VTK surface mesh file containing index scalars (labels)
    order : integer
        number of moments to compute
    exclude_labels : list of integers
        labels to be excluded
    area_file :  string
        name of VTK file with surface area scalar values
    largest_segment :  Boolean
        compute moments only for largest segment with a given label?

    Returns
    -------
    descriptors_lists : list of lists of floats
        Zernike descriptors per label
    label_list : list of integers
        list of unique labels for which moments are computed

    Examples
    --------
    >>> # Moments for label 22 (postcentral) in Twins-2-1:
    >>> import os
    >>> from mindboggle.shapes.zernike.zernike import zernike_moments_per_label
    >>> path = os.environ['MINDBOGGLE_DATA']
    >>> vtk_file = os.path.join(path, 'arno', 'labels', 'lh.labels.DKT25.manual.vtk')
    >>> area_file = os.path.join(path, 'arno', 'shapes', 'lh.pial.area.vtk')
    >>> order = 3
    >>> exclude_labels = [0]
    >>> largest_segment = True
    >>> zernike_moments_per_label(vtk_file, order, exclude_labels, area_file,
    >>>                           largest_segment)
    ([[7562.751480397972,
       143262239.5171249,
       1107670.7893994227,
       28487908892.820065,
       112922387.17238183,
       10250734140.30357]],
     [22])
    >>> order = 10
    >>> zernike_moments_per_label(vtk_file, order, exclude_labels, area_file,
    >>>                           largest_segment)
    ([[7562.751480397972,
       143262239.5171249,
       3308874674202.293,
       8.485211965384958e+16,
       2.3330162566631947e+21,
       6.743205749389719e+25,
       1107670.7893994227,
       28487908892.820065,
       750581458956752.5,
       2.08268406178679e+19,
       6.041241636463012e+23,
       112922387.17238183,
       3771094165018.0186,
       1.1436534456761454e+17,
       3.475222918728238e+21,
       1.0745294340540639e+26,
       10250734140.30357,
       429344737184365.75,
       1.4944306620454633e+19,
       4.98685998888202e+23,
       889109957039.494,
       4.5419095219797416e+16,
       1.798809048329269e+21,
       6.5720455808877056e+25,
       76646448525991.2,
       4.648745223427816e+18,
       2.067942924550439e+23,
       6705825311489244.0,
       4.701251187236028e+20,
       2.3147665646780795e+25,
       5.969381989053711e+17,
       4.728007168783364e+22,
       5.360784767352255e+19,
       4.7214146910478664e+24,
       4.813773883638603e+21,
       4.3049570618844856e+23]],
     [22])

    """
    from mindboggle.utils.io_vtk import read_vtk, read_scalars
    from mindboggle.utils.mesh import remove_faces
    from mindboggle.shapes.zernike.zernike import zernike_moments, \
        zernike_moments_of_largest

    # Read VTK surface mesh file:
    faces, u1, u2, points, u4, labels, u5, u6 = read_vtk(vtk_file)

    # Area file:
    if area_file:
        areas, u1 = read_scalars(area_file)
    else:
        areas = None

    # Loop through labeled regions:
    # Collect unique labels, preserving order and skipping excluded labels:
    ulabels = []
    for x in labels:
        if x not in ulabels and x not in exclude_labels:
            ulabels.append(int(x))
    label_list = []
    descriptors_lists = []
    for label in ulabels:
        # if label==22:
        #  print("DEBUG: COMPUTE FOR ONLY ONE LABEL")

        # Determine the indices per label:
        label_indices = [i for i, x in enumerate(labels) if x == label]
        print("{0} vertices for label {1}".format(len(label_indices), label))

        # Remove background faces:
        select_faces = remove_faces(faces, label_indices)

        # Compute Zernike moments for the label:
        if largest_segment:
            exclude_labels_inner = [-1]
            descriptors = zernike_moments_of_largest(
                points, select_faces, order, exclude_labels_inner, areas)
        else:
            descriptors = zernike_moments(points, select_faces, order)

        # Append to a list of lists of spectra:
        descriptors_lists.append(descriptors)
        label_list.append(label)

    return descriptors_lists, label_list
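
# Both Zernike examples filter the mesh with remove_faces() before
# computing moments.  Judging from its use above, the effect is to keep
# only faces whose vertices all belong to the selected label; a
# hypothetical stand-alone sketch of that kind of filtering (not
# Mindboggle's actual implementation):
def keep_label_faces(faces, label_indices):
    """Keep faces whose three vertices are all in label_indices (sketch)."""
    keep = frozenset(label_indices)
    return [face for face in faces if all(v in keep for v in face)]

toy_faces = [[0, 1, 2], [2, 3, 4], [1, 2, 3]]
print(keep_label_faces(toy_faces, [1, 2, 3]))  # [[1, 2, 3]]
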
Code example #17
File: laplace_beltrami.py  Project: jsalva/mindboggle
def spectrum_per_label(vtk_file, spectrum_size=10, exclude_labels=[-1],
                       normalization='area', area_file='',
                       largest_segment=True):
    """
    Compute Laplace-Beltrami spectrum per labeled region in a file.

    Parameters
    ----------
    vtk_file : string
        name of VTK surface mesh file containing index scalars (labels)
    spectrum_size : integer
        number of eigenvalues to be computed (the length of the spectrum)
    exclude_labels : list of integers
        labels to be excluded
    normalization : string
        the method used to normalize eigenvalues ('area' or None)
        if "area", use area of the 2D structure as in Reuter et al. 2006
    area_file :  string
        name of VTK file with surface area scalar values
    largest_segment :  Boolean
        compute spectrum only for largest segment with a given label?

    Returns
    -------
    spectrum_lists : list of lists
        first eigenvalues for each label's Laplace-Beltrami spectrum
    label_list : list of integers
        list of unique labels for which spectra are obtained

    Examples
    --------
    >>> # Uncomment "if label==22:" below to run example:
    >>> # Spectrum for Twins-2-1 left postcentral (22) pial surface:
    >>> import os
    >>> from mindboggle.shapes.laplace_beltrami import spectrum_per_label
    >>> path = os.environ['MINDBOGGLE_DATA']
    >>> vtk_file = os.path.join(path, 'arno', 'labels', 'lh.labels.DKT31.manual.vtk')
    >>> area_file = os.path.join(path, 'arno', 'shapes', 'lh.pial.area.vtk')
    >>> spectrum_size = 6
    >>> exclude_labels = [0]  #[-1]
    >>> largest_segment = True
    >>> spectrum_per_label(vtk_file, spectrum_size, exclude_labels, None,
    >>>                    area_file, largest_segment)
    ([[6.3469513010430304e-18,
       0.0005178862383467463,
       0.0017434911095630772,
       0.003667561767487686,
       0.005429017880363784,
       0.006309346984678924]],
     [22])

    """
    from mindboggle.utils.io_vtk import read_vtk, read_scalars
    from mindboggle.utils.mesh import remove_faces, reindex_faces_points
    from mindboggle.shapes.laplace_beltrami import fem_laplacian,\
        spectrum_of_largest

    # Read VTK surface mesh file:
    faces, u1, u2, points, u4, labels, u5, u6 = read_vtk(vtk_file)

    # Area file:
    if area_file:
        areas, u1 = read_scalars(area_file)
    else:
        areas = None

    # Loop through labeled regions:
    # Collect unique labels, preserving order and skipping excluded labels:
    ulabels = []
    for x in labels:
        if x not in ulabels and x not in exclude_labels:
            ulabels.append(int(x))
    label_list = []
    spectrum_lists = []
    for label in ulabels:
        #if label == 22:
        #    print("DEBUG: COMPUTE FOR ONLY ONE LABEL")

        # Determine the indices per label:
        Ilabel = [i for i,x in enumerate(labels) if x == label]
        print('{0} vertices for label {1}'.format(len(Ilabel), label))

        # Remove background faces:
        pick_faces = remove_faces(faces, Ilabel)
        pick_faces, pick_points, o1 = reindex_faces_points(pick_faces, points)

        # Compute Laplace-Beltrami spectrum for the label:
        if largest_segment:
            exclude_labels_inner = [-1]
            spectrum = spectrum_of_largest(pick_points, pick_faces,
                                           spectrum_size,
                                           exclude_labels_inner,
                                           normalization, areas)
        else:
            spectrum = fem_laplacian(pick_points, pick_faces,
                                     spectrum_size, normalization)

        # Append to a list of lists of spectra:
        spectrum_lists.append(spectrum)
        label_list.append(label)

    return spectrum_lists, label_list
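
# The 'area' normalization mentioned in the docstring (Reuter et al. 2006)
# makes spectra comparable across differently sized surfaces: eigenvalues
# of the Laplace-Beltrami operator scale inversely with surface area, so a
# scale-invariant spectrum is obtained by multiplying each eigenvalue by
# the total area of the 2D structure.  A minimal sketch of that step,
# assuming the spectrum and total area are already computed:
def normalize_spectrum_by_area(spectrum, total_area):
    """Multiply each eigenvalue by the surface area (sketch)."""
    return [eigenvalue * total_area for eigenvalue in spectrum]

print(normalize_spectrum_by_area([0.0005, 0.0017, 0.0037], 100.0))
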
Code example #18
def concatenate_sulcus_scalars(scalar_files, fold_files, label_files):
    """
    Prepare data for estimating scalar distributions along and outside fundi.

    Extract (e.g., depth, curvature) scalar values in folds, along sulcus
    label boundaries as well as outside the sulcus label boundaries.
    Concatenate these scalar values across multiple files.

    Parameters
    ----------
    scalar_files : list of strings
        names of surface mesh VTK files with scalar values to concatenate
    fold_files : list of strings (corr. to each list in scalar_files)
        VTK files with fold numbers as scalars (-1 for non-fold vertices)
    label_files : list of strings (corr. to fold_files)
        VTK files with label numbers (-1 for unlabeled vertices)

    Returns
    -------
    border_scalars : list of floats
        concatenated scalar values within folds along sulcus label boundaries
    nonborder_scalars : list of floats
        concatenated scalar values within folds outside sulcus label boundaries

    Examples
    --------
    >>> # Concatenate (duplicate) depth scalars:
    >>> import os
    >>> from mindboggle.shapes.likelihood import concatenate_sulcus_scalars
    >>> path = os.environ['MINDBOGGLE_DATA']
    >>> depth_file = os.path.join(path, 'arno', 'shapes', 'depth_rescaled.vtk')
    >>> folds_file = os.path.join(path, 'arno', 'features', 'folds.vtk')
    >>> labels_file = os.path.join(path, 'arno', 'labels', 'lh.labels.DKT25.manual.vtk')
    >>> scalar_files = [depth_file, depth_file]
    >>> fold_files = [folds_file, folds_file]
    >>> label_files = [labels_file, labels_file]
    >>> #
    >>> S = concatenate_sulcus_scalars(scalar_files, fold_files, label_files)

    """
    import numpy as np

    from mindboggle.utils.io_vtk import read_scalars
    from mindboggle.utils.mesh import find_neighbors_from_file
    from mindboggle.labels.labels import extract_borders
    from mindboggle.labels.protocol import dkt_protocol

    protocol = 'DKT25'
    sulcus_names, sulcus_label_pair_lists, unique_sulcus_label_pairs, \
        label_names, label_numbers, cortex_names, cortex_numbers, \
        noncortex_names, noncortex_numbers = dkt_protocol(protocol)

    # Prepare (non-unique) list of sulcus label pairs:
    protocol_label_pairs = [x for lst in sulcus_label_pair_lists for x in lst]

    border_scalars = []
    nonborder_scalars = []

    # Loop through files with the scalar values:
    for ifile, scalar_file in enumerate(scalar_files):
        print(scalar_file)

        # Load scalars, folds, and labels:
        folds_file = fold_files[ifile]
        labels_file = label_files[ifile]
        scalars, name = read_scalars(scalar_file, True, True)
        if scalars.shape:
            folds, name = read_scalars(folds_file)
            labels, name = read_scalars(labels_file)
            indices_folds = [i for i,x in enumerate(folds) if x != -1]
            neighbor_lists = find_neighbors_from_file(labels_file)

            # Find all label border pairs within the folds:
            indices_label_pairs, label_pairs, unique_pairs = extract_borders(
                indices_folds, labels, neighbor_lists, ignore_values=[-1],
                return_label_pairs=True)
            indices_label_pairs = np.array(indices_label_pairs)

            # Find vertices with label pairs in the sulcus labeling protocol:
            Ipairs_in_protocol = [i for i,x in enumerate(label_pairs)
                                  if x in protocol_label_pairs]
            indices_label_pairs = indices_label_pairs[Ipairs_in_protocol]
            indices_outside_pairs = list(frozenset(indices_folds).difference(
                indices_label_pairs))

            # Store scalar values in folds along label border pairs:
            border_scalars.extend(scalars[indices_label_pairs].tolist())

            # Store scalar values in folds outside label border pairs:
            nonborder_scalars.extend(scalars[indices_outside_pairs].tolist())

    return border_scalars, nonborder_scalars
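
# The two lists returned above are typically compared as distributions:
# label borders run along the deepest parts of sulci, so depth values along
# borders should skew higher than those off borders.  A plotting sketch
# with matplotlib, assuming scalar_files, fold_files, and label_files as in
# the Examples section:
import matplotlib.pyplot as plt

border_scalars, nonborder_scalars = concatenate_sulcus_scalars(
    scalar_files, fold_files, label_files)

fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.hist(border_scalars, 100, alpha=0.5, label='on label borders')
ax.hist(nonborder_scalars, 100, alpha=0.5, label='off label borders')
ax.legend()
plt.show()
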
Code example #19
def extract_fundi(folds,
                  curv_file,
                  depth_file,
                  min_separation=10,
                  erode_ratio=0.1,
                  erode_min_size=1,
                  save_file=False):
    """
    Extract fundi from folds.

    A fundus is a branching curve that runs along the deepest and most
    highly curved portions of a fold.

    Steps ::
        1. Find fundus endpoints (outer anchors) with find_outer_anchors().
        2. Include inner anchor points.
        3. Connect anchor points using connect_points_erosion();
           inner anchors are removed if they result in endpoints.

    Parameters
    ----------
    folds : numpy array or list of integers
        fold number for each vertex
    curv_file :  string
        surface mesh file in VTK format with mean curvature values
    depth_file :  string
        surface mesh file in VTK format with rescaled depth values
    min_separation : integer
        minimum number of edges between inner/outer anchor points
    erode_ratio : float
        fraction of indices to test for removal at each iteration
        in connect_points_erosion()
    erode_min_size : integer
        minimum size for erosion in connect_points_erosion()
    save_file : Boolean
        save output VTK file?

    Returns
    -------
    fundus_per_fold : list of integers
        fundus numbers for all vertices, labeled by fold
        (-1 for non-fundus vertices)
    n_fundi_in_folds :  integer
        number of fundi
    fundus_per_fold_file : string (if save_file)
        output VTK file with fundus numbers (-1 for non-fundus vertices)

    Examples
    --------
    >>> # Extract fundus from one or more folds:
    >>> single_fold = True
    >>> import os
    >>> from mindboggle.utils.io_vtk import read_scalars
    >>> from mindboggle.features.fundi import extract_fundi
    >>> from mindboggle.utils.plots import plot_surfaces
    >>> path = os.environ['MINDBOGGLE_DATA']
    >>> curv_file = os.path.join(path, 'arno', 'shapes', 'lh.pial.mean_curvature.vtk')
    >>> depth_file = os.path.join(path, 'arno', 'shapes', 'travel_depth_rescaled.vtk')
    >>> folds_file = os.path.join(path, 'arno', 'features', 'folds.vtk')
    >>> folds, name = read_scalars(folds_file, True, True)
    >>> if single_fold:
    >>>     fold_number = 2 #11
    >>>     folds[folds != fold_number] = -1
    >>> min_separation = 10
    >>> erode_ratio = 0.10
    >>> erode_min_size = 10
    >>> save_file = True
    >>> o1, o2, fundus_per_fold_file = extract_fundi(folds, curv_file,
    ...     depth_file, min_separation, erode_ratio, erode_min_size, save_file)
    >>> #
    >>> # View:
    >>> plot_surfaces(fundus_per_fold_file)

    """

    # Extract a skeleton to connect endpoints in a fold:
    import os
    import numpy as np
    from time import time

    from mindboggle.utils.io_vtk import read_scalars, read_vtk, rewrite_scalars
    from mindboggle.utils.compute import median_abs_dev
    from mindboggle.utils.paths import find_max_values
    from mindboggle.utils.mesh import find_neighbors_from_file, find_complete_faces
    from mindboggle.utils.paths import find_outer_anchors, connect_points_erosion

    if isinstance(folds, list):
        folds = np.array(folds)

    # Load values, inner anchor threshold, and neighbors:
    faces, u1, u2, points, npoints, curvs, u3, u4 = read_vtk(
        curv_file, True, True)
    depths, name = read_scalars(depth_file, True, True)
    values = curvs * depths
    values0 = [x for x in values if x > 0]
    thr = np.median(values0) + 2 * median_abs_dev(values0)
    neighbor_lists = find_neighbors_from_file(curv_file)

    #-------------------------------------------------------------------------
    # Loop through folds:
    #-------------------------------------------------------------------------
    t1 = time()
    skeletons = []
    unique_fold_IDs = [x for x in np.unique(folds) if x != -1]

    if len(unique_fold_IDs) == 1:
        print("Extract a fundus from 1 fold...")
    else:
        print("Extract a fundus from each of {0} folds...".format(
            len(unique_fold_IDs)))

    for fold_ID in unique_fold_IDs:
        indices_fold = [i for i, x in enumerate(folds) if x == fold_ID]
        if indices_fold:
            print('  Fold {0}:'.format(int(fold_ID)))

            #-----------------------------------------------------------------
            # Find outer anchor points on the boundary of the surface region,
            # to serve as fundus endpoints:
            #-----------------------------------------------------------------
            outer_anchors, tracks = find_outer_anchors(indices_fold,
                                                       neighbor_lists, values,
                                                       depths, min_separation)

            #-----------------------------------------------------------------
            # Find inner anchor points:
            #-----------------------------------------------------------------
            inner_anchors = find_max_values(points, values, min_separation,
                                            thr)

            #-----------------------------------------------------------------
            # Connect anchor points to create skeleton:
            #-----------------------------------------------------------------
            B = -1 * np.ones(npoints)
            B[indices_fold] = 1
            skeleton = connect_points_erosion(B,
                                              neighbor_lists,
                                              outer_anchors,
                                              inner_anchors,
                                              values,
                                              erode_ratio,
                                              erode_min_size,
                                              save_steps=[],
                                              save_vtk='')
            if skeleton:
                skeletons.extend(skeleton)

            #-----------------------------------------------------------------
            # Remove fundus vertices if they complete triangle faces:
            #-----------------------------------------------------------------
            Iremove = find_complete_faces(skeletons, faces)
            if Iremove:
                skeletons = list(frozenset(skeletons).difference(Iremove))

    indices = [x for x in skeletons if folds[x] != -1]
    fundus_per_fold = -1 * np.ones(npoints)
    fundus_per_fold[indices] = folds[indices]
    n_fundi_in_folds = len([x for x in np.unique(fundus_per_fold) if x != -1])
    if n_fundi_in_folds == 1:
        sdum = 'fold fundus'
    else:
        sdum = 'fold fundi'
    print('  ...Extracted {0} {1} ({2:.2f} seconds)'.format(
        n_fundi_in_folds, sdum, time() - t1))

    #-------------------------------------------------------------------------
    # Return fundi, number of fundi, and file name:
    #-------------------------------------------------------------------------
    fundus_per_fold_file = None
    if n_fundi_in_folds > 0:
        fundus_per_fold = [int(x) for x in fundus_per_fold]
        if save_file:
            fundus_per_fold_file = os.path.join(os.getcwd(),
                                                'fundus_per_fold.vtk')
            rewrite_scalars(curv_file, fundus_per_fold_file, fundus_per_fold,
                            'fundi', folds)
            if not os.path.exists(fundus_per_fold_file):
                raise(IOError(fundus_per_fold_file + " not found"))

    return fundus_per_fold, n_fundi_in_folds, fundus_per_fold_file
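
# The inner-anchor threshold in extract_fundi() is the median of the
# positive curvature*depth values plus twice their median absolute
# deviation (MAD), a robust analogue of mean + 2*SD.  A self-contained
# sketch of that computation, with a stand-in for
# mindboggle.utils.compute.median_abs_dev (whose exact definition may
# differ):
import numpy as np

def median_abs_dev_sketch(x):
    x = np.asarray(x)
    return np.median(np.abs(x - np.median(x)))

values0 = [0.2, 0.5, 0.9, 1.4, 3.0]  # toy positive curvature*depth values
thr = np.median(values0) + 2 * median_abs_dev_sketch(values0)
print(thr)  # median 0.9 + 2 * MAD 0.5 = 1.9
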
Code example #20
def compute_likelihood(trained_file, depth_file, curvature_file, folds,
                       save_file=False):
    """
    Compute likelihoods based on input values, folds, and estimated parameters.

    Compute likelihood values for a given VTK surface mesh file, after training
    on distributions of depth and curvature values from multiple files.

    Parameters
    ----------
    trained_file : pickle compressed file
        contains the following dictionaries containing lists of floats
        (estimates of depth or curvature means, sigmas, and weights
         trained on fold vertices either on or off sulcus label borders)
        depth_border, curv_border, depth_nonborder, curv_nonborder
    depth_file : string
        VTK surface mesh file with depth values in [0,1] for all vertices
    curvature_file : string
        VTK surface mesh file with curvature values in [-1,1] for all vertices
    folds : list of integers
        fold number for all vertices (-1 for non-fold vertices)
    save_file : Boolean
        save output VTK file?

    Returns
    -------
    likelihoods : list of floats
        likelihood values for all vertices (0 for non-fold vertices)
    likelihoods_file : string (if save_file)
        name of output VTK file with likelihood scalars
        (-1 for non-fold vertices)

    Examples
    --------
    >>> import os
    >>> from mindboggle.utils.io_vtk import read_scalars, rewrite_scalars
    >>> from mindboggle.shapes.likelihood import compute_likelihood
    >>> from mindboggle.utils.plots import plot_vtk
    >>> path = os.environ['MINDBOGGLE_DATA']
    >>> trained_file = os.path.join(path, 'atlases', 'depth_curv_border_nonborder_parameters.pkl')
    >>> #depth_file = os.path.join(path, 'arno', 'shapes', 'travel_depth_rescaled.vtk')
    >>> depth_file = os.path.join(path, 'arno', 'shapes', 'lh.pial.travel_depth.vtk')
    >>> curvature_file = os.path.join(path, 'arno', 'shapes', 'lh.pial.mean_curvature.vtk')
    >>> folds_file = os.path.join(path, 'arno', 'features', 'folds.vtk')
    >>> folds, name = read_scalars(folds_file)
    >>> save_file = True
    >>> #
    >>> compute_likelihood(trained_file, depth_file, curvature_file, folds, save_file)
    >>> # View:
    >>> plot_vtk('likelihoods.vtk', folds_file)

    """
    import os
    import numpy as np
    from math import pi
    import cPickle as pickle

    from mindboggle.utils.io_vtk import read_scalars, rewrite_scalars


    # Initialize variables:
    tiny = 0.000000001
    L = np.zeros(len(folds))
    probs_border = np.zeros(len(folds))
    probs_nonborder = np.zeros(len(folds))

    # Load estimated depth and curvature distribution parameters:
    depth_border, curv_border, depth_nonborder, curv_nonborder = pickle.load(
        open(trained_file, "rb"))  # read pickled parameters in binary mode

    # Load depths, curvatures:
    depths, name = read_scalars(depth_file, True, True)
    curvatures, name = read_scalars(curvature_file, True, True)

    # Prep for below:
    n = 2
    twopiexp = (2*pi)**(n/2)
    border_sigmas = depth_border['sigmas'] * curv_border['sigmas']
    nonborder_sigmas = depth_nonborder['sigmas'] * curv_nonborder['sigmas']
    norm_border = 1 / (twopiexp * border_sigmas + tiny)
    norm_nonborder = 1 / (twopiexp * nonborder_sigmas + tiny)
    I = [i for i,x in enumerate(folds) if x != -1]

    N = depth_border['sigmas'].shape[0]
    for j in range(N):

        # Border:
        expB = depth_border['weights'][j] * \
            ((depths[I]-depth_border['means'][j])**2) / \
            depth_border['sigmas'][j]**2
        expB += curv_border['weights'][j] * \
            ((curvatures[I]-curv_border['means'][j])**2) / \
            curv_border['sigmas'][j]**2
        expB = -expB / 2
        probs_border[I] = probs_border[I] + norm_border[j] * np.exp(expB)

        # Non-border:
        expNB = depth_nonborder['weights'][j] * \
            ((depths[I]-depth_nonborder['means'][j])**2) / \
            depth_nonborder['sigmas'][j]**2
        expNB += curv_nonborder['weights'][j] * \
            ((curvatures[I]-curv_nonborder['means'][j])**2) / \
            curv_nonborder['sigmas'][j]**2
        expNB = -expNB / 2
        probs_nonborder[I] = probs_nonborder[I] + norm_nonborder[j] * np.exp(expNB)

    likelihoods = probs_border / (probs_nonborder + probs_border + tiny)
    likelihoods = likelihoods.tolist()

    #-------------------------------------------------------------------------
    # Return likelihoods and output file name
    #-------------------------------------------------------------------------
    if save_file:

        likelihoods_file = os.path.join(os.getcwd(), 'likelihoods.vtk')
        rewrite_scalars(depth_file, likelihoods_file, likelihoods,
                        'likelihoods', likelihoods)
        if not os.path.exists(likelihoods_file):
            raise(IOError(likelihoods_file + " not found"))

    else:
        likelihoods_file = None

    return likelihoods, likelihoods_file
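
A minimal, self-contained sketch (not part of mindboggle) of the idea behind compute_likelihood: score each vertex under a "border" and a "nonborder" Gaussian mixture and normalize, so likelihood = P_border / (P_border + P_nonborder). This uses a standard mixture weighting for clarity (the function above folds its weights into the exponent), and all parameter values here are made up:

import numpy as np

def mixture_score(x, means, sigmas, weights):
    """Weighted sum of 1-D Gaussian densities."""
    score = np.zeros_like(x, dtype=float)
    for m, s, w in zip(means, sigmas, weights):
        score += w * np.exp(-((x - m)**2) / (2 * s**2)) / (np.sqrt(2 * np.pi) * s)
    return score

tiny = 1e-9
depths = np.array([0.1, 0.5, 0.9])                        # toy depth values in [0,1]
p_border = mixture_score(depths, [0.8], [0.2], [1.0])     # hypothetical parameters
p_nonborder = mixture_score(depths, [0.2], [0.2], [1.0])  # hypothetical parameters
likelihoods = p_border / (p_border + p_nonborder + tiny)
print(likelihoods)  # near 1 where a vertex resembles the border class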
Code example #21
File: fundi.py  Project: ccraddock/mindboggle
def extract_fundi(folds, curv_file, depth_file, min_separation=10,
                  erode_ratio=0.1, erode_min_size=1, save_file=False):
    """
    Extract fundi from folds.

    A fundus is a branching curve that runs along the deepest and most
    highly curved portions of a fold.

    Steps ::
        1. Find fundus endpoints (outer anchors) with find_outer_anchors().
        2. Include inner anchor points.
        3. Connect anchor points using connect_points_erosion();
           inner anchors are removed if they result in endpoints.
        4. Optionally smooth with smooth_skeleton().

    Parameters
    ----------
    folds : numpy array or list of integers
        fold number for each vertex
    curv_file :  string
        surface mesh file in VTK format with mean curvature values
    depth_file :  string
        surface mesh file in VTK format with rescaled depth values
    min_separation : integer
        minimum number of edges between inner/outer anchor points
    erode_ratio : float
        fraction of indices to test for removal at each iteration
        in connect_points_erosion()
    erode_min_size : integer
        minimum number of vertices when considering erosion
        in connect_points_erosion()
    save_file : Boolean
        save output VTK file?

    Returns
    -------
    fundus_per_fold : list of integers
        fundus numbers for all vertices, labeled by fold
        (-1 for non-fundus vertices)
    n_fundi_in_folds :  integer
        number of fundi
    fundus_per_fold_file : string (if save_file)
        output VTK file with fundus numbers (-1 for non-fundus vertices)

    Examples
    --------
    >>> # Extract fundus from one or more folds:
    >>> single_fold = True
    >>> import os
    >>> from mindboggle.utils.io_vtk import read_scalars
    >>> from mindboggle.features.fundi import extract_fundi
    >>> from mindboggle.utils.plots import plot_surfaces
    >>> path = os.environ['MINDBOGGLE_DATA']
    >>> curv_file = os.path.join(path, 'arno', 'shapes', 'lh.pial.mean_curvature.vtk')
    >>> depth_file = os.path.join(path, 'arno', 'shapes', 'travel_depth_rescaled.vtk')
    >>> folds_file = os.path.join(path, 'arno', 'features', 'folds.vtk')
    >>> folds, name = read_scalars(folds_file, True, True)
    >>> if single_fold:
    >>>     fold_number = 2 #11
    >>>     folds[folds != fold_number] = -1
    >>> min_separation = 10
    >>> erode_ratio = 0.10
    >>> erode_min_size = 10
    >>> save_file = True
    >>> o1, o2, fundus_per_fold_file = extract_fundi(folds, curv_file,
    ...     depth_file, min_separation, erode_ratio, erode_min_size, save_file)
    >>> #
    >>> # View:
    >>> plot_surfaces(fundus_per_fold_file)

    """

    # Extract a skeleton to connect endpoints in a fold:
    import os
    import numpy as np
    from time import time

    from mindboggle.utils.io_vtk import read_scalars, read_vtk, rewrite_scalars
    from mindboggle.utils.compute import median_abs_dev
    from mindboggle.utils.paths import find_max_values
    from mindboggle.utils.mesh import find_neighbors_from_file, find_complete_faces
    from mindboggle.utils.paths import find_outer_anchors, connect_points_erosion

    if isinstance(folds, list):
        folds = np.array(folds)

    # Load values, inner anchor threshold, and neighbors:
    faces, u1,u2, points, npoints, curvs, u3,u4 = read_vtk(curv_file, True,True)
    depths, name = read_scalars(depth_file, True, True)
    values = curvs * depths
    values0 = [x for x in values if x > 0]
    thr = np.median(values0) + 2 * median_abs_dev(values0)
    neighbor_lists = find_neighbors_from_file(curv_file)

    #-------------------------------------------------------------------------
    # Loop through folds:
    #-------------------------------------------------------------------------
    t1 = time()
    skeletons = []
    unique_fold_IDs = [x for x in np.unique(folds) if x != -1]

    if len(unique_fold_IDs) == 1:
        print("Extract a fundus from 1 fold...")
    else:
        print("Extract a fundus from each of {0} folds...".
              format(len(unique_fold_IDs)))

    for fold_ID in unique_fold_IDs:
        indices_fold = [i for i,x in enumerate(folds) if x == fold_ID]
        if indices_fold:
            print('  Fold {0}:'.format(int(fold_ID)))

            #-----------------------------------------------------------------
            # Find outer anchor points on the boundary of the surface region,
            # to serve as fundus endpoints:
            #-----------------------------------------------------------------
            outer_anchors, tracks = find_outer_anchors(indices_fold,
                neighbor_lists, values, depths, min_separation)

            #-----------------------------------------------------------------
            # Find inner anchor points:
            #-----------------------------------------------------------------
            inner_anchors = find_max_values(points, values, min_separation, thr)

            #-----------------------------------------------------------------
            # Connect anchor points to create skeleton:
            #-----------------------------------------------------------------
            B = -1 * np.ones(npoints)
            B[indices_fold] = 1
            skeleton = connect_points_erosion(B, neighbor_lists,
                outer_anchors, inner_anchors, values,
                erode_ratio, erode_min_size, save_steps=[], save_vtk='')
            if skeleton:
                skeletons.extend(skeleton)

            #-----------------------------------------------------------------
            # Remove fundus vertices if they complete triangle faces:
            #-----------------------------------------------------------------
            Iremove = find_complete_faces(skeletons, faces)
            if Iremove:
                skeletons = list(frozenset(skeletons).difference(Iremove))

    indices = [x for x in skeletons if folds[x] != -1]
    fundus_per_fold = -1 * np.ones(npoints)
    fundus_per_fold[indices] = folds[indices]
    n_fundi_in_folds = len([x for x in np.unique(fundus_per_fold)
                             if x != -1])
    if n_fundi_in_folds == 1:
        sdum = 'fold fundus'
    else:
        sdum = 'fold fundi'
    print('  ...Extracted {0} {1} ({2:.2f} seconds)'.
          format(n_fundi_in_folds, sdum, time() - t1))

    #-------------------------------------------------------------------------
    # Return fundi, number of fundi, and file name:
    #-------------------------------------------------------------------------
    fundus_per_fold_file = None
    if n_fundi_in_folds > 0:
        fundus_per_fold = [int(x) for x in fundus_per_fold]
        if save_file:
            fundus_per_fold_file = os.path.join(os.getcwd(),
                                                'fundus_per_fold.vtk')
            rewrite_scalars(curv_file, fundus_per_fold_file, fundus_per_fold,
                            'fundi', folds)
            if not os.path.exists(fundus_per_fold_file):
                raise(IOError(fundus_per_fold_file + " not found"))

    return fundus_per_fold, n_fundi_in_folds, fundus_per_fold_file
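
The inner-anchor threshold above is a robust statistic: the median of the positive curvature*depth values plus twice their median absolute deviation. A minimal sketch, with a toy median_abs_dev standing in for mindboggle.utils.compute.median_abs_dev (which may scale differently):

import numpy as np

def median_abs_dev(x):
    """Unscaled median absolute deviation."""
    x = np.asarray(x)
    return np.median(np.abs(x - np.median(x)))

values = np.array([-0.2, 0.1, 0.3, 0.5, 0.7, 2.0])  # toy curvature * depth values
values0 = values[values > 0]
thr = np.median(values0) + 2 * median_abs_dev(values0)
print(thr)  # vertices with values above thr are inner anchor candidates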
Code example #22
File: mesh.py  Project: jsalva/mindboggle
def rescale_by_label(input_vtk,
                     labels_or_file,
                     save_file=False,
                     output_filestring='rescaled_scalars'):
    """
    Rescale scalars for each label (such as depth values within each fold).

    Normalizes the scalar values of a VTK file by the maximum value
    within each label (such as the maximum depth within each fold).

    Parameters
    ----------
    input_vtk : string
        name of VTK file with a scalar value for each vertex
    labels_or_file : list or string
        label number for each vertex or name of VTK file with index scalars
    save_file : Boolean
        save output VTK file?
    output_filestring : string (if save_file)
        name of output file

    Returns
    -------
    rescaled_scalars : list of floats
        scalar values rescaled for each label, for label numbers not equal to -1
    rescaled_scalars_file : string (if save_file)
        name of output VTK file with rescaled scalar values for each label

    Examples
    --------
    >>> # Rescale depths within each label (fold):
    >>> import os
    >>> from mindboggle.utils.mesh import rescale_by_label
    >>> from mindboggle.utils.io_vtk import read_scalars, rewrite_scalars
    >>> from mindboggle.utils.plots import plot_surfaces
    >>> path = os.environ['MINDBOGGLE_DATA']
    >>> input_vtk = os.path.join(path, 'arno', 'shapes', 'lh.pial.travel_depth.vtk')
    >>> labels_or_file = os.path.join(path, 'arno', 'features', 'subfolds.vtk')
    >>> save_file = True
    >>> output_filestring = 'rescaled_scalars'
    >>> #
    >>> rescaled_scalars, rescaled_scalars_file = rescale_by_label(input_vtk,
    >>>     labels_or_file, save_file, output_filestring)
    >>> #
    >>> # View rescaled scalar values per fold:
    >>> folds_file = os.path.join(path, 'arno', 'features', 'folds.vtk')
    >>> folds, name = read_scalars(folds_file)
    >>> #
    >>> rewrite_scalars(rescaled_scalars_file, rescaled_scalars_file,
    >>>                 rescaled_scalars, 'rescaled_depths', folds)
    >>> plot_surfaces(rescaled_scalars_file)

    """
    import os
    import numpy as np
    from mindboggle.utils.io_vtk import read_scalars, rewrite_scalars

    # Load scalars and vertex neighbor lists:
    scalars, name = read_scalars(input_vtk, True, True)
    print("  Rescaling scalar values within each label...")

    # Load label numbers:
    if isinstance(labels_or_file, str):
        labels, name = read_scalars(labels_or_file, True, True)
    elif isinstance(labels_or_file, list):
        labels = labels_or_file
    unique_labels = np.unique(labels)
    unique_labels = [x for x in unique_labels if x >= 0]

    # Loop through labels:
    for label in unique_labels:
        #print("  Rescaling scalar values within label {0} of {1} labels...".format(
        #    int(label), len(unique_labels)))
        indices = [i for i, x in enumerate(labels) if x == label]
        if indices:

            # Rescale by the maximum label scalar value:
            scalars[indices] = scalars[indices] / np.max(scalars[indices])

    rescaled_scalars = scalars.tolist()

    #-------------------------------------------------------------------------
    # Return rescaled scalars and file name
    #-------------------------------------------------------------------------
    if save_file:

        rescaled_scalars_file = os.path.join(os.getcwd(),
                                             output_filestring + '.vtk')
        rewrite_scalars(input_vtk, rescaled_scalars_file, rescaled_scalars,
                        'rescaled_scalars', labels)
        if not os.path.exists(rescaled_scalars_file):
            raise (IOError(rescaled_scalars_file + " not found"))

    else:
        rescaled_scalars_file = None

    return rescaled_scalars, rescaled_scalars_file
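
A toy numpy sketch of the per-label rescaling performed above: within each label, values are divided by that label's maximum, so every label's rescaled values top out at 1.0:

import numpy as np

scalars = np.array([1.0, 2.0, 4.0, 3.0, 6.0])
labels = np.array([0, 0, 0, 1, 1])
rescaled = scalars.copy()
for label in np.unique(labels):
    idx = np.where(labels == label)[0]
    rescaled[idx] = scalars[idx] / np.max(scalars[idx])
print(rescaled)  # [0.25 0.5  1.   0.5  1.  ]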
Code example #23
def realign_boundaries_to_fundus_lines(
    surf_file, init_label_file, fundus_lines_file, out_label_file=None):
    """
    Fix label boundaries to fundus lines.

    Parameters
    ----------
    surf_file : file containing the surface geometry in vtk format
    init_label_file : file containing scalars that represent the
                      initial guess at labels
    fundus_lines_file : file containing scalars representing fundus lines
    out_label_file : if specified, the realigned labels will be written to
                     this file

    Returns
    -------
    numpy array representing the realigned label for each surface vertex.
    """

    import numpy as np
    from mindboggle.labels.labels import extract_borders
    import mindboggle.utils.graph as go
    from mindboggle.utils.io_vtk import read_vtk, read_scalars, write_vtk
    from mindboggle.utils.mesh import find_neighbors

    ## read files
    faces, _, indices, points, num_points, _, _, _ = read_vtk(
        surf_file, return_first=True, return_array=True)
    indices = range(num_points)

    init_labels, _ = read_scalars(init_label_file,
                                  return_first=True, return_array=True)

    fundus_lines, _ = read_scalars(fundus_lines_file,
                                   return_first=True, return_array=True)

    ## setup seeds from initial label boundaries
    neighbor_lists = find_neighbors(faces, num_points)

    # extract all vertices that are on a boundary between labels
    boundary_indices, label_pairs, _ = extract_borders(
        indices, init_labels, neighbor_lists,
        return_label_pairs=True)

    # split boundary vertices into segments with common boundary pairs.
    boundary_segments = {}
    for boundary_index, label_pair in zip(boundary_indices, label_pairs):
        key = ((label_pair[0], label_pair[1]) if label_pair[0] < label_pair[1]
               else (label_pair[1], label_pair[0]))
        if key not in boundary_segments:
            boundary_segments[key] = []

        boundary_segments[key].append(boundary_index)

    boundary_matrix, boundary_matrix_keys = _build_boundary_matrix(
        boundary_segments, num_points)

    # build the affinity matrix
    affinity_matrix = go.weight_graph(
       np.array(points), indices, np.array(faces), sigma=10, add_to_graph=False)

    ## propagate boundaries to fundus line vertices
    learned_matrix = _propagate_labels(
       affinity_matrix, boundary_matrix, boundary_indices, 1000, 1)

    # find fundus line vertices (scalar > 0.5 marks a fundus line):
    fundus_line_indices = [i for i, x in enumerate(fundus_lines) if x > 0.5]

    # TODO: this currently only works for fundus lines that tile the
    # surface into connected components (which is fine when you want
    # to test this method on fundus lines generated from manual
    # labeling). However, to work on real data, fundus lines will
    # need to be connected together using shortest paths.

    # split surface into connected components
    connected_component_faces = _remove_boundary_faces(
        points, faces, fundus_line_indices)

    # label components based on most probable label assignment
    new_labels = _label_components(
        connected_component_faces, num_points, boundary_indices, learned_matrix,
        boundary_matrix_keys)

    # propagate new labels to fill holes
    label_matrix, label_map = _build_label_matrix(new_labels)
    new_learned_matrix = _propagate_labels(
        affinity_matrix, label_matrix,
        [i for i in range(num_points) if new_labels[i] >= 0], 100, 1)

    # assign most probable labels
    for idx in [i for i in range(num_points) if new_labels[i] == -1]:
        max_idx = np.argmax(new_learned_matrix[idx])
        new_labels[idx] = label_map[max_idx]

    # save
    if out_label_file is not None:
        write_vtk(out_label_file, points, faces=faces,
            scalars=new_labels.tolist())

    return new_labels
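
A minimal sketch of the boundary-segment grouping step above: boundary vertices are bucketed by their sorted label pair, so that (3, 5) and (5, 3) land in the same segment. Indices and pairs here are toy values:

boundary_indices = [10, 11, 12, 13]
label_pairs = [(3, 5), (5, 3), (3, 7), (7, 3)]

boundary_segments = {}
for index, pair in zip(boundary_indices, label_pairs):
    key = tuple(sorted(pair))  # order-insensitive key
    boundary_segments.setdefault(key, []).append(index)

print(boundary_segments)  # {(3, 5): [10, 11], (3, 7): [12, 13]}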
Code example #24
File: mesh.py  Project: jsalva/mindboggle
def rescale_by_neighborhood(input_vtk,
                            indices=[],
                            nedges=10,
                            p=99,
                            set_max_to_1=True,
                            save_file=False,
                            output_filestring='rescaled_scalars',
                            background_value=-1):
    """
    Rescale the scalar values of a VTK file by a percentile value
    in each vertex's surface mesh neighborhood.

    Parameters
    ----------
    input_vtk : string
        name of VTK file with a scalar value for each vertex
    indices : list of integers (optional)
        indices of scalars to normalize
    nedges : integer
        number of edges from vertex, defining the size of its neighborhood
    p : float in range of [0,100]
        percentile used to normalize each scalar
    set_max_to_1 : Boolean
        set all rescaled values greater than 1 to 1.0?
    save_file : Boolean
        save output VTK file?
    output_filestring : string (if save_file)
        name of output file
    background_value : integer
        background value

    Returns
    -------
    rescaled_scalars : list of floats
        rescaled scalar values
    rescaled_scalars_file : string (if save_file)
        name of output VTK file with rescaled scalar values

    Examples
    --------
    >>> import os
    >>> from mindboggle.utils.mesh import rescale_by_neighborhood
    >>> from mindboggle.utils.io_vtk import read_scalars, rewrite_scalars
    >>> from mindboggle.utils.plots import plot_surfaces
    >>> path = os.environ['MINDBOGGLE_DATA']
    >>> input_vtk = os.path.join(path, 'arno', 'shapes', 'lh.pial.travel_depth.vtk')
    >>> indices = []
    >>> nedges = 10
    >>> p = 99
    >>> set_max_to_1 = True
    >>> save_file = True
    >>> output_filestring = 'rescaled_scalars'
    >>> background_value = -1
    >>> #
    >>> rescaled_scalars, rescaled_scalars_file = rescale_by_neighborhood(input_vtk,
    >>>     indices, nedges, p, set_max_to_1, save_file, output_filestring, background_value)
    >>> #
    >>> # View rescaled scalar values per fold:
    >>> folds_file = os.path.join(path, 'arno', 'features', 'folds.vtk')
    >>> folds, name = read_scalars(folds_file)
    >>> #
    >>> rewrite_scalars(rescaled_scalars_file, rescaled_scalars_file,
    >>>                 rescaled_scalars, 'rescaled_depths', folds)
    >>> plot_surfaces(rescaled_scalars_file)

    """
    import os
    import numpy as np
    from mindboggle.utils.io_vtk import read_scalars, rewrite_scalars
    from mindboggle.utils.mesh import find_neighbors_from_file, find_neighborhood

    # Load scalars and vertex neighbor lists:
    scalars, name = read_scalars(input_vtk, True, True)
    if not indices:
        indices = [i for i, x in enumerate(scalars) if x != background_value]
    print("  Rescaling {0} scalar values by neighborhood...".format(
        len(indices)))
    neighbor_lists = find_neighbors_from_file(input_vtk)

    # Loop through vertices:
    rescaled_scalars = scalars.copy()
    for index in indices:

        # Determine the scalars in the vertex's neighborhood:
        neighborhood = find_neighborhood(neighbor_lists, [index], nedges)

        # Compute a high neighborhood percentile to normalize vertex's value:
        normalization_factor = np.percentile(scalars[neighborhood], p)
        rescaled_scalar = scalars[index] / normalization_factor
        rescaled_scalars[index] = rescaled_scalar

    # Make any rescaled value greater than 1 equal to 1:
    if set_max_to_1:
        rescaled_scalars[[x for x in indices if rescaled_scalars[x] > 1.0]] = 1

    rescaled_scalars = rescaled_scalars.tolist()

    #-------------------------------------------------------------------------
    # Return rescaled scalars and file name
    #-------------------------------------------------------------------------
    if save_file:

        rescaled_scalars_file = os.path.join(os.getcwd(),
                                             output_filestring + '.vtk')
        rewrite_scalars(input_vtk, rescaled_scalars_file, rescaled_scalars,
                        'rescaled_scalars')
        if not os.path.exists(rescaled_scalars_file):
            raise (IOError(rescaled_scalars_file + " not found"))

    else:
        rescaled_scalars_file = None

    return rescaled_scalars, rescaled_scalars_file
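
A toy sketch of the neighborhood normalization above: a vertex's value is divided by a high percentile of the values in its neighborhood, then clipped to 1. The neighborhood here is a hard-coded index list standing in for find_neighborhood() output:

import numpy as np

scalars = np.array([6.0, 5.0, 9.0, 10.0])
neighborhood = [1, 2, 3]  # stand-in for a mesh neighborhood of vertex 0
factor = np.percentile(scalars[neighborhood], 99)
rescaled = min(scalars[0] / factor, 1.0)
print(rescaled)  # vertex 0's value relative to its neighborhood's 99th percentile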
Code example #25
def evaluate_deep_features(features_file,
                           labels_file,
                           sulci_file='',
                           hemi='',
                           excludeIDs=[-1],
                           output_vtk_name='',
                           verbose=True):
    """
    Evaluate deep surface features by computing the minimum distance from each
    label border vertex to all of the feature vertices in the same sulcus,
    and from each feature vertex to all of the label border vertices in the
    same sulcus.  The label borders run along the deepest parts of sulci
    and correspond to fundi in the DKT cortical labeling protocol.

    Parameters
    ----------
    features_file : string
        VTK surface file with feature numbers for vertex scalars
    labels_file : string
        VTK surface file with label numbers for vertex scalars
    sulci_file : string
        VTK surface file with sulcus numbers for vertex scalars
    hemi : string
        hemisphere ('lh' or 'rh'), used to choose the protocol's sulcus
        label pair lists
    excludeIDs : list of integers
        feature/sulcus/label IDs to exclude (background set to -1)
    output_vtk_name : string
        if not empty, output a VTK file beginning with output_vtk_name that
        contains a surface with mean distances as scalars
    verbose : Boolean
        print mean distances to standard output?

    Returns
    -------
    feature_to_border_mean_distances : numpy array [number of features x 1]
        mean distance from each feature to sulcus label border
    feature_to_border_sd_distances : numpy array [number of features x 1]
        standard deviations of feature-to-border distances
    feature_to_border_distances_vtk : string
        VTK surface file containing feature-to-border distances
    border_to_feature_mean_distances : numpy array [number of features x 1]
        mean distances from each sulcus label border to feature
    border_to_feature_sd_distances : numpy array [number of features x 1]
        standard deviations of border-to-feature distances
    border_to_feature_distances_vtk : string
        VTK surface file containing border-to-feature distances

    """
    import os
    import sys
    import numpy as np
    from mindboggle.utils.io_vtk import read_vtk, read_scalars, write_vtk
    from mindboggle.utils.mesh import find_neighbors, remove_faces
    from mindboggle.utils.segment import extract_borders
    from mindboggle.utils.compute import source_to_target_distances
    from mindboggle.LABELS import DKTprotocol

    dkt = DKTprotocol()
    #-------------------------------------------------------------------------
    # Load labels, features, and sulci:
    #-------------------------------------------------------------------------
    faces, lines, indices, points, npoints, labels, scalar_names, \
        input_vtk = read_vtk(labels_file, True, True)
    features, name = read_scalars(features_file, True, True)
    if sulci_file:
        sulci, name = read_scalars(sulci_file, True, True)
        # List of indices to sulcus vertices:
        sulcus_indices = [i for i, x in enumerate(sulci) if x != -1]
        segmentIDs = sulci
        sulcus_faces = remove_faces(faces, sulcus_indices)
    else:
        sulcus_indices = range(len(labels))
        segmentIDs = []
        sulcus_faces = faces

    #-------------------------------------------------------------------------
    # Prepare neighbors, label pairs, border IDs, and outputs:
    #-------------------------------------------------------------------------
    # Calculate neighbor lists for all points:
    print('Find neighbors for all vertices...')
    neighbor_lists = find_neighbors(faces, npoints)

    # Find label border points in any of the sulci:
    print('Find label border points in any of the sulci...')
    border_indices, border_label_tuples, unique_border_label_tuples = \
        extract_borders(sulcus_indices, labels, neighbor_lists,
                        ignore_values=[], return_label_pairs=True)
    if not len(border_indices):
        sys.exit('There are no label border points!')

    # Initialize an array of label border IDs
    # (label border vertices that define sulci in the labeling protocol):
    print('Build an array of label border IDs...')
    label_borders = -1 * np.ones(npoints)

    if hemi == 'lh':
        nsulcus_lists = len(dkt.left_sulcus_label_pair_lists)
    else:
        nsulcus_lists = len(dkt.right_sulcus_label_pair_lists)
    feature_to_border_mean_distances = -1 * np.ones(nsulcus_lists)
    feature_to_border_sd_distances = -1 * np.ones(nsulcus_lists)
    border_to_feature_mean_distances = -1 * np.ones(nsulcus_lists)
    border_to_feature_sd_distances = -1 * np.ones(nsulcus_lists)
    feature_to_border_distances_vtk = ''
    border_to_feature_distances_vtk = ''

    #-------------------------------------------------------------------------
    # Loop through sulci:
    #-------------------------------------------------------------------------
    # For each list of sorted label pairs (corresponding to a sulcus):
    for isulcus, label_pairs in enumerate(dkt.sulcus_label_pair_lists):

        # Keep the border points with label pair labels:
        label_pair_border_indices = [
            x for i, x in enumerate(border_indices)
            if np.unique(border_label_tuples[i]).tolist() in label_pairs
        ]

        # Store the points as sulcus IDs in the border IDs array:
        if label_pair_border_indices:
            label_borders[label_pair_border_indices] = isulcus

    if len(np.unique(label_borders)) > 1:

        #---------------------------------------------------------------------
        # Construct a feature-to-border distance matrix and VTK file:
        #---------------------------------------------------------------------
        # Construct a distance matrix:
        print('Construct a feature-to-border distance matrix...')
        sourceIDs = features
        targetIDs = label_borders
        distances, distance_matrix = source_to_target_distances(
            sourceIDs, targetIDs, points, segmentIDs, excludeIDs)

        # Compute mean distances for each feature:
        nfeatures = min(np.shape(distance_matrix)[1], nsulcus_lists)
        for ifeature in range(nfeatures):
            feature_distances = [
                x for x in distance_matrix[:, ifeature] if x != -1
            ]
            feature_to_border_mean_distances[ifeature] = \
                np.mean(feature_distances)
            feature_to_border_sd_distances[ifeature] = \
                np.std(feature_distances)

        if verbose:
            print('Feature-to-border mean distances:')
            print(feature_to_border_mean_distances)
            print('Feature-to-border standard deviations of distances:')
            print(feature_to_border_sd_distances)

        # Write resulting feature-label border distances to VTK file:
        if output_vtk_name:
            feature_to_border_distances_vtk = os.path.join(
                os.getcwd(),
                output_vtk_name + '_feature_to_border_mean_distances.vtk')
            print('Write feature-to-border distances to {0}...'.format(
                feature_to_border_distances_vtk))
            write_vtk(feature_to_border_distances_vtk, points, [], [],
                      sulcus_faces, [distances],
                      ['feature-to-border_distances'], 'float')

        #---------------------------------------------------------------------
        # Construct a border-to-feature distance matrix and VTK file:
        #---------------------------------------------------------------------
        # Construct a distance matrix:
        print('Construct a border-to-feature distance matrix...')
        sourceIDs = label_borders
        targetIDs = features
        distances, distance_matrix = source_to_target_distances(
            sourceIDs, targetIDs, points, segmentIDs, excludeIDs)

        # Compute mean distances for each feature:
        nfeatures = min(np.shape(distance_matrix)[1], nsulcus_lists)
        for ifeature in range(nfeatures):
            border_distances = [
                x for x in distance_matrix[:, ifeature] if x != -1
            ]
            border_to_feature_mean_distances[ifeature] = \
                np.mean(border_distances)
            border_to_feature_sd_distances[ifeature] = \
                np.std(border_distances)

        if verbose:
            print('border-to-feature mean distances:')
            print(border_to_feature_mean_distances)
            print('border-to-feature standard deviations of distances:')
            print(border_to_feature_sd_distances)

        # Write resulting feature-label border distances to VTK file:
        if output_vtk_name:
            border_to_feature_distances_vtk = os.path.join(
                os.getcwd(),
                output_vtk_name + '_border_to_feature_mean_distances.vtk')
            print('Write border-to-feature distances to {0}...'.format(
                border_to_feature_distances_vtk))
            write_vtk(border_to_feature_distances_vtk, points, [], [],
                      sulcus_faces, [distances],
                      ['border-to-feature_distances'], 'float')

    #-------------------------------------------------------------------------
    # Return outputs:
    #-------------------------------------------------------------------------
    return feature_to_border_mean_distances, feature_to_border_sd_distances,\
           feature_to_border_distances_vtk,\
           border_to_feature_mean_distances, border_to_feature_sd_distances,\
           border_to_feature_distances_vtk
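
A minimal sketch of the distance summaries above: for each feature vertex, take the minimum Euclidean distance to any border vertex, then report the mean and standard deviation. Coordinates are toy values, and the segment/exclusion masking that source_to_target_distances performs is omitted:

import numpy as np

feature_points = np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0]])
border_points = np.array([[0.0, 1.0, 0.0], [2.0, 0.0, 0.0]])

# Pairwise distances (n_features x n_borders), then the row-wise minima:
diffs = feature_points[:, None, :] - border_points[None, :, :]
min_dists = np.sqrt((diffs**2).sum(axis=2)).min(axis=1)
print(min_dists.mean(), min_dists.std())  # 1.0 0.0 for these toy points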
Code example #26
File: mesh.py  Project: ccraddock/mindboggle
def rescale_by_label(input_vtk, labels_or_file, save_file=False,
                     output_filestring='rescaled_scalars'):
    """
    Rescale scalars for each label (such as depth values within each fold).

    Normalizes the scalar values of a VTK file by the maximum value
    within each label (such as the maximum depth within each fold).

    Parameters
    ----------
    input_vtk : string
        name of VTK file with a scalar value for each vertex
    labels_or_file : list or string
        label number for each vertex or name of VTK file with index scalars
    save_file : Boolean
        save output VTK file?
    output_filestring : string (if save_file)
        name of output file

    Returns
    -------
    rescaled_scalars : list of floats
        scalar values rescaled for each label, for label numbers not equal to -1
    rescaled_scalars_file : string (if save_file)
        name of output VTK file with rescaled scalar values for each label

    Examples
    --------
    >>> # Rescale depths within each label (fold):
    >>> import os
    >>> from mindboggle.utils.mesh import rescale_by_label
    >>> from mindboggle.utils.io_vtk import read_scalars, rewrite_scalars
    >>> from mindboggle.utils.plots import plot_surfaces
    >>> path = os.environ['MINDBOGGLE_DATA']
    >>> input_vtk = os.path.join(path, 'arno', 'shapes', 'lh.pial.travel_depth.vtk')
    >>> labels_or_file = os.path.join(path, 'arno', 'features', 'subfolds.vtk')
    >>> save_file = True
    >>> output_filestring = 'rescaled_scalars'
    >>> #
    >>> rescaled_scalars, rescaled_scalars_file = rescale_by_label(input_vtk,
    >>>     labels_or_file, save_file, output_filestring)
    >>> #
    >>> # View rescaled scalar values per fold:
    >>> folds_file = os.path.join(path, 'arno', 'features', 'folds.vtk')
    >>> folds, name = read_scalars(folds_file)
    >>> #
    >>> rewrite_scalars(rescaled_scalars_file, rescaled_scalars_file,
    >>>                 rescaled_scalars, 'rescaled_depths', folds)
    >>> plot_surfaces(rescaled_scalars_file)

    """
    import os
    import numpy as np
    from mindboggle.utils.io_vtk import read_scalars, rewrite_scalars

    # Load scalars and vertex neighbor lists:
    scalars, name = read_scalars(input_vtk, True, True)
    print("  Rescaling scalar values within each label...")

    # Load label numbers:
    if isinstance(labels_or_file, str):
        labels, name = read_scalars(labels_or_file, True, True)
    elif isinstance(labels_or_file, list):
        labels = labels_or_file
    unique_labels = np.unique(labels)
    unique_labels = [x for x in unique_labels if x >= 0]

    # Loop through labels:
    for label in unique_labels:
        #print("  Rescaling scalar values within label {0} of {1} labels...".format(
        #    int(label), len(unique_labels)))
        indices = [i for i,x in enumerate(labels) if x == label]
        if indices:

            # Rescale by the maximum label scalar value:
            scalars[indices] = scalars[indices] / np.max(scalars[indices])

    rescaled_scalars = scalars.tolist()

    #-------------------------------------------------------------------------
    # Return rescaled scalars and file name
    #-------------------------------------------------------------------------
    if save_file:

        rescaled_scalars_file = os.path.join(os.getcwd(), output_filestring + '.vtk')
        rewrite_scalars(input_vtk, rescaled_scalars_file,
                        rescaled_scalars, 'rescaled_scalars', labels)
        if not os.path.exists(rescaled_scalars_file):
            raise(IOError(rescaled_scalars_file + " not found"))

    else:
        rescaled_scalars_file = None

    return rescaled_scalars, rescaled_scalars_file
Code example #27
def realign_boundaries_to_fundus_lines(
    surf_file, init_label_file, fundus_lines_file, thickness_file,
    out_label_file=None):
    """
    Fix label boundaries to fundus lines.

    Parameters
    ----------
    surf_file : file containing the surface geometry in vtk format
    init_label_file : file containing scalars that represent the
                      initial guess at labels
    fundus_lines_file : file containing scalars representing fundus lines
    thickness_file : file containing cortical thickness scalar data
                     (for masking out the medial wall only)
    out_label_file : if specified, the realigned labels will be written to
                     this file

    Returns
    -------
    numpy array representing the realigned label for each surface vertex.
    """

    import numpy as np
    from mindboggle.utils.segment import extract_borders
    import mindboggle.utils.graph as go
    from mindboggle.utils.io_vtk import read_vtk, read_scalars, write_vtk
    from mindboggle.utils.mesh import find_neighbors
    import propagate_fundus_lines

    ## read files
    faces, _, indices, points, num_points, _, _, _ = read_vtk(
        surf_file, return_first=True, return_array=True)
    indices = range(num_points)

    init_labels, _ = read_scalars(init_label_file,
                                  return_first=True, return_array=True)

    fundus_lines, _ = read_scalars(fundus_lines_file,
                                   return_first=True, return_array=True)

    thickness, _ = read_scalars(thickness_file,
                             return_first=True, return_array=True)

    # remove labels from vertices with zero thickness (get around
    # DKT40 annotations having the label '3' for all the Corpus
    # Callosum vertices).
    cc_inds = [x for x in indices if thickness[x] < 0.001]
    init_labels[cc_inds] = 0

    ## setup seeds from initial label boundaries
    neighbor_lists = find_neighbors(faces, num_points)

    # extract all vertices that are on a boundary between labels
    boundary_indices, label_pairs, _ = extract_borders(
        indices, init_labels, neighbor_lists,
        return_label_pairs=True)

    # split boundary vertices into segments with common boundary pairs.
    boundary_segments = {}
    for boundary_index, label_pair in zip(boundary_indices, label_pairs):
        key = ((label_pair[0], label_pair[1]) if label_pair[0] < label_pair[1]
               else (label_pair[1], label_pair[0]))
        if key not in boundary_segments:
            boundary_segments[key] = []

        boundary_segments[key].append(boundary_index)

    boundary_matrix, boundary_matrix_keys = _build_boundary_matrix(
        boundary_segments, num_points)

    # build the affinity matrix
    affinity_matrix = go.weight_graph(
       np.array(points), indices, np.array(faces), sigma=10, add_to_graph=False)

    ## propagate boundaries to fundus line vertices
    learned_matrix = _propagate_labels(
       affinity_matrix, boundary_matrix, boundary_indices, 100, 1)

    # find fundus line vertices (scalar > 0.5 marks a fundus line):
    fundus_line_indices = [i for i, x in enumerate(fundus_lines) if x > 0.5]

    # tile the surface into connected components delimited by fundus lines
    closed_fundus_lines, _, _ = propagate_fundus_lines.propagate_fundus_lines(
        points, faces, fundus_line_indices, thickness)

    closed_fundus_line_indices = np.where(closed_fundus_lines > 0)[0]

    # split surface into connected components
    connected_component_faces = _remove_boundary_faces(
        points, faces, closed_fundus_line_indices)

    # label components based on most probable label assignment
    new_labels = _label_components(
        connected_component_faces, num_points, boundary_indices, learned_matrix,
        boundary_matrix_keys)

    # propagate new labels to fill holes
    label_matrix, label_map = _build_label_matrix(new_labels)
    new_learned_matrix = _propagate_labels(
        affinity_matrix, label_matrix,
        [i for i in range(num_points) if new_labels[i] >= 0], 100, 1)

    # assign most probable labels
    for idx in [i for i in range(num_points) if new_labels[i] == -1]:
        max_idx = np.argmax(new_learned_matrix[idx])
        new_labels[idx] = label_map[max_idx]

    # save
    if out_label_file is not None:
        write_vtk(out_label_file, points, faces=faces,
                  scalars=[int(x) for x in new_labels], scalar_type='int')

    return new_labels
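
A toy sketch of the medial-wall masking step above: labels at vertices with near-zero cortical thickness are reset to 0 before boundaries are extracted (working around DKT40 annotations labeling the corpus callosum):

import numpy as np

thickness = np.array([2.5, 0.0, 3.1, 0.0005])
init_labels = np.array([3, 3, 17, 3])
cc_inds = [i for i, t in enumerate(thickness) if t < 0.001]
init_labels[cc_inds] = 0
print(init_labels)  # [ 3  0 17  0]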
Code example #28
File: mesh.py  Project: ccraddock/mindboggle
def rescale_by_neighborhood(input_vtk, indices=[], nedges=10, p=99,
    set_max_to_1=True, save_file=False, output_filestring='rescaled_scalars',
    background_value=-1):
    """
    Rescale the scalar values of a VTK file by a percentile value
    in each vertex's surface mesh neighborhood.

    Parameters
    ----------
    input_vtk : string
        name of VTK file with a scalar value for each vertex
    indices : list of integers (optional)
        indices of scalars to normalize
    nedges : integer
        number of edges from vertex, defining the size of its neighborhood
    p : float in range of [0,100]
        percentile used to normalize each scalar
    set_max_to_1 : Boolean
        set all rescaled values greater than 1 to 1.0?
    save_file : Boolean
        save output VTK file?
    output_filestring : string (if save_file)
        name of output file
    background_value : integer
        background value

    Returns
    -------
    rescaled_scalars : list of floats
        rescaled scalar values
    rescaled_scalars_file : string (if save_file)
        name of output VTK file with rescaled scalar values

    Examples
    --------
    >>> import os
    >>> from mindboggle.utils.mesh import rescale_by_neighborhood
    >>> from mindboggle.utils.io_vtk import read_scalars, rewrite_scalars
    >>> from mindboggle.utils.plots import plot_surfaces
    >>> path = os.environ['MINDBOGGLE_DATA']
    >>> input_vtk = os.path.join(path, 'arno', 'shapes', 'lh.pial.travel_depth.vtk')
    >>> indices = []
    >>> nedges = 10
    >>> p = 99
    >>> set_max_to_1 = True
    >>> save_file = True
    >>> output_filestring = 'rescaled_scalars'
    >>> background_value = -1
    >>> #
    >>> rescaled_scalars, rescaled_scalars_file = rescale_by_neighborhood(input_vtk,
    >>>     indices, nedges, p, set_max_to_1, save_file, output_filestring, background_value)
    >>> #
    >>> # View rescaled scalar values per fold:
    >>> folds_file = os.path.join(path, 'arno', 'features', 'folds.vtk')
    >>> folds, name = read_scalars(folds_file)
    >>> #
    >>> rewrite_scalars(rescaled_scalars_file, rescaled_scalars_file,
    >>>                 rescaled_scalars, 'rescaled_depths', folds)
    >>> plot_surfaces(rescaled_scalars_file)

    """
    import os
    import numpy as np
    from mindboggle.utils.io_vtk import read_scalars, rewrite_scalars
    from mindboggle.utils.mesh import find_neighbors_from_file, find_neighborhood

    # Load scalars and vertex neighbor lists:
    scalars, name = read_scalars(input_vtk, True, True)
    if not indices:
        indices = [i for i,x in enumerate(scalars) if x != background_value]
    print("  Rescaling {0} scalar values by neighborhood...".format(len(indices)))
    neighbor_lists = find_neighbors_from_file(input_vtk)

    # Loop through vertices:
    rescaled_scalars = scalars.copy()
    for index in indices:

        # Determine the scalars in the vertex's neighborhood:
        neighborhood = find_neighborhood(neighbor_lists, [index], nedges)

        # Compute a high neighborhood percentile to normalize vertex's value:
        normalization_factor = np.percentile(scalars[neighborhood], p)
        rescaled_scalar = scalars[index] / normalization_factor
        rescaled_scalars[index] = rescaled_scalar

    # Make any rescaled value greater than 1 equal to 1:
    if set_max_to_1:
        rescaled_scalars[[x for x in indices if rescaled_scalars[x] > 1.0]] = 1

    rescaled_scalars = rescaled_scalars.tolist()

    #-------------------------------------------------------------------------
    # Return rescaled scalars and file name
    #-------------------------------------------------------------------------
    if save_file:

        rescaled_scalars_file = os.path.join(os.getcwd(), output_filestring + '.vtk')
        rewrite_scalars(input_vtk, rescaled_scalars_file,
                        rescaled_scalars, 'rescaled_scalars')
        if not os.path.exists(rescaled_scalars_file):
            raise(IOError(rescaled_scalars_file + " not found"))

    else:
        rescaled_scalars_file = None

    return rescaled_scalars, rescaled_scalars_file
Code example #29
File: io_table.py  Project: TankThinkLabs/mindboggle
def write_vertex_measures(
    table_file,
    labels_or_file,
    sulci=[],
    fundi=[],
    affine_transform_file="",
    transform_format="itk",
    area_file="",
    mean_curvature_file="",
    travel_depth_file="",
    geodesic_depth_file="",
    convexity_file="",
    thickness_file="",
    delimiter=",",
):
    """
    Make a table of shape values per vertex.

    Parameters
    ----------
    table_file : output filename (without path)
    labels_or_file : list or string
        label number for each vertex or name of VTK file with index scalars
    sulci :  list of integers
        indices to sulci, one per vertex, with -1 indicating no sulcus
    fundi :  list of integers
        indices to fundi, one per vertex, with -1 indicating no fundus
    affine_transform_file : string
        affine transform file to standard space
    transform_format : string
        format for transform file
        Ex: 'txt' for text, 'itk' for ITK, and 'mat' for Matlab format
    area_file :  string
        name of VTK file with surface area scalar values
    mean_curvature_file :  string
        name of VTK file with mean curvature scalar values
    travel_depth_file :  string
        name of VTK file with travel depth scalar values
    geodesic_depth_file :  string
        name of VTK file with geodesic depth scalar values
    convexity_file :  string
        name of VTK file with convexity scalar values
    thickness_file :  string
        name of VTK file with thickness scalar values
    delimiter : string
        delimiter between columns, such as ','

    Returns
    -------
    shape_table : table file name for vertex shape values

    Examples
    --------
    >>> import os
    >>> from mindboggle.utils.io_vtk import read_scalars
    >>> from mindboggle.tables.all_shapes import write_vertex_measures
    >>> #
    >>> table_file = 'vertex_shapes.csv'
    >>> path = os.environ['MINDBOGGLE_DATA']
    >>> labels_or_file = os.path.join(path, 'arno', 'labels', 'lh.labels.DKT25.manual.vtk')
    >>> sulci_file = os.path.join(path, 'arno', 'features', 'sulci.vtk')
    >>> fundi_file = os.path.join(path, 'arno', 'features', 'fundi.vtk')
    >>> sulci, name = read_scalars(sulci_file)
    >>> fundi, name = read_scalars(fundi_file)
    >>> affine_transform_file = os.path.join(path, 'arno', 'mri',
    >>>     't1weighted_brain.MNI152Affine.txt')
    >>> transform_format = 'itk'
    >>> area_file = os.path.join(path, 'arno', 'shapes', 'lh.pial.area.vtk')
    >>> mean_curvature_file = os.path.join(path, 'arno', 'shapes', 'lh.pial.mean_curvature.vtk')
    >>> travel_depth_file = os.path.join(path, 'arno', 'shapes', 'lh.pial.travel_depth.vtk')
    >>> geodesic_depth_file = os.path.join(path, 'arno', 'shapes', 'lh.pial.geodesic_depth.vtk')
    >>> convexity_file = ''
    >>> thickness_file = ''
    >>> delimiter = ','
    >>> #
    >>> write_vertex_measures(table_file, labels_or_file, sulci, fundi,
    >>>     affine_transform_file, transform_format, area_file,
    >>>     mean_curvature_file, travel_depth_file, geodesic_depth_file,
    >>>     convexity_file, thickness_file, delimiter)

    """
    import os
    import numpy as np
    from mindboggle.utils.io_vtk import read_scalars, read_vtk, apply_affine_transform
    from mindboggle.utils.io_table import write_columns

    # Make sure inputs are lists:
    if isinstance(labels_or_file, np.ndarray):
        labels = labels_or_file.tolist()
    elif isinstance(labels_or_file, list):
        labels = labels_or_file
    elif isinstance(labels_or_file, str):
        labels, name = read_scalars(labels_or_file)
    if isinstance(sulci, np.ndarray):
        sulci = sulci.tolist()
    if isinstance(fundi, np.ndarray):
        fundi = fundi.tolist()

    # Feature names and corresponding feature lists:
    feature_names = ["label", "sulcus", "fundus"]
    feature_lists = [labels, sulci, fundi]

    # Shape names corresponding to shape files below:
    shape_names = ["area", "mean curvature", "travel depth", "geodesic depth", "convexity", "thickness"]

    # Load shape files as a list of numpy arrays of per-vertex shape values:
    shape_files = [
        area_file,
        mean_curvature_file,
        travel_depth_file,
        geodesic_depth_file,
        convexity_file,
        thickness_file,
    ]

    # Append columns of per-vertex scalar values:
    columns = []
    column_names = []
    for ifeature, values in enumerate(feature_lists):
        if values:
            columns.append(values)
            column_names.append(feature_names[ifeature])

    first_pass = True
    for ishape, shape_file in enumerate(shape_files):
        if os.path.exists(shape_file):
            if first_pass:
                u1, u2, u3, points, u4, scalars, u5, u6 = read_vtk(shape_file)
                columns.append(points)
                column_names.append("coordinates")
                first_pass = False
                if affine_transform_file:
                    affine_points, foo1 = apply_affine_transform(affine_transform_file, points, transform_format)
                    columns.append(affine_points)
                    column_names.append("coordinates in standard space")
            else:
                scalars, name = read_scalars(shape_file)
            if len(scalars):
                columns.append(scalars)
                column_names.append(shape_names[ishape])

    # Prepend with column of indices and write table
    shapes_table = os.path.join(os.getcwd(), table_file)
    write_columns(range(len(columns[0])), "index", shapes_table, delimiter)
    write_columns(columns, column_names, shapes_table, delimiter, quote=True, input_table=shapes_table)

    return shapes_table
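
A standard-library sketch of the table layout produced above: an index column followed by the per-vertex feature and shape columns. mindboggle's write_columns handles this internally; the csv module is used here only to illustrate the row-wise structure, with made-up values:

import csv

column_names = ['label', 'sulcus', 'travel depth']
columns = [[11, 11, 22], [-1, 0, 0], [0.1, 0.4, 0.2]]

with open('vertex_shapes_sketch.csv', 'w', newline='') as f:
    writer = csv.writer(f)
    writer.writerow(['index'] + column_names)
    for i, row in enumerate(zip(*columns)):
        writer.writerow([i] + list(row))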
Code example #30
def spectrum_per_label(vtk_file, spectrum_size=10, exclude_labels=[-1],
                       normalization='area', area_file='',
                       largest_segment=True):
    """
    Compute Laplace-Beltrami spectrum per labeled region in a file.

    Parameters
    ----------
    vtk_file : string
        name of VTK surface mesh file containing index scalars (labels)
    spectrum_size : integer
        number of eigenvalues to be computed (the length of the spectrum)
    exclude_labels : list of integers
        labels to be excluded
    normalization : string
        the method used to normalize eigenvalues ('area' or None)
        if "area", use area of the 2D structure as in Reuter et al. 2006
    area_file :  string
        name of VTK file with surface area scalar values
    largest_segment :  Boolean
        compute spectrum only for largest segment with a given label?

    Returns
    -------
    spectrum_lists : list of lists
        first eigenvalues for each label's Laplace-Beltrami spectrum
    label_list : list of integers
        list of unique labels for which spectra are obtained

    Examples
    --------
    >>> # Uncomment "if label==22:" below to run example:
    >>> # Spectrum for Twins-2-1 left postcentral (22) pial surface:
    >>> import os
    >>> from mindboggle.shapes.laplace_beltrami import spectrum_per_label
    >>> path = os.environ['MINDBOGGLE_DATA']
    >>> vtk_file = os.path.join(path, 'arno', 'labels', 'lh.labels.DKT31.manual.vtk')
    >>> area_file = os.path.join(path, 'arno', 'shapes', 'lh.pial.area.vtk')
    >>> spectrum_size = 6
    >>> exclude_labels = [0]  #[-1]
    >>> largest_segment = True
    >>> spectrum_per_label(vtk_file, spectrum_size, exclude_labels, None,
    >>>                    area_file, largest_segment)
    ([[6.3469513010430304e-18,
       0.0005178862383467463,
       0.0017434911095630772,
       0.003667561767487686,
       0.005429017880363784,
       0.006309346984678924]],
     [22])

    """
    from mindboggle.utils.io_vtk import read_vtk, read_scalars
    from mindboggle.utils.mesh import remove_faces, reindex_faces_points
    from mindboggle.shapes.laplace_beltrami import fem_laplacian,\
        spectrum_of_largest

    # Read VTK surface mesh file:
    faces, u1, u2, points, u4, labels, u5, u6 = read_vtk(vtk_file)

    # Area file:
    if area_file:
        areas, u1 = read_scalars(area_file)
    else:
        areas = None

    # Loop through labeled regions:
    ulabels = []
    [ulabels.append(int(x)) for x in labels if x not in ulabels
     if x not in exclude_labels]
    label_list = []
    spectrum_lists = []
    for label in ulabels:
      #if label == 22:
      #  print("DEBUG: COMPUTE FOR ONLY ONE LABEL")

        # Determine the indices per label:
        Ilabel = [i for i,x in enumerate(labels) if x == label]
        print('{0} vertices for label {1}'.format(len(Ilabel), label))

        # Remove background faces:
        pick_faces = remove_faces(faces, Ilabel)
        pick_faces, pick_points, o1 = reindex_faces_points(pick_faces, points)

        # Compute Laplace-Beltrami spectrum for the label:
        if largest_segment:
            exclude_labels_inner = [-1]
            spectrum = spectrum_of_largest(pick_points, pick_faces,
                                           spectrum_size,
                                           exclude_labels_inner,
                                           normalization, areas)
        else:
            spectrum = fem_laplacian(pick_points, pick_faces,
                                     spectrum_size, normalization)

        # Append to a list of lists of spectra:
        spectrum_lists.append(spectrum)
        label_list.append(label)

    return spectrum_lists, label_list
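
For intuition about the spectra above, here is a minimal sketch that computes the low end of a Laplacian spectrum for a tiny mesh. It uses a combinatorial graph Laplacian rather than the linear FEM Laplace-Beltrami operator that fem_laplacian builds, so the numbers differ, but the structure (smallest eigenvalue near zero for a connected mesh) is the same:

import numpy as np

# A square with one diagonal: 4 vertices, 5 edges.
edges = [(0, 1), (1, 2), (2, 3), (3, 0), (0, 2)]
n = 4
A = np.zeros((n, n))
for i, j in edges:
    A[i, j] = A[j, i] = 1.0
L = np.diag(A.sum(axis=1)) - A       # combinatorial graph Laplacian

eigenvalues = np.linalg.eigvalsh(L)  # ascending order
print(eigenvalues[:3])               # first eigenvalue is ~0 (connected mesh)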
Code example #31
File: io_table.py  Project: TankThinkLabs/mindboggle
def write_average_face_values_per_label(
    input_indices_vtk, input_values_vtk="", area_file="", output_stem="", exclude_values=[-1], background_value=-1
):
    """
    Write out a separate table of average face values for each integer
    (> -1) in (the first) scalar list of an input VTK file.
    Optionally average values drawn from a second VTK file.

    Parameters
    ----------
    input_indices_vtk : string
        path of the input VTK file that contains indices as scalars
    input_values_vtk : string
        path of the input VTK file that contains values as scalars
    area_file : string
        path of an input VTK file with surface area scalars
        (if given, each vertex value is divided by its area)
    output_stem : string
        path and stem of the output table files
    exclude_values : list or array
        values to exclude
    background_value : integer or float
        background value

    Examples
    --------
    >>> import os
    >>> from mindboggle.utils.io_table import write_average_face_values_per_label
    >>> path = os.environ['MINDBOGGLE_DATA']
    >>> input_indices_vtk = os.path.join(path, 'allen', 'labels', 'lh.DKTatlas100.gcs.vtk')
    >>> input_values_vtk = os.path.join(path, 'allen', 'shapes', 'lh.thickness.vtk')
    >>> area_file = os.path.join(path, 'allen', 'shapes', 'lh.pial.area.vtk')
    >>> output_stem = 'labels_thickness'
    >>> exclude_values = [-1]
    >>> background_value = -1
    >>> #
    >>> write_average_face_values_per_label(input_indices_vtk,
    >>>     input_values_vtk, area_file, output_stem, exclude_values, background_value)
    >>> #
    >>> # View:
    >>> #example_vtk = os.path.join(os.getcwd(), output_stem + '0.vtk')
    >>> #from mindboggle.utils.plots import plot_vtk
    >>> #plot_vtk(example_vtk)

    """
    import os
    import numpy as np
    from mindboggle.utils.io_vtk import read_scalars, read_vtk, write_vtk
    from mindboggle.utils.io_table import write_columns
    from mindboggle.utils.mesh import remove_faces

    # Load VTK file:
    faces, lines, indices, points, npoints, scalars, scalar_names, foo1 = read_vtk(input_indices_vtk, True, True)
    if area_file:
        area_scalars, name = read_scalars(area_file, True, True)
    print("Explode the scalar list in {0}".format(os.path.basename(input_indices_vtk)))
    if input_values_vtk != input_indices_vtk:
        values, name = read_scalars(input_values_vtk, True, True)
        print(
            "Explode the scalar list of values in {0} "
            "with the scalar list of indices in {1}".format(
                os.path.basename(input_values_vtk), os.path.basename(input_indices_vtk)
            )
        )
    else:
        values = np.copy(scalars)

    # Loop through unique (non-excluded) scalar values:
    unique_scalars = [int(x) for x in np.unique(scalars) if x not in exclude_values]
    for scalar in unique_scalars:

        # Create array and indices for scalar value:
        select_scalars = np.copy(scalars)
        select_scalars[scalars != scalar] = background_value
        scalar_indices = [i for i, x in enumerate(select_scalars) if x == scalar]

        # Keep only faces whose vertices all have this scalar value:
        new_faces = remove_faces(faces, scalar_indices)
        print("  Scalar {0}: {1} vertices".format(scalar, len(scalar_indices)))

        # ---------------------------------------------------------------------
        # For each face, average vertex values:
        # ---------------------------------------------------------------------
        output_table = os.path.join(os.getcwd(), output_stem + str(scalar) + ".csv")
        columns = []
        for face in new_faces:
            face_values = []
            for index in face:
                if area_file:
                    face_values.append(values[index] / area_scalars[index])
                else:
                    face_values.append(values[index])
            columns.append(np.mean(face_values))

        # -----------------------------------------------------------------
        # Write to table:
        # -----------------------------------------------------------------
        write_columns(columns, "", output_table, delimiter=",", quote=False)
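
A toy sketch of the per-face averaging above: each output row is the mean of the vertex values of one face (optionally divided by per-vertex area first):

import numpy as np

values = np.array([1.0, 2.0, 3.0, 4.0])
faces = [[0, 1, 2], [1, 2, 3]]
face_means = [np.mean(values[face]) for face in faces]
print(face_means)  # [2.0, 3.0]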
Code example #32
def spectrum_per_label(vtk_file, n_eigenvalues=3, exclude_labels=[-1],
                       normalization='area', area_file=''):
    """
    Compute Laplace-Beltrami spectrum per labeled region in a file.

    Parameters
    ----------
    vtk_file : string
        name of VTK surface mesh file containing index scalars (labels)
    n_eigenvalues : integer
        number of eigenvalues to be computed (the length of the spectrum)
    exclude_labels : list of integers
        labels to be excluded
    normalization : string
        the method used to normalize eigenvalues ('area' or None)
        if "area", use area of the 2D structure as in Reuter et al. 2006
    area_file :  string
        name of VTK file with surface area scalar values

    Returns
    -------
    spectrum_lists : list of lists
        first eigenvalues for each label's Laplace-Beltrami spectrum
    label_list : list of integers
        list of unique labels for which spectra are obtained

    Examples
    --------
    >>> # Spectrum for label 22 (postcentral) in Twins-2-1:
    >>> import os
    >>> from mindboggle.shapes.laplace_beltrami import spectrum_per_label
    >>> path = os.environ['MINDBOGGLE_DATA']
    >>> vtk_file = os.path.join(path, 'arno', 'labels', 'lh.labels.DKT25.manual.vtk')
    >>> area_file = os.path.join(path, 'arno', 'shapes', 'lh.pial.area.vtk')
    >>> n_eigenvalues = 6
    >>> exclude_labels = [0]  #[-1]
    >>> spectrum_per_label(vtk_file, n_eigenvalues, exclude_labels,
    >>>                    normalization=None, area_file=area_file)
        Load "Labels" scalars from lh.labels.DKT25.manual.vtk
        Load "scalars" scalars from lh.pial.area.vtk
        7819 vertices for label 22
        Reduced 290134 to 15230 triangular faces
        Linear FEM Laplace-Beltrami spectrum:
        ([[6.3469513010430304e-18,
           0.0005178862383467463,
           0.0017434911095630772,
           0.003667561767487686,
           0.005429017880363784,
           0.006309346984678924]],
         [22])

    """
    import numpy as np

    from mindboggle.utils.io_vtk import read_vtk, read_scalars
    from mindboggle.utils.mesh import remove_faces
    from mindboggle.shapes.laplace_beltrami import spectrum_of_largest

    # Read VTK surface mesh file:
    faces, u1, u2, points, u4, labels, u5, u6 = read_vtk(vtk_file)

    # Area file:
    if area_file:
        areas, u1 = read_scalars(area_file)
    else:
        areas = None

    # Loop through labeled regions:
    ulabels = [int(x) for x in np.unique(labels) if x not in exclude_labels]
    label_list = []
    spectrum_lists = []
    for label in ulabels:

        # Determine the indices per label:
        label_indices = [i for i,x in enumerate(labels) if x == label]
        print('{0} vertices for label {1}'.format(len(label_indices), label))

        # Remove background faces:
        select_faces = remove_faces(faces, label_indices)

        # Compute Laplace-Beltrami spectrum for the label:
        spectrum = spectrum_of_largest(points, select_faces, n_eigenvalues,
                                       exclude_labels, normalization, areas)

        # Append to a list of lists of spectra:
        spectrum_lists.append(spectrum)
        label_list.append(label)

    return spectrum_lists, label_list
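
The 'area' normalization mentioned in the docstring follows Reuter et al.
2006: Laplace-Beltrami eigenvalues of a surface scale inversely with its
area, so multiplying by the total area removes the dependence on size. A
hedged sketch (the helper name is invented for illustration):

def normalize_spectrum_by_area(eigenvalues, total_area):
    """Scale Laplace-Beltrami eigenvalues by total surface area
    (Reuter et al. 2006); eigenvalues scale as 1/area, so the
    product is independent of the shape's size."""
    return [x * total_area for x in eigenvalues]

# Doubling a shape's area halves its raw eigenvalues, but leaves the
# area-normalized spectrum unchanged:
print(normalize_spectrum_by_area([0.5, 1.0], total_area=2.0))  # [1.0, 2.0]
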
Code example #33
File: laplace_beltrami.py Project: jsalva/mindboggle
def spectrum_from_file(vtk_file, spectrum_size=10, exclude_labels=[-1],
                       normalization=None, area_file=''):
    """
    Compute Laplace-Beltrami spectrum of a 3D shape in a VTK file.

    Parameters
    ----------
    vtk_file : string
        the input vtk file
    spectrum_size : integer
        number of eigenvalues to be computed (the length of the spectrum)
    exclude_labels : list of integers
        labels to be excluded
    normalization : string
        the method used to normalize eigenvalues ('area' or None)
        if "area", use area of the 2D structure as in Reuter et al. 2006
    area_file :  string
        name of VTK file with surface area scalar values

    Returns
    -------
    spectrum : list of floats
        first spectrum_size of Laplace-Beltrami spectrum

    Examples
    --------
    >>> # Spectrum for entire left hemisphere of Twins-2-1:
    >>> import os
    >>> from mindboggle.shapes.laplace_beltrami import spectrum_from_file
    >>> path = os.environ['MINDBOGGLE_DATA']
    >>> vtk_file = os.path.join(path, 'arno', 'labels', 'lh.labels.DKT25.manual.vtk')
    >>> spectrum_from_file(vtk_file, spectrum_size=6)
    [4.829758648026223e-18,
     0.00012841730024671977,
     0.0002715181572272744,
     0.00032051508471594173,
     0.000470162807048644,
     0.0005768904023010327]
    >>> # Spectrum for Twins-2-1 left postcentral pial surface (22)
    >>> # (after running explode_scalars() with reindex=True):
    >>> import os
    >>> from mindboggle.shapes.laplace_beltrami import spectrum_from_file
    >>> path = os.environ['MINDBOGGLE_DATA']
    >>> vtk_file = os.path.join(path, 'arno', 'labels', 'label22.vtk')
    >>> spectrum_from_file(vtk_file, spectrum_size=6)
    [6.3469513010430304e-18,
     0.0005178862383467463,
     0.0017434911095630772,
     0.003667561767487686,
     0.005429017880363784,
     0.006309346984678924]
    >>> # Loop through all Mindboggle-101 brains
    >>> # (assumes `header` is set to the top-level data directory):
    >>> import os
    >>> from mindboggle.shapes.laplace_beltrami import spectrum_from_file
    >>> for hemidir in os.listdir(header):
    >>>     print(hemidir)
    >>>     sulci_file = os.path.join(header, hemidir, "sulci.vtk")
    >>>     spectrum = spectrum_from_file(sulci_file)

    """
    from mindboggle.utils.io_vtk import read_vtk, read_scalars
    from mindboggle.shapes.laplace_beltrami import spectrum_of_largest

    faces, u1, u2, points, u4, u5, u6, u7 = read_vtk(vtk_file)

    # Area file:
    if area_file:
        areas, u1 = read_scalars(area_file)
    else:
        areas = None

    spectrum = spectrum_of_largest(points, faces, spectrum_size,
                                   exclude_labels, normalization, areas)

    return spectrum
Code example #34
File: plots.py Project: jsalva/mindboggle
def plot_mask_surface(vtk_file, mask_file='', nonmask_value=-1,
                      masked_output='', remove_nonmask=False,
                      program='vtkviewer',
                      use_colormap=False, colormap_file=''):
    """
    Use vtkviewer or mayavi2 to visualize VTK surface mesh data.

    If a mask_file is provided, a temporary masked file is saved,
    and it is this file that is viewed.

    If using vtkviewer, can optionally provide colormap file
    or set $COLORMAP environment variable.

    Parameters
    ----------
    vtk_file : string
        name of VTK surface mesh file
    mask_file : string
        name of VTK surface mesh file to mask vtk_file vertices
    nonmask_value : integer
        nonmask (usually background) value
    masked_output : string
        temporary masked output file name
    remove_nonmask : Boolean
        remove vertices that are not in mask? (otherwise assign nonmask_value)
    program : string {'vtkviewer', 'mayavi2'}
        program to visualize VTK file
    use_colormap : Boolean
        use Paraview-style XML colormap file set by $COLORMAP env variable?
    colormap_file : string
        name of colormap file to use if use_colormap is True; if empty,
        use the file set by the $COLORMAP environment variable

    Examples
    --------
    >>> import os
    >>> from mindboggle.utils.plots import plot_mask_surface
    >>> path = os.environ['MINDBOGGLE_DATA']
    >>> vtk_file = os.path.join(path, 'arno', 'labels', 'lh.labels.DKT31.manual.vtk')
    >>> mask_file = os.path.join(path, 'test_one_label.vtk')
    >>> nonmask_value = 0 #-1
    >>> masked_output = ''
    >>> remove_nonmask = True
    >>> program = 'vtkviewer'
    >>> use_colormap = True
    >>> colormap_file = '' #'/software/mindboggle_tools/colormap.xml'
    >>> plot_mask_surface(vtk_file, mask_file, nonmask_value, masked_output, remove_nonmask, program, use_colormap, colormap_file)

    """
    import os
    import numpy as np

    from mindboggle.utils.mesh import remove_faces, reindex_faces_points
    from mindboggle.utils.utils import execute
    from mindboggle.utils.plots import plot_surfaces
    from mindboggle.utils.io_vtk import read_scalars, rewrite_scalars, \
                                        read_vtk, write_vtk

    #-------------------------------------------------------------------------
    # Filter mesh with non-background values from a second (same-size) mesh:
    #-------------------------------------------------------------------------
    if mask_file:
        mask, name = read_scalars(mask_file, True, True)
        if not masked_output:
            masked_output = os.path.join(os.getcwd(), 'temp.vtk')
        file_to_plot = masked_output

        #---------------------------------------------------------------------
        # Remove nonmask-valued vertices:
        #---------------------------------------------------------------------
        if remove_nonmask:
            #-----------------------------------------------------------------
            # Load VTK files:
            #-----------------------------------------------------------------
            faces, lines, indices, points, npoints, scalars, scalar_names, \
            o1 = read_vtk(vtk_file, True, True)
            #-----------------------------------------------------------------
            # Find mask indices, remove nonmask faces, and reindex:
            #-----------------------------------------------------------------
            Imask = [i for i,x in enumerate(mask) if x != nonmask_value]
            mask_faces = remove_faces(faces, Imask)
            mask_faces, points, \
            original_indices = reindex_faces_points(mask_faces, points)
            #-----------------------------------------------------------------
            # Write VTK file with scalar values:
            #-----------------------------------------------------------------
            if np.ndim(scalars) == 1:
                scalar_type = type(scalars[0]).__name__
            elif np.ndim(scalars) == 2:
                scalar_type = type(scalars[0][0]).__name__
            else:
                # Fall back to float so write_vtk still receives a type:
                print("Undefined scalar type! Defaulting to 'float'.")
                scalar_type = 'float'
            write_vtk(file_to_plot, points, [], [], mask_faces,
                      scalars[original_indices].tolist(), scalar_names,
                      scalar_type=scalar_type)
        else:
            scalars, name = read_scalars(vtk_file, True, True)
            scalars[mask == nonmask_value] = nonmask_value
            rewrite_scalars(vtk_file, file_to_plot, scalars)
    else:
        file_to_plot = vtk_file

    #-------------------------------------------------------------------------
    # Display with vtkviewer.py:
    #-------------------------------------------------------------------------
    if program == 'vtkviewer':
        plot_surfaces(file_to_plot, use_colormap=use_colormap,
                      colormap_file=colormap_file)
    #-------------------------------------------------------------------------
    # Display with mayavi2:
    #-------------------------------------------------------------------------
    elif program == 'mayavi2':
        cmd = ["mayavi2", "-d", file_to_plot, "-m", "Surface", "&"]
        execute(cmd, 'os')
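
The non-removal branch above keeps the full mesh and only overwrites scalar
values at masked-out vertices. A toy sketch of that masking step (arrays
invented for illustration):

import numpy as np

scalars = np.array([3.1, 2.7, 5.0, 4.2])
mask = np.array([1, -1, 1, -1])   # -1 marks nonmask (background) vertices
nonmask_value = -1

# Overwrite scalars wherever the mask holds the nonmask value:
scalars[mask == nonmask_value] = nonmask_value
print(scalars)  # vertices 1 and 3 are now -1.0
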
Code example #35
File: sulci.py Project: TankThinkLabs/mindboggle
def extract_sulci(labels_file, folds_or_file, hemi, sulcus_label_pair_lists,
                  unique_sulcus_label_pairs, min_boundary=1, sulcus_names=[]):
    """
    Identify sulci from folds in a brain surface according to a labeling
    protocol that includes a list of label pairs defining each sulcus.

    A fold is a group of connected, deep vertices.

    Steps for each fold ::
        1. Remove fold if it has fewer than two labels.
        2. Remove fold if its labels do not contain a sulcus label pair.
        3. Find vertices with labels that are in only one of the fold's
           label boundary pairs. Assign the vertices the sulcus with the
           label pair if they are connected to the label boundary for that pair.
        4. If there are remaining vertices, segment into sets of vertices
           connected to label boundaries, and assign a unique ID to each segment.

    Parameters
    ----------
    labels_file : string
        file name for surface mesh VTK containing labels for all vertices
    folds_or_file : list or string
        fold number for each vertex or name of VTK file containing folds scalars
    hemi : string
        hemisphere ('lh' or 'rh')
    sulcus_label_pair_lists : list of two lists of multiple lists of integer pairs
        list containing left and right lists, each with multiple lists of
        integer pairs corresponding to label boundaries / sulcus / fundus
    unique_sulcus_label_pairs : list of unique pairs of integers
        unique label pairs
    min_boundary : integer
        minimum number of vertices for a sulcus label boundary segment
    sulcus_names : list of strings
        names of sulci

    Returns
    -------
    sulci : list of integers
        sulcus numbers for all vertices (-1 for non-sulcus vertices)
    n_sulci : integer
        number of sulci
    sulci_file : string
        name of output VTK file with sulcus numbers (-1 for non-sulcus vertices)

    Examples
    --------
    >>> import os
    >>> from mindboggle.utils.io_vtk import read_scalars, rewrite_scalars
    >>> from mindboggle.labels.protocol import dkt_protocol
    >>> from mindboggle.features.sulci import extract_sulci
    >>> from mindboggle.utils.plots import plot_vtk
    >>> path = os.environ['MINDBOGGLE_DATA']
    >>> # Load labels, folds, neighbor lists, and sulcus names and label pairs
    >>> labels_file = os.path.join(path, 'arno', 'labels', 'relabeled_lh.DKTatlas40.gcs.vtk')
    >>> folds_file = os.path.join(path, 'arno', 'features', 'folds.vtk')
    >>> folds_or_file, name = read_scalars(folds_file)
    >>> protocol = 'DKT31'
    >>> hemi = 'lh'
    >>> sulcus_names, sulcus_label_pair_lists, unique_sulcus_label_pairs, \
    ...     label_names, label_numbers, cortex_names, cortex_numbers, \
    ...     noncortex_names, noncortex_numbers = dkt_protocol(protocol)
    >>> min_boundary = 10
    >>> #
    >>> sulci, n_sulci, sulci_file = extract_sulci(labels_file, folds_or_file,
    >>>     hemi, sulcus_label_pair_lists, unique_sulcus_label_pairs,
    >>>     min_boundary, sulcus_names)
    >>> # View:
    >>> plot_vtk('sulci.vtk')

    """
    import os
    from time import time
    import numpy as np
    from mindboggle.utils.io_vtk import read_scalars, read_vtk, rewrite_scalars
    from mindboggle.utils.mesh import find_neighbors
    from mindboggle.labels.labels import extract_borders
    from mindboggle.utils.segment import propagate, segment


    # Load fold numbers if folds_or_file is a string
    if isinstance(folds_or_file, str):
        folds, name = read_scalars(folds_or_file)
    elif isinstance(folds_or_file, list):
        folds = folds_or_file

    if hemi == 'lh':
        sulcus_label_pair_lists = sulcus_label_pair_lists[0]
    elif hemi == 'rh':
        sulcus_label_pair_lists = sulcus_label_pair_lists[1]
    else:
        print("Warning: hemisphere not properly specified ('lh' or 'rh').")

    # Load points, faces, and neighbors
    faces, foo1, foo2, points, npoints, labels, foo3, foo4 = read_vtk(labels_file)
    neighbor_lists = find_neighbors(faces, npoints)

    # Array of sulcus IDs for fold vertices, initialized as -1.
    # Since we do not touch gyral vertices and vertices whose labels
    # are not in the label list, or vertices having only one label,
    # their sulcus IDs will remain -1.
    sulci = -1 * np.ones(npoints)

    #-------------------------------------------------------------------------
    # Loop through folds
    #-------------------------------------------------------------------------
    fold_numbers = [int(x) for x in np.unique(folds) if x > -1]
    n_folds = len(fold_numbers)
    print("Extract sulci from {0} folds...".format(n_folds))
    t0 = time()
    for n_fold in fold_numbers:
        fold = [i for i,x in enumerate(folds) if x == n_fold]
        len_fold = len(fold)
        # List the labels in this fold (greater than zero)
        fold_labels = [labels[x] for x in fold]
        unique_fold_labels = [int(x) for x in np.unique(fold_labels) if x > 0]

        #---------------------------------------------------------------------
        # NO MATCH -- fold has fewer than two labels
        #---------------------------------------------------------------------
        if len(unique_fold_labels) < 2:
            # Ignore: sulci already initialized with -1 values
            if not unique_fold_labels:
                print("  Fold {0} ({1} vertices): NO MATCH -- fold has no labels".
                      format(n_fold, len_fold))
            else:
                print("  Fold {0} ({1} vertices): "
                      "NO MATCH -- fold has only one label ({2})".
                      format(n_fold, len_fold, unique_fold_labels[0]))

        else:
            # Find all label boundary pairs within the fold
            indices_fold_pairs, fold_pairs, unique_fold_pairs = extract_borders(
                fold, labels, neighbor_lists, ignore_values=[],
                return_label_pairs=True)

            # Find fold label pairs in the protocol (pairs are already sorted)
            fold_pairs_in_protocol = [x for x in unique_fold_pairs
                                      if x in unique_sulcus_label_pairs]

            if unique_fold_labels:
                print("  Fold {0} labels: {1} ({2} vertices)".format(n_fold,
                      ', '.join([str(x) for x in unique_fold_labels]), len_fold))
            #-----------------------------------------------------------------
            # NO MATCH -- fold has no sulcus label pair
            #-----------------------------------------------------------------
            if not fold_pairs_in_protocol:
                print("  Fold {0}: NO MATCH -- fold has no sulcus label pair".
                      format(n_fold, len_fold))

            #-----------------------------------------------------------------
            # Possible matches
            #-----------------------------------------------------------------
            else:
                print("  Fold {0} label pairs in protocol: {1}".format(n_fold,
                      ', '.join([str(x) for x in fold_pairs_in_protocol])))

                # Labels in the protocol (includes repeats across label pairs)
                labels_in_pairs = [x for lst in fold_pairs_in_protocol for x in lst]

                # Labels that appear in only one or in multiple sulcus label pairs
                unique_labels = []
                nonunique_labels = []
                for label in np.unique(labels_in_pairs):
                    if len([x for x in labels_in_pairs if x == label]) == 1:
                        unique_labels.append(label)
                    else:
                        nonunique_labels.append(label)

                #-------------------------------------------------------------
                # Vertices whose labels are in only one sulcus label pair
                #-------------------------------------------------------------
                # Find vertices with a label that is in only one of the fold's
                # label pairs (the other label in the pair can exist
                # in other pairs). Assign the vertices the sulcus with the label
                # pair if they are connected to the label boundary for that pair.
                #-------------------------------------------------------------
                if len(unique_labels):

                    for pair in fold_pairs_in_protocol:
                        # If one or both labels in label pair is/are unique
                        unique_labels_in_pair = [x for x in pair if x in unique_labels]
                        n_unique = len(unique_labels_in_pair)
                        if n_unique:

                            ID = [i for i,x in enumerate(sulcus_label_pair_lists)
                                  if pair in x][0]

                            # Construct seeds from label boundary vertices
                            # (fold_pairs and pair already sorted)
                            indices_pair = [x for i,x in enumerate(indices_fold_pairs)
                                            if fold_pairs[i] == pair]

                            # Identify vertices with unique label(s) in pair
                            indices_unique_labels = [fold[i]
                                                     for i,x in enumerate(fold_labels)
                                                     if x in unique_labels_in_pair]

                            # Propagate from seeds to labels in label pair
                            sulci2 = segment(indices_unique_labels, neighbor_lists,
                                             min_region_size=1,
                                             seed_lists=[indices_pair],
                                             keep_seeding=False,
                                             spread_within_labels=True,
                                             labels=labels)
                            sulci[sulci2 > -1] = ID

                            # Print statement
                            if n_unique == 1:
                                ps1 = '1 label'
                            else:
                                ps1 = 'Both labels'
                            if len(sulcus_names):
                                ps2 = sulcus_names[ID]
                            else:
                                ps2 = ''
                            print("    {0} unique to one fold pair: {1} {2}".
                                  format(ps1, ps2, unique_labels_in_pair))

                #-------------------------------------------------------------
                # Vertex labels shared by multiple label pairs
                #-------------------------------------------------------------
                # Propagate labels from label borders to vertices with labels
                # that are shared by multiple label pairs in the fold.
                #-------------------------------------------------------------
                if len(nonunique_labels):
                    # For each label shared by different label pairs
                    for label in nonunique_labels:
                        # Print statement
                        print("    Propagate sulcus label borders with label {0}".
                              format(int(label)))

                        # Construct seeds from label boundary vertices
                        seeds = -1 * np.ones(len(points))
                        for ID, label_pair_list in enumerate(sulcus_label_pair_lists):
                            label_pairs = [x for x in label_pair_list if label in x]
                            for label_pair in label_pairs:
                                indices_pair = [x for i,x in enumerate(indices_fold_pairs)
                                    if np.sort(fold_pairs[i]).tolist() == label_pair]
                                if indices_pair:

                                    # Do not include short boundary segments
                                    if min_boundary > 1:
                                        indices_pair2 = []
                                        seeds2 = segment(indices_pair, neighbor_lists)
                                        for seed2 in range(int(max(seeds2))+1):
                                            iseed2 = [i for i,x in enumerate(seeds2)
                                                      if x == seed2]
                                            if len(iseed2) >= min_boundary:
                                                indices_pair2.extend(iseed2)
                                            else:
                                                if len(iseed2) == 1:
                                                    print("    Remove assignment "
                                                          "of ID {0} from 1 vertex".
                                                          format(seed2))
                                                else:
                                                    print("    Remove assignment "
                                                          "of ID {0} from {1} vertices".
                                                          format(seed2, len(iseed2)))
                                        indices_pair = indices_pair2

                                    # Assign sulcus IDs to seeds
                                    seeds[indices_pair] = ID

                        # Identify vertices with the label
                        label_array = -1 * np.ones(len(points))
                        indices_label = [fold[i] for i,x in enumerate(fold_labels)
                                         if x == label]
                        if len(indices_label):
                            label_array[indices_label] = 1

                            # Propagate from seeds to vertices with label
                            #indices_seeds = []
                            #for seed in range(int(max(seeds))+1):
                            #    indices_seeds.append([i for i,x in enumerate(seeds)
                            #                          if x == seed])
                            #sulci2 = segment(indices_label, neighbor_lists,
                            #                 50, indices_seeds, False, True, labels)
                            sulci2 = propagate(points, faces,
                                               label_array, seeds, sulci,
                                               max_iters=10000,
                                               tol=0.001, sigma=5)
                            sulci[sulci2 > -1] = sulci2[sulci2 > -1]

    #-------------------------------------------------------------------------
    # Print out assigned sulci
    #-------------------------------------------------------------------------
    sulcus_numbers = [int(x) for x in np.unique(sulci) if x > -1]
    n_sulci = len(sulcus_numbers)
    print("Extracted {0} sulci from {1} folds ({2:.1f}s):".
          format(n_sulci, n_folds, time()-t0))
    if len(sulcus_names):
        for sulcus_number in sulcus_numbers:
            print("  {0}: {1}".format(sulcus_number, sulcus_names[sulcus_number]))
    else:
        print("  " + ", ".join([str(x) for x in sulcus_numbers]))

    #-------------------------------------------------------------------------
    # Print out unresolved sulci
    #-------------------------------------------------------------------------
    unresolved = [i for i in range(len(sulcus_label_pair_lists))
                  if i not in sulcus_numbers]
    if len(unresolved) == 1:
        print("The following sulcus is unaccounted for:")
    else:
        print("The following {0} sulci are unaccounted for:".format(len(unresolved)))
    if len(sulcus_names):
        for sulcus_number in unresolved:
            print("  {0}: {1}".format(sulcus_number, sulcus_names[sulcus_number]))
    else:
        print("  " + ", ".join([str(x) for x in unresolved]))

    #-------------------------------------------------------------------------
    # Return sulci, number of sulci, and file name
    #-------------------------------------------------------------------------
    sulci_file = os.path.join(os.getcwd(), 'sulci.vtk')
    rewrite_scalars(labels_file, sulci_file, sulci, 'sulci', sulci)
    sulci = sulci.tolist()

    return sulci, n_sulci, sulci_file
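
extract_sulci() relies on extract_borders() to locate label boundaries. As a
rough sketch of that idea (toy chain mesh, not the Mindboggle
implementation): a vertex lies on a border when any neighbor carries a
different label.

labels = [10, 10, 20, 20]
neighbor_lists = [[1], [0, 2], [1, 3], [2]]   # a simple 4-vertex chain

border = [i for i, nbrs in enumerate(neighbor_lists)
          if any(labels[j] != labels[i] for j in nbrs)]
print(border)  # [1, 2] -- the two vertices flanking the 10/20 boundary
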
Code example #36
    sulcus_names, sulcus_label_pair_lists, unique_sulcus_label_pairs, \
        label_names, label_numbers, cortex_names, cortex_numbers, \
        noncortex_names, noncortex_numbers = dkt_protocol(protocol)

    fundi_file = sys.argv[1]
    folds_file = sys.argv[2]
    labels_file = sys.argv[3]

    print('***')
    print('Input fundi: ' + fundi_file)
    print('Input folds: ' + folds_file)
    print('Input labels: ' + labels_file)
    print('***')

    # Load fundi, folds, labels
    fundi, name = read_scalars(fundi_file, return_arrays=True)
    folds, name = read_scalars(folds_file, return_arrays=True)
    faces, lines, indices, points, npoints, labels, scalar_names = load_vtk(labels_file, return_arrays=True)

    # List of indices to fold vertices
    fold_indices = [i for i,x in enumerate(folds) if x > 0]

    # Calculate neighbor lists for all points
    print('Find neighbors to all vertices...')
    neighbor_lists = find_neighbors(faces, npoints)

    # Prepare list of all unique sorted label pairs in the labeling protocol
    print('Prepare a list of unique, sorted label pairs in the protocol...')
    n_fundi = len(sulcus_label_pair_lists)

    # Find label boundary points in any of the folds
Code example #37
def spectrum_from_file(vtk_file, n_eigenvalues=6, exclude_labels=[-1],
                       normalization=None, area_file=''):
    """
    Compute Laplace-Beltrami spectrum of a 3D shape in a VTK file.

    Parameters
    ----------
    vtk_file : string
        the input vtk file
    n_eigenvalues : integer
        number of eigenvalues to be computed (the length of the spectrum)
    exclude_labels : list of integers
        labels to be excluded
    normalization : string
        the method used to normalize eigenvalues ('area' or None)
        if "area", use area of the 2D structure as in Reuter et al. 2006
    area_file :  string
        name of VTK file with surface area scalar values

    Returns
    -------
    spectrum : list of floats
        first n_eigenvalues of Laplace-Beltrami spectrum

    Examples
    --------
    >>> # Spectrum for entire left hemisphere of Twins-2-1:
    >>> import os
    >>> from mindboggle.shapes.laplace_beltrami import spectrum_from_file
    >>> path = os.environ['MINDBOGGLE_DATA']
    >>> vtk_file = os.path.join(path, 'arno', 'labels', 'lh.labels.DKT25.manual.vtk')
    >>> spectrum_from_file(vtk_file, n_eigenvalues=6)
        Load "Labels" scalars from lh.labels.DKT25.manual.vtk
        Linear FEM Laplace-Beltrami spectrum:
        [4.829758648026221e-18,
         0.00012841730024672036,
         0.00027151815722727465,
         0.00032051508471594065,
         0.0004701628070486447,
         0.0005768904023010338]
    >>> # Spectrum for label 22 (postcentral) (after running explode_scalars()):
    >>> import os
    >>> from mindboggle.shapes.laplace_beltrami import spectrum_from_file
    >>> path = os.environ['MINDBOGGLE_DATA']
    >>> vtk_file = os.path.join(path, 'arno', 'labels', 'label22.vtk')
    >>> spectrum_from_file(vtk_file, n_eigenvalues=6)
        Load "scalars" scalars from label22.vtk
        Linear FEM Laplace-Beltrami spectrum:
        [6.3469513010430304e-18,
         0.0005178862383467466,
         0.0017434911095630806,
         0.003667561767487689,
         0.005429017880363778,
         0.006309346984678918]

    >>> # Loop through all Mindboggle-101 brains
    >>> # (assumes `header` is set to the top-level data directory):
    >>> import os
    >>> from mindboggle.shapes.laplace_beltrami import spectrum_from_file
    >>> for hemidir in os.listdir(header):
    >>>     print(hemidir)
    >>>     sulci_file = os.path.join(header, hemidir, "sulci.vtk")
    >>>     spectrum = spectrum_from_file(sulci_file)

    """
    from mindboggle.utils.io_vtk import read_vtk, read_scalars
    from mindboggle.shapes.laplace_beltrami import spectrum_of_largest

    faces, u1, u2, points, u4, u5, u6, u7 = read_vtk(vtk_file)

    # Area file:
    if area_file:
        areas, u1 = read_scalars(area_file)
    else:
        areas = None

    spectrum = spectrum_of_largest(points, faces, n_eigenvalues,
                                   exclude_labels, normalization, areas)

    return spectrum
Code example #38
def concatenate_sulcus_scalars(scalar_files, fold_files, label_files):
    """
    Prepare data for estimating scalar distributions along and outside fundi.

    Extract (e.g., depth, curvature) scalar values in folds, along sulcus
    label boundaries as well as outside the sulcus label boundaries.
    Concatenate these scalar values across multiple files.

    Parameters
    ----------
    scalar_files : list of strings
        names of surface mesh VTK files with scalar values to concatenate
    fold_files : list of strings (corr. to each list in scalar_files)
        VTK files with fold numbers as scalars (-1 for non-fold vertices)
    label_files : list of strings (corr. to fold_files)
        VTK files with label numbers (-1 for unlabeled vertices)

    Returns
    -------
    border_scalars : list of floats
        concatenated scalar values within folds along sulcus label boundaries
    nonborder_scalars : list of floats
        concatenated scalar values within folds outside sulcus label boundaries

    Examples
    --------
    >>> # Concatenate (duplicate) depth scalars:
    >>> import os
    >>> from mindboggle.shapes.likelihood import concatenate_sulcus_scalars
    >>> path = os.environ['MINDBOGGLE_DATA']
    >>> depth_file = os.path.join(path, 'arno', 'shapes', 'depth_rescaled.vtk')
    >>> folds_file = os.path.join(path, 'arno', 'features', 'folds.vtk')
    >>> labels_file = os.path.join(path, 'arno', 'labels', 'lh.labels.DKT25.manual.vtk')
    >>> scalar_files = [depth_file, depth_file]
    >>> fold_files = [folds_file, folds_file]
    >>> label_files = [labels_file, labels_file]
    >>> #
    >>> S = concatenate_sulcus_scalars(scalar_files, fold_files, label_files)

    """
    import numpy as np

    from mindboggle.utils.io_vtk import read_scalars
    from mindboggle.utils.mesh import find_neighbors_from_file
    from mindboggle.utils.segment import extract_borders
    from mindboggle.LABELS import DKTprotocol

    dkt = DKTprotocol()

    # Prepare (non-unique) list of sulcus label pairs:
    protocol_label_pairs = [
        x for lst in dkt.sulcus_label_pair_lists for x in lst
    ]

    border_scalars = []
    nonborder_scalars = []

    # Loop through files with the scalar values:
    for ifile, scalar_file in enumerate(scalar_files):
        print(scalar_file)

        # Load scalars, folds, and labels:
        folds_file = fold_files[ifile]
        labels_file = label_files[ifile]
        scalars, name = read_scalars(scalar_file, True, True)
        if scalars.size:
            folds, name = read_scalars(folds_file)
            labels, name = read_scalars(labels_file)
            indices_folds = [i for i, x in enumerate(folds) if x != -1]
            neighbor_lists = find_neighbors_from_file(labels_file)

            # Find all label border pairs within the folds:
            indices_label_pairs, label_pairs, unique_pairs = extract_borders(
                indices_folds,
                labels,
                neighbor_lists,
                ignore_values=[-1],
                return_label_pairs=True)
            indices_label_pairs = np.array(indices_label_pairs)

            # Find vertices with label pairs in the sulcus labeling protocol:
            Ipairs_in_protocol = [
                i for i, x in enumerate(label_pairs)
                if x in protocol_label_pairs
            ]
            indices_label_pairs = indices_label_pairs[Ipairs_in_protocol]
            indices_outside_pairs = list(
                frozenset(indices_folds).difference(indices_label_pairs))

            # Store scalar values in folds along label border pairs:
            border_scalars.extend(scalars[indices_label_pairs].tolist())

            # Store scalar values in folds outside label border pairs:
            nonborder_scalars.extend(scalars[indices_outside_pairs].tolist())

    return border_scalars, nonborder_scalars
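
The split between border and nonborder scalars above comes down to a set
difference on vertex indices; a toy illustration (indices invented):

indices_folds = [0, 1, 2, 3, 4]
indices_label_pairs = [1, 3]      # fold vertices on sulcus label borders

indices_outside = sorted(frozenset(indices_folds).difference(indices_label_pairs))
print(indices_outside)  # [0, 2, 4]
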
Code example #39
def realign_boundaries_to_fundus_lines(surf_file,
                                       init_label_file,
                                       fundus_lines_file,
                                       thickness_file,
                                       out_label_file=None):
    """
    Fix label boundaries to fundus lines.

    Parameters
    ----------
    surf_file : file containing the surface geometry in vtk format
    init_label_file : file containing scalars that represent the
                      initial guess at labels
    fundus_lines_file : file containing scalars representing fundus lines
    thickness_file : file containing cortical thickness scalar data
                     (for masking out the medial wall only)
    out_label_file : if specified, the realigned labels will be written to
                     this file

    Returns
    -------
    numpy array representing the realigned label for each surface vertex.
    """

    import numpy as np
    from mindboggle.utils.segment import extract_borders
    import mindboggle.utils.graph as go
    from mindboggle.utils.io_vtk import read_vtk, read_scalars, write_vtk
    from mindboggle.utils.mesh import find_neighbors
    import propagate_fundus_lines

    ## read files
    faces, _, indices, points, num_points, _, _, _ = read_vtk(
        surf_file, return_first=True, return_array=True)
    indices = range(num_points)

    init_labels, _ = read_scalars(init_label_file,
                                  return_first=True,
                                  return_array=True)

    fundus_lines, _ = read_scalars(fundus_lines_file,
                                   return_first=True,
                                   return_array=True)

    thickness, _ = read_scalars(thickness_file,
                                return_first=True,
                                return_array=True)

    # remove labels from vertices with zero thickness (get around
    # DKT40 annotations having the label '3' for all the Corpus
    # Callosum vertices).
    cc_inds = [x for x in indices if thickness[x] < 0.001]
    init_labels[cc_inds] = 0

    ## setup seeds from initial label boundaries
    neighbor_lists = find_neighbors(faces, num_points)

    # extract all vertices that are on a boundary between labels
    boundary_indices, label_pairs, _ = extract_borders(indices,
                                                       init_labels,
                                                       neighbor_lists,
                                                       return_label_pairs=True)

    # split boundary vertices into segments with common boundary pairs.
    boundary_segments = {}
    for boundary_index, label_pair in zip(boundary_indices, label_pairs):
        key = ((label_pair[0],
                label_pair[1]) if label_pair[0] < label_pair[1] else
               (label_pair[1], label_pair[0]))
        if key not in boundary_segments:
            boundary_segments[key] = []

        boundary_segments[key].append(boundary_index)

    boundary_matrix, boundary_matrix_keys = _build_boundary_matrix(
        boundary_segments, num_points)

    # build the affinity matrix
    affinity_matrix = go.weight_graph(np.array(points),
                                      indices,
                                      np.array(faces),
                                      sigma=10,
                                      add_to_graph=False)

    ## propagate boundaries to fundus line vertices
    learned_matrix = _propagate_labels(affinity_matrix, boundary_matrix,
                                       boundary_indices, 100, 1)

    # assign labels to fundus line vertices based on highest probability
    new_boundaries = -1 * np.ones(init_labels.shape)
    fundus_line_indices = [i for i, x in enumerate(fundus_lines) if x > 0.5]

    # tile the surface into connected components delimited by fundus lines
    closed_fundus_lines, _, _ = propagate_fundus_lines.propagate_fundus_lines(
        points, faces, fundus_line_indices, thickness)

    closed_fundus_line_indices = np.where(closed_fundus_lines > 0)[0]

    # split surface into connected components
    connected_component_faces = _remove_boundary_faces(
        points, faces, closed_fundus_line_indices)

    # label components based on most probable label assignment
    new_labels = _label_components(connected_component_faces, num_points,
                                   boundary_indices, learned_matrix,
                                   boundary_matrix_keys)

    # propagate new labels to fill holes
    label_matrix, label_map = _build_label_matrix(new_labels)
    new_learned_matrix = _propagate_labels(
        affinity_matrix, label_matrix,
        [i for i in range(num_points) if new_labels[i] >= 0], 100, 1)

    # assign most probable labels
    for idx in [i for i in range(num_points) if new_labels[i] == -1]:
        max_idx = np.argmax(new_learned_matrix[idx])
        new_labels[idx] = label_map[max_idx]

    # save
    if out_label_file is not None:
        write_vtk(out_label_file,
                  points,
                  faces=faces,
                  scalars=[int(x) for x in new_labels],
                  scalar_type='int')

    return new_labels
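
_propagate_labels() is defined elsewhere in the file; as a hedged stand-in
(not necessarily Mindboggle's implementation), a generic graph-based label
propagation in the style of Zhu et al. diffuses label probabilities over
the affinity graph while repeatedly clamping the seed rows:

import numpy as np

def propagate_labels_sketch(affinity, label_matrix, seed_indices,
                            n_iters=100):
    """Diffuse label probabilities over a graph, re-clamping seeds each
    iteration (Zhu-style propagation; a dense-array sketch only)."""
    # Row-normalize the affinity matrix into a transition matrix:
    row_sums = np.maximum(affinity.sum(axis=1, keepdims=True), 1e-12)
    W = affinity / row_sums
    Y = label_matrix.astype(float)
    seeds = np.zeros(Y.shape[0], dtype=bool)
    seeds[list(seed_indices)] = True
    for _ in range(n_iters):
        Y = W.dot(Y)                      # spread label mass to neighbors
        Y[seeds] = label_matrix[seeds]    # re-clamp the seed labels
    return Y
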
Code example #40
File: plots.py Project: ccraddock/mindboggle
def plot_mask_surface(vtk_file, mask_file='', nonmask_value=-1,
                      masked_output='', remove_nonmask=False,
                      program='vtkviewer',
                      use_colormap=False, colormap_file=''):
    """
    Use vtkviewer or mayavi2 to visualize VTK surface mesh data.

    If a mask_file is provided, a temporary masked file is saved,
    and it is this file that is viewed.

    If using vtkviewer, can optionally provide colormap file
    or set $COLORMAP environment variable.

    Parameters
    ----------
    vtk_file : string
        name of VTK surface mesh file
    mask_file : string
        name of VTK surface mesh file to mask vtk_file vertices
    nonmask_value : integer
        nonmask (usually background) value
    masked_output : string
        temporary masked output file name
    remove_nonmask : Boolean
        remove vertices that are not in mask? (otherwise assign nonmask_value)
    program : string {'vtkviewer', 'mayavi2'}
        program to visualize VTK file
    use_colormap : Boolean
        use Paraview-style XML colormap file set by $COLORMAP env variable?
    colormap_file : string
        name of colormap file to use if use_colormap is True; if empty,
        use the file set by the $COLORMAP environment variable

    Examples
    --------
    >>> import os
    >>> from mindboggle.utils.plots import plot_mask_surface
    >>> path = os.environ['MINDBOGGLE_DATA']
    >>> vtk_file = os.path.join(path, 'arno', 'labels', 'lh.labels.DKT31.manual.vtk')
    >>> mask_file = os.path.join(path, 'test_one_label.vtk')
    >>> nonmask_value = 0 #-1
    >>> masked_output = ''
    >>> remove_nonmask = True
    >>> program = 'vtkviewer'
    >>> use_colormap = True
    >>> colormap_file = '' #'/software/mindboggle_tools/colormap.xml'
    >>> plot_mask_surface(vtk_file, mask_file, nonmask_value, masked_output, remove_nonmask, program, use_colormap, colormap_file)

    """
    import os
    import numpy as np

    from mindboggle.utils.mesh import remove_faces, reindex_faces_points
    from mindboggle.utils.utils import execute
    from mindboggle.utils.plots import plot_surfaces
    from mindboggle.utils.io_vtk import read_scalars, rewrite_scalars, \
                                        read_vtk, write_vtk

    #-------------------------------------------------------------------------
    # Filter mesh with non-background values from a second (same-size) mesh:
    #-------------------------------------------------------------------------
    if mask_file:
        mask, name = read_scalars(mask_file, True, True)
        if not masked_output:
            masked_output = os.path.join(os.getcwd(), 'temp.vtk')
        file_to_plot = masked_output

        #---------------------------------------------------------------------
        # Remove nonmask-valued vertices:
        #---------------------------------------------------------------------
        if remove_nonmask:
            #-----------------------------------------------------------------
            # Load VTK files:
            #-----------------------------------------------------------------
            faces, lines, indices, points, npoints, scalars, scalar_names, \
            o1 = read_vtk(vtk_file, True, True)
            #-----------------------------------------------------------------
            # Find mask indices, remove nonmask faces, and reindex:
            #-----------------------------------------------------------------
            Imask = [i for i,x in enumerate(mask) if x != nonmask_value]
            mask_faces = remove_faces(faces, Imask)
            mask_faces, points, \
            original_indices = reindex_faces_points(mask_faces, points)
            #-----------------------------------------------------------------
            # Write VTK file with scalar values:
            #-----------------------------------------------------------------
            if np.ndim(scalars) == 1:
                scalar_type = type(scalars[0]).__name__
            elif np.ndim(scalars) == 2:
                scalar_type = type(scalars[0][0]).__name__
            else:
                # Fall back to float so write_vtk still receives a type:
                print("Undefined scalar type! Defaulting to 'float'.")
                scalar_type = 'float'
            write_vtk(file_to_plot, points, [], [], mask_faces,
                      scalars[original_indices].tolist(), scalar_names,
                      scalar_type=scalar_type)
        else:
            scalars, name = read_scalars(vtk_file, True, True)
            scalars[mask == nonmask_value] = nonmask_value
            rewrite_scalars(vtk_file, file_to_plot, scalars)
    else:
        file_to_plot = vtk_file

    #-------------------------------------------------------------------------
    # Display with vtkviewer.py:
    #-------------------------------------------------------------------------
    if program == 'vtkviewer':
        plot_surfaces(file_to_plot, use_colormap=use_colormap,
                      colormap_file=colormap_file)
    #-------------------------------------------------------------------------
    # Display with mayavi2:
    #-------------------------------------------------------------------------
    elif program == 'mayavi2':
        cmd = ["mayavi2", "-d", file_to_plot, "-m", "Surface", "&"]
        execute(cmd, 'os')
Code example #41
File: fundi.py Project: TankThinkLabs/mindboggle
def extract_fundi(folds, sulci, curv_file, depth_file, min_separation=10,
                  erode_ratio=0.1, erode_min_size=1, save_file=False):
    """
    Extract fundi from folds.

    A fundus is a branching curve that runs along the deepest and most
    highly curved portions of a sulcus fold.

    Steps ::
        1. Find fundus endpoints (outer anchors) with find_outer_anchors().
        2. Include inner anchor points.
        3. Connect anchor points using connect_points_erosion().
        4. Segment fundi by sulcus definitions.
        Possible postprocessing step: smooth with smooth_skeleton().

    Parameters
    ----------
    folds : list of integers
        fold number for each vertex
    sulci : list of integers
        sulcus number for each vertex
    curv_file : string
        surface mesh file in VTK format with mean curvature values
    depth_file : string
        surface mesh file in VTK format with rescaled depth values
    min_separation : integer
        minimum number of edges between inner/outer anchor points
    erode_ratio : float
        fraction of indices to test for removal at each iteration
        in connect_points_erosion()
    erode_min_size : integer
        minimum size threshold passed to connect_points_erosion()
    save_file : Boolean
        save output VTK file?

    Returns
    -------
    fundi : list of integers
        fundus numbers for all vertices (-1 for non-fundus vertices)
    n_fundi :  integer
        number of fundi
    fundi_file : string (if save_file)
        name of output VTK file with fundus numbers (-1 for non-fundus vertices)

    Examples
    --------
    >>> # Extract fundus from one or more folds:
    >>> single_fold = True
    >>> import os
    >>> from mindboggle.utils.io_vtk import read_scalars
    >>> from mindboggle.features.fundi import extract_fundi
    >>> from mindboggle.utils.plots import plot_vtk
    >>> path = os.environ['MINDBOGGLE_DATA']
    >>> sulci_file = os.path.join(path, 'arno', 'features', 'sulci.vtk')
    >>> sulci, name = read_scalars(sulci_file, True, True)
    >>> curv_file = os.path.join(path, 'arno', 'shapes', 'lh.pial.mean_curvature.vtk')
    >>> depth_file = os.path.join(path, 'arno', 'shapes', 'travel_depth_rescaled.vtk')
    >>> folds_file = os.path.join(path, 'arno', 'features', 'folds.vtk')
    >>> folds, name = read_scalars(folds_file, True, True)
    >>> if single_fold:
    >>>     fold_number = 2 #11
    >>>     folds[folds != fold_number] = -1
    >>> min_separation = 10
    >>> erode_ratio = 0.10
    >>> erode_min_size = 10
    >>> save_file = True
    >>> fundi, n_fundi, fundi_file = extract_fundi(folds, sulci, curv_file,
    >>>     depth_file, min_separation, erode_ratio, erode_min_size, save_file)
    >>> #
    >>> # View:
    >>> plot_vtk(fundi_file)

    """

    # Extract a skeleton to connect endpoints in a fold:
    import os
    import numpy as np
    from time import time

    from mindboggle.utils.io_vtk import read_scalars, read_vtk, rewrite_scalars
    from mindboggle.utils.compute import median_abs_dev
    from mindboggle.utils.paths import find_max_values
    from mindboggle.utils.mesh import find_neighbors_from_file
    from mindboggle.utils.paths import find_outer_anchors, connect_points_erosion

    # Load values, threshold, and neighbors:
    u1, u2, u3, points, npoints, curvs, u4, u5 = read_vtk(curv_file, True, True)
    depths, name = read_scalars(depth_file, True, True)
    values = curvs * depths
    values0 = [x for x in values if x > 0]
    thr = np.median(values0) + 2 * median_abs_dev(values0)
    neighbor_lists = find_neighbors_from_file(curv_file)

    #-------------------------------------------------------------------------
    # Loop through folds:
    #-------------------------------------------------------------------------
    t1 = time()
    skeletons = []
    unique_fold_IDs = [x for x in np.unique(folds) if x != -1]

    if len(unique_fold_IDs) == 1:
        print("Extract a fundus from 1 fold...")
    else:
        print("Extract a fundus from each of {0} folds...".
              format(len(unique_fold_IDs)))

    for fold_ID in unique_fold_IDs:
        indices_fold = [i for i,x in enumerate(folds) if x == fold_ID]
        if indices_fold:
            print('  Fold {0}:'.format(int(fold_ID)))

            #-----------------------------------------------------------------
            # Find outer anchor points on the boundary of the surface region,
            # to serve as fundus endpoints :
            #-----------------------------------------------------------------
            outer_anchors, tracks = find_outer_anchors(indices_fold,
                neighbor_lists, values, depths, min_separation)

            #-----------------------------------------------------------------
            # Find inner anchor points:
            #-----------------------------------------------------------------
            inner_anchors = find_max_values(points, values, min_separation, thr)

            #-----------------------------------------------------------------
            # Connect endpoints to create skeleton:
            #-----------------------------------------------------------------
            B = -1 * np.ones(npoints)
            B[indices_fold] = 1
            skeleton = connect_points_erosion(B, neighbor_lists,
                outer_anchors, inner_anchors, values,
                erode_ratio, erode_min_size, save_steps=[], save_vtk='')
            if skeleton:
                skeletons.extend(skeleton)

    #-------------------------------------------------------------------------
    # Create fundi by segmenting skeletons with overlapping sulcus labels:
    #-------------------------------------------------------------------------
    fundi = -1 * np.ones(npoints)
    indices = [x for x in skeletons if sulci[x] != -1]
    fundi[indices] = sulci[indices]

    n_fundi = len([x for x in np.unique(fundi) if x != -1])
    if n_fundi == 1:
        sdum = 'fundus'
    else:
        sdum = 'fundi'
    print('  ...Extracted {0} {1} ({2:.2f} seconds)'.
          format(n_fundi, sdum, time() - t1))

    #-------------------------------------------------------------------------
    # Return fundi, number of fundi, and file name:
    #-------------------------------------------------------------------------
    fundi = fundi.tolist()

    if save_file:
        fundi_file = os.path.join(os.getcwd(), 'fundi.vtk')
        rewrite_scalars(curv_file, fundi_file, fundi, 'fundi', folds)
    else:
        fundi_file = None

    return fundi, n_fundi, fundi_file
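
The anchor threshold above is the median of the positive curvature-depth
products plus twice their median absolute deviation. A sketch of how such a
MAD helper is commonly defined (mindboggle.utils.compute.median_abs_dev may
differ in detail):

import numpy as np

def median_abs_dev_sketch(x):
    """Median absolute deviation: median of |x - median(x)|."""
    x = np.asarray(x, dtype=float)
    return np.median(np.abs(x - np.median(x)))

values0 = [0.2, 0.5, 0.9, 1.4, 3.0]
thr = np.median(values0) + 2 * median_abs_dev_sketch(values0)
print(thr)  # 0.9 + 2 * 0.5 = 1.9
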
Code example #42
    table_file = os.path.join(os.getcwd(), label_name+'_'+shape_name+'.csv')
    table_column_names = []
    column_names = []
    columns = []

    for subject in subjects_list:
        label_file = '/homedir/Desktop/hippo/workspace/Mindboggle/Labels/_hemi_'+\
                     hemi+'_subject_'+subject+'/DKT_annot_to_VTK/'+hemi+'.DKTatlas40.gcs.vtk'
        shape_file = '/homedir/Desktop/hippo/results/shapes/_hemi_'+hemi+\
                     '_subject_'+subject+'/'+hemi+'.thickness.vtk'
        area_file = '/homedir/Desktop/hippo/results/shapes/_hemi_'+hemi+\
                     '_subject_'+subject+'/'+hemi+'.pial.area.vtk'
    
        area_array = []
        if os.path.exists(label_file):
            feature_array, name = read_scalars(label_file, True, True)
        if os.path.exists(shape_file):
            shape_array, name = read_scalars(shape_file, True, True)
        if os.path.exists(label_file):
            label_array, name = read_scalars(label_file, True, True)
        if area_file and os.path.exists(area_file):
            area_array, name = read_scalars(area_file, True, True)

        #-----------------------------------------------------------------
        # Loop through shape measures:
        #-----------------------------------------------------------------
        table_column_names.extend(column_names[:])
        print('  Compute statistics on {0} {1}'.
              format(label_name, shape_name))
    
        #-------------------------------------------------------------