Code Example #1
def compute_fundus_distances(label_boundary_fundi, fundi, folds, points, n_fundi):
    """
    Create a fundus distance matrix.

    Compute the minimum distance from each label boundary vertex
    (corresponding to a fundus in the Desikan-Killiany-Tourville cortical
    labeling protocol) to the fundus vertices in the same fold.

    Parameters
    ----------
    label_boundary_fundi : list of integers
        fundus ID for each vertex, assigned from label boundaries in the
        sulcus protocol (zero for vertices not on a label boundary fundus)
    fundi : list of integers
        fundus ID for each vertex (zero for non-fundus vertices)
    folds : list of integers
        fold ID for each vertex
    points : list of lists of three floats
        coordinates of all vertices
    n_fundi : int
        number of fundi

    Returns
    -------
    distances : numpy array
        distance value for each vertex (zero where there is no label
        boundary fundus vertex)
    distance_matrix : numpy array
        rows correspond to vertices and columns to fundi (default -1)

    """
    import numpy as np
    from mindboggle.utils.compute import point_distance

    # Convert to a numpy array so that lists of indices can index points:
    points = np.asarray(points)
    npoints = len(points)

    distances = np.zeros(npoints)
    distance_matrix = -1 * np.ones((npoints, n_fundi))

    sum_distances = 0
    num_distances = 0

    # For each label boundary fundus point
    for i_label_point, fundus_ID in enumerate(label_boundary_fundi):
        if fundus_ID > 0:

            # Find (indices of) fundus points in the same fold
            I_fundus_points = [i for i,x in enumerate(fundi)
                               if x > 0
                               if folds[i] == folds[i_label_point]]

            # Find the closest fundus point to the label boundary fundus point:
            if I_fundus_points:
                d, i = point_distance(points[i_label_point],
                                      points[I_fundus_points])
                distances[i_label_point] = d
                distance_matrix[i_label_point, fundus_ID - 1] = d

                sum_distances += d
                num_distances += 1

            if i_label_point % 1000 == 0 and num_distances > 0:
                percent_done = 100.0 * float(i_label_point) / float(npoints)
                mean_distance = sum_distances / num_distances
                print('Done: {0} pct, mean dist {1}, {2}'.format(
                      percent_done, mean_distance, i_label_point))

    if num_distances:
        mean_distance = sum_distances / num_distances
        print('Done: 100 pct, mean dist {0}'.format(mean_distance))

    return distances, distance_matrix
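
A minimal usage sketch for compute_fundus_distances on a hypothetical four-vertex mesh (all values below are invented for illustration; mindboggle must be importable for the internal point_distance call):

import numpy as np

# Toy data: 4 vertices, one fold, one fundus (IDs are 1-based; 0 = none):
points = [[0.0, 0.0, 0.0], [1.0, 0.0, 0.0],
          [0.0, 1.0, 0.0], [1.0, 1.0, 0.0]]
label_boundary_fundi = [1, 0, 0, 0]  # vertex 0 lies on a label boundary fundus
fundi = [0, 1, 1, 0]                 # vertices 1 and 2 are fundus vertices
folds = [7, 7, 7, 7]                 # all vertices belong to the same fold
n_fundi = 1

distances, distance_matrix = compute_fundus_distances(
    label_boundary_fundi, fundi, folds, points, n_fundi)
print(distances)        # nonzero only at vertex 0 (distance 1.0)
print(distance_matrix)  # column 0 holds the distance for fundus ID 1
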
Code Example #2
def watershed(depths, points, indices, neighbor_lists, min_size=1,
              depth_factor=0.25, depth_ratio=0.1, tolerance=0.01, regrow=True):
    """
    Segment vertices of a surface mesh into contiguous "watershed basins"
    by seed growing from an iterative selection of the deepest vertices.

    Steps ::

        1. Grow segments from an iterative selection of the deepest seeds.
        2. Regrow segments from the resulting seeds, until each seed's
           segment touches a boundary.
        3. Use the segment() function to fill in the rest.
        4. Merge segments if their seeds are too close to each other
           or their depths are very different.

    Note ::

        Despite the above precautions, the order of seed selection in segment()
        could possibly influence the resulting borders between adjoining
        segments (vs. propagate(), which is slower and insensitive to depth,
        but is not biased by seed order).

    Parameters
    ----------
    depths : numpy array of floats
        depth values for all vertices (default -1)
    points : list of lists of floats
        each element is a list of 3-D coordinates of a vertex on a surface mesh
    indices : list of integers
        indices to mesh vertices to be segmented
    neighbor_lists : list of lists of integers
        each list contains indices to neighboring vertices for each vertex
    min_size : integer
        the minimum number of vertices in a basin
    depth_factor : float
        factor to determine whether to merge two neighboring watershed catchment
        basins -- they are merged if the Euclidean distance between their basin
        seeds is less than this fraction of the maximum Euclidean distance
        between points having minimum and maximum depths
    depth_ratio : float
        the minimum fraction of depth for a neighboring shallower
        watershed catchment basin (otherwise merged with the deeper basin)
    tolerance : float
        tolerance for detecting differences in depth between vertices
    regrow : Boolean
        regrow segments from watershed seeds?

    Returns
    -------
    segments : list of integers
        region numbers for all vertices (default -1)
    seed_indices : list of integers
        list of indices to seed vertices

    Examples
    --------
    >>> # Perform watershed segmentation on the deeper portions of a surface:
    >>> import os
    >>> import numpy as np
    >>> from mindboggle.utils.mesh import find_neighbors
    >>> from mindboggle.utils.plots import plot_vtk
    >>> from mindboggle.utils.segment import watershed, segment
    >>> from mindboggle.utils.io_vtk import read_vtk, read_scalars, rewrite_scalars
    >>> path = os.environ['MINDBOGGLE_DATA']
    >>> depth_file = os.path.join(path, 'arno', 'shapes', 'lh.pial.travel_depth.vtk')
    >>> faces, lines, indices, points, npoints, depths, name, input_vtk = read_vtk(depth_file,
    >>>     return_first=True, return_array=True)
    >>> indices = np.where(depths > 0.01)[0]  # restrict to deeper vertices to speed up
    >>> neighbor_lists = find_neighbors(faces, npoints)
    >>> min_size = 50
    >>> depth_factor = 0.25
    >>> depth_ratio = 0.1
    >>> tolerance = 0.01
    >>> regrow = True
    >>> #
    >>> segments, seed_indices = watershed(depths, points,
    >>>     indices, neighbor_lists, min_size, depth_factor, depth_ratio,
    >>>     tolerance, regrow)
    >>> #
    >>> # Write results to vtk file and view:
    >>> rewrite_scalars(depth_file, 'watershed.vtk',
    >>>                 segments, 'segments', segments)
    >>> plot_vtk('watershed.vtk')
    >>> # View watershed seeds:
    >>> seeds = -1 * np.ones(len(depths))
    >>> for i, s in enumerate(seed_indices):
    >>>     seeds[s] = i
    >>> rewrite_scalars(depth_file, 'watershed_seeds.vtk',
    >>>                 seeds, 'seeds', seeds)
    >>> plot_vtk('watershed_seeds.vtk')

    """
    import numpy as np
    from time import time
    from mindboggle.labels.labels import extract_borders
    from mindboggle.utils.segment import segment
    from mindboggle.utils.compute import point_distance

    # Make sure the argument is a list:
    if isinstance(indices, np.ndarray):
        indices = indices.tolist()

    print('Segment {0} vertices by a surface watershed algorithm'.
          format(len(indices)))
    verbose = False
    merge = True
    t0 = time()
    tiny = 0.000001

    use_depth_ratio = True

    #-------------------------------------------------------------------------
    # Find the borders of the given mesh vertices (indices):
    #-------------------------------------------------------------------------
    D = np.ones(len(depths))
    D[indices] = 2
    borders, foo1, foo2 = extract_borders(range(len(depths)), D,
        neighbor_lists, ignore_values=[], return_label_pairs=False)

    #-------------------------------------------------------------------------
    # Select deepest vertex as initial seed:
    #-------------------------------------------------------------------------
    index_deepest = indices[np.argmax(depths[indices])]
    seed_list = [index_deepest]
    basin_depths = []
    original_indices = indices[:]

    #-------------------------------------------------------------------------
    # Loop until all vertices have been segmented.
    # This limits the number of possible seeds:
    #-------------------------------------------------------------------------
    segments = -1 * np.ones(len(depths))
    seed_indices = []
    seed_points = []
    all_regions = []
    region = []
    counter = 0
    terminate = False
    while not terminate:

        # Add seeds to region:
        region.extend(seed_list)
        all_regions.extend(seed_list)

        # Remove seeds from vertices to segment:
        indices = list(frozenset(indices).difference(seed_list))
        if indices:

            # Identify neighbors of seeds:
            neighbors = []
            for x in seed_list:
                neighbors.extend(neighbor_lists[x])

            # Select neighbors that have not been previously selected
            # and are among the vertices to segment:
            old_seed_list = seed_list[:]
            seed_list = list(frozenset(neighbors).intersection(indices))
            seed_list = list(frozenset(seed_list).difference(all_regions))

            # For each vertex, select neighbors that are shallower:
            seed_neighbors = []
            for seed in old_seed_list:
                seed_neighbors.extend([x for x in neighbor_lists[seed]
                                       if depths[x] - tolerance <= depths[seed]])
            seed_list = list(frozenset(seed_list).intersection(seed_neighbors))

        else:
            seed_list = []

        # If there are no seeds remaining:
        if not len(seed_list):

            # If there are at least min_size points, assign the counter to the
            # segmented region, store the index, and increment the counter:
            if len(region) >= min_size:
                segments[region] = counter
                seed_indices.append(index_deepest)
                seed_points.append(points[index_deepest])
                counter += 1

                # Compute basin depth (distance between its deepest
                # and shallowest points):
                Imax = region[np.argmax(depths[region])]
                Imin = region[np.argmin(depths[region])]
                max_depth = point_distance(points[Imax], [points[Imin]])[0]
                basin_depths.append(max_depth)

            # If vertices left to segment, re-initialize parameters:
            if indices:

                # Initialize new region/basin:
                region = []

                # Select the deepest unsegmented vertex as the new seed:
                index_deepest = indices[np.argmax(depths[indices])]
                seed_list = [index_deepest]

            # Termination criteria:
            if not len(indices):
                terminate = True

            # Display current number and size of region:
            if verbose:
                print("    {0} vertices remain".format(len(indices)))

    print('  ...Segmented {0} initial watershed regions ({1:.2f} seconds)'.
          format(counter, time() - t0))

    #-------------------------------------------------------------------------
    # Regrow from (deep) watershed seeds, stopping at borders:
    #-------------------------------------------------------------------------
    if regrow:

        print('  Regrow segments from watershed seeds, stopping at borders')
        indices = original_indices[:]
        segments = -1 * np.ones(len(depths))
        all_regions = []
        for iseed, seed_index in enumerate(seed_indices):
            seed_list = [seed_index]
            region = []
            terminate = False
            while not terminate:

                # Add seeds to region:
                region.extend(seed_list)
                all_regions.extend(seed_list)

                # Remove seeds from vertices to segment:
                indices = list(frozenset(indices).difference(seed_list))
                if indices:

                    # Identify neighbors of seeds:
                    neighbors = []
                    for x in seed_list:
                        neighbors.extend(neighbor_lists[x])

                    # Select neighbors that have not been previously selected
                    # and are among the vertices to segment:
                    old_seed_list = seed_list[:]
                    seed_list = list(frozenset(neighbors).intersection(indices))
                    seed_list = list(frozenset(seed_list).difference(all_regions))

                    # For each vertex, select neighbors that are shallower:
                    seed_neighbors = []
                    for seed in old_seed_list:
                        seed_neighbors.extend([x for x in neighbor_lists[seed]
                            if depths[x] - tolerance <= depths[seed]])
                    seed_list = list(frozenset(seed_list).intersection(seed_neighbors))

                    # Remove seed list if it contains a border vertex:
                    if seed_list:
                        if list(frozenset(seed_list).intersection(borders)):
                            seed_list = []
                else:
                    seed_list = []

                # Terminate growth for this seed if the seed_list is empty:
                if not len(seed_list):
                    terminate = True

                    # If there are at least min_size points, label the region:
                    if len(region) >= min_size:
                        segments[region] = iseed

                    # Display current number and size of region:
                    if verbose:
                        print("    {0} vertices remain".format(len(indices)))

        #---------------------------------------------------------------------
        # Continue growth until there are no more vertices to segment:
        #---------------------------------------------------------------------
        # Note: As long as keep_seeding=False, the segment values in `segments`
        # correspond in order to `basin_depths` and `seed_points` below.
        seed_lists = [[i for i,x in enumerate(segments) if x==s]
                      for s in np.unique(segments) if s!=-1]
        segments = segment(indices, neighbor_lists, min_region_size=1,
            seed_lists=seed_lists, keep_seeding=False, spread_within_labels=False,
            labels=[], label_lists=[], values=[], max_steps='', verbose=False)

        print('  ...Regrew {0} watershed regions from seeds ({1:.2f} seconds)'.
              format(iseed+1, time() - t0))

    #-------------------------------------------------------------------------
    # Merge watershed catchment basins:
    #-------------------------------------------------------------------------
    if merge:

        # Extract segment pairs at borders between watershed basins:
        print('  Merge watershed catchment basins with deeper neighboring basins')
        if verbose:
            print('    Extract basin borders')
        foo1, foo2, pairs = extract_borders(original_indices, segments,
                                            neighbor_lists, ignore_values=[-1],
                                            return_label_pairs=True)
        # Sort basin depths (descending order) -- return segment indices:
        Isort = np.argsort(basin_depths).tolist()
        Isort.reverse()

        # Find neighboring basins to each of the sorted basins:
        if verbose:
            print("    Find neighboring basins")
        basin_pairs = []
        for index in Isort:
            index_neighbors = [int(list(frozenset(x).difference([index]))[0])
                               for x in pairs if index in x]
            if index_neighbors:

                # Store neighbors whose depth is less than a fraction
                # (depth_ratio) of the basin's depth and whose seed is farther
                # away than depth_factor times the deeper basin's depth:
                if use_depth_ratio:
                    index_neighbors = [[x, index] for x in index_neighbors
                        if basin_depths[x] / (basin_depths[index]+tiny) < depth_ratio
                        if point_distance(seed_points[x], [seed_points[index]])[0] >
                          depth_factor * max([basin_depths[x], basin_depths[index]])]
                # Store neighbors whose seed is farther away than
                # depth_factor times the deeper basin's depth:
                else:
                    index_neighbors = [[x, index] for x in index_neighbors
                        if point_distance(seed_points[x], [seed_points[index]])[0] >
                        depth_factor * max([basin_depths[x], basin_depths[index]])]
                if index_neighbors:
                    basin_pairs.extend(index_neighbors)

        # Merge shallow watershed catchment basins:
        if basin_pairs:
            if verbose:
                print('    Merge basins with deeper neighboring basins')
            for basin_pair in basin_pairs:
                segments[np.where(segments == basin_pair[0])] = basin_pair[1]

        # Renumber segments so they are sequential:
        renumber_segments = segments.copy()
        segment_numbers = [int(x) for x in np.unique(segments) if x != -1]
        for i_segment, n_segment in enumerate(segment_numbers):
            isegment = [i for i, x in enumerate(segments) if x == n_segment]
            renumber_segments[isegment] = i_segment
        segments = renumber_segments

        # Report the final number of watershed regions:
        print('  ...Merged segments to form {0} watershed regions ({1:.2f} seconds)'.
              format(i_segment + 1, time() - t0))

    return segments.tolist(), seed_indices
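
A self-contained toy sketch of calling watershed, as an alternative to the MINDBOGGLE_DATA example in the docstring. The strip mesh and depth values are invented for illustration, and mindboggle must be importable (watershed uses extract_borders, segment, and point_distance internally):

import numpy as np
from mindboggle.utils.mesh import find_neighbors

# Toy mesh: a strip of 6 triangles over 8 vertices, with two locally
# deepest vertices (1 and 5) that should seed two basins:
points = [[float(i), 0.0, 0.0] for i in range(8)]
faces = [[i, i + 1, i + 2] for i in range(6)]
depths = np.array([0.1, 2.0, 0.5, 0.1, 0.4, 3.0, 0.2, 0.1])
neighbor_lists = find_neighbors(faces, len(points))
indices = list(range(len(points)))  # segment every vertex

segments, seed_indices = watershed(depths, points, indices,
                                   neighbor_lists, min_size=1)
print(seed_indices)  # deepest seeds first, e.g., [5, 1]
print(segments)      # e.g., [1, 1, 1, 0, 0, 0, 0, 0] (two basins)
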
Code Example #3
File: compute.py  Project: jsalva/mindboggle
def source_to_target_distances(sourceIDs,
                               targetIDs,
                               points,
                               segmentIDs=[],
                               excludeIDs=[-1]):
    """
    Create a Euclidean distance matrix between source and target points.

    Compute the Euclidean distance from each source point to
    its nearest target point, optionally within each segment.

    Example::

        Compute fundus-to-feature distances, the minimum distance
        from each label boundary vertex (corresponding to a fundus
        in the DKT cortical labeling protocol) to all of the
        feature vertices in the same fold.

    Parameters
    ----------
    sourceIDs : list of N integers (N is the number of vertices)
        source IDs, where any ID not in excludeIDs is a source point
    targetIDs : list of N integers (N is the number of vertices)
        target IDs, where any ID not in excludeIDs is a target point
    points : list of N lists of three floats (N is the number of vertices)
        coordinates of all vertices
    segmentIDs : list of N integers (N is the number of vertices)
        segment IDs, where each ID not in excludeIDs is considered a
        different segment (unlike above, where the value in sourceIDs or
        targetIDs doesn't matter, so long as it's not in excludeIDs);
        source/target distances are computed within each segment
    excludeIDs : list of integers
        IDs to exclude

    Returns
    -------
    distances : numpy array
        distance value for each vertex (default -1)
    distance_matrix : numpy array [#points by maximum segment ID + 1]
        distances organized by segments (columns)

    """
    import numpy as np
    from mindboggle.utils.compute import point_distance

    if isinstance(points, list):
        points = np.asarray(points)
    npoints = len(points)

    # Extract unique segment IDs (or use all points as a single segment):
    if np.size(segmentIDs):
        segments = [x for x in np.unique(segmentIDs) if x not in excludeIDs]
    else:
        segmentIDs = np.zeros(npoints)
        segments = [0]
    nsegments = max(segments) + 1

    # Initialize outputs:
    distances = -1 * np.ones(npoints)
    distance_matrix = -1 * np.ones((npoints, nsegments))

    # For each segment:
    for segment in segments:
        segment_indices = [i for i, x in enumerate(segmentIDs) if x == segment]

        # Find all source points in the segment:
        source_indices = [
            i for i, x in enumerate(sourceIDs) if x not in excludeIDs
            if i in segment_indices
        ]
        # Find all target points in the segment:
        target_indices = [
            i for i, x in enumerate(targetIDs) if x not in excludeIDs
            if i in segment_indices
        ]

        if source_indices and target_indices:

            # For each source point in the segment:
            for isource in source_indices:

                # Find the closest target point:
                d, i = point_distance(points[isource], points[target_indices])
                distances[isource] = d
                distance_matrix[isource, segment] = d

    return distances, distance_matrix
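
A minimal sketch of calling source_to_target_distances on hypothetical data: five collinear vertices in a single segment, with one source and two targets (mindboggle must be importable for the internal point_distance call):

# Toy data: 5 vertices; -1 marks excluded IDs (the default excludeIDs):
points = [[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [2.0, 0.0, 0.0],
          [3.0, 0.0, 0.0], [4.0, 0.0, 0.0]]
sourceIDs = [1, -1, -1, -1, -1]   # vertex 0 is a source point
targetIDs = [-1, -1, 2, -1, 2]    # vertices 2 and 4 are target points
segmentIDs = [0, 0, 0, 0, 0]      # every vertex is in segment 0

distances, distance_matrix = source_to_target_distances(
    sourceIDs, targetIDs, points, segmentIDs)
print(distances)        # [2., -1., -1., -1., -1.]: vertex 0 -> vertex 2
print(distance_matrix)  # column 0 holds the same distance
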
Code Example #4
File: compute.py  Project: ccraddock/mindboggle
def source_to_target_distances(sourceIDs, targetIDs, points,
                               segmentIDs=[], excludeIDs=[-1]):
    """
    Create a Euclidean distance matrix between source and target points.

    Compute the Euclidean distance from each source point to
    its nearest target point, optionally within each segment.

    Example::

        Compute fundus-to-feature distances, the minimum distance
        from each label boundary vertex (corresponding to a fundus
        in the DKT cortical labeling protocol) to all of the
        feature vertices in the same fold.

    Parameters
    ----------
    sourceIDs : list of N integers (N is the number of vertices)
        source IDs, where any ID not in excludeIDs is a source point
    targetIDs : list of N integers (N is the number of vertices)
        target IDs, where any ID not in excludeIDs is a target point
    points : list of N lists of three floats (N is the number of vertices)
        coordinates of all vertices
    segmentIDs : list of N integers (N is the number of vertices)
        segment IDs, where each ID not in excludeIDs is considered a
        different segment (unlike above, where the value in sourceIDs or
        targetIDs doesn't matter, so long as it's not in excludeIDs);
        source/target distances are computed within each segment
    excludeIDs : list of integers
        IDs to exclude

    Returns
    -------
    distances : numpy array
        distance value for each vertex (default -1)
    distance_matrix : numpy array [#points by maximum segment ID + 1]
        distances organized by segments (columns)

    """
    import numpy as np
    from mindboggle.utils.compute import point_distance

    if isinstance(points, list):
        points = np.asarray(points)
    npoints = len(points)

    # Extract unique segment IDs (or use all points as a single segment):
    if np.size(segmentIDs):
        segments = [x for x in np.unique(segmentIDs) if x not in excludeIDs]
    else:
        segmentIDs = np.zeros(npoints)
        segments = [0]
    nsegments = max(segments) + 1

    # Initialize outputs:
    distances = -1 * np.ones(npoints)
    distance_matrix = -1 * np.ones((npoints, nsegments))

    # For each segment:
    for segment in segments:
        segment_indices = [i for i,x in enumerate(segmentIDs)
                           if x == segment]

        # Find all source points in the segment:
        source_indices = [i for i,x in enumerate(sourceIDs)
                          if x not in excludeIDs
                          if i in segment_indices]
        # Find all target points in the segment:
        target_indices = [i for i,x in enumerate(targetIDs)
                          if x not in excludeIDs
                          if i in segment_indices]
        if source_indices and target_indices:

            # For each source point in the segment:
            for isource in source_indices:

                # Find the closest target point:
                d, i = point_distance(points[isource],
                                      points[target_indices])
                distances[isource] = d
                distance_matrix[isource, segment] = d

    return distances, distance_matrix
Code Example #5
# Note: G (a dict containing gene sample MNI coordinates) and `values`
# (per-sample gene expression values) are assumed defined earlier in the script.
import os
import numpy as np
from mindboggle.utils.io_vtk import read_vtk
from mindboggle.utils.compute import point_distance

gene_mni = G['mni']
path = os.environ['MINDBOGGLE_DATA']
genes = []
gene_values = []
for i in range(25):
    print(i)
    input_vtk = os.path.join(path, 'allen', 'labels_traveldepth' + str(i) + '.vtk')
    if os.path.exists(input_vtk):
        faces, lines, indices, points, npoints, depths, name, input_vtk = read_vtk(input_vtk)

        I = [j for j, x in enumerate(depths) if x > -1]  # vertices with valid depths
        gene = 0
        value = 0
        print(len(I))
        points2 = np.array(points)
        points2 = points2[I]
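        # For each surface vertex, find the nearest gene sample coordinate and
        # keep the sample with the highest value (gene data assumed above):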
        for point in points2:
            mind, minI = point_distance(point, gene_mni)
            if np.max(values[minI]) > value:
                gene = minI
                value = np.max(values[minI])

        genes.append(gene)
        gene_values.append(value)

    else:
        genes.append(0)
        gene_values.append(0)
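
The nearest-point lookup inside the loop above can be sketched in plain numpy. nearest_point below is a hypothetical stand-in for mindboggle's point_distance, which (as used throughout these examples) is assumed to return the minimum Euclidean distance and the index of the closest candidate:

import numpy as np

def nearest_point(point, candidates):
    # Return (minimum Euclidean distance, index of the closest candidate):
    diffs = np.asarray(candidates) - np.asarray(point)
    dists = np.sqrt((diffs ** 2).sum(axis=1))
    i_min = int(np.argmin(dists))
    return dists[i_min], i_min

# Toy check: the second candidate is closer to the origin:
d, i = nearest_point([0.0, 0.0, 0.0], [[1.0, 0.0, 0.0], [0.5, 0.0, 0.0]])
print(d, i)  # 0.5 1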