Example #1
    def centroid_similarity(self, inputs, targets, shape):
        """
        Metric for similarity to centroid. Computes a similarity based on the
        inverse of the normalised euclidean distance for every centroid
        neighbor. Neighbors are determined by a structuring element
        (cell-ish dims).

        The output should be between 0-1 (therefore easily invertible)
        MAKE SURE THIS IS TRUE!!!

        Notes
        -----
        Currently uses a structuring element to find centroid neighbors.
        Hoping to add an option for using segmentation to get neighbors.

        """
        offsets = _offsets_to_raveled_neighbors(shape, self.selem, self.centre)

        # similarity weights: 1 - normalised distance, so closer neighbors
        # contribute more (assumes euclidian_distances() returns one distance
        # per offset, normalised to [0, 1])
        euclid_dists = self.euclidian_distances()
        weights = 1 - euclid_dists
        # work on raveled copies so that `centroid + offsets` indexing is valid
        raveled_inputs = np.asarray(inputs).ravel()
        raveled_targets = np.asarray(targets).ravel()
        centroids = np.flatnonzero(raveled_targets == 1.)
        max_ind = raveled_inputs.size
        score = 0
        for c in centroids:
            raveled_indices = c + offsets
            # mask offsets and weights together so they stay aligned
            in_bounds = (raveled_indices >= 0) & (raveled_indices < max_ind)
            neighbors = raveled_inputs[raveled_indices[in_bounds]]
            weighted = neighbors * weights[in_bounds]
            score += weighted.mean()
        # average over centroids so the output remains in [0, 1]
        return score / max(len(centroids), 1)
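The `euclidian_distances` method is not shown in this snippet; a minimal numpy sketch of how such normalised neighbor-distance weights could be built from the structuring element (an assumption about what that method returns, not the project's code):

import numpy as np

selem = np.ones((3, 3), dtype=bool)              # "cell-ish" structuring element
centre = (1, 1)
steps = np.argwhere(selem) - np.array(centre)    # N-d step to each element
steps = steps[np.any(steps != 0, axis=1)]        # drop the centre itself
dists = np.sqrt((steps ** 2).sum(axis=1))        # Euclidean distance per neighbor
normalised = dists / dists.max()                 # scaled into (0, 1]
weights = 1 - normalised                         # closer neighbors weigh more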
Example #2
def _indices_to_raveled_affinities(image_shape, selem, centre):
    # raveled offsets from the centre to every neighbor selected by `selem`
    im_offsets = _offsets_to_raveled_neighbors(image_shape, selem, centre)
    #im_offsets[-len(image_shape):] = 0
    # affinity channel label for each neighbor (mirrored arange over the dims)
    affs = np.concatenate(
        [np.arange(len(image_shape)),
         np.arange(len(image_shape))[::-1]])
    # each row pairs an affinity channel with the raveled offset to a neighbor,
    # giving an array of shape (ndim * 2, 2)
    indices = np.stack([affs, im_offsets], axis=1)
    return indices
Example #3
File: test_util.py  Project: fmg30/diss
def test_offsets_to_raveled_neighbors_explicit_0():
    """Check reviewed example."""
    image_shape = (100, 200, 3)
    selem = np.ones((3, 3, 3), dtype=bool)
    center = (1, 1, 1)
    offsets = _util._offsets_to_raveled_neighbors(image_shape, selem, center)

    desired = np.array([
        3, -600, 1, -1, 600, -3, 4, 2, 603, -2, -4, -597, 601, -599, -601,
        -603, 599, 597, 602, -604, 596, -596, -598, -602, 598, 604
    ])
    assert_array_equal(offsets, desired)
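The expected values follow from the C-order element strides of `image_shape`: for (100, 200, 3) a unit step along each axis moves 600, 3 or 1 positions in the raveled array, and any diagonal neighbor is just the sum of its per-axis steps. A quick check with plain numpy (not the tested helper):

import numpy as np

image_shape = (100, 200, 3)
# element strides in C order: (200 * 3, 3, 1) == (600, 3, 1)
strides = np.array([np.prod(image_shape[i + 1:], dtype=np.intp)
                    for i in range(len(image_shape))])
print(strides)                                  # [600   3   1]

# a few neighbors of the center (1, 1, 1) of the 3x3x3 selem
print(np.array([0, 1, 0]) @ strides)            # 3
print(np.array([-1, 0, 0]) @ strides)           # -600
print(np.array([1, 1, 0]) @ strides)            # 603
print(np.array([-1, 1, -1]) @ strides)          # -598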
Example #4
def test_offsets_to_raveled_neighbors_highest_connectivity(image_shape, order):
    """
    Check scenarios where footprint is always of the highest connectivity
    and all dimensions are > 2.
    """
    footprint = np.ones((3, ) * len(image_shape), dtype=bool)
    center = (1, ) * len(image_shape)
    offsets = _util._offsets_to_raveled_neighbors(image_shape, footprint,
                                                  center, order)

    # Assert only neighbors are present, center was removed
    assert len(offsets) == footprint.sum() - 1
    assert 0 not in offsets
    # Assert uniqueness
    assert len(set(offsets)) == offsets.size
    # offsets form pairs with the same value but different signs
    # if footprint is symmetric around center
    assert all(-x in offsets for x in offsets)

    # Construct image whose values are the Manhattan distance to its center
    image_center = tuple(s // 2 for s in image_shape)
    coords = [
        np.abs(np.arange(s, dtype=np.intp) - c)
        for s, c in zip(image_shape, image_center)
    ]
    grid = np.meshgrid(*coords, indexing="ij")
    image = np.sum(grid, axis=0)

    image_raveled = image.ravel(order)
    image_center_raveled = np.ravel_multi_index(image_center,
                                                image_shape,
                                                order=order)

    # Sample raveled image around its center
    samples = []
    for offset in offsets:
        index = image_center_raveled + offset
        samples.append(image_raveled[index])

    # Assert that center with value 0 wasn't selected
    assert np.min(samples) == 1
    # Assert that only neighbors were selected
    # (highest value == connectivity)
    assert np.max(samples) == len(image_shape)
    # Assert that nearest neighbors are selected first
    assert list(sorted(samples)) == samples
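The behaviour exercised by this test can be sketched with plain numpy (a simplified stand-in for `_offsets_to_raveled_neighbors`, not the project's implementation; the exact tie-breaking order between equally distant neighbors in the explicit tests is not reproduced):

import numpy as np

def offsets_to_raveled_neighbors_sketch(image_shape, footprint, center,
                                         order='C'):
    ndim = len(image_shape)
    # step in N-d from `center` to every True element of the footprint
    steps = np.argwhere(footprint) - np.array(center)
    # element strides of a raveled array of shape `image_shape`
    if order == 'C':
        strides = [int(np.prod(image_shape[i + 1:])) for i in range(ndim)]
    else:  # 'F'
        strides = [int(np.prod(image_shape[:i])) for i in range(ndim)]
    offsets = steps @ np.array(strides, dtype=np.intp)
    dists = np.sqrt((steps ** 2).sum(axis=1))
    # drop the center itself, then sort nearest neighbors first
    keep = offsets != 0
    offsets, dists = offsets[keep], dists[keep]
    offsets = offsets[np.argsort(dists, kind='stable')]
    # drop duplicates (possible when the image is smaller than the footprint),
    # keeping the first, i.e. nearest, occurrence
    _, first = np.unique(offsets, return_index=True)
    return offsets[np.sort(first)]

For a footprint symmetric around its center this should satisfy the properties asserted above: no zero offset, unique values, sign pairs, and nearest neighbors first.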
Example #5
File: test_util.py  Project: fmg30/diss
def test_offsets_to_raveled_neighbors_explicit_1():
    """Check reviewed example where selem is larger in last dimension."""
    image_shape = (10, 9, 8, 3)
    selem = np.ones((3, 3, 3, 4), dtype=bool)
    center = (1, 1, 1, 1)
    offsets = _util._offsets_to_raveled_neighbors(image_shape, selem, center)

    desired = np.array([
        24, 3, 1, -1, -3, -24, -216, 216, -192, 215, -2, -21, -23, 2, -25, -27,
        4, 217, 21, 219, -4, 23, 25, -240, 240, 192, 27, -213, -219, 213, -215,
        -217, -243, 191, -241, 195, 189, 212, 26, 5, 20, 28, 22, 214, 243,
        -237, -22, 241, -214, -212, 237, -218, -195, -20, 220, -193, -191, 218,
        -189, -28, -26, 193, -239, -220, 239, 196, 221, 242, 236, 238, 194,
        -244, -188, -238, -211, -196, -194, -190, -236, -19, 244, 29, 188,
        -242, 190, -187, 197, -235, 245
    ])
    assert_array_equal(offsets, desired)
Example #6
File: test_util.py  Project: fmg30/diss
def test_offsets_to_raveled_neighbors_selem_smaller_image(image_shape, order):
    """
    Test the case where a dimension of `image_shape` is smaller than the
    corresponding dimension in `selem`.
    """
    selem = np.ones((3, ) * len(image_shape), dtype=bool)
    center = (1, ) * len(image_shape)
    offsets = _util._offsets_to_raveled_neighbors(image_shape, selem, center,
                                                  order)

    # Assert only neighbors are present; center and duplicates (possible
    # for this scenario) were removed
    assert len(offsets) <= selem.sum() - 1
    assert 0 not in offsets
    # Assert uniqueness
    assert len(set(offsets)) == offsets.size
    # offsets form pairs with the same value but different signs
    # if selem is symmetric around center
    assert all(-x in offsets for x in offsets)
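A quick illustration of how duplicates arise when a dimension of `image_shape` is smaller than the footprint (plain numpy, toy shape, not the tested helper):

import numpy as np

image_shape = (2, 2)                     # C-order element strides: (2, 1)
steps = np.argwhere(np.ones((3, 3), dtype=bool)) - 1
offsets = steps @ np.array([2, 1])
# the steps (-1, +1) and (0, -1) both ravel to -1, so after de-duplication
# fewer than selem.sum() - 1 offsets remain, hence the `<=` assertion above
print(np.sort(offsets))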
Example #7
def _prep_data(image, marker_coords, mask=None, affinities=False, output=None):
    # INTENSITY VALUES
    if affinities:
        im_ndim = image.ndim - 1  # the first dim should represent affinities
        image_shape = image.shape[1:]
        image_strides = image[0].strides
        image_itemsize = image[0].itemsize
        raveled_image = np.zeros((image.shape[0], image[0].size),
                                 dtype=image.dtype)
        for i in range(image.shape[0]):
            raveled_image[i] = image[i].ravel()
    else:
        im_ndim = image.ndim
        image_shape = image.shape
        image_strides = image.strides
        image_itemsize = image.itemsize
        raveled_image = image.ravel()
    # NEIGHBORS
    selem, centre = _validate_connectivity(im_ndim, 1, None)
    if affinities:
        # array of shape (ndim * 2, 2) giving the indices of neighbor affinities
        offsets = _indices_to_raveled_affinities(image_shape, selem, centre)
    else:
        offsets = _offsets_to_raveled_neighbors(image_shape, selem, centre)
    raveled_markers = np.apply_along_axis(_raveled_coordinate, 1,
                                          marker_coords,
                                          **{'shape': image_shape})
    if mask is None:
        small_shape = [s - 2 for s in image_shape]
        mask = np.ones(small_shape, dtype=bool)
        mask = np.pad(mask, 1, constant_values=0)
        assert image_shape == mask.shape
    mask_raveled = mask.ravel()
    if output is None:
        output = np.zeros(mask_raveled.shape, dtype=raveled_image.dtype)
        labels = np.arange(len(raveled_markers)) + 1
        output[raveled_markers] = labels
    strides = np.array(image_strides, dtype=np.intp) // image_itemsize
    return raveled_image, raveled_markers, offsets, mask_raveled, output, strides
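Two of the steps above can be reproduced standalone (plain numpy; `np.ravel_multi_index` stands in for the `_raveled_coordinate` helper, which is an assumption about what that helper computes):

import numpy as np

image_shape = (4, 5)
marker_coords = np.array([[1, 1], [2, 3]])

# raveling marker coordinates: row (1, 1) -> 1 * 5 + 1 == 6, (2, 3) -> 13
raveled_markers = np.ravel_multi_index(marker_coords.T, image_shape)

# default mask: True inside, False on a one-pixel border, so that
# `marker + offset` indexing can never run off the raveled image
small_shape = [s - 2 for s in image_shape]
mask = np.pad(np.ones(small_shape, dtype=bool), 1, constant_values=0)
assert mask.shape == image_shape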
Example #8
def watershed(image,
              markers=None,
              connectivity=1,
              offset=None,
              mask=None,
              compactness=0,
              watershed_line=False,
              method=0):
    """Find watershed basins in `image` flooded from given `markers`.
    Parameters
    ----------
    image : ndarray (2-D, 3-D, ...) of integers
        Data array where the lowest value points are labeled first.
    markers : int, or ndarray of int, same shape as `image`, optional
        The desired number of markers, or an array marking the basins with the
        values to be assigned in the label matrix. Zero means not a marker. If
        ``None`` (no markers given), the local minima of the image are used as
        markers.
    connectivity : ndarray, optional
        An array with the same number of dimensions as `image` whose
        non-zero elements indicate neighbors for connection.
        Following the scipy convention, default is a one-connected array of
        the dimension of the image.
    offset : array_like of shape image.ndim, optional
        offset of the connectivity (one offset per dimension)
    mask : ndarray of bools or 0s and 1s, optional
        Array of same shape as `image`. Only points at which mask == True
        will be labeled.
    compactness : float, optional
        Use compact watershed [3]_ with given compactness parameter.
        Higher values result in more regularly-shaped watershed basins.
    watershed_line : bool, optional
        If watershed_line is True, a one-pixel wide line separates the regions
        obtained by the watershed algorithm. The line has the label 0.

    Returns
    -------
    out : ndarray
        A labeled matrix of the same type and shape as markers

    See also
    --------
    skimage.segmentation.random_walker: random walker segmentation
        A segmentation algorithm based on anisotropic diffusion, usually
        slower than the watershed but with good results on noisy data and
        boundaries with holes.

    Notes
    -----
    This function implements a watershed algorithm [1]_ [2]_ that apportions
    pixels into marked basins. The algorithm uses a priority queue to hold
    the pixels with the metric for the priority queue being pixel value, then
    the time of entry into the queue - this settles ties in favor of the
    closest marker.

    Some ideas taken from
    Soille, "Automated Basin Delineation from Digital Elevation Models Using
    Mathematical Morphology", Signal Processing 20 (1990) 171-182

    The most important insight in the paper is that entry time onto the queue
    solves two problems: a pixel should be assigned to the neighbor with the
    largest gradient or, if there is no gradient, pixels on a plateau should
    be split between markers on opposite sides.

    This implementation converts all arguments to specific, lowest common
    denominator types, then passes these to a C algorithm.

    Markers can be determined manually, or automatically using for example
    the local minima of the gradient of the image, or the local maxima of the
    distance function to the background for separating overlapping objects
    (see example).

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Watershed_%28image_processing%29
    .. [2] http://cmm.ensmp.fr/~beucher/wtshed.html
    .. [3] Peer Neubert & Peter Protzel (2014). Compact Watershed and
           Preemptive SLIC: On Improving Trade-offs of Superpixel Segmentation
           Algorithms. ICPR 2014, pp 996-1001. :DOI:`10.1109/ICPR.2014.181`
           https://www.tu-chemnitz.de/etit/proaut/forschung/rsrc/cws_pSLIC_ICPR.pdf

    Examples
    --------
    The watershed algorithm is useful to separate overlapping objects.

    We first generate an initial image with two overlapping circles:

    >>> x, y = np.indices((80, 80))
    >>> x1, y1, x2, y2 = 28, 28, 44, 52
    >>> r1, r2 = 16, 20
    >>> mask_circle1 = (x - x1)**2 + (y - y1)**2 < r1**2
    >>> mask_circle2 = (x - x2)**2 + (y - y2)**2 < r2**2
    >>> image = np.logical_or(mask_circle1, mask_circle2)

    Next, we want to separate the two circles. We generate markers at the
    maxima of the distance to the background:

    >>> from scipy import ndimage as ndi
    >>> distance = ndi.distance_transform_edt(image)
    >>> from skimage.feature import peak_local_max
    >>> local_maxi = peak_local_max(distance, labels=image,
    ...                             footprint=np.ones((3, 3)),
    ...                             indices=False)
    >>> markers = ndi.label(local_maxi)[0]

    Finally, we run the watershed on the image and markers:

    >>> labels = watershed(-distance, markers, mask=image)

    The algorithm works also for 3-D images, and can be used for example to
    separate overlapping spheres.
    """
    image, markers, mask = _validate_inputs(image, markers, mask, connectivity)
    connectivity, offset = _validate_connectivity(image.ndim - 1, connectivity,
                                                  offset)

    # pad the image, markers, and mask so that we can use the mask to
    # keep from running off the edges
    pad_width = [(p, p) for p in offset]
    image0 = np.pad(image[..., 0], pad_width, mode='constant')
    image1 = np.pad(image[..., 1], pad_width, mode='constant')
    image2 = np.pad(image[..., 2], pad_width, mode='constant')
    mask = np.pad(mask, pad_width, mode='constant').ravel()
    output = np.pad(markers, pad_width, mode='constant')
    distance = np.zeros_like(output, dtype=float)
    #distance = distance.astype(float)

    flat_neighborhood = _offsets_to_raveled_neighbors(image0.shape,
                                                      connectivity,
                                                      center=offset)
    marker_locations = np.flatnonzero(output)
    image_strides = np.array(image0.strides, dtype=np.intp) // image.itemsize

    _watershed_cy.watershed_raveled(image0.ravel(), image1.ravel(),
                                    image2.ravel(), marker_locations,
                                    flat_neighborhood, mask, image_strides,
                                    compactness, output.ravel(),
                                    distance.ravel(), watershed_line, method)

    output = crop(output, pad_width, copy=True)
    distance = crop(distance, pad_width, copy=True)
    #print('Using method {}'.format(method))
    return output, distance
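The priority-queue flooding described in the Notes can be illustrated with a short standalone sketch (2-D, 4-connected, no mask, compactness or watershed-line handling; a simplified stand-in for the Cython routine `_watershed_cy.watershed_raveled` called above):

import heapq
import numpy as np

def watershed_flood_sketch(image, markers):
    # pixels leave the heap in order of (value, entry time); the entry-time
    # tie-break splits plateaus between the markers that reached them first
    output = markers.copy()
    heap, counter = [], 0
    for idx in zip(*np.nonzero(markers)):
        heapq.heappush(heap, (image[idx], counter, idx))
        counter += 1
    while heap:
        _, _, (r, c) = heapq.heappop(heap)
        for dr, dc in ((-1, 0), (1, 0), (0, -1), (0, 1)):
            nr, nc = r + dr, c + dc
            if (0 <= nr < image.shape[0] and 0 <= nc < image.shape[1]
                    and output[nr, nc] == 0):
                output[nr, nc] = output[r, c]
                heapq.heappush(heap, (image[nr, nc], counter, (nr, nc)))
                counter += 1
    return output

The real routine above additionally tracks a distance image, supports compactness and watershed lines, and works on raveled arrays with the precomputed neighbor offsets from `_offsets_to_raveled_neighbors`.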