Example #1
# Imports assumed for this snippet (CuPy / cuCIM context).
from itertools import combinations_with_replacement

import cupy as cp
from cucim.skimage import feature


def _texture_filter(gaussian_filtered):
    # Unique second-derivative (Hessian) elements of the pre-smoothed image.
    combos = combinations_with_replacement
    H_elems = [
        cp.gradient(cp.gradient(gaussian_filtered)[ax0], axis=ax1)
        for ax0, ax1 in combos(range(gaussian_filtered.ndim), 2)
    ]
    eigvals = feature.hessian_matrix_eigvals(H_elems)
    return eigvals
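To see what the helper returns, here is a minimal, hypothetical driver. The Gaussian pre-smoothing, image size, and sigma value are assumptions for illustration; only `_texture_filter` itself comes from the example above.

import cupy as cp
from cupyx.scipy import ndimage as ndi

# Hypothetical driver: smooth a random image at one scale, then compute the
# per-pixel Hessian eigenvalues with the helper defined above.
image = cp.random.random((64, 64)).astype(cp.float32)
smoothed = ndi.gaussian_filter(image, sigma=2.0)
eigvals = _texture_filter(smoothed)
print(len(eigvals), eigvals[0].shape)  # ndim eigenvalue maps, each (64, 64)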
Example #2
    def cost_func(self, I1, I2, Tx):
        # Local derivative of the displacement field along axis 0.
        gradTx = cp.gradient(Tx, axis=0)

        # Warp I1 backward and I2 forward by the displacement field, weighting
        # each by its (1 -/+ gradTx) Jacobian factor so the comparison is
        # symmetric in the two images.
        I1mod = (1 - gradTx) * self.movepixels_3d(I1, -Tx)
        I2mod = (1 + gradTx) * self.movepixels_3d(I2, Tx)

        # Sum of squared differences between the two warped images.
        return cp.sum(cp.abs(I1mod - I2mod)**2)
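The `(1 ± gradTx)` terms act as a 1-D Jacobian weight so the two warped images are compared symmetrically. A standalone toy sketch of that factor (the displacement field and array sizes here are made up; `movepixels_3d` from the class is not reproduced):

import cupy as cp

# Toy displacement field along axis 0: a smooth ramp, so cp.gradient gives a
# nearly constant local stretch/compression rate.
Tx = cp.linspace(-1.0, 1.0, 32, dtype=cp.float32)
Tx = Tx[:, None, None] * cp.ones((32, 8, 8), dtype=cp.float32)
gradTx = cp.gradient(Tx, axis=0)
jac_fwd = 1 - gradTx  # weight for the image warped by -Tx
jac_bwd = 1 + gradTx  # weight for the image warped by +Tx
print(float(jac_fwd.mean()), float(jac_bwd.mean()))  # ~0.94 and ~1.06 for this ramp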
Example #3
    def gradstep(self, I1, I2, Tx, Btotal, Bgrad):
        gradTx = cp.gradient(Tx, axis=0)

        I1mod = self.movepixels_3d(I1, -Tx)
        I2mod = self.movepixels_3d(I2, Tx)

        I1grad = self.imagegrad(I1, -Tx)
        I2grad = self.imagegrad(I2, Tx)

        imagediff = (1 - gradTx) * I1mod - (1 + gradTx) * I2mod

        im1 = imagediff * (I1grad * (1 - gradTx))
        im2 = imagediff * I1mod
        im3 = imagediff * (I2grad * (1 + gradTx))
        im4 = imagediff * I2mod

        # Btotal and Bgrad are passed in as sparse arrays
        df1 = Btotal.dot(cp.reshape(im1 + im3, (im1.size, 1), order='F'))
        df2 = Bgrad.dot(cp.reshape(im2 + im4, (im2.size, 1), order='F'))

        return -cp.reshape((df1 + df2), (df1.size, 1), order='F')
Example #4
def _ilk(reference_image, moving_image, flow0, radius, num_warp, gaussian,
         prefilter):
    """Iterative Lucas-Kanade (iLK) solver for optical flow estimation.

    Parameters
    ----------
    reference_image : ndarray, shape (M, N[, P[, ...]])
        The first gray scale image of the sequence.
    moving_image : ndarray, shape (M, N[, P[, ...]])
        The second gray scale image of the sequence.
    flow0 : ndarray, shape (reference_image.ndim, M, N[, P[, ...]])
        Initialization for the vector field.
    radius : int
        Radius of the window considered around each pixel.
    num_warp : int
        Number of times moving_image is warped.
    gaussian : bool
        If True, a Gaussian kernel is used for the local
        integration. Otherwise, a uniform kernel is used.
    prefilter : bool
        Whether to prefilter the estimated optical flow before each
        image warp. This helps to remove potential outliers.

    Returns
    -------
    flow : ndarray, shape (reference_image.ndim, M, N[, P[, ...]])
        The estimated optical flow components for each axis.

    """
    dtype = reference_image.dtype
    ndim = reference_image.ndim
    size = 2 * radius + 1

    if gaussian:
        sigma = ndim * (size / 4, )
        filter_func = partial(ndi.gaussian_filter, sigma=sigma, mode="mirror")
    else:
        filter_func = partial(ndi.uniform_filter,
                              size=ndim * (size, ),
                              mode="mirror")

    flow = flow0
    # For each pixel location (i, j), the optical flow X = flow[:, i, j]
    # is the solution of the ndim x ndim linear system
    # A[i, j] * X = b[i, j]
    A = cp.zeros(reference_image.shape + (ndim, ndim), dtype=dtype)
    b = cp.zeros(reference_image.shape + (ndim, ), dtype=dtype)

    grid = cp.meshgrid(
        *[cp.arange(n, dtype=dtype) for n in reference_image.shape],
        indexing="ij",
        sparse=True,
    )

    for _ in range(num_warp):
        if prefilter:
            flow = ndi.median_filter(flow, (1, ) + ndim * (3, ))

        moving_image_warp = warp(moving_image,
                                 get_warp_points(grid, flow),
                                 mode="nearest")
        grad = cp.stack(cp.gradient(moving_image_warp), axis=0)
        error_image = ((grad * flow).sum(axis=0) + reference_image -
                       moving_image_warp)

        # Local linear systems creation
        for i, j in combinations_with_replacement(range(ndim), 2):
            A[..., i, j] = A[..., j, i] = filter_func(grad[i] * grad[j])

        for i in range(ndim):
            b[..., i] = filter_func(grad[i] * error_image)

        # Don't consider badly conditioned linear systems
        idx = abs(cp.linalg.det(A)) < 1e-14
        A[idx] = cp.eye(ndim, dtype=dtype)
        b[idx] = 0

        # Solve the local linear systems
        flow = cp.moveaxis(cp.linalg.solve(A, b), ndim, 0)

    return flow
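`_ilk` is an internal solver; in practice it is reached through a public registration function. A sketch under the assumption that `cucim.skimage.registration.optical_flow_ilk` mirrors the scikit-image entry point of the same name:

import cupy as cp
from cupyx.scipy import ndimage as ndi
from cucim.skimage.registration import optical_flow_ilk

# Synthetic pair: a smoothed random reference and a copy shifted by one pixel
# along each axis, so the recovered flow magnitude should be close to 1.
reference = ndi.gaussian_filter(cp.random.random((128, 128)), sigma=3).astype(cp.float32)
moving = cp.roll(reference, shift=(1, 1), axis=(0, 1))
flow = optical_flow_ilk(reference, moving, radius=7)
print(flow.shape)  # (2, 128, 128): one flow component per image axis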
Example #5
import numpy as np

def pressure_grad(pressure):
    return np.stack(np.gradient(pressure, edge_order=2), -1)
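For instance, on a 2-D scalar field the result carries one gradient component per axis on the trailing dimension (the field below is just an arbitrary example):

import numpy as np

p = np.fromfunction(lambda i, j: i ** 2 + 0.5 * j, (32, 32), dtype=float)
g = pressure_grad(p)
print(g.shape)  # (32, 32, 2): d/di in g[..., 0], d/dj in g[..., 1]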
Example #6
    def imagegrad(self, Iin, Tx):
        gradX = cp.gradient(Iin, axis=0)
        return self.movepixels_3d(gradX, Tx)
Example #7
def morphological_geodesic_active_contour(gimage,
                                          iterations,
                                          init_level_set='circle',
                                          smoothing=1,
                                          threshold='auto',
                                          balloon=0,
                                          iter_callback=lambda x: None):
    """Morphological Geodesic Active Contours (MorphGAC).

    Geodesic active contours implemented with morphological operators. It can
    be used to segment objects with visible but noisy, cluttered, broken
    borders.

    Parameters
    ----------
    gimage : (M, N) or (L, M, N) array
        Preprocessed image or volume to be segmented. This is very rarely the
        original image. Instead, this is usually a preprocessed version of the
        original image that enhances and highlights the borders (or other
        structures) of the object to segment.
        `morphological_geodesic_active_contour` will try to stop the contour
        evolution in areas where `gimage` is small. See
        `morphsnakes.inverse_gaussian_gradient` as an example function to
        perform this preprocessing. Note that the quality of
        `morphological_geodesic_active_contour` might greatly depend on this
        preprocessing.
    iterations : uint
        Number of iterations to run.
    init_level_set : str, (M, N) array, or (L, M, N) array
        Initial level set. If an array is given, it will be binarized and used
        as the initial level set. If a string is given, it defines the method
        to generate a reasonable initial level set with the shape of the
        `image`. Accepted values are 'checkerboard' and 'circle'. See the
        documentation of `checkerboard_level_set` and `circle_level_set`
        respectively for details about how these level sets are created.
    smoothing : uint, optional
        Number of times the smoothing operator is applied per iteration.
        Reasonable values are around 1-4. Larger values lead to smoother
        segmentations.
    threshold : float, optional
        Areas of the image with a value smaller than this threshold will be
        considered borders. The evolution of the contour will stop in these
        areas.
    balloon : float, optional
        Balloon force to guide the contour in non-informative areas of the
        image, i.e., areas where the gradient of the image is too small to push
        the contour towards a border. A negative value will shrink the contour,
        while a positive value will expand the contour in these areas. Setting
        this to zero will disable the balloon force.
    iter_callback : function, optional
        If given, this function is called once per iteration with the current
        level set as the only argument. This is useful for debugging or for
        plotting intermediate results during the evolution.

    Returns
    -------
    out : (M, N) or (L, M, N) array
        Final segmentation (i.e., the final level set)

    See Also
    --------
    inverse_gaussian_gradient, circle_level_set, checkerboard_level_set

    Notes
    -----

    This is a version of the Geodesic Active Contours (GAC) algorithm that uses
    morphological operators instead of solving partial differential equations
    (PDEs) for the evolution of the contour. The set of morphological operators
    used in this algorithm are proved to be infinitesimally equivalent to the
    GAC PDEs (see [1]_). However, morphological operators do not suffer
    from the numerical stability issues typically found in PDEs (e.g., it is
    not necessary to find the right time step for the evolution), and are
    computationally faster.

    The algorithm and its theoretical derivation are described in [1]_.

    References
    ----------
    .. [1] A Morphological Approach to Curvature-based Evolution of Curves and
           Surfaces, Pablo Márquez-Neila, Luis Baumela, Luis Álvarez. In IEEE
           Transactions on Pattern Analysis and Machine Intelligence (PAMI),
           2014, :DOI:`10.1109/TPAMI.2013.106`
    """

    image = gimage
    init_level_set = _init_level_set(init_level_set, image.shape)

    _check_input(image, init_level_set)

    if threshold == 'auto':
        threshold = cp.percentile(image, 40)

    structure = cp.ones((3, ) * len(image.shape), dtype=cp.int8)
    dimage = cp.gradient(image)
    # threshold_mask = image > threshold
    if balloon != 0:
        threshold_mask_balloon = image > threshold / cp.abs(balloon)

    u = (init_level_set > 0).astype(cp.int8)

    iter_callback(u)

    for _ in range(iterations):

        # Balloon
        if balloon > 0:
            aux = ndi.binary_dilation(u, structure)
        elif balloon < 0:
            aux = ndi.binary_erosion(u, structure)
        if balloon != 0:
            u[threshold_mask_balloon] = aux[threshold_mask_balloon]

        # Image attachment
        aux = cp.zeros_like(image)
        du = cp.gradient(u)
        for el1, el2 in zip(dimage, du):
            aux += el1 * el2
        u[aux > 0] = 1
        u[aux < 0] = 0

        # Smoothing
        for _ in range(smoothing):
            u = _curvop(u)

        iter_callback(u)

    return u
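A usage sketch in the spirit of the scikit-image gallery. The import path and the `inverse_gaussian_gradient` preprocessing are assumed to live alongside this function in `cucim.skimage.segmentation`; the geometry and parameter values are illustrative only.

import cupy as cp
from cucim.skimage.segmentation import (inverse_gaussian_gradient,
                                        morphological_geodesic_active_contour)

# Bright disk on a dark background; inverse_gaussian_gradient is small near
# the disk border, which is where the contour evolution should stop.
r, c = cp.mgrid[:128, :128]
image = ((r - 64) ** 2 + (c - 64) ** 2 < 30 ** 2).astype(cp.float32)
gimage = inverse_gaussian_gradient(image)
ls = morphological_geodesic_active_contour(gimage, iterations=120,
                                           init_level_set='circle',
                                           smoothing=1, balloon=-1,
                                           threshold=0.69)
print(ls.dtype, int(ls.sum()))  # int8 level set; foreground should hug the disk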
Example #8
def morphological_chan_vese(image,
                            iterations,
                            init_level_set='checkerboard',
                            smoothing=1,
                            lambda1=1,
                            lambda2=1,
                            iter_callback=lambda x: None):
    """Morphological Active Contours without Edges (MorphACWE)

    Active contours without edges implemented with morphological operators. It
    can be used to segment objects in images and volumes without well defined
    borders. It is required that the inside of the object looks different on
    average than the outside (i.e., the inner area of the object should be
    darker or lighter than the outer area on average).

    Parameters
    ----------
    image : (M, N) or (L, M, N) array
        Grayscale image or volume to be segmented.
    iterations : uint
        Number of iterations to run
    init_level_set : str, (M, N) array, or (L, M, N) array
        Initial level set. If an array is given, it will be binarized and used
        as the initial level set. If a string is given, it defines the method
        to generate a reasonable initial level set with the shape of the
        `image`. Accepted values are 'checkerboard' and 'circle'. See the
        documentation of `checkerboard_level_set` and `circle_level_set`
        respectively for details about how these level sets are created.
    smoothing : uint, optional
        Number of times the smoothing operator is applied per iteration.
        Reasonable values are around 1-4. Larger values lead to smoother
        segmentations.
    lambda1 : float, optional
        Weight parameter for the outer region. If `lambda1` is larger than
        `lambda2`, the outer region will contain a larger range of values than
        the inner region.
    lambda2 : float, optional
        Weight parameter for the inner region. If `lambda2` is larger than
        `lambda1`, the inner region will contain a larger range of values than
        the outer region.
    iter_callback : function, optional
        If given, this function is called once per iteration with the current
        level set as the only argument. This is useful for debugging or for
        plotting intermediate results during the evolution.

    Returns
    -------
    out : (M, N) or (L, M, N) array
        Final segmentation (i.e., the final level set)

    See Also
    --------
    circle_level_set, checkerboard_level_set

    Notes
    -----

    This is a version of the Chan-Vese algorithm that uses morphological
    operators instead of solving a partial differential equation (PDE) for the
    evolution of the contour. The set of morphological operators used in this
    algorithm are proved to be infinitesimally equivalent to the Chan-Vese PDE
    (see [1]_). However, morphological operators do not suffer from the
    numerical stability issues typically found in PDEs (it is not necessary to
    find the right time step for the evolution), and are computationally
    faster.

    The algorithm and its theoretical derivation are described in [1]_.

    References
    ----------
    .. [1] A Morphological Approach to Curvature-based Evolution of Curves and
           Surfaces, Pablo Márquez-Neila, Luis Baumela, Luis Álvarez. In IEEE
           Transactions on Pattern Analysis and Machine Intelligence (PAMI),
           2014, :DOI:`10.1109/TPAMI.2013.106`
    """

    init_level_set = _init_level_set(init_level_set, image.shape)

    _check_input(image, init_level_set)

    u = (init_level_set > 0).astype(cp.int8)

    iter_callback(u)

    for _ in range(iterations):

        # inside = u > 0
        # outside = u <= 0
        c0 = (image * (1 - u)).sum() / float((1 - u).sum() + 1e-8)
        c1 = (image * u).sum() / float(u.sum() + 1e-8)

        # Image attachment
        du = cp.gradient(u)
        abs_du = cp.abs(cp.stack(du, axis=0)).sum(0)
        aux = abs_du * (lambda1 * (image - c1)**2 - lambda2 * (image - c0)**2)

        u[aux < 0] = 1
        u[aux > 0] = 0

        # Smoothing
        for _ in range(smoothing):
            u = _curvop(u)

        iter_callback(u)

    return u
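And a corresponding sketch for MorphACWE, again assuming the `cucim.skimage.segmentation` import path; no edge map is needed here because the method only compares inside/outside mean intensities.

import cupy as cp
from cucim.skimage.segmentation import morphological_chan_vese

# Noisy bright square on a dark background.
image = cp.zeros((96, 96), dtype=cp.float32)
image[24:72, 24:72] = 1.0
image += 0.3 * cp.random.standard_normal(image.shape).astype(cp.float32)
ls = morphological_chan_vese(image, iterations=35,
                             init_level_set='checkerboard', smoothing=3)
print(ls.shape, int(ls.sum()))  # int8 mask, roughly the 48 x 48 square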
Example #9
def _tvl1(reference_image, moving_image, flow0, attachment, tightness,
          num_warp, num_iter, tol, prefilter):
    """TV-L1 solver for optical flow estimation.

    Parameters
    ----------
    reference_image : ndarray, shape (M, N[, P[, ...]])
        The first gray scale image of the sequence.
    moving_image : ndarray, shape (M, N[, P[, ...]])
        The second gray scale image of the sequence.
    flow0 : ndarray, shape (reference_image.ndim, M, N[, P[, ...]])
        Initialization for the vector field.
    attachment : float
        Attachment parameter. The smaller this parameter is,
        the smoother the solution is.
    tightness : float
        Tightness parameter. It should have a small value in order to
        maintain attachment and regularization parts in
        correspondence.
    num_warp : int
        Number of times moving_image is warped.
    num_iter : int
        Number of fixed-point iterations.
    tol : float
        Tolerance used as stopping criterion based on the L² distance
        between two consecutive values of (u, v).
    prefilter : bool
        Whether to prefilter the estimated optical flow before each
        image warp.

    Returns
    -------
    flow : ndarray, shape (reference_image.ndim, M, N[, P[, ...]])
        The estimated optical flow components for each axis.

    """

    dtype = reference_image.dtype
    grid = cp.meshgrid(
        *[cp.arange(n, dtype=dtype) for n in reference_image.shape],
        indexing='ij',
        sparse=True)

    dt = 0.5 / reference_image.ndim
    reg_num_iter = 2
    f0 = attachment * tightness
    f1 = dt / tightness
    tol *= reference_image.size

    flow_current = flow_previous = flow0

    g = cp.zeros((reference_image.ndim, ) + reference_image.shape, dtype=dtype)
    proj = cp.zeros((
        reference_image.ndim,
        reference_image.ndim,
    ) + reference_image.shape,
                    dtype=dtype)

    s_g = [slice(None)] * g.ndim
    s_p = [slice(None)] * proj.ndim
    s_d = [slice(None)] * (proj.ndim - 2)

    for _ in range(num_warp):
        if prefilter:
            flow_current = ndi.median_filter(flow_current,
                                             [1] + reference_image.ndim * [3])

        image1_warp = warp(moving_image,
                           get_warp_points(grid, flow_current),
                           mode='nearest')
        grad = cp.stack(cp.gradient(image1_warp))
        NI = (grad * grad).sum(0)
        NI[NI == 0] = 1

        rho_0 = image1_warp - reference_image - (grad * flow_current).sum(0)

        for _ in range(num_iter):

            # Data term

            rho = rho_0 + (grad * flow_current).sum(0)

            idx = abs(rho) <= f0 * NI

            flow_auxiliary = flow_current

            flow_auxiliary[:, idx] -= rho[idx] * grad[:, idx] / NI[idx]

            idx = ~idx
            srho = f0 * cp.sign(rho[idx])
            flow_auxiliary[:, idx] -= srho * grad[:, idx]

            # Regularization term
            flow_current = flow_auxiliary.copy()

            for idx in range(reference_image.ndim):
                s_p[0] = idx
                for _ in range(reg_num_iter):
                    for ax in range(reference_image.ndim):
                        s_g[0] = ax
                        s_g[ax + 1] = slice(0, -1)
                        g[tuple(s_g)] = cp.diff(flow_current[idx], axis=ax)
                        s_g[ax + 1] = slice(None)

                    norm = cp.sqrt((g * g).sum(0, keepdims=True))
                    norm *= f1
                    norm += 1.0
                    proj[idx] -= dt * g
                    proj[idx] /= norm

                    # d will be the (negative) divergence of proj[idx]
                    d = -proj[idx].sum(0)
                    for ax in range(reference_image.ndim):
                        s_p[1] = ax
                        s_p[ax + 2] = slice(0, -1)
                        s_d[ax] = slice(1, None)
                        d[tuple(s_d)] += proj[tuple(s_p)]
                        s_p[ax + 2] = slice(None)
                        s_d[ax] = slice(None)

                    flow_current[idx] = flow_auxiliary[idx] + d

        flow_previous -= flow_current  # The difference as stopping criterion
        if (flow_previous * flow_previous).sum() < tol:
            break

        flow_previous = flow_current

    return flow_current
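As with `_ilk`, this solver is normally reached through a public wrapper. The sketch below assumes `cucim.skimage.registration.optical_flow_tvl1` mirrors the scikit-image function and reuses the synthetic one-pixel-shift setup from the iLK example.

import cupy as cp
from cupyx.scipy import ndimage as ndi
from cucim.skimage.registration import optical_flow_tvl1

reference = ndi.gaussian_filter(cp.random.random((128, 128)), sigma=3).astype(cp.float32)
moving = cp.roll(reference, shift=(1, 1), axis=(0, 1))
flow = optical_flow_tvl1(reference, moving)
print(flow.shape)  # (2, 128, 128)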
Example #10
def hessian_matrix(image, sigma=1, mode="constant", cval=0, order="rc"):
    """Compute Hessian matrix.

    The Hessian matrix is defined as::

        H = [Hrr Hrc]
            [Hrc Hcc]

    which is computed by convolving the image with the second derivatives
    of the Gaussian kernel in the respective r- and c-directions.

    Parameters
    ----------
    image : ndarray
        Input image.
    sigma : float
        Standard deviation used for the Gaussian kernel, which is used as
        weighting function for the auto-correlation matrix.
    mode : {'constant', 'reflect', 'wrap', 'nearest', 'mirror'}, optional
        How to handle values outside the image borders.
    cval : float, optional
        Used in conjunction with mode 'constant', the value outside
        the image boundaries.
    order : {'rc', 'xy'}, optional
        This parameter allows for the use of reverse or forward order of
        the image axes in gradient computation. 'rc' indicates the use of
        the first axis initially (Hrr, Hrc, Hcc), whilst 'xy' indicates the
        usage of the last axis initially (Hxx, Hxy, Hyy).

    Returns
    -------
    Hrr : ndarray
        Element of the Hessian matrix for each pixel in the input image.
    Hrc : ndarray
        Element of the Hessian matrix for each pixel in the input image.
    Hcc : ndarray
        Element of the Hessian matrix for each pixel in the input image.

    Examples
    --------
    >>> import cupy as cp
    >>> from cucim.skimage.feature import hessian_matrix
    >>> square = cp.zeros((5, 5))
    >>> square[2, 2] = 4
    >>> Hrr, Hrc, Hcc = hessian_matrix(square, sigma=0.1, order='rc')
    >>> Hrc
    array([[ 0.,  0.,  0.,  0.,  0.],
           [ 0.,  1.,  0., -1.,  0.],
           [ 0.,  0.,  0.,  0.,  0.],
           [ 0., -1.,  0.,  1.,  0.],
           [ 0.,  0.,  0.,  0.,  0.]])
    """

    image = img_as_float(image)

    gaussian_filtered = ndi.gaussian_filter(image,
                                            sigma=sigma,
                                            mode=mode,
                                            cval=cval)

    gradients = cp.gradient(gaussian_filtered)
    axes = range(image.ndim)

    if order == "rc":
        axes = reversed(axes)

    H_elems = [
        cp.gradient(gradients[ax0], axis=ax1)
        for ax0, ax1 in combinations_with_replacement(axes, 2)
    ]

    return H_elems
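The returned `H_elems` list is exactly what `hessian_matrix_eigvals` consumes (as in Example #1). A short continuation of the docstring example; the eigenvalue array shape is the point here, not specific values:

import cupy as cp
from cucim.skimage.feature import hessian_matrix, hessian_matrix_eigvals

square = cp.zeros((5, 5))
square[2, 2] = 4
H_elems = hessian_matrix(square, sigma=0.1, order='rc')
eigvals = hessian_matrix_eigvals(H_elems)  # eigenvalues, largest first
print(eigvals.shape)  # (2, 5, 5): one eigenvalue map per dimension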
Example #11
    def initialize_iteration(self):
        r"""Prepares the metric to compute one displacement field iteration.

        Pre-computes the cross-correlation factors for efficient computation
        of the gradient of the Cross Correlation w.r.t. the displacement field.
        It also pre-computes the image gradients in the physical space by
        re-orienting the gradients in the voxel space using the corresponding
        affine transformations.
        """
        def invalid_image_size(image):
            min_size = self.radius * 2 + 1
            return any(size < min_size for size in image.shape)

        msg = ("Each image dimension should be greater than 2 * radius + 1. "
               "Decrease the CCMetric radius or increase the image size.")

        if invalid_image_size(self.static_image):
            raise ValueError("Static image size is too small. " + msg)
        if invalid_image_size(self.moving_image):
            raise ValueError("Moving image size is too small. " + msg)

        self.factors = self.precompute_factors(self.static_image,
                                               self.moving_image, self.radius)

        if self.coord_axis == -1:
            self.gradient_moving = cp.empty(shape=(self.moving_image.shape) +
                                            (self.dim, ),
                                            dtype=floating)

            for i, grad in enumerate(cp.gradient(self.moving_image)):
                self.gradient_moving[..., i] = grad
        else:
            self.gradient_moving = cp.empty(shape=(self.dim, ) +
                                            (self.moving_image.shape),
                                            dtype=floating)

            for i, grad in enumerate(cp.gradient(self.moving_image)):
                self.gradient_moving[i] = grad

        # Convert moving image's gradient field from voxel to physical space
        if self.moving_spacing is not None:
            if self.coord_axis == -1:
                self.gradient_moving /= self.moving_spacing
            else:
                temp = self.moving_spacing.reshape((-1, ) + (1, ) * self.dim)
                self.gradient_moving /= temp
        if self.moving_direction is not None:
            self.reorient_vector_field(self.gradient_moving,
                                       self.moving_direction,
                                       coord_axis=self.coord_axis)

        if self.coord_axis == -1:
            self.gradient_static = cp.empty(shape=(self.static_image.shape) +
                                            (self.dim, ),
                                            dtype=floating)
            for i, grad in enumerate(cp.gradient(self.static_image)):
                self.gradient_static[..., i] = grad
        else:
            self.gradient_static = cp.empty(shape=(self.dim, ) +
                                            (self.static_image.shape),
                                            dtype=floating)
            for i, grad in enumerate(cp.gradient(self.static_image)):
                self.gradient_static[i] = grad

        # Convert static image's gradient field from voxel to physical space
        if self.static_spacing is not None:
            if self.coord_axis == -1:
                self.gradient_static /= self.static_spacing
            else:
                temp = self.static_spacing.reshape((-1, ) + (1, ) * self.dim)
                self.gradient_static /= temp

        if self.static_direction is not None:
            self.reorient_vector_field(self.gradient_static,
                                       self.static_direction,
                                       coord_axis=self.coord_axis)
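The voxel-to-physical reorientation applied to the gradient fields above can be illustrated in isolation. This is a standalone sketch (channel-last layout, identity-like direction matrix), not the class's `reorient_vector_field`:

import cupy as cp

# Toy 3-D gradient field stored channel-last, as in the coord_axis == -1 branch.
grad = cp.random.random((16, 16, 16, 3)).astype(cp.float32)
spacing = cp.asarray([1.0, 1.0, 2.5], dtype=cp.float32)  # voxel size per axis
direction = cp.eye(3, dtype=cp.float32)                  # voxel-to-world rotation

grad_phys = grad / spacing  # undo anisotropic voxel spacing
# Apply the direction matrix to every voxel's gradient vector.
grad_phys = cp.einsum('ij,...j->...i', direction, grad_phys)
print(grad_phys.shape)  # (16, 16, 16, 3)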