Example #1
def farid_v(image, *, mask=None):
    """Find the vertical edges of an image using the Farid transform.

    Parameters
    ----------
    image : 2-D array
        Image to process.
    mask : 2-D array, optional
        An optional mask to limit the application to a certain area.
        Note that pixels surrounding masked regions are also masked to
        prevent masked regions from affecting the result.

    Returns
    -------
    output : 2-D array
        The Farid edge map.

    Notes
    -----
    The kernel was constructed using the 5-tap weights from [1].

    References
    ----------
    .. [1] Farid, H. and Simoncelli, E. P., "Differentiation of discrete
           multidimensional signals", IEEE Transactions on Image Processing
           13(4): 496-508, 2004. :DOI:`10.1109/TIP.2004.823819`
    """
    check_nD(image, 2)
    image = img_as_float(image)
    result = ndi.convolve(image, cp.asarray(VFARID_WEIGHTS))
    return _mask_filter_result(result, mask)
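
A brief usage sketch for context. The import path below is an assumption based on the `cp`/`ndi` aliases used throughout these examples (they suggest a cuCIM-style layout); adjust it to wherever `farid_v` lives in your tree.

# Usage sketch (import path assumed; requires a CUDA-capable GPU).
import cupy as cp
from skimage import data
from cucim.skimage.filters import farid_v  # assumed location of farid_v

image = cp.asarray(data.camera())   # move a sample image onto the GPU
v_edges = farid_v(image)            # strongest response at vertical edges
print(v_edges.shape, float(cp.abs(v_edges).max()))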
Example #2
def laplace(image, ksize=3, mask=None):
    """Find the edges of an image using the Laplace operator.

    Parameters
    ----------
    image : ndarray
        Image to process.
    ksize : int, optional
        Define the size of the discrete Laplacian operator such that it
        will have a size of (ksize,) * image.ndim.
    mask : ndarray, optional
        An optional mask to limit the application to a certain area.
        Note that pixels surrounding masked regions are also masked to
        prevent masked regions from affecting the result.

    Returns
    -------
    output : ndarray
        The Laplace edge map.

    Notes
    -----
    The Laplacian operator is generated using the function
    skimage.restoration.uft.laplacian().

    """
    image = img_as_float(image)
    # Create the discrete Laplacian operator - We keep only the real part of
    # the filter
    _, laplace_op = laplacian(image.ndim, (ksize,) * image.ndim)
    result = ndi.convolve(image, laplace_op)
    return _mask_filter_result(result, mask)
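
For reference, the ``ksize=3`` case in 2-D corresponds, up to sign convention, to the familiar 3x3 discrete Laplacian. A plain NumPy/SciPy sketch, independent of `skimage.restoration.uft.laplacian`:

# NumPy/SciPy sketch of the ksize=3, 2-D case (the sign convention may differ
# from the kernel returned by skimage.restoration.uft.laplacian).
import numpy as np
from scipy import ndimage as ndi

laplace_op = np.array([[0.,  1., 0.],
                       [1., -4., 1.],
                       [0.,  1., 0.]])   # standard 3x3 discrete Laplacian
image = np.random.default_rng(0).random((64, 64))
edges = ndi.convolve(image, laplace_op)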
Example #3
def farid_h(image, *, mask=None):
    """Find the horizontal edges of an image using the Farid transform.

    Parameters
    ----------
    image : 2-D array
        Image to process.
    mask : 2-D array, optional
        An optional mask to limit the application to a certain area.
        Note that pixels surrounding masked regions are also masked to
        prevent masked regions from affecting the result.

    Returns
    -------
    output : 2-D array
        The Farid edge map.

    Notes
    -----
    The kernel was constructed using the 5-tap weights from [1].

    References
    ----------
    .. [1] Farid, H. and Simoncelli, E. P., "Differentiation of discrete
           multidimensional signals", IEEE Transactions on Image Processing
           13(4): 496-508, 2004. :DOI:`10.1109/TIP.2004.823819`
    .. [2] Farid, H. and Simoncelli, E. P. "Optimally rotation-equivariant
           directional derivative kernels", In: 7th International Conference on
           Computer Analysis of Images and Patterns, Kiel, Germany. Sep, 1997.
    """
    check_nD(image, 2)
    image = img_as_float(image)
    result = ndi.convolve(image, cp.asarray(HFARID_WEIGHTS))
    return _mask_filter_result(result, mask)
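
The two directional responses are often combined into a gradient magnitude. A sketch under the same assumed import path as in Example #1 (the library's combined `farid` filter may normalize differently):

# Edge-magnitude sketch (import path assumed, as in the farid_v example).
import cupy as cp
from skimage import data
from cucim.skimage.filters import farid_h, farid_v  # assumed locations

image = cp.asarray(data.camera())
magnitude = cp.sqrt(farid_h(image) ** 2 + farid_v(image) ** 2)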
Example #4
def roberts_neg_diag(image, mask=None):
    """Find the cross edges of an image using the Roberts' Cross operator.

    The kernel is applied to the input image to produce separate measurements
    of the gradient component in one orientation.

    Parameters
    ----------
    image : 2-D array
        Image to process.
    mask : 2-D array, optional
        An optional mask to limit the application to a certain area.
        Note that pixels surrounding masked regions are also masked to
        prevent masked regions from affecting the result.

    Returns
    -------
    output : 2-D array
        The Roberts' Cross edge map.

    Notes
    -----
    We use the following kernel::

      0   1
     -1   0

    """
    check_nD(image, 2)
    image = img_as_float(image)
    result = ndi.convolve(image, cp.asarray(ROBERTS_ND_WEIGHTS))
    return _mask_filter_result(result, mask)
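
The kernel shown in the Notes can be applied directly with plain NumPy/SciPy, which is essentially what the convolution with `ROBERTS_ND_WEIGHTS` does; a small CPU-side sketch:

# CPU-side sketch using the anti-diagonal Roberts kernel from the Notes above.
import numpy as np
from scipy import ndimage as ndi

roberts_nd = np.array([[ 0., 1.],
                       [-1., 0.]])
image = np.random.default_rng(0).random((32, 32))
edges = ndi.convolve(image, roberts_nd)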
Example #5
def _interpolate_image(image, *, multichannel=False):
    """Replacing each pixel in ``image`` with the average of its neighbors.

    Parameters
    ----------
    image : ndarray
        Input data to be interpolated.
    multichannel : bool, optional
        Whether the last axis of the image is to be interpreted as multiple
        channels or another spatial dimension.

    Returns
    -------
    interp : ndarray
        Interpolated version of `image`.
    """
    spatialdims = image.ndim if not multichannel else image.ndim - 1
    conv_filter = ndi.generate_binary_structure(spatialdims,
                                                1).astype(image.dtype)
    conv_filter.ravel()[conv_filter.size // 2] = 0
    conv_filter /= conv_filter.sum()

    if multichannel:
        conv_filter = conv_filter[..., np.newaxis]
    interp = ndi.convolve(image, conv_filter, mode="mirror")
    return interp
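
For a 2-D single-channel image, the construction above yields a cross-shaped averaging kernel with a zeroed centre; a NumPy/SciPy sketch that prints it:

# NumPy/SciPy sketch of the averaging kernel for the 2-D, single-channel case.
import numpy as np
from scipy import ndimage as ndi

conv_filter = ndi.generate_binary_structure(2, 1).astype(float)
conv_filter.ravel()[conv_filter.size // 2] = 0   # exclude the centre pixel
conv_filter /= conv_filter.sum()
print(conv_filter)
# [[0.   0.25 0.  ]
#  [0.25 0.   0.25]
#  [0.   0.25 0.  ]]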
Example #6
def test_image_shape():
    """Test that shape of output image in deconvolution is same as input.

    This addresses issue #1172.
    """
    point = cp.zeros((5, 5), dtype=cp.float64)
    point[2, 2] = 1.0
    psf = ndi.gaussian_filter(point, sigma=1.0)
    # image shape: (45, 45), as reported in #1172
    image = cp.asarray(test_img[110:155, 225:270])  # just the face
    image_conv = ndi.convolve(image, psf)
    deconv_sup = restoration.wiener(image_conv, psf, 1)
    deconv_un = restoration.unsupervised_wiener(image_conv, psf)[0]
    # test the shape
    assert image.shape == deconv_sup.shape
    assert image.shape == deconv_un.shape
    # test the reconstruction error
    sup_relative_error = cp.abs(deconv_sup - image) / image
    un_relative_error = cp.abs(deconv_un - image) / image
    cp.testing.assert_array_less(np.median(sup_relative_error.get()), 0.1)
    cp.testing.assert_array_less(np.median(un_relative_error.get()), 0.1)
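
The same check can be run on the CPU with scikit-image, which may be handy when no GPU is available; a sketch using `data.camera()` as a stand-in for `test_img`:

# CPU-side sketch of the same shape check, using scikit-image directly.
import numpy as np
from scipy import ndimage as ndi
from skimage import data, img_as_float, restoration

psf = np.zeros((5, 5))
psf[2, 2] = 1.0
psf = ndi.gaussian_filter(psf, sigma=1.0)

image = img_as_float(data.camera()[110:155, 225:270])
blurred = ndi.convolve(image, psf)
deconv = restoration.wiener(blurred, psf, 1)  # balance=1, as in the test
assert deconv.shape == image.shape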
Example #7
def perimeter(image, neighbourhood=4):
    """Calculate total perimeter of all objects in binary image.

    Parameters
    ----------
    image : (N, M) ndarray
        2D binary image.
    neighbourhood : 4 or 8, optional
        Neighborhood connectivity for border pixel determination. It is used to
        compute the contour. A higher neighbourhood widens the border on which
        the perimeter is computed.

    Returns
    -------
    perimeter : float
        Total perimeter of all objects in binary image.

    References
    ----------
    .. [1] K. Benkrid, D. Crookes. Design and FPGA Implementation of
           a Perimeter Estimator. The Queen's University of Belfast.
           http://www.cs.qub.ac.uk/~d.crookes/webpubs/papers/perimeter.doc

    Examples
    --------
    >>> from skimage import data, util
    >>> from skimage.measure import label
    >>> # coins image (binary)
    >>> img_coins = data.coins() > 110
    >>> # total perimeter of all objects in the image
    >>> perimeter(img_coins, neighbourhood=4)  # doctest: +ELLIPSIS
    7796.867...
    >>> perimeter(img_coins, neighbourhood=8)  # doctest: +ELLIPSIS
    8806.268...

    """
    if image.ndim != 2:
        raise NotImplementedError("`perimeter` supports 2D images only")

    if neighbourhood == 4:
        strel = STREL_4
    else:
        strel = STREL_8
    strel = cp.asarray(strel)
    image = image.astype(cp.uint8)
    eroded_image = ndi.binary_erosion(image, strel, border_value=0)
    border_image = image - eroded_image

    perimeter_weights = cp.zeros(50, dtype=cp.double)
    perimeter_weights[[5, 7, 15, 17, 25, 27]] = 1
    perimeter_weights[[21, 33]] = sqrt(2)
    perimeter_weights[[13, 23]] = (1 + sqrt(2)) / 2

    perimeter_image = ndi.convolve(
        border_image,
        cp.asarray([[10, 2, 10], [2, 1, 2], [10, 2, 10]]),
        mode="constant",
        cval=0,
    )

    # You can also write
    # return perimeter_weights[perimeter_image].sum()
    # but that was measured as taking much longer than bincount + cp.dot (5x
    # as much time)
    perimeter_histogram = cp.bincount(perimeter_image.ravel(), minlength=50)
    total_perimeter = perimeter_histogram @ perimeter_weights
    return total_perimeter
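
To make the coding trick concrete: the convolution assigns each pixel a code of 1 (for itself, if it is a border pixel) plus 2 per 4-connected border neighbour and 10 per diagonal border neighbour, and `perimeter_weights` maps each code to its perimeter contribution, so `bincount` plus a dot product sums everything in one pass. A CPU-side sketch on a tiny binary square:

# NumPy/SciPy sketch of the neighbour-coding trick on a 4x4 binary square.
import numpy as np
from scipy import ndimage as ndi

image = np.zeros((6, 6), dtype=np.uint8)
image[1:5, 1:5] = 1                         # a 4x4 square of foreground

strel_4 = ndi.generate_binary_structure(2, 1)        # 4-connected cross
eroded = ndi.binary_erosion(image, strel_4, border_value=0).astype(np.uint8)
border = image - eroded

codes = ndi.convolve(border,
                     np.array([[10, 2, 10], [2, 1, 2], [10, 2, 10]]),
                     mode="constant", cval=0)

weights = np.zeros(50)
weights[[5, 7, 15, 17, 25, 27]] = 1
weights[[21, 33]] = np.sqrt(2)
weights[[13, 23]] = (1 + np.sqrt(2)) / 2
print(np.bincount(codes.ravel(), minlength=50) @ weights)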
Example #8
def _generic_edge_filter(
    image,
    *,
    smooth_weights,
    edge_weights=[1, 0, -1],
    axis=None,
    mode="reflect",
    cval=0.0,
    mask=None,
):
    """Apply a generic, n-dimensional edge filter.

    The filter is computed by applying the edge weights along one dimension
    and the smoothing weights along all other dimensions. If no axis is given,
    or a tuple of axes is given, the filter is computed along each of these
    axes in turn, and the magnitude is computed as the square root of the
    average squared magnitude over all axes.

    Parameters
    ----------
    image : array
        The input image.
    smooth_weights : array of float
        The smoothing weights for the filter. These are applied to dimensions
        orthogonal to the edge axis.
    edge_weights : 1D array of float, optional
        The weights to compute the edge along the chosen axes.
    axis : int or sequence of int, optional
        Compute the edge filter along this axis. If not provided, the edge
        magnitude is computed. This is defined as::

            edge_mag = np.sqrt(sum([_generic_edge_filter(image, ..., axis=i)**2
                                    for i in range(image.ndim)]) / image.ndim)

        The magnitude is also computed if axis is a sequence.
    mode : str or sequence of str, optional
        The boundary mode for the convolution. See `scipy.ndimage.convolve`
        for a description of the modes. This can be either a single boundary
        mode or one boundary mode per axis.
    cval : float, optional
        When `mode` is ``'constant'``, this is the constant used in values
        outside the boundary of the image data.
    """
    ndim = image.ndim
    if axis is None:
        axes = list(range(ndim))
    elif np.isscalar(axis):
        axes = [axis]
    else:
        axes = axis
    return_magnitude = len(axes) > 1

    output = cp.zeros(image.shape, dtype=float)

    # Note: these cp.asarray calls are not present in skimage. They may be
    #       removed later, but doing so will probably require updating the tests.
    edge_weights = cp.asarray(edge_weights)
    smooth_weights = cp.asarray(smooth_weights)

    for edge_dim in axes:
        kernel = _reshape_nd(edge_weights, ndim, edge_dim)
        smooth_axes = list(set(range(ndim)) - {edge_dim})
        for smooth_dim in smooth_axes:
            kernel = kernel * _reshape_nd(smooth_weights, ndim, smooth_dim)
        ax_output = ndi.convolve(image, kernel, mode=mode, cval=cval)
        if return_magnitude:
            ax_output *= ax_output
        output += ax_output

    if return_magnitude:
        output /= ndim
        output = cp.sqrt(output, out=output)
    return output
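
A CPU-side sketch of the separable construction and the magnitude formula from the docstring, using Sobel-style weights (the 1-2-1 smoothing values are an assumption here, chosen only for illustration):

# NumPy/SciPy sketch: edge weights along one axis, smoothing along the other,
# then the root-mean-square magnitude over both axes.
import numpy as np
from scipy import ndimage as ndi

edge = np.array([1.0, 0.0, -1.0])
smooth = np.array([1.0, 2.0, 1.0]) / 4.0      # assumed Sobel-style smoothing

image = np.random.default_rng(0).random((64, 64))
k_rows = edge[:, None] * smooth[None, :]      # edge along axis 0
k_cols = smooth[:, None] * edge[None, :]      # edge along axis 1

g0 = ndi.convolve(image, k_rows, mode="reflect")
g1 = ndi.convolve(image, k_cols, mode="reflect")
magnitude = np.sqrt((g0 ** 2 + g1 ** 2) / image.ndim)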
Example #9
def gabor(
    image,
    frequency,
    theta=0,
    bandwidth=1,
    sigma_x=None,
    sigma_y=None,
    n_stds=3,
    offset=0,
    mode="reflect",
    cval=0,
):
    """Return real and imaginary responses to Gabor filter.

    The real and imaginary parts of the Gabor filter kernel are applied to the
    image and the response is returned as a pair of arrays.

    Gabor filter is a linear filter with a Gaussian kernel which is modulated
    by a sinusoidal plane wave. Frequency and orientation representations of
    the Gabor filter are similar to those of the human visual system.
    Gabor filter banks are commonly used in computer vision and image
    processing. They are especially suitable for edge detection and texture
    classification.

    Parameters
    ----------
    image : 2-D array
        Input image.
    frequency : float
        Spatial frequency of the harmonic function. Specified in pixels.
    theta : float, optional
        Orientation in radians. If 0, the harmonic is in the x-direction.
    bandwidth : float, optional
        The bandwidth captured by the filter. For fixed bandwidth, ``sigma_x``
        and ``sigma_y`` will decrease with increasing frequency. This value is
        ignored if ``sigma_x`` and ``sigma_y`` are set by the user.
    sigma_x, sigma_y : float, optional
        Standard deviation in x- and y-directions. These directions apply to
        the kernel *before* rotation. If `theta = pi/2`, then the kernel is
        rotated 90 degrees so that ``sigma_x`` controls the *vertical*
        direction.
    n_stds : scalar, optional
        The linear size of the kernel is n_stds (3 by default) standard
        deviations.
    offset : float, optional
        Phase offset of harmonic function in radians.
    mode : {'constant', 'nearest', 'reflect', 'mirror', 'wrap'}, optional
        Mode used to convolve image with a kernel, passed to `ndi.convolve`
    cval : scalar, optional
        Value to fill past edges of input if ``mode`` of convolution is
        'constant'. The parameter is passed to `ndi.convolve`.

    Returns
    -------
    real, imag : arrays
        Filtered images using the real and imaginary parts of the Gabor filter
        kernel. Images are of the same dimensions as the input one.

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Gabor_filter
    .. [2] https://web.archive.org/web/20180127125930/http://mplab.ucsd.edu/tutorials/gabor.pdf

    Examples
    --------
    >>> from skimage.filters import gabor
    >>> from skimage import data, io
    >>> from matplotlib import pyplot as plt  # doctest: +SKIP

    >>> image = data.coins()
    >>> # detecting edges in a coin image
    >>> filt_real, filt_imag = gabor(image, frequency=0.6)
    >>> plt.figure()            # doctest: +SKIP
    >>> io.imshow(filt_real)    # doctest: +SKIP
    >>> io.show()               # doctest: +SKIP

    >>> # less sensitivity to finer details with the lower frequency kernel
    >>> filt_real, filt_imag = gabor(image, frequency=0.1)
    >>> plt.figure()            # doctest: +SKIP
    >>> io.imshow(filt_real)    # doctest: +SKIP
    >>> io.show()               # doctest: +SKIP
    """
    check_nD(image, 2)
    g = gabor_kernel(
        frequency, theta, bandwidth, sigma_x, sigma_y, n_stds, offset
    )

    filtered = ndi.convolve(image, g, mode=mode, cval=cval)

    return filtered.real, filtered.imag
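
The complex modulus of the two responses is a common texture feature. A brief CPU-side sketch using scikit-image's own `gabor` for illustration (the responses are cast to float before squaring to avoid integer overflow with a uint8 input):

# Magnitude-response sketch with scikit-image's CPU gabor filter.
import numpy as np
from skimage import data
from skimage.filters import gabor

filt_real, filt_imag = gabor(data.coins(), frequency=0.6)
magnitude = np.sqrt(filt_real.astype(float) ** 2 + filt_imag.astype(float) ** 2)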