Example #1
def test_prepare_grayscale_input_2D():
    assert_raises(ValueError, _prepare_grayscale_input_2D, np.zeros((3, 3, 3)))
    assert_raises(ValueError, _prepare_grayscale_input_2D, np.zeros((3, 1)))
    assert_raises(ValueError, _prepare_grayscale_input_2D, np.zeros((3, 1, 1)))
    img = _prepare_grayscale_input_2D(np.zeros((3, 3)))
    img = _prepare_grayscale_input_2D(np.zeros((3, 3, 1)))
    img = _prepare_grayscale_input_2D(np.zeros((1, 3, 3)))
Example #2
def test_prepare_grayscale_input_2D():
    with testing.raises(ValueError):
        _prepare_grayscale_input_2D(np.zeros((3, 3, 3)))
    with testing.raises(ValueError):
        _prepare_grayscale_input_2D(np.zeros((3, 1)))
    with testing.raises(ValueError):
        _prepare_grayscale_input_2D(np.zeros((3, 1, 1)))
    img = _prepare_grayscale_input_2D(np.zeros((3, 3)))
    img = _prepare_grayscale_input_2D(np.zeros((3, 3, 1)))
    img = _prepare_grayscale_input_2D(np.zeros((1, 3, 3)))
Example #3
def test_prepare_grayscale_input_2D():
    with pytest.raises(ValueError):
        _prepare_grayscale_input_2D(np.zeros((3, 3, 3)))
    with pytest.raises(ValueError):
        _prepare_grayscale_input_2D(np.zeros((3, 1)))
    with pytest.raises(ValueError):
        _prepare_grayscale_input_2D(np.zeros((3, 1, 1)))
    img = _prepare_grayscale_input_2D(np.zeros((3, 3)))
    img = _prepare_grayscale_input_2D(np.zeros((3, 3, 1)))
    img = _prepare_grayscale_input_2D(np.zeros((1, 3, 3)))
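The tests above pin down the contract of `_prepare_grayscale_input_2D`: inputs that cannot be squeezed to exactly two dimensions raise ValueError, while (3, 3), (3, 3, 1) and (1, 3, 3) arrays are accepted. A plausible implementation consistent with these tests (a hypothetical sketch, not the library source) is:

import numpy as np
from skimage import img_as_float


def _prepare_grayscale_input_2D_sketch(image):
    # Drop singleton dimensions, then require an exactly 2-D result.
    image = np.squeeze(image)
    if image.ndim != 2:
        raise ValueError("Only 2-D single-channel images are supported.")
    # Convert to a floating-point image, as the feature code expects.
    return img_as_float(image)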
Example #4
def structure_tensor(image, sigma=1, mode="constant", cval=0):
    """Compute structure tensor using sum of squared differences.

    The structure tensor A is defined as::

        A = [Axx Axy]
            [Axy Ayy]

    which is approximated by the weighted sum of squared differences in a local
    window around each pixel in the image.

    Parameters
    ----------
    image : ndarray
        Input image.
    sigma : float
        Standard deviation used for the Gaussian kernel, which is used as a
        weighting function for the local summation of squared differences.
    mode : {'constant', 'reflect', 'wrap', 'nearest', 'mirror'}, optional
        How to handle values outside the image borders.
    cval : float, optional
        Used in conjunction with mode 'constant', the value outside
        the image boundaries.

    Returns
    -------
    Axx : ndarray
        Element of the structure tensor for each pixel in the input image.
    Axy : ndarray
        Element of the structure tensor for each pixel in the input image.
    Ayy : ndarray
        Element of the structure tensor for each pixel in the input image.

    Examples
    --------
    >>> from skimage.feature import structure_tensor
    >>> square = np.zeros((5, 5))
    >>> square[2, 2] = 1
    >>> Axx, Axy, Ayy = structure_tensor(square, sigma=0.1)
    >>> Axx
    array([[ 0.,  0.,  0.,  0.,  0.],
           [ 0.,  1.,  0.,  1.,  0.],
           [ 0.,  4.,  0.,  4.,  0.],
           [ 0.,  1.,  0.,  1.,  0.],
           [ 0.,  0.,  0.,  0.,  0.]])

    """

    image = _prepare_grayscale_input_2D(image)

    imx, imy = _compute_derivatives(image, mode=mode, cval=cval)

    # structure tensor
    Axx = ndimage.gaussian_filter(imx * imx, sigma, mode=mode, cval=cval)
    Axy = ndimage.gaussian_filter(imx * imy, sigma, mode=mode, cval=cval)
    Ayy = ndimage.gaussian_filter(imy * imy, sigma, mode=mode, cval=cval)

    return Axx, Axy, Ayy
Example #5
def structure_tensor(image, sigma=1, mode='constant', cval=0):
    """Compute structure tensor using sum of squared differences.

    The structure tensor A is defined as::

        A = [Axx Axy]
            [Axy Ayy]

    which is approximated by the weighted sum of squared differences in a local
    window around each pixel in the image.

    Parameters
    ----------
    image : ndarray
        Input image.
    sigma : float
        Standard deviation used for the Gaussian kernel, which is used as a
        weighting function for the local summation of squared differences.
    mode : {'constant', 'reflect', 'wrap', 'nearest', 'mirror'}, optional
        How to handle values outside the image borders.
    cval : float, optional
        Used in conjunction with mode 'constant', the value outside
        the image boundaries.

    Returns
    -------
    Axx : ndarray
        Element of the structure tensor for each pixel in the input image.
    Axy : ndarray
        Element of the structure tensor for each pixel in the input image.
    Ayy : ndarray
        Element of the structure tensor for each pixel in the input image.

    Examples
    --------
    >>> from skimage.feature import structure_tensor
    >>> square = np.zeros((5, 5))
    >>> square[2, 2] = 1
    >>> Axx, Axy, Ayy = structure_tensor(square, sigma=0.1)
    >>> Axx
    array([[ 0.,  0.,  0.,  0.,  0.],
           [ 0.,  1.,  0.,  1.,  0.],
           [ 0.,  4.,  0.,  4.,  0.],
           [ 0.,  1.,  0.,  1.,  0.],
           [ 0.,  0.,  0.,  0.,  0.]])

    """

    image = _prepare_grayscale_input_2D(image)

    imx, imy = _compute_derivatives(image, mode=mode, cval=cval)

    # structure tensor
    Axx = ndimage.gaussian_filter(imx * imx, sigma, mode=mode, cval=cval)
    Axy = ndimage.gaussian_filter(imx * imy, sigma, mode=mode, cval=cval)
    Ayy = ndimage.gaussian_filter(imy * imy, sigma, mode=mode, cval=cval)

    return Axx, Axy, Ayy
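The three returned arrays are the per-pixel entries of the 2 x 2 tensor, so corner measures can be derived from them directly. A minimal usage sketch (assuming the three-array return shown above) that computes the smaller eigenvalue, the quantity used by Shi-Tomasi style corner detectors:

import numpy as np
from skimage.feature import structure_tensor

square = np.zeros((5, 5))
square[2, 2] = 1
Axx, Axy, Ayy = structure_tensor(square, sigma=0.1)

# Per-pixel eigenvalues of [[Axx, Axy], [Axy, Ayy]] in closed form.
trace = Axx + Ayy
det = Axx * Ayy - Axy ** 2
lambda_min = trace / 2 - np.sqrt(np.maximum((trace / 2) ** 2 - det, 0))
# A large lambda_min means strong gradients in two directions, i.e. a corner.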
Example #6
def hessian_matrix(image, sigma=1, mode="constant", cval=0):
    """Compute Hessian matrix.

    The Hessian matrix is defined as::

        H = [Hxx Hxy]
            [Hxy Hyy]

    which is computed by convolving the image with the second derivatives
    of the Gaussian kernel in the respective x- and y-directions.

    Parameters
    ----------
    image : ndarray
        Input image.
    sigma : float
        Standard deviation used for the Gaussian kernel, which is used as
        weighting function for the auto-correlation matrix.
    mode : {'constant', 'reflect', 'wrap', 'nearest', 'mirror'}, optional
        How to handle values outside the image borders.
    cval : float, optional
        Used in conjunction with mode 'constant', the value outside
        the image boundaries.

    Returns
    -------
    Hxx : ndarray
        Element of the Hessian matrix for each pixel in the input image.
    Hxy : ndarray
        Element of the Hessian matrix for each pixel in the input image.
    Hyy : ndarray
        Element of the Hessian matrix for each pixel in the input image.

    Examples
    --------
    >>> from skimage.feature import hessian_matrix, hessian_matrix_eigvals
    >>> square = np.zeros((5, 5))
    >>> square[2, 2] = 1
    >>> Hxx, Hxy, Hyy = hessian_matrix(square, sigma=0.1)
    >>> Hxx
    array([[ 0.,  0.,  0.,  0.,  0.],
           [ 0.,  0.,  0.,  0.,  0.],
           [ 0.,  0.,  1.,  0.,  0.],
           [ 0.,  0.,  0.,  0.,  0.],
           [ 0.,  0.,  0.,  0.,  0.]])

    """

    image = _prepare_grayscale_input_2D(image)

    # window extent to the left and right, which covers > 99% of the normal
    # distribution
    window_ext = max(1, np.ceil(3 * sigma))

    ky, kx = np.mgrid[-window_ext : window_ext + 1, -window_ext : window_ext + 1]

    # second derivative Gaussian kernels
    gaussian_exp = np.exp(-(kx ** 2 + ky ** 2) / (2 * sigma ** 2))
    kernel_xx = 1 / (2 * np.pi * sigma ** 4) * (kx ** 2 / sigma ** 2 - 1)
    kernel_xx *= gaussian_exp
    kernel_xx /= kernel_xx.sum()
    kernel_xy = 1 / (2 * np.pi * sigma ** 6) * (kx * ky)
    kernel_xy *= gaussian_exp
    kernel_xy /= kernel_xx.sum()
    kernel_yy = kernel_xx.transpose()

    Hxx = ndimage.convolve(image, kernel_xx, mode=mode, cval=cval)
    Hxy = ndimage.convolve(image, kernel_xy, mode=mode, cval=cval)
    Hyy = ndimage.convolve(image, kernel_yy, mode=mode, cval=cval)

    return Hxx, Hxy, Hyy
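The docstring imports `hessian_matrix_eigvals` but does not call it; the eigenvalues can also be computed in closed form from the three returned arrays. A short sketch (assuming the (Hxx, Hxy, Hyy) return shown above):

import numpy as np
from skimage.feature import hessian_matrix

square = np.zeros((5, 5))
square[2, 2] = 1
Hxx, Hxy, Hyy = hessian_matrix(square, sigma=0.1)

# Closed-form eigenvalues of the symmetric 2 x 2 Hessian at every pixel.
half_trace = (Hxx + Hyy) / 2
root = np.sqrt(((Hxx - Hyy) / 2) ** 2 + Hxy ** 2)
eig1 = half_trace + root  # larger eigenvalue
eig2 = half_trace - root  # smaller eigenvalue
# Two large negative eigenvalues indicate a bright blob; one large and one
# small indicate a ridge-like structure.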
Example #7
def corner_fast(image, n=12, threshold=0.15):
    """Extract FAST corners for a given image.

    Parameters
    ----------
    image : 2D ndarray
        Input image.
    n : int
        Minimum number of consecutive pixels out of 16 pixels on the circle
        that should all be either brighter or darker w.r.t. the test pixel.
        A point c on the circle is darker w.r.t. the test pixel p if
        `Ic < Ip - threshold` and brighter if `Ic > Ip + threshold`. Also
        stands for the n in `FAST-n` corner detector.
    threshold : float
        Threshold used in deciding whether the pixels on the circle are
        brighter, darker or similar w.r.t. the test pixel. Decrease the
        threshold when more corners are desired and vice-versa.

    Returns
    -------
    response : ndarray
        FAST corner response image.

    References
    ----------
    .. [1] Edward Rosten and Tom Drummond
           "Machine Learning for high-speed corner detection",
           http://www.edwardrosten.com/work/rosten_2006_machine.pdf
    .. [2] Wikipedia, "Features from accelerated segment test",
           https://en.wikipedia.org/wiki/Features_from_accelerated_segment_test

    Examples
    --------
    >>> from skimage.feature import corner_fast, corner_peaks
    >>> square = np.zeros((12, 12))
    >>> square[3:9, 3:9] = 1
    >>> square.astype(int)
    array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0],
           [0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0],
           [0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0],
           [0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0],
           [0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0],
           [0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])
    >>> corner_peaks(corner_fast(square, 9), min_distance=1)
    array([[3, 3],
           [3, 8],
           [8, 3],
           [8, 8]])

    """
    image = _prepare_grayscale_input_2D(image)

    image = np.ascontiguousarray(image)
    response = _corner_fast(image, n, threshold)
    return response
Example #8
    def detect(self, image):
        """Detect CENSURE keypoints along with the corresponding scale.

        Parameters
        ----------
        image : 2D ndarray
            Input image.

        """

        # (1) First we generate the required scales on the input grayscale
        # image using a bi-level filter and stack them up in `filter_response`.

        # (2) We then perform non-maximum suppression in a 3 x 3 x 3 window on
        # `filter_response` to suppress points that are neither minima nor
        # maxima in their 3 x 3 x 3 neighbourhood. We obtain a boolean ndarray
        # `feature_mask` in which all the minima and maxima of
        # `filter_response` are True.
        # (3) Then we suppress all the points in `feature_mask` for which
        # the corresponding point in the image at a particular scale has a
        # ratio of principal curvatures greater than `line_threshold`.
        # (4) Finally, we remove the border keypoints and return the keypoints
        # along with their corresponding scales.

        num_scales = self.max_scale - self.min_scale

        image = np.ascontiguousarray(_prepare_grayscale_input_2D(image))

        # Generating all the scales
        filter_response = _filter_image(image, self.min_scale, self.max_scale,
                                        self.mode)

        # Suppressing points that are neither minima nor maxima in their
        # 3 x 3 x 3 neighborhood to zero
        minimas = minimum_filter(filter_response, (3, 3, 3)) == filter_response
        maximas = maximum_filter(filter_response, (3, 3, 3)) == filter_response

        feature_mask = minimas | maximas
        feature_mask[filter_response < self.non_max_threshold] = False

        for i in range(1, num_scales):
            # sigma = (window_size - 1) / 6.0, so the window covers > 99% of
            #                                  the kernel's distribution
            # window_size = 7 + 2 * (min_scale - 1 + i)
            # Hence sigma = 1 + (min_scale - 1 + i)/ 3.0
            _suppress_lines(feature_mask[:, :, i], image,
                            (1 + (self.min_scale + i - 1) / 3.0),
                            self.line_threshold)

        rows, cols, scales = np.nonzero(feature_mask[..., 1:num_scales])
        keypoints = np.column_stack([rows, cols])
        scales = scales + self.min_scale + 1

        if self.mode == 'dob':
            self.keypoints = keypoints
            self.scales = scales
            return

        cumulative_mask = np.zeros(keypoints.shape[0], dtype=bool)

        if self.mode == 'octagon':
            for i in range(self.min_scale + 1, self.max_scale):
                c = (OCTAGON_OUTER_SHAPE[i - 1][0] - 1) // 2 \
                    + OCTAGON_OUTER_SHAPE[i - 1][1]
                cumulative_mask |= (
                    _mask_border_keypoints(image.shape, keypoints, c)
                    & (scales == i))
        elif self.mode == 'star':
            for i in range(self.min_scale + 1, self.max_scale):
                c = STAR_SHAPE[STAR_FILTER_SHAPE[i - 1][0]] \
                    + STAR_SHAPE[STAR_FILTER_SHAPE[i - 1][0]] // 2
                cumulative_mask |= (
                    _mask_border_keypoints(image.shape, keypoints, c)
                    & (scales == i))

        self.keypoints = keypoints[cumulative_mask]
        self.scales = scales[cumulative_mask]
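Note that `detect()` stores its results on the instance rather than returning them. A minimal usage sketch of the public detector class (assuming the default constructor parameters of `skimage.feature.CENSURE`):

import numpy as np
from skimage.color import rgb2gray
from skimage.data import astronaut
from skimage.feature import CENSURE

image = rgb2gray(astronaut())    # detect() expects a 2-D grayscale image
detector = CENSURE()             # default min_scale, max_scale, mode, thresholds
detector.detect(image)

print(detector.keypoints.shape)  # (N, 2) array of (row, col) coordinates
print(detector.scales[:10])      # scale at which each keypoint was detected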
Example #9
    def _build_pyramid(self, image):
        image = _prepare_grayscale_input_2D(image)
        return list(pyramid_gaussian(image, self.n_scales - 1, self.downscale))
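`pyramid_gaussian` is a generator of successively smoothed and downscaled images, which is why the result is materialised with `list()`. A quick illustration of the shapes it yields (a sketch using only the documented `max_layer` and `downscale` arguments):

import numpy as np
from skimage.transform import pyramid_gaussian

image = np.random.rand(64, 64)
levels = list(pyramid_gaussian(image, max_layer=3, downscale=2))
print([level.shape for level in levels])
# [(64, 64), (32, 32), (16, 16), (8, 8)] -- the original image plus 3 layers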
Example #10
    def detect(self, image):
        """Detect CENSURE keypoints along with the corresponding scale.

        Parameters
        ----------
        image : 2D ndarray
            Input image.

        """

        # (1) First we generate the required scales on the input grayscale
        # image using a bi-level filter and stack them up in `filter_response`.

        # (2) We then perform non-maximum suppression in a 3 x 3 x 3 window on
        # `filter_response` to suppress points that are neither minima nor
        # maxima in their 3 x 3 x 3 neighbourhood. We obtain a boolean ndarray
        # `feature_mask` in which all the minima and maxima of
        # `filter_response` are True.
        # (3) Then we suppress all the points in `feature_mask` for which
        # the corresponding point in the image at a particular scale has a
        # ratio of principal curvatures greater than `line_threshold`.
        # (4) Finally, we remove the border keypoints and return the keypoints
        # along with their corresponding scales.

        assert_nD(image, 2)

        num_scales = self.max_scale - self.min_scale

        image = np.ascontiguousarray(_prepare_grayscale_input_2D(image))

        # Generating all the scales
        filter_response = _filter_image(image, self.min_scale, self.max_scale,
                                        self.mode)

        # Suppressing points that are neither minima nor maxima in their
        # 3 x 3 x 3 neighborhood to zero
        minimas = minimum_filter(filter_response, (3, 3, 3)) == filter_response
        maximas = maximum_filter(filter_response, (3, 3, 3)) == filter_response

        feature_mask = minimas | maximas
        feature_mask[filter_response < self.non_max_threshold] = False

        for i in range(1, num_scales):
            # sigma = (window_size - 1) / 6.0, so the window covers > 99% of
            #                                  the kernel's distribution
            # window_size = 7 + 2 * (min_scale - 1 + i)
            # Hence sigma = 1 + (min_scale - 1 + i)/ 3.0
            _suppress_lines(feature_mask[:, :, i], image,
                            (1 + (self.min_scale + i - 1) / 3.0),
                            self.line_threshold)

        rows, cols, scales = np.nonzero(feature_mask[..., 1:num_scales])
        keypoints = np.column_stack([rows, cols])
        scales = scales + self.min_scale + 1

        if self.mode == 'dob':
            self.keypoints = keypoints
            self.scales = scales
            return

        cumulative_mask = np.zeros(keypoints.shape[0], dtype=bool)

        if self.mode == 'octagon':
            for i in range(self.min_scale + 1, self.max_scale):
                c = (OCTAGON_OUTER_SHAPE[i - 1][0] - 1) // 2 \
                    + OCTAGON_OUTER_SHAPE[i - 1][1]
                cumulative_mask |= (
                    _mask_border_keypoints(image.shape, keypoints, c)
                    & (scales == i))
        elif self.mode == 'star':
            for i in range(self.min_scale + 1, self.max_scale):
                c = STAR_SHAPE[STAR_FILTER_SHAPE[i - 1][0]] \
                    + STAR_SHAPE[STAR_FILTER_SHAPE[i - 1][0]] // 2
                cumulative_mask |= (
                    _mask_border_keypoints(image.shape, keypoints, c)
                    & (scales == i))

        self.keypoints = keypoints[cumulative_mask]
        self.scales = scales[cumulative_mask]
Example #11
def hessian_matrix(image, sigma=1, mode='constant', cval=0):
    """Compute Hessian matrix.

    The Hessian matrix is defined as::

        H = [Hxx Hxy]
            [Hxy Hyy]

    which is computed by convolving the image with the second derivatives
    of the Gaussian kernel in the respective x- and y-directions.

    Parameters
    ----------
    image : ndarray
        Input image.
    sigma : float
        Standard deviation used for the Gaussian kernel, which is used as
        weighting function for the auto-correlation matrix.
    mode : {'constant', 'reflect', 'wrap', 'nearest', 'mirror'}, optional
        How to handle values outside the image borders.
    cval : float, optional
        Used in conjunction with mode 'constant', the value outside
        the image boundaries.

    Returns
    -------
    Hxx : ndarray
        Element of the Hessian matrix for each pixel in the input image.
    Hxy : ndarray
        Element of the Hessian matrix for each pixel in the input image.
    Hyy : ndarray
        Element of the Hessian matrix for each pixel in the input image.

    Examples
    --------
    >>> from skimage.feature import hessian_matrix
    >>> square = np.zeros((5, 5))
    >>> square[2, 2] = 1
    >>> Hxx, Hxy, Hyy = hessian_matrix(square, sigma=0.1)
    >>> Hxx
    array([[ 0.,  0.,  0.,  0.,  0.],
           [ 0.,  0.,  0.,  0.,  0.],
           [ 0.,  0.,  1.,  0.,  0.],
           [ 0.,  0.,  0.,  0.,  0.],
           [ 0.,  0.,  0.,  0.,  0.]])

    """

    image = _prepare_grayscale_input_2D(image)

    # window extent to the left and right, which covers > 99% of the normal
    # distribution
    window_ext = max(1, np.ceil(3 * sigma))

    ky, kx = np.mgrid[-window_ext:window_ext + 1, -window_ext:window_ext + 1]

    # second derivative Gaussian kernels
    gaussian_exp = np.exp(-(kx ** 2 + ky ** 2) / (2 * sigma ** 2))
    kernel_xx = 1 / (2 * np.pi * sigma ** 4) * (kx ** 2 / sigma ** 2 - 1)
    kernel_xx *= gaussian_exp
    kernel_xx /= kernel_xx.sum()
    kernel_xy = 1 / (2 * np.pi * sigma ** 6) * (kx * ky)
    kernel_xy *= gaussian_exp
    kernel_xy /= kernel_xx.sum()
    kernel_yy = kernel_xx.transpose()

    Hxx = ndimage.convolve(image, kernel_xx, mode=mode, cval=cval)
    Hxy = ndimage.convolve(image, kernel_xy, mode=mode, cval=cval)
    Hyy = ndimage.convolve(image, kernel_yy, mode=mode, cval=cval)

    return Hxx, Hxy, Hyy
Example #12
    def _build_pyramid(self, image):
        image = _prepare_grayscale_input_2D(image)
        return list(pyramid_gaussian(image, self.n_scales - 1, self.downscale))
def vap_ridgeness_detection(img,
                            img_path,
                            img_output_names,
                            img_shw_step=False):
    #==============================================
    # Pre-Process
    img = _prepare_grayscale_input_2D(img)
    [H, W] = img.shape
    if img_shw_step:
        vap_imshow(img, "Input image")
        vap_imsave(img_path, img_output_names + ['01Input_image'])

    #==============================================
    # Configuration setting
    SIGMA = 0.5
    MODE = 'constant'
    CVAL = 0

    #==============================================
    # Implement algorithm

    # Step 1: Gaussian smoothing
    img = img_gaussian_smooth(img, MODE, CVAL)
    if img_shw_step:
        vap_imshow(img, "Smooth_version_of_image")
        vap_imsave(img_path, img_output_names + ['02Smooth_version_of_image'])

    # Step 2: Compute derivatives
    [im_x, im_y] = compute_derivatives(img, mode=MODE, cval=CVAL)
    if img_shw_step:
        vap_imshow_set(1, 2, 1, im_x, "X derivative")
        vap_imshow_set(1, 2, 2, im_y, "Y derivative")
        vap_imsave(img_path, img_output_names + ['03X_Y_derivative'])

    # Step 3: Build structure tensor
    struct_tensor, grad_vector = compute_tensor_struct_1(
        img, im_x, im_y, img_path, img_output_names, img_shw_step)
    # struct_tensor, grad_vector = compute_tensor_struct_2(img, im_x, im_y, img_path, img_output_names, img_shw_step)

    # Step 4: Find dominant gradient vector
    ### Compute eigenvalue, eigenvector for each structure tensor
    [eig_val, eig_vector] = np.linalg.eig(struct_tensor)
    # eig_vector = eig_vector.transpose((0, 1, 3, 2))

    ### Find dominant gradient by finding the greatest eigen value
    dominant_idx = np.argmax(eig_val, axis=-1)
    [axV, axU] = np.indices([H, W])
    dominant_vector = np.reshape(
        eig_vector[axV.flat, axU.flat, dominant_idx.flat], [H, W, 2])
    dominant_vector_t = np.reshape(dominant_vector, (H, W, 1, 2))
    if img_shw_step:
        idx = slice(None, None, 10)
        a = np.linspace(0, W - 1, W)
        b = np.linspace(0, H - 1, H)

        vap_imnew()
        plt.quiver(a[idx],
                   b[idx],
                   dominant_vector[idx, idx, 0],
                   dominant_vector[idx, idx, 1],
                   pivot='mid')
        plt.title("Dominant vector", loc='center')
        vap_imsave(img_path, img_output_names + ['05dominant_vector'])

    ### Dominant vector with direction
    sign_mask = np.matmul(dominant_vector_t, grad_vector).reshape(H, W)
    dominant_vector[sign_mask < 0] *= -1
    dominant_vector[sign_mask == 0] *= 0

    if img_shw_step:
        vap_imnew()
        plt.quiver(np.arange(W),
                   np.arange(H),
                   dominant_vector[:, :, 0],
                   dominant_vector[:, :, 1],
                   pivot='mid')
        plt.title("Dominant vector with direction", loc='center')
        vap_imsave(img_path,
                   img_output_names + ['06dominant_vector_with_direction'])

    # Step 5: Compute divergence
    vector_u = dominant_vector[:, :, 0]
    vector_v = dominant_vector[:, :, 1]
    ridge_img = -divergence([vector_v, vector_u])
    ridge_img[ridge_img < 0.25] = 0

    if img_shw_step:
        vap_imshow(ridge_img, "Original Ridgeness Image")
        vap_imsave(img_path, img_output_names + ['07Original_Ridgeness_Image'])

    # Step 6: Discard ridge points with a large horizontal component
    theta = np.abs(np.arctan2(vector_v, vector_u))
    mask = np.logical_and(theta > np.pi * 0.4, theta < np.pi * 0.6)
    ridge_img[mask] = 0

    if img_shw_step:
        vap_imshow(ridge_img, "Theta Filter Ridgeness image")
        vap_imsave(img_path,
                   img_output_names + ['08Theta_Filter_Ridgeness_image'])

    # Step 7: Confidence-filter the image
    ridge_img *= (
        1 - np.exp(-np.power(eig_val[:, :, 0] - eig_val[:, :, 1], 2) / 0.001))
    ridge_img[ridge_img < 0.5] = 0

    if img_shw_step:
        vap_imshow(ridge_img, "Confident Filter Image")
        vap_imsave(img_path, img_output_names + ['09Confident_Filter_Image'])

    return np.uint8(ridge_img * 128)
def vap_ridgeness_detection(image, path, output_names, show_step_result=True):

    image = _prepare_grayscale_input_2D(image)
    [rowImg, colImg] = image.shape
    if show_step_result:
        vap_imshow(image, "Input image")
        vap_imsave(path, output_names + ['01Input_image'])

    # Configuration setting
    SIGMA = 0.5
    MODE = 'constant'
    CVAL = 0

    # Gaussian smoothing
    image = img_gaussian_smooth(image, MODE, CVAL)
    if show_step_result:
        vap_imshow(image, "Smooth_version_of_image")
        vap_imsave(path, output_names + ['02Smooth_version_of_image'])

    # Step 1: Compute derivatives
    imx, imy = compute_derivatives(image, mode=MODE,
                                   cval=CVAL)  # np.gradient(image)#

    if show_step_result:
        vap_imshow_set(1, 2, 1, imx, "X derivative")
        vap_imshow_set(1, 2, 2, imy, "Y derivative")
        vap_imsave(path, output_names + ['03X_Y_derivative'])

    # Step 3: Create a local 2x2 structure tensor for each pixel
    imx = np.expand_dims(imx, -1)
    imy = np.expand_dims(imy, -1)
    gradient_vector = np.concatenate((imx, imy), axis=2)
    gradient_vector = np.reshape(gradient_vector, (rowImg, colImg, 2, 1))
    gradient_vector_t = np.reshape(gradient_vector, (rowImg, colImg, 1, 2))
    structure_tensor = np.matmul(gradient_vector, gradient_vector_t)
    tensor_std = 0.5
    structure_tensor = ndi.gaussian_filter(
        structure_tensor, [tensor_std, tensor_std, tensor_std, tensor_std],
        mode="constant")
    # for x in range(len(structure_tensor)):
    # 	for y in range(len(structure_tensor[x])):
    # 		structure_tensor[x,y]=gradient_vector[x,y].dot(gradient_vector_t[x,y])

    # Step 4: Compute eigenvalue, eigenvector for each structure tensor
    [eigValue, eigVector] = np.linalg.eig(structure_tensor)
    # eigVector = eigVector.transpose((0,1,3,2))

    # Step 5: Find dominant gradient by finding the greatest eigen value
    dominantIndex = np.argmax(eigValue, axis=-1)
    [axV, axU] = np.indices([rowImg, colImg])
    dominantVector = np.reshape(
        eigVector[axV.flat, axU.flat, dominantIndex.flat], [rowImg, colImg, 2])
    print("dominantVector shape", dominantVector.shape)
    dominantVector_T = np.reshape(dominantVector, (rowImg, colImg, 1, 2))

    if show_step_result:
        vap_imnew()
        idx = slice(None, None, 10)
        a = np.linspace(0, colImg - 1, colImg)
        b = np.linspace(0, rowImg - 1, rowImg)
        q = plt.quiver(a[idx],
                       b[idx],
                       dominantVector[idx, idx, 0],
                       dominantVector[idx, idx, 1],
                       pivot='mid')
        plt.title("Dominant vector", loc='center')
        vap_imsave(path, output_names + ['05dominant_vector'])

    # Step 6: Dominant vector with direction
    signMask = np.matmul(dominantVector_T, gradient_vector)
    sign_max = np.amax(signMask)
    print("sign_max", np.unique(signMask))
    # for x in range(len(signMask)):
    # 	for y in range(len(signMask[x])):
    # 		signMask[x,y]=dominantVector_T[x,y].dot(gradient_vector[x,y])

    signMask = np.reshape(signMask, (rowImg, colImg))
    dominantVector[signMask < 0] *= -1
    dominantVector[abs(signMask) < 1e-5] *= 0
    dominantVector[signMask > 0] *= 1

    if show_step_result:
        vap_imnew()
        # idx=slice(None,None,10)
        # a=np.linspace(0,colImg-1,colImg)
        # b=np.linspace(0,rowImg-1,rowImg)
        # q=plt.quiver(a[idx], b[idx], dominantVector[idx,idx,0], dominantVector[idx,idx,1], pivot='mid')
        plt.quiver(np.arange(colImg),
                   np.arange(rowImg),
                   dominantVector[:, :, 0],
                   dominantVector[:, :, 1],
                   pivot='mid')
        plt.title("Dominant vector with direction", loc='center')
        # plt.show()
        vap_imsave(path, output_names + ['06dominant_vector_with_direction'])

    # Step 7: Compute divergence
    vectorU = dominantVector[:, :, 0]
    vectorV = dominantVector[:, :, 1]
    ridgeImage = -divergence([vectorV, vectorU])
    ridgeImage[ridgeImage < 0.25] = 0

    if show_step_result:
        vap_imshow(ridgeImage, "Original Ridgeness Image")
        vap_imsave(path, output_names + ['07Original_Ridgeness_Image'])

    # Step 8: Discard ridge points with a large horizontal component
    theta = np.abs(np.arctan2(vectorV, vectorU))
    mask = np.logical_and(theta > np.pi * 0.4, theta < np.pi * 0.6)
    ridgeImage[mask] = 0

    if show_step_result:
        vap_imshow(ridgeImage, "Theta Filter Ridgeness image")
        vap_imsave(path, output_names + ['08Theta_Filter_Ridgeness_image'])

    # Step 9: Confidence-filter the image
    ridgeImage *= (1 -
                   np.exp(-(eigValue[:, :, 0] - eigValue[:, :, 1])**4 / 0.001))
    ridgeImage[ridgeImage < 0.5] = 0
    # ridgeImage[ridgeImage > 0.5] = 1
    ridgeImage[:, 0:5] = 0
    ridgeImage[:, colImg - 10:colImg] = 0

    if show_step_result:
        vap_imshow(ridgeImage, "Confident Filter Image")
        vap_imsave(path, output_names + ['09Confident_Filter_Image'])

    #Step 10: RANSAC filter
    ransac_result, test_result = vap_ransac_method(ridgeImage)

    if show_step_result:
        vap_imshow(ransac_result, "Ransac Image")
        vap_imsave(path, output_names + ['10RANSAC_Image'])
        vap_imshow(test_result, "Test Ransac Image")
        vap_imsave(path, output_names + ['11 Test RANSAC_Image'])

    return ridgeImage
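Both `vap_ridgeness_detection` variants call a `divergence` helper that is not shown above. A minimal sketch consistent with how it is used here (a list of per-axis component arrays, assuming unit grid spacing) could be:

import numpy as np


def divergence(field):
    # field: list of component arrays, one per axis, e.g. [f_y, f_x] for 2-D data.
    # The divergence is the sum of each component's derivative along its own axis.
    return np.add.reduce([np.gradient(component, axis=i)
                          for i, component in enumerate(field)])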