Example #1
def hsobel(image, mask=None):
    """Find the horizontal edges of an image using the Sobel transform.

    Parameters
    ----------
    image : 2-D array
        Image to process.
    mask : 2-D array, optional
        An optional mask to limit the application to a certain area.
        Note that pixels surrounding masked regions are also masked to
        prevent masked regions from affecting the result.

    Returns
    -------
    output : 2-D array
        The Sobel edge map.

    Notes
    -----
    We use the following kernel and return the absolute value of the
    result at each point::

      1   2   1
      0   0   0
     -1  -2  -1

    """
    assert_nD(image, 2)
    image = img_as_float(image)
    result = np.abs(convolve(image, HSOBEL_WEIGHTS))
    return _mask_filter_result(result, mask)
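A minimal stand-in built from the kernel shown in the Notes, assuming only numpy and scipy; skimage's actual HSOBEL_WEIGHTS may include a normalization factor, so absolute magnitudes can differ by a constant.

import numpy as np
from scipy.ndimage import convolve

# Horizontal Sobel kernel, exactly as printed in the Notes above.
HSOBEL = np.array([[ 1,  2,  1],
                   [ 0,  0,  0],
                   [-1, -2, -1]], dtype=float)

def hsobel_sketch(image):
    # Absolute response, mirroring np.abs(convolve(...)) in the example.
    return np.abs(convolve(np.asarray(image, dtype=float), HSOBEL))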
Example #2
def sobel(image, mask=None):
    """Find the edge magnitude using the Sobel transform.

    Parameters
    ----------
    image : 2-D array
        Image to process.
    mask : 2-D array, optional
        An optional mask to limit the application to a certain area.
        Note that pixels surrounding masked regions are also masked to
        prevent masked regions from affecting the result.

    Returns
    -------
    output : 2-D array
        The Sobel edge map.

    Notes
    -----
    Take the square root of the sum of the squares of the horizontal and
    vertical Sobels to get a magnitude that's somewhat insensitive to
    direction.

    Note that ``scipy.ndimage.sobel`` returns a directional Sobel which
    has to be further processed to perform edge detection.
    """
    assert_nD(image, 2)
    return np.sqrt(hsobel(image, mask)**2 + vsobel(image, mask)**2)
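The Notes above distinguish this magnitude from scipy's directional filter; here is a short sketch of the same post-processing built on ``scipy.ndimage.sobel`` (function name is illustrative):

import numpy as np
from scipy import ndimage

def sobel_magnitude(image):
    image = np.asarray(image, dtype=float)
    dy = ndimage.sobel(image, axis=0)  # horizontal-edge (row) derivative
    dx = ndimage.sobel(image, axis=1)  # vertical-edge (column) derivative
    return np.hypot(dx, dy)            # sqrt(dx**2 + dy**2), as in the Notes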
Example #3
def prewitt_v(image, mask=None):
    """Find the vertical edges of an image using the Prewitt transform.

    Parameters
    ----------
    image : 2-D array
        Image to process.
    mask : 2-D array, optional
        An optional mask to limit the application to a certain area.
        Note that pixels surrounding masked regions are also masked to
        prevent masked regions from affecting the result.

    Returns
    -------
    output : 2-D array
        The Prewitt edge map.

    Notes
    -----
    We use the following kernel::

      1   0  -1
      1   0  -1
      1   0  -1

    """
    assert_nD(image, 2)
    image = img_as_float(image)
    result = convolve(image, VPREWITT_WEIGHTS)
    return _mask_filter_result(result, mask)
Example #4
def prewitt(image, mask=None):
    """Find the edge magnitude using the Prewitt transform.

    Parameters
    ----------
    image : 2-D array
        Image to process.
    mask : 2-D array, optional
        An optional mask to limit the application to a certain area.
        Note that pixels surrounding masked regions are also masked to
        prevent masked regions from affecting the result.

    Returns
    -------
    output : 2-D array
        The Prewitt edge map.

    Notes
    -----
    Return the square root of the sum of squares of the horizontal
    and vertical Prewitt transforms.
    """
    assert_nD(image, 2)
    out = np.sqrt(prewitt_h(image, mask)**2 + prewitt_v(image, mask)**2)
    out /= np.sqrt(2)
    return out
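A quick numeric check of why the result is divided by ``np.sqrt(2)``: assuming the directional kernels are normalized so each response of a float image in [0, 1] lies in [-1, 1], the normalization keeps the combined magnitude in the same range (worst case shown below).

import numpy as np

h, v = 1.0, 1.0                 # worst case: both responses saturated
mag = np.sqrt(h**2 + v**2)      # sqrt(2) ~ 1.414, outside [0, 1]
print(mag / np.sqrt(2))         # 1.0 after the normalization above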
Example #5
def scharr_v(image, mask=None):
    """Find the vertical edges of an image using the Scharr transform.

    Parameters
    ----------
    image : 2-D array
        Image to process.
    mask : 2-D array, optional
        An optional mask to limit the application to a certain area.
        Note that pixels surrounding masked regions are also masked to
        prevent masked regions from affecting the result.

    Returns
    -------
    output : 2-D array
        The Scharr edge map.

    Notes
    -----
    We use the following kernel::

       3   0   -3
      10   0  -10
       3   0   -3

    References
    ----------
    .. [1] D. Kroon, 2009, Short Paper University Twente, Numerical
           Optimization of Kernel Based Image Derivatives.

    """
    assert_nD(image, 2)
    image = img_as_float(image)
    result = convolve(image, VSCHARR_WEIGHTS)
    return _mask_filter_result(result, mask)
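A minimal sketch of the same response using the kernel from the Notes, assuming numpy and scipy; skimage's VSCHARR_WEIGHTS may carry an extra normalization factor.

import numpy as np
from scipy.ndimage import convolve

# Vertical Scharr kernel, as printed in the Notes above.
VSCHARR = np.array([[ 3, 0,  -3],
                    [10, 0, -10],
                    [ 3, 0,  -3]], dtype=float)

def scharr_v_sketch(image):
    return convolve(np.asarray(image, dtype=float), VSCHARR)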
Example #6
def _apply(func,
           image,
           selem,
           out,
           mask,
           shift_x,
           shift_y,
           s0,
           s1,
           out_dtype=None):

    assert_nD(image, 2)
    image, selem, out, mask, max_bin = _handle_input(image, selem, out, mask,
                                                     out_dtype)

    func(image,
         selem,
         shift_x=shift_x,
         shift_y=shift_y,
         mask=mask,
         out=out,
         max_bin=max_bin,
         s0=s0,
         s1=s1)

    return out.reshape(out.shape[:2])
Example #7
def _check_input(image, init_level_set):
    """Check that shapes of `image` and `init_level_set` match."""
    assert_nD(image, [2, 3])

    if len(image.shape) != len(init_level_set.shape):
        raise ValueError("The dimensions of the initial level set do not "
                         "match the dimensions of the image.")
Example #8
def roberts_negative_diagonal(image, mask=None):
    """Find the cross edges of an image using the Roberts' Cross operator.

    The kernel is applied to the input image to produce separate measurements
    of the gradient component in one orientation.

    Parameters
    ----------
    image : 2-D array
        Image to process.
    mask : 2-D array, optional
        An optional mask to limit the application to a certain area.
        Note that pixels surrounding masked regions are also masked to
        prevent masked regions from affecting the result.

    Returns
    -------
    output : 2-D array
        The Robert's edge map.

    Notes
    -----
    We use the following kernel and return the absolute value of the
    result at each point::

      0   1
     -1   0

    """
    assert_nD(image, 2)
    image = img_as_float(image)
    result = np.abs(convolve(image, ROBERTS_ND_WEIGHTS))
    return _mask_filter_result(result, mask)
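The same 2x2 cross kernel from the Notes as a self-contained sketch, assuming numpy and scipy (ROBERTS_ND is an illustrative name):

import numpy as np
from scipy.ndimage import convolve

ROBERTS_ND = np.array([[ 0, 1],
                       [-1, 0]], dtype=float)

def roberts_nd_sketch(image):
    # Absolute response, matching the example's np.abs(convolve(...)).
    return np.abs(convolve(np.asarray(image, dtype=float), ROBERTS_ND))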
Example #9
def prewitt(image, mask=None):
    """Find the edge magnitude using the Prewitt transform.

    Parameters
    ----------
    image : 2-D array
        Image to process.
    mask : 2-D array, optional
        An optional mask to limit the application to a certain area.
        Note that pixels surrounding masked regions are also masked to
        prevent masked regions from affecting the result.

    Returns
    -------
    output : 2-D array
        The Prewitt edge map.

    Notes
    -----
    Return the square root of the sum of squares of the horizontal
    and vertical Prewitt transforms.
    """
    assert_nD(image, 2)
    out = np.sqrt(hprewitt(image, mask)**2 + vprewitt(image, mask)**2)
    out /= np.sqrt(2)
    return out
Example #10
def forward(data, impulse_response=None, filter_params={},
            predefined_filter=None):
    """Apply the given filter to data.

    Parameters
    ----------
    data : (M,N) ndarray
        Input data.
    impulse_response : callable `f(r, c, **filter_params)`
        Impulse response of the filter.  See LPIFilter2D.__init__.
    filter_params : dict
        Additional keyword parameters to the impulse_response function.

    Other Parameters
    ----------------
    predefined_filter : LPIFilter2D
        If you need to apply the same filter multiple times over different
        images, construct the LPIFilter2D and specify it here.

    Examples
    --------

    Gaussian filter:

    >>> def filt_func(r, c):
    ...     return np.exp(-np.hypot(r, c)/1)
    >>>
    >>> from skimage import data
    >>> filtered = forward(data.coins(), filt_func)

    """
    assert_nD(data, 2, 'data')
    if predefined_filter is None:
        predefined_filter = LPIFilter2D(impulse_response, **filter_params)
    return predefined_filter(data)
Example #11
def hsobel(image, mask=None):
    """Find the horizontal edges of an image using the Sobel transform.

    Parameters
    ----------
    image : 2-D array
        Image to process.
    mask : 2-D array, optional
        An optional mask to limit the application to a certain area.
        Note that pixels surrounding masked regions are also masked to
        prevent masked regions from affecting the result.

    Returns
    -------
    output : 2-D array
        The Sobel edge map.

    Notes
    -----
    We use the following kernel and return the absolute value of the
    result at each point::

      1   2   1
      0   0   0
     -1  -2  -1

    """
    assert_nD(image, 2)
    image = img_as_float(image)
    result = np.abs(convolve(image, HSOBEL_WEIGHTS))
    return _mask_filter_result(result, mask)
Example #12
def sobel(image, mask=None):
    """Find the edge magnitude using the Sobel transform.

    Parameters
    ----------
    image : 2-D array
        Image to process.
    mask : 2-D array, optional
        An optional mask to limit the application to a certain area.
        Note that pixels surrounding masked regions are also masked to
        prevent masked regions from affecting the result.

    Returns
    -------
    output : 2-D array
        The Sobel edge map.

    Notes
    -----
    Take the square root of the sum of the squares of the horizontal and
    vertical Sobels to get a magnitude that's somewhat insensitive to
    direction.

    Note that ``scipy.ndimage.sobel`` returns a directional Sobel which
    has to be further processed to perform edge detection.
    """
    assert_nD(image, 2)
    out = np.sqrt(hsobel(image, mask)**2 + vsobel(image, mask)**2)
    out /= np.sqrt(2)
    return out
Example #13
def roberts_neg_diag(image, mask=None):
    """Find the cross edges of an image using the Roberts' Cross operator.

    The kernel is applied to the input image to produce separate measurements
    of the gradient component in one orientation.

    Parameters
    ----------
    image : 2-D array
        Image to process.
    mask : 2-D array, optional
        An optional mask to limit the application to a certain area.
        Note that pixels surrounding masked regions are also masked to
        prevent masked regions from affecting the result.

    Returns
    -------
    output : 2-D array
        The Robert's edge map.

    Notes
    -----
    We use the following kernel::

      0   1
     -1   0

    """
    assert_nD(image, 2)
    image = img_as_float(image)
    result = convolve(image, ROBERTS_ND_WEIGHTS)
    return _mask_filter_result(result, mask)
Example #14
def roberts(image, mask=None):
    """Find the edge magnitude using Roberts' cross operator.

    Parameters
    ----------
    image : 2-D array
        Image to process.
    mask : 2-D array, optional
        An optional mask to limit the application to a certain area.
        Note that pixels surrounding masked regions are also masked to
        prevent masked regions from affecting the result.

    Returns
    -------
    output : 2-D array
        The Roberts' Cross edge map.

    See also
    --------
    sobel, scharr, prewitt, feature.canny

    Examples
    --------
    >>> from skimage import data
    >>> camera = data.camera()
    >>> from skimage import filters
    >>> edges = filters.roberts(camera)

    """
    assert_nD(image, 2)
    out = np.sqrt(roberts_pos_diag(image, mask)**2 +
                  roberts_neg_diag(image, mask)**2)
    out /= np.sqrt(2)
    return out
Example #15
def local_binary_pattern(image, P, R, method='default'):
    """Gray scale and rotation invariant LBP (Local Binary Patterns).

    LBP is an invariant descriptor that can be used for texture classification.

    Parameters
    ----------
    image : (N, M) array
        Graylevel image.
    P : int
        Number of circularly symmetric neighbour set points (quantization of
        the angular space).
    R : float
        Radius of circle (spatial resolution of the operator).
    method : {'default', 'ror', 'uniform', 'nri_uniform', 'var'}
        Method to determine the pattern.

        * 'default': original local binary pattern which is gray scale but not
            rotation invariant.
        * 'ror': extension of default implementation which is gray scale and
            rotation invariant.
        * 'uniform': improved rotation invariance with uniform patterns and
            finer quantization of the angular space which is gray scale and
            rotation invariant.
        * 'nri_uniform': non rotation-invariant uniform patterns variant
            which is only gray scale invariant [2].
        * 'var': rotation invariant variance measures of the contrast of local
            image texture which is rotation but not gray scale invariant.

    Returns
    -------
    output : (N, M) array
        LBP image.

    References
    ----------
    .. [1] Multiresolution Gray-Scale and Rotation Invariant Texture
           Classification with Local Binary Patterns.
           Timo Ojala, Matti Pietikainen, Topi Maenpaa.
           http://www.rafbis.it/biplab15/images/stories/docenti/Danielriccio/Articoliriferimento/LBP.pdf, 2002.
    .. [2] Face recognition with local binary patterns.
           Timo Ahonen, Abdenour Hadid, Matti Pietikainen,
           http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.214.6851,
           2004.
    """
    assert_nD(image, 2)

    methods = {
        'default': ord('D'),
        'ror': ord('R'),
        'uniform': ord('U'),
        'nri_uniform': ord('N'),
        'var': ord('V')
    }
    image = np.ascontiguousarray(image, dtype=np.double)
    output = _local_binary_pattern(image, P, R, methods[method.lower()])
    return output
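A usage sketch through the public API, assuming a scikit-image install where this function is exposed as ``skimage.feature.local_binary_pattern``:

from skimage import data, feature

image = data.camera()
lbp = feature.local_binary_pattern(image, P=8, R=1.0, method='uniform')
# With method='uniform' and P=8, codes fall in [0, P + 1], which makes
# a fixed-length histogram a convenient texture descriptor.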
Example #16
    def extract(self, image, keypoints):
        """Extract BRIEF binary descriptors for given keypoints in image.

        Parameters
        ----------
        image : 2D array
            Input image.
        keypoints : (N, 2) array
            Keypoint coordinates as ``(row, col)``.

        """
        assert_nD(image, 2)

        np.random.seed(self.sample_seed)

        image = _prepare_grayscale_input_2D(image)

        # Gaussian low-pass filtering to alleviate noise sensitivity
        image = np.ascontiguousarray(gaussian_filter(image, self.sigma))

        # Sampling pairs of decision pixels in patch_size x patch_size window
        desc_size = self.descriptor_size
        patch_size = self.patch_size
        if self.mode == 'normal':
            samples = (patch_size / 5.0) * np.random.randn(desc_size * 8)
            samples = np.array(samples, dtype=np.int32)
            samples = samples[(samples < (patch_size // 2))
                              & (samples > -(patch_size - 2) // 2)]

            pos1 = samples[:desc_size * 2].reshape(desc_size, 2)
            pos2 = samples[desc_size * 2:desc_size * 4].reshape(desc_size, 2)
        elif self.mode == 'uniform':
            samples = np.random.randint(-(patch_size - 2) // 2,
                                        (patch_size // 2) + 1,
                                        (desc_size * 2, 2))
            samples = np.array(samples, dtype=np.int32)
            pos1, pos2 = np.split(samples, 2)

        pos1 = np.ascontiguousarray(pos1)
        pos2 = np.ascontiguousarray(pos2)

        # Removing keypoints that are within (patch_size / 2) distance from the
        # image border
        self.mask = _mask_border_keypoints(image.shape, keypoints,
                                           patch_size // 2)

        keypoints = np.array(keypoints[self.mask, :],
                             dtype=np.intp,
                             order='C',
                             copy=False)

        self.descriptors = np.zeros((keypoints.shape[0], desc_size),
                                    dtype=bool,
                                    order='C')

        _brief_loop(image, self.descriptors.view(np.uint8), keypoints, pos1,
                    pos2)
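A usage sketch, assuming the class is exposed as ``skimage.feature.BRIEF`` and keypoints come from a corner detector as ``(row, col)`` coordinates:

import numpy as np
from skimage import data
from skimage.feature import BRIEF, corner_harris, corner_peaks

image = data.camera()
keypoints = corner_peaks(corner_harris(image), min_distance=5)

extractor = BRIEF()
extractor.extract(image, keypoints)
descriptors = extractor.descriptors   # (n_kept, descriptor_size) bool array
kept = keypoints[extractor.mask]      # border keypoints are dropped, as in
                                      # the _mask_border_keypoints step above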
Example #17
def local_binary_pattern(image, P, R, method='default'):
    """Gray scale and rotation invariant LBP (Local Binary Patterns).

    LBP is an invariant descriptor that can be used for texture classification.

    Parameters
    ----------
    image : (N, M) array
        Graylevel image.
    P : int
        Number of circularly symmetric neighbour set points (quantization of
        the angular space).
    R : float
        Radius of circle (spatial resolution of the operator).
    method : {'default', 'ror', 'uniform', 'nri_uniform', 'var'}
        Method to determine the pattern.

        * 'default': original local binary pattern which is gray scale but not
            rotation invariant.
        * 'ror': extension of default implementation which is gray scale and
            rotation invariant.
        * 'uniform': improved rotation invariance with uniform patterns and
            finer quantization of the angular space which is gray scale and
            rotation invariant.
        * 'nri_uniform': non rotation-invariant uniform patterns variant
            which is only gray scale invariant [2].
        * 'var': rotation invariant variance measures of the contrast of local
            image texture which is rotation but not gray scale invariant.

    Returns
    -------
    output : (N, M) array
        LBP image.

    References
    ----------
    .. [1] Multiresolution Gray-Scale and Rotation Invariant Texture
           Classification with Local Binary Patterns.
           Timo Ojala, Matti Pietikainen, Topi Maenpaa.
           http://www.rafbis.it/biplab15/images/stories/docenti/Danielriccio/Articoliriferimento/LBP.pdf, 2002.
    .. [2] Face recognition with local binary patterns.
           Timo Ahonen, Abdenour Hadid, Matti Pietikainen,
           http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.214.6851,
           2004.
    """
    assert_nD(image, 2)

    methods = {
        'default': ord('D'),
        'ror': ord('R'),
        'uniform': ord('U'),
        'nri_uniform': ord('N'),
        'var': ord('V')
    }
    image = np.ascontiguousarray(image, dtype=np.double)
    output = _local_binary_pattern(image, P, R, methods[method.lower()])
    return output
Example #18
def gabor_filter(image,
                 frequency,
                 theta=0,
                 bandwidth=1,
                 sigma_x=None,
                 sigma_y=None,
                 offset=0,
                 mode='reflect',
                 cval=0):
    """Return real and imaginary responses to Gabor filter.

    The real and imaginary parts of the Gabor filter kernel are applied to the
    image and the response is returned as a pair of arrays.

    Frequency and orientation representations of the Gabor filter are similar
    to those of the human visual system. It is especially suitable for texture
    classification using Gabor filter banks.

    Parameters
    ----------
    image : array
        Input image.
    frequency : float
        Frequency of the harmonic function.
    theta : float
        Orientation in radians. If 0, the harmonic is in the x-direction.
    bandwidth : float
        The bandwidth captured by the filter. For fixed bandwidth, `sigma_x`
        and `sigma_y` will decrease with increasing frequency. This value is
        ignored if `sigma_x` and `sigma_y` are set by the user.
    sigma_x, sigma_y : float
        Standard deviation in x- and y-directions. These directions apply to
        the kernel *before* rotation. If `theta = pi/2`, then the kernel is
        rotated 90 degrees so that `sigma_x` controls the *vertical* direction.
    offset : float, optional
        Phase offset of harmonic function in radians.

    Returns
    -------
    real, imag : arrays
        Filtered images using the real and imaginary parts of the Gabor filter
        kernel.

    References
    ----------
    .. [1] http://en.wikipedia.org/wiki/Gabor_filter
    .. [2] http://mplab.ucsd.edu/tutorials/gabor.pdf

    """
    assert_nD(image, 2)
    g = gabor_kernel(frequency, theta, bandwidth, sigma_x, sigma_y, offset)

    filtered_real = ndimage.convolve(image, np.real(g), mode=mode, cval=cval)
    filtered_imag = ndimage.convolve(image, np.imag(g), mode=mode, cval=cval)

    return filtered_real, filtered_imag
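A sketch of the same two convolutions via the public kernel helper, assumed available as ``skimage.filters.gabor_kernel`` in recent releases (older releases exposed it under ``skimage.filter``):

import numpy as np
from scipy import ndimage
from skimage import data
from skimage.filters import gabor_kernel

image = data.coins().astype(float)
g = gabor_kernel(frequency=0.2, theta=0)

real = ndimage.convolve(image, np.real(g), mode='reflect')
imag = ndimage.convolve(image, np.imag(g), mode='reflect')
magnitude = np.hypot(real, imag)  # orientation-tuned texture energy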
Example #19
    def extract(self, image, keypoints):
        """Extract BRIEF binary descriptors for given keypoints in image.

        Parameters
        ----------
        image : 2D array
            Input image.
        keypoints : (N, 2) array
            Keypoint coordinates as ``(row, col)``.

        """
        assert_nD(image, 2)

        np.random.seed(self.sample_seed)

        image = _prepare_grayscale_input_2D(image)

        # Gaussian low-pass filtering to alleviate noise sensitivity
        image = np.ascontiguousarray(gaussian_filter(image, self.sigma))

        # Sampling pairs of decision pixels in patch_size x patch_size window
        desc_size = self.descriptor_size
        patch_size = self.patch_size
        if self.mode == 'normal':
            samples = (patch_size / 5.0) * np.random.randn(desc_size * 8)
            samples = np.array(samples, dtype=np.int32)
            samples = samples[(samples < (patch_size // 2))
                              & (samples > - (patch_size - 2) // 2)]

            pos1 = samples[:desc_size * 2].reshape(desc_size, 2)
            pos2 = samples[desc_size * 2:desc_size * 4].reshape(desc_size, 2)
        elif self.mode == 'uniform':
            samples = np.random.randint(-(patch_size - 2) // 2,
                                        (patch_size // 2) + 1,
                                        (desc_size * 2, 2))
            samples = np.array(samples, dtype=np.int32)
            pos1, pos2 = np.split(samples, 2)

        pos1 = np.ascontiguousarray(pos1)
        pos2 = np.ascontiguousarray(pos2)

        # Removing keypoints that are within (patch_size / 2) distance from the
        # image border
        self.mask = _mask_border_keypoints(image.shape, keypoints,
                                           patch_size // 2)

        keypoints = np.array(keypoints[self.mask, :], dtype=np.intp,
                             order='C', copy=False)

        self.descriptors = np.zeros((keypoints.shape[0], desc_size),
                                    dtype=bool, order='C')

        _brief_loop(image, self.descriptors.view(np.uint8), keypoints,
                    pos1, pos2)
Example #20
def _apply(func, image, selem, out, mask, shift_x, shift_y, s0, s1,
           out_dtype=None):

    assert_nD(image, 2)
    image, selem, out, mask, max_bin = _handle_input(image, selem, out, mask,
                                                     out_dtype)

    func(image, selem, shift_x=shift_x, shift_y=shift_y, mask=mask,
         out=out, max_bin=max_bin, s0=s0, s1=s1)

    return out.reshape(out.shape[:2])
Example #21
    def extract(self, image, keypoints, scales, orientations):
        """Extract rBRIEF binary descriptors for given keypoints in image.

        Note that the keypoints must be extracted using the same `downscale`
        and `n_scales` parameters. Additionally, if you want to extract both
        keypoints and descriptors you should use the faster
        `detect_and_extract`.

        Parameters
        ----------
        image : 2D array
            Input image.
        keypoints : (N, 2) array
            Keypoint coordinates as ``(row, col)``.
        scales : (N, ) array
            Corresponding scales.
        orientations : (N, ) array
            Corresponding orientations in radians.

        """
        assert_nD(image, 2)

        pyramid = self._build_pyramid(image)

        descriptors_list = []
        mask_list = []

        # Determine octaves from scales
        octaves = (np.log(scales) / np.log(self.downscale)).astype(np.intp)

        for octave in range(len(pyramid)):

            # Mask for all keypoints in current octave
            octave_mask = octaves == octave

            if np.sum(octave_mask) > 0:

                octave_image = np.ascontiguousarray(pyramid[octave])

                octave_keypoints = keypoints[octave_mask]
                octave_keypoints /= self.downscale ** octave

                octave_orientations = orientations[octave_mask]

                descriptors, mask = self._extract_octave(octave_image,
                                                         octave_keypoints,
                                                         octave_orientations)

                descriptors_list.append(descriptors)
                mask_list.append(mask)

        self.descriptors = np.vstack(descriptors_list).view(np.bool)
        self.mask_ = np.hstack(mask_list)
Example #22
    def extract(self, image, keypoints, scales, orientations):
        """Extract rBRIEF binary descriptors for given keypoints in image.

        Note that the keypoints must be extracted using the same `downscale`
        and `n_scales` parameters. Additionally, if you want to extract both
        keypoints and descriptors you should use the faster
        `detect_and_extract`.

        Parameters
        ----------
        image : 2D array
            Input image.
        keypoints : (N, 2) array
            Keypoint coordinates as ``(row, col)``.
        scales : (N, ) array
            Corresponding scales.
        orientations : (N, ) array
            Corresponding orientations in radians.

        """
        assert_nD(image, 2)

        pyramid = self._build_pyramid(image)

        descriptors_list = []
        mask_list = []

        # Determine octaves from scales
        octaves = (np.log(scales) / np.log(self.downscale)).astype(np.intp)

        for octave in range(len(pyramid)):

            # Mask for all keypoints in current octave
            octave_mask = octaves == octave

            if np.sum(octave_mask) > 0:

                octave_image = np.ascontiguousarray(pyramid[octave])

                octave_keypoints = keypoints[octave_mask]
                octave_keypoints /= self.downscale**octave

                octave_orientations = orientations[octave_mask]

                descriptors, mask = self._extract_octave(
                    octave_image, octave_keypoints, octave_orientations)

                descriptors_list.append(descriptors)
                mask_list.append(mask)

        self.descriptors = np.vstack(descriptors_list).view(np.bool)
        self.mask_ = np.hstack(mask_list)
Example #23
    def __call__(self, data):
        """Apply the filter to the given data.

        Parameters
        ----------
        data : (M,N) ndarray

        """
        assert_nD(data, 2, 'data')
        F, G = self._prepare(data)
        out = np.dual.ifftn(F * G)
        out = np.abs(_centre(out, data.shape))
        return out
Example #24
    def __call__(self, data):
        """Apply the filter to the given data.

        Parameters
        ----------
        data : (M,N) ndarray

        """
        assert_nD(data, 2, 'data')
        F, G = self._prepare(data)
        out = np.dual.ifftn(F * G)
        out = np.abs(_centre(out, data.shape))
        return out
Example #25
def gabor_filter(image, frequency, theta=0, bandwidth=1, sigma_x=None,
                 sigma_y=None, offset=0, mode='reflect', cval=0):
    """Return real and imaginary responses to Gabor filter.

    The real and imaginary parts of the Gabor filter kernel are applied to the
    image and the response is returned as a pair of arrays.

    Frequency and orientation representations of the Gabor filter are similar
    to those of the human visual system. It is especially suitable for texture
    classification using Gabor filter banks.

    Parameters
    ----------
    image : array
        Input image.
    frequency : float
        Frequency of the harmonic function.
    theta : float
        Orientation in radians. If 0, the harmonic is in the x-direction.
    bandwidth : float
        The bandwidth captured by the filter. For fixed bandwidth, `sigma_x`
        and `sigma_y` will decrease with increasing frequency. This value is
        ignored if `sigma_x` and `sigma_y` are set by the user.
    sigma_x, sigma_y : float
        Standard deviation in x- and y-directions. These directions apply to
        the kernel *before* rotation. If `theta = pi/2`, then the kernel is
        rotated 90 degrees so that `sigma_x` controls the *vertical* direction.
    offset : float, optional
        Phase offset of harmonic function in radians.

    Returns
    -------
    real, imag : arrays
        Filtered images using the real and imaginary parts of the Gabor filter
        kernel.

    References
    ----------
    .. [1] http://en.wikipedia.org/wiki/Gabor_filter
    .. [2] http://mplab.ucsd.edu/tutorials/gabor.pdf

    """
    assert_nD(image, 2)
    g = gabor_kernel(frequency, theta, bandwidth, sigma_x, sigma_y, offset)

    filtered_real = ndimage.convolve(image, np.real(g), mode=mode, cval=cval)
    filtered_imag = ndimage.convolve(image, np.imag(g), mode=mode, cval=cval)

    return filtered_real, filtered_imag
Example #26
    def detect(self, image):
        """Detect oriented FAST keypoints along with the corresponding scale.

        Parameters
        ----------
        image : 2D array
            Input image.

        """
        assert_nD(image, 2)

        pyramid = self._build_pyramid(image)

        keypoints_list = []
        orientations_list = []
        scales_list = []
        responses_list = []

        for octave in range(len(pyramid)):

            octave_image = np.ascontiguousarray(pyramid[octave])

            keypoints, orientations, responses = \
                self._detect_octave(octave_image)

            keypoints_list.append(keypoints * self.downscale**octave)
            orientations_list.append(orientations)
            scales_list.append(self.downscale**octave *
                               np.ones(keypoints.shape[0], dtype=np.intp))
            responses_list.append(responses)

        keypoints = np.vstack(keypoints_list)
        orientations = np.hstack(orientations_list)
        scales = np.hstack(scales_list)
        responses = np.hstack(responses_list)

        if keypoints.shape[0] < self.n_keypoints:
            self.keypoints = keypoints
            self.scales = scales
            self.orientations = orientations
            self.responses = responses
        else:
            # Choose best n_keypoints according to Harris corner response
            best_indices = responses.argsort()[::-1][:self.n_keypoints]
            self.keypoints = keypoints[best_indices]
            self.scales = scales[best_indices]
            self.orientations = orientations[best_indices]
            self.responses = responses[best_indices]
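A usage sketch via the public ``skimage.feature.ORB`` class; ``detect_and_extract`` runs this detection step together with descriptor extraction, which the rBRIEF ``extract`` docstrings above recommend when both are needed:

from skimage import data
from skimage.feature import ORB

orb = ORB(n_keypoints=200)
orb.detect_and_extract(data.camera())
print(orb.keypoints.shape)    # (N, 2) keypoint coordinates
print(orb.descriptors.shape)  # (N, 256) boolean rBRIEF descriptors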
Example #27
def sobel(image, mask=None):
    """Find the edge magnitude using the Sobel transform.

    Parameters
    ----------
    image : 2-D array
        Image to process.
    mask : 2-D array, optional
        An optional mask to limit the application to a certain area.
        Note that pixels surrounding masked regions are also masked to
        prevent masked regions from affecting the result.

    Returns
    -------
    output : 2-D array
        The Sobel edge map.

    See also
    --------
    scharr, prewitt, roberts, feature.canny

    Notes
    -----
    Take the square root of the sum of the squares of the horizontal and
    vertical Sobels to get a magnitude that's somewhat insensitive to
    direction.

    The 3x3 convolution kernel used in the horizontal and vertical Sobels is
    an approximation of the gradient of the image (with some slight blurring
    since 9 pixels are used to compute the gradient at a given pixel). As an
    approximation of the gradient, the Sobel operator is not completely
    rotation-invariant. The Scharr operator should be used for a better
    rotation invariance.

    Note that ``scipy.ndimage.sobel`` returns a directional Sobel which
    has to be further processed to perform edge detection.

    Examples
    --------
    >>> from skimage import data
    >>> camera = data.camera()
    >>> from skimage import filters
    >>> edges = filters.sobel(camera)
    """
    assert_nD(image, 2)
    out = np.sqrt(sobel_h(image, mask)**2 + sobel_v(image, mask)**2)
    out /= np.sqrt(2)
    return out
Example #28
    def detect(self, image):
        """Detect oriented FAST keypoints along with the corresponding scale.

        Parameters
        ----------
        image : 2D array
            Input image.

        """
        assert_nD(image, 2)

        pyramid = self._build_pyramid(image)

        keypoints_list = []
        orientations_list = []
        scales_list = []
        responses_list = []

        for octave in range(len(pyramid)):

            octave_image = np.ascontiguousarray(pyramid[octave])

            keypoints, orientations, responses = \
                self._detect_octave(octave_image)

            keypoints_list.append(keypoints * self.downscale ** octave)
            orientations_list.append(orientations)
            scales_list.append(self.downscale ** octave
                               * np.ones(keypoints.shape[0], dtype=np.intp))
            responses_list.append(responses)

        keypoints = np.vstack(keypoints_list)
        orientations = np.hstack(orientations_list)
        scales = np.hstack(scales_list)
        responses = np.hstack(responses_list)

        if keypoints.shape[0] < self.n_keypoints:
            self.keypoints = keypoints
            self.scales = scales
            self.orientations = orientations
            self.responses = responses
        else:
            # Choose best n_keypoints according to Harris corner response
            best_indices = responses.argsort()[::-1][:self.n_keypoints]
            self.keypoints = keypoints[best_indices]
            self.scales = scales[best_indices]
            self.orientations = orientations[best_indices]
            self.responses = responses[best_indices]
Example #29
def angular_lbp(image, P, R, method='default'):
    """Angular Difference LBP"""

    assert_nD(image, 2)

    methods = {
        'default': ord('D'),
        'ror': ord('R'),
        'uniform': ord('U'),
        'nri_uniform': ord('N'),
        'var': ord('V'),
    }

    image = np.ascontiguousarray(image, dtype=np.double)
    output = _lbp_ang._angular_lbp(image, P, R, methods[method.lower()])
    return output
Example #30
def ni_lbp(image, P, R, method='default'):
    """Neighbour Intensity LBP"""

    assert_nD(image, 2)

    methods = {
        'default': ord('D'),
        'ror': ord('R'),
        'uniform': ord('U'),
        'nri_uniform': ord('N'),
        'var': ord('V'),
    }

    image = np.ascontiguousarray(image, dtype=np.double)
    output = _nilbp._ni_lbp(image, P, R, methods[method.lower()])
    return output
Example #31
def radial_lbp(image, P, R_OUT, R_IN, method='default'):
    """Radial Difference LBP"""

    assert_nD(image, 2)

    methods = {
        'default': ord('D'),
        'ror': ord('R'),
        'uniform': ord('U'),
        'nri_uniform': ord('N'),
        'var': ord('V'),
    }

    image = np.ascontiguousarray(image, dtype=np.double)
    output = _lbp_rad._radial_lbp(image, P, R_OUT, R_IN,
                                  methods[method.lower()])
    return output
Example #32
def wiener(data,
           impulse_response=None,
           filter_params={},
           K=0.25,
           predefined_filter=None):
    """Minimum Mean Square Error (Wiener) inverse filter.

    Parameters
    ----------
    data : (M,N) ndarray
        Input data.
    K : float or (M,N) ndarray
        Ratio between power spectrum of noise and undegraded
        image.
    impulse_response : callable `f(r, c, **filter_params)`
        Impulse response of the filter.  See LPIFilter2D.__init__.
    filter_params : dict
        Additional keyword parameters to the impulse_response function.

    Other Parameters
    ----------------
    predefined_filter : LPIFilter2D
        If you need to apply the same filter multiple times over different
        images, construct the LPIFilter2D and specify it here.

    """
    assert_nD(data, 2, 'data')

    if not isinstance(K, float):
        assert_nD(K, 2, 'K')

    if predefined_filter is None:
        filt = LPIFilter2D(impulse_response, **filter_params)
    else:
        filt = predefined_filter

    F, G = filt._prepare(data)
    _min_limit(F)

    H_mag_sqr = np.abs(F)**2
    F = 1 / F * H_mag_sqr / (H_mag_sqr + K)

    return _centre(np.abs(ifftshift(np.dual.ifftn(G * F))), data.shape)
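A frequency-domain sketch of the step above, assuming the point spread function is known. Since ``1 / F * |F|**2`` equals ``conj(F)``, the filter is the classic ``conj(H) / (|H|**2 + K)`` Wiener form (PSF centering and shifting are glossed over here):

import numpy as np

def wiener_sketch(data, psf, K=0.25):
    H = np.fft.fftn(psf, s=data.shape)   # transfer function of the PSF
    G = np.fft.fftn(data)                # spectrum of the degraded data
    F = np.conj(H) / (np.abs(H)**2 + K)  # MMSE (Wiener) inverse filter
    return np.real(np.fft.ifftn(F * G))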
Example #33
def prewitt(image, mask=None):
    """Find the edge magnitude using the Prewitt transform.

    Parameters
    ----------
    image : 2-D array
        Image to process.
    mask : 2-D array, optional
        An optional mask to limit the application to a certain area.
        Note that pixels surrounding masked regions are also masked to
        prevent masked regions from affecting the result.

    Returns
    -------
    output : 2-D array
        The Prewitt edge map.

    See also
    --------
    sobel, scharr

    Notes
    -----
    Return the square root of the sum of squares of the horizontal
    and vertical Prewitt transforms. The edge magnitude depends slightly
    on edge directions, since the approximation of the gradient operator by
    the Prewitt operator is not completely rotation invariant. For a better
    rotation invariance, the Scharr operator should be used. The Sobel operator
    has a better rotation invariance than the Prewitt operator, but a worse
    rotation invariance than the Scharr operator.

    Examples
    --------
    >>> from skimage import data
    >>> camera = data.camera()
    >>> from skimage import filters
    >>> edges = filters.prewitt(camera)
    """
    assert_nD(image, 2)
    out = np.sqrt(prewitt_h(image, mask)**2 + prewitt_v(image, mask)**2)
    out /= np.sqrt(2)
    return out
Example #34
def roberts(image, mask=None):
    """Find the edge magnitude using Roberts' cross operator.

    Parameters
    ----------
    image : 2-D array
        Image to process.
    mask : 2-D array, optional
        An optional mask to limit the application to a certain area.
        Note that pixels surrounding masked regions are also masked to
        prevent masked regions from affecting the result.

    Returns
    -------
    output : 2-D array
        The Roberts' Cross edge map.
    """
    assert_nD(image, 2)
    return np.sqrt(roberts_positive_diagonal(image, mask)**2 +
                   roberts_negative_diagonal(image, mask)**2)
Example #35
def inverse(data,
            impulse_response=None,
            filter_params={},
            max_gain=2,
            predefined_filter=None):
    """Apply the filter in reverse to the given data.

    Parameters
    ----------
    data : (M,N) ndarray
        Input data.
    impulse_response : callable `f(r, c, **filter_params)`
        Impulse response of the filter.  See LPIFilter2D.__init__.
    filter_params : dict
        Additional keyword parameters to the impulse_response function.
    max_gain : float
        Limit the filter gain.  Often, the filter contains zeros, which would
        cause the inverse filter to have infinite gain.  High gain causes
        amplification of artefacts, so a conservative limit is recommended.

    Other Parameters
    ----------------
    predefined_filter : LPIFilter2D
        If you need to apply the same filter multiple times over different
        images, construct the LPIFilter2D and specify it here.

    """
    assert_nD(data, 2, 'data')
    if predefined_filter is None:
        filt = LPIFilter2D(impulse_response, **filter_params)
    else:
        filt = predefined_filter

    F, G = filt._prepare(data)
    _min_limit(F)

    F = 1 / F
    mask = np.abs(F) > max_gain
    F[mask] = np.sign(F[mask]) * max_gain

    return _centre(np.abs(ifftshift(np.dual.ifftn(G * F))), data.shape)
Example #36
def roberts(image, mask=None):
    """Find the edge magnitude using Roberts' cross operator.

    Parameters
    ----------
    image : 2-D array
        Image to process.
    mask : 2-D array, optional
        An optional mask to limit the application to a certain area.
        Note that pixels surrounding masked regions are also masked to
        prevent masked regions from affecting the result.

    Returns
    -------
    output : 2-D array
        The Roberts' Cross edge map.
    """
    assert_nD(image, 2)
    return np.sqrt(
        roberts_positive_diagonal(image, mask)**2 +
        roberts_negative_diagonal(image, mask)**2)
Example #37
def wiener(data, impulse_response=None, filter_params={}, K=0.25,
           predefined_filter=None):
    """Minimum Mean Square Error (Wiener) inverse filter.

    Parameters
    ----------
    data : (M,N) ndarray
        Input data.
    K : float or (M,N) ndarray
        Ratio between power spectrum of noise and undegraded
        image.
    impulse_response : callable `f(r, c, **filter_params)`
        Impulse response of the filter.  See LPIFilter2D.__init__.
    filter_params : dict
        Additional keyword parameters to the impulse_response function.

    Other Parameters
    ----------------
    predefined_filter : LPIFilter2D
        If you need to apply the same filter multiple times over different
        images, construct the LPIFilter2D and specify it here.

    """
    assert_nD(data, 2, 'data')

    if not isinstance(K, float):
        assert_nD(K, 2, 'K')

    if predefined_filter is None:
        filt = LPIFilter2D(impulse_response, **filter_params)
    else:
        filt = predefined_filter

    F, G = filt._prepare(data)
    _min_limit(F)

    H_mag_sqr = np.abs(F)**2
    F = 1 / F * H_mag_sqr / (H_mag_sqr + K)

    return _centre(np.abs(ifftshift(np.dual.ifftn(G * F))), data.shape)
Example #38
def _handle_input(image, selem, out, mask, out_dtype=None, pixel_size=1):

    assert_nD(image, 2)
    if image.dtype not in (np.uint8, np.uint16):
        image = img_as_ubyte(image)

    selem = np.ascontiguousarray(img_as_ubyte(selem > 0))
    image = np.ascontiguousarray(image)

    if mask is None:
        mask = np.ones(image.shape, dtype=np.uint8)
    else:
        mask = img_as_ubyte(mask)
        mask = np.ascontiguousarray(mask)

    if image is out:
        raise NotImplementedError("Cannot perform rank operation in place.")

    if out is None:
        if out_dtype is None:
            out_dtype = image.dtype
        out = np.empty(image.shape + (pixel_size, ), dtype=out_dtype)
    else:
        if len(out.shape) == 2:
            out = out.reshape(out.shape + (pixel_size, ))

    is_8bit = image.dtype in (np.uint8, np.int8)

    if is_8bit:
        max_bin = 255
    else:
        max_bin = max(4, image.max())

    bitdepth = int(np.log2(max_bin))
    if bitdepth > 10:
        warnings.warn("Bitdepth of %d may result in bad rank filter "
                      "performance due to large number of bins." % bitdepth)

    return image, selem, out, mask, max_bin
Example #39
def _handle_input(image, selem, out, mask, out_dtype=None, pixel_size=1):

    assert_nD(image, 2)
    if image.dtype not in (np.uint8, np.uint16):
        image = img_as_ubyte(image)

    selem = np.ascontiguousarray(img_as_ubyte(selem > 0))
    image = np.ascontiguousarray(image)

    if mask is None:
        mask = np.ones(image.shape, dtype=np.uint8)
    else:
        mask = img_as_ubyte(mask)
        mask = np.ascontiguousarray(mask)

    if image is out:
        raise NotImplementedError("Cannot perform rank operation in place.")

    if out is None:
        if out_dtype is None:
            out_dtype = image.dtype
        out = np.empty(image.shape+(pixel_size,), dtype=out_dtype)
    else:
        if len(out.shape) == 2:
            out = out.reshape(out.shape+(pixel_size,))

    is_8bit = image.dtype in (np.uint8, np.int8)

    if is_8bit:
        max_bin = 255
    else:
        max_bin = max(4, image.max())

    bitdepth = int(np.log2(max_bin))
    if bitdepth > 10:
        warnings.warn("Bitdepth of %d may result in bad rank filter "
                      "performance due to large number of bins." % bitdepth)

    return image, selem, out, mask, max_bin
Example #40
def inverse(data, impulse_response=None, filter_params={}, max_gain=2,
            predefined_filter=None):
    """Apply the filter in reverse to the given data.

    Parameters
    ----------
    data : (M,N) ndarray
        Input data.
    impulse_response : callable `f(r, c, **filter_params)`
        Impulse response of the filter.  See LPIFilter2D.__init__.
    filter_params : dict
        Additional keyword parameters to the impulse_response function.
    max_gain : float
        Limit the filter gain.  Often, the filter contains zeros, which would
        cause the inverse filter to have infinite gain.  High gain causes
        amplification of artefacts, so a conservative limit is recommended.

    Other Parameters
    ----------------
    predefined_filter : LPIFilter2D
        If you need to apply the same filter multiple times over different
        images, construct the LPIFilter2D and specify it here.

    """
    assert_nD(data, 2, 'data')
    if predefined_filter is None:
        filt = LPIFilter2D(impulse_response, **filter_params)
    else:
        filt = predefined_filter

    F, G = filt._prepare(data)
    _min_limit(F)

    F = 1 / F
    mask = np.abs(F) > max_gain
    F[mask] = np.sign(F[mask]) * max_gain

    return _centre(np.abs(ifftshift(np.dual.ifftn(G * F))), data.shape)
Example #41
def forward(data,
            impulse_response=None,
            filter_params={},
            predefined_filter=None):
    """Apply the given filter to data.

    Parameters
    ----------
    data : (M,N) ndarray
        Input data.
    impulse_response : callable `f(r, c, **filter_params)`
        Impulse response of the filter.  See LPIFilter2D.__init__.
    filter_params : dict
        Additional keyword parameters to the impulse_response function.

    Other Parameters
    ----------------
    predefined_filter : LPIFilter2D
        If you need to apply the same filter multiple times over different
        images, construct the LPIFilter2D and specify it here.

    Examples
    --------

    Gaussian filter:

    >>> def filt_func(r, c):
    ...     return np.exp(-np.hypot(r, c)/1)
    >>>
    >>> from skimage import data
    >>> filtered = forward(data.coins(), filt_func)

    """
    assert_nD(data, 2, 'data')
    if predefined_filter is None:
        predefined_filter = LPIFilter2D(impulse_response, **filter_params)
    return predefined_filter(data)
Example #42
def gabor_filter(image,
                 frequency,
                 theta=0,
                 bandwidth=1,
                 sigma_x=None,
                 sigma_y=None,
                 n_stds=3,
                 offset=0,
                 mode='reflect',
                 cval=0):
    """Return real and imaginary responses to Gabor filter.

    The real and imaginary parts of the Gabor filter kernel are applied to the
    image and the response is returned as a pair of arrays.

    Gabor filter is a linear filter with a Gaussian kernel which is modulated
    by a sinusoidal plane wave. Frequency and orientation representations of
    the Gabor filter are similar to those of the human visual system.
    Gabor filter banks are commonly used in computer vision and image
    processing. They are especially suitable for edge detection and texture
    classification.

    Parameters
    ----------
    image : 2-D array
        Input image.
    frequency : float
        Spatial frequency of the harmonic function. Specified in pixels.
    theta : float, optional
        Orientation in radians. If 0, the harmonic is in the x-direction.
    bandwidth : float, optional
        The bandwidth captured by the filter. For fixed bandwidth, `sigma_x`
        and `sigma_y` will decrease with increasing frequency. This value is
        ignored if `sigma_x` and `sigma_y` are set by the user.
    sigma_x, sigma_y : float, optional
        Standard deviation in x- and y-directions. These directions apply to
        the kernel *before* rotation. If `theta = pi/2`, then the kernel is
        rotated 90 degrees so that `sigma_x` controls the *vertical* direction.
    n_stds : scalar, optional
        The linear size of the kernel is n_stds (3 by default) standard
        deviations.
    offset : float, optional
        Phase offset of harmonic function in radians.
    mode : string, optional
        Mode used to convolve image with a kernel, passed to `ndimage.convolve`
    cval : scalar, optional
        Value to fill past edges of input if `mode` of convolution is
        'constant'. The parameter is passed to `ndimage.convolve`.

    Returns
    -------
    real, imag : arrays
        Filtered images using the real and imaginary parts of the Gabor filter
        kernel. Images are of the same dimensions as the input one.

    References
    ----------
    .. [1] http://en.wikipedia.org/wiki/Gabor_filter
    .. [2] http://mplab.ucsd.edu/tutorials/gabor.pdf

    Examples
    --------
    >>> from skimage.filter import gabor_filter
    >>> from skimage import data, io
    >>> from matplotlib import pyplot as plt  # doctest: +SKIP

    >>> image = data.coins()
    >>> # detecting edges in a coin image
    >>> filt_real, filt_imag = gabor_filter(image, frequency=0.6)
    >>> plt.figure()            # doctest: +SKIP
    >>> io.imshow(filt_real)    # doctest: +SKIP
    >>> io.show()               # doctest: +SKIP

    >>> # less sensitivity to finer details with the lower frequency kernel
    >>> filt_real, filt_imag = gabor_filter(image, frequency=0.1)
    >>> plt.figure()            # doctest: +SKIP
    >>> io.imshow(filt_real)    # doctest: +SKIP
    >>> io.show()               # doctest: +SKIP
    """
    assert_nD(image, 2)
    g = gabor_kernel(frequency, theta, bandwidth, sigma_x, sigma_y, n_stds,
                     offset)

    filtered_real = ndimage.convolve(image, np.real(g), mode=mode, cval=cval)
    filtered_imag = ndimage.convolve(image, np.imag(g), mode=mode, cval=cval)

    return filtered_real, filtered_imag
Example #43
def threshold_adaptive(image, block_size, method='gaussian', offset=0,
                       mode='reflect', param=None):
    """Applies an adaptive threshold to an array.

    Also known as local or dynamic thresholding where the threshold value is
    the weighted mean for the local neighborhood of a pixel subtracted by a
    constant. Alternatively the threshold can be determined dynamically by a a
    given function using the 'generic' method.

    Parameters
    ----------
    image : (N, M) ndarray
        Input image.
    block_size : int
        Odd size of the pixel neighborhood used to calculate the local
        threshold value (e.g. 3, 5, 7, ..., 21, ...).
    method : {'generic', 'gaussian', 'mean', 'median'}, optional
        Method used to determine adaptive threshold for local neighbourhood in
        weighted mean image.

        * 'generic': use custom function (see `param` parameter)
        * 'gaussian': apply gaussian filter (see `param` parameter for custom\
                      sigma value)
        * 'mean': apply arithmetic mean filter
        * 'median': apply median rank filter

        By default the 'gaussian' method is used.
    offset : float, optional
        Constant subtracted from weighted mean of neighborhood to calculate
        the local threshold value. Default offset is 0.
    mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional
        The mode parameter determines how the array borders are handled, where
        cval is the value when mode is equal to 'constant'.
        Default is 'reflect'.
    param : {int, function}, optional
        Either specify sigma for the 'gaussian' method or a function object
        for the 'generic' method. This function takes the flat array of the
        local neighbourhood as a single argument and returns the calculated
        threshold for the centre pixel.

    Returns
    -------
    threshold : (N, M) ndarray
        Thresholded binary image

    References
    ----------
    .. [1] http://docs.opencv.org/modules/imgproc/doc/miscellaneous_transformations.html?highlight=threshold#adaptivethreshold

    Examples
    --------
    >>> from skimage.data import camera
    >>> image = camera()[:50, :50]
    >>> binary_image1 = threshold_adaptive(image, 15, 'mean')
    >>> func = lambda arr: arr.mean()
    >>> binary_image2 = threshold_adaptive(image, 15, 'generic', param=func)
    """
    assert_nD(image, 2)
    thresh_image = np.zeros(image.shape, 'double')
    if method == 'generic':
        scipy.ndimage.generic_filter(image, param, block_size,
                                     output=thresh_image, mode=mode)
    elif method == 'gaussian':
        if param is None:
            # automatically determine sigma which covers > 99% of distribution
            sigma = (block_size - 1) / 6.0
        else:
            sigma = param
        scipy.ndimage.gaussian_filter(image, sigma, output=thresh_image,
                                      mode=mode)
    elif method == 'mean':
        mask = 1. / block_size * np.ones((block_size,))
        # separation of filters to speedup convolution
        scipy.ndimage.convolve1d(image, mask, axis=0, output=thresh_image,
                                 mode=mode)
        scipy.ndimage.convolve1d(thresh_image, mask, axis=1,
                                 output=thresh_image, mode=mode)
    elif method == 'median':
        scipy.ndimage.median_filter(image, block_size, output=thresh_image,
                                    mode=mode)

    return image > (thresh_image - offset)
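A sketch of just the 'mean' branch, showing the separable trick from the inline comment: the 2-D block mean is computed as two 1-D convolutions before the comparison (function name is illustrative):

import numpy as np
from scipy import ndimage

def threshold_mean_sketch(image, block_size, offset=0, mode='reflect'):
    image = np.asarray(image, dtype=float)
    kernel = np.full(block_size, 1.0 / block_size)
    # Two 1-D passes instead of one (block_size x block_size) 2-D pass.
    t = ndimage.convolve1d(image, kernel, axis=0, mode=mode)
    t = ndimage.convolve1d(t, kernel, axis=1, mode=mode)
    return image > (t - offset)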
Example #44
def blob_doh(image, min_sigma=1, max_sigma=30, num_sigma=10, threshold=0.01,
             overlap=.5, log_scale=False):
    """Finds blobs in the given grayscale image.

    Blobs are found using the Determinant of Hessian method [1]_. For each blob
    found, the method returns its coordinates and the standard deviation
    of the Gaussian Kernel used for the Hessian matrix whose determinant
    detected the blob. Determinant of Hessians is approximated using [2]_.

    Parameters
    ----------
    image : ndarray
        Input grayscale image. Blobs can be either light on dark or vice versa.
    min_sigma : float, optional
        The minimum standard deviation for Gaussian Kernel used to compute
        Hessian matrix. Keep this low to detect smaller blobs.
    max_sigma : float, optional
        The maximum standard deviation for Gaussian Kernel used to compute
        Hessian matrix. Keep this high to detect larger blobs.
    num_sigma : int, optional
        The number of intermediate values of standard deviations to consider
        between `min_sigma` and `max_sigma`.
    threshold : float, optional
        The absolute lower bound for scale space maxima. Local maxima smaller
        than `threshold` are ignored. Reduce this to detect less prominent
        blobs.
    overlap : float, optional
        A value between 0 and 1. If the area of two blobs overlaps by a
        fraction greater than `overlap`, the smaller blob is eliminated.
    log_scale : bool, optional
        If set, intermediate values of standard deviations are interpolated
        using a logarithmic scale to the base `10`. If not, linear
        interpolation is used.

    Returns
    -------
    A : (n, 3) ndarray
        A 2d array with each row representing 3 values, ``(y,x,sigma)``
        where ``(y,x)`` are coordinates of the blob and ``sigma`` is the
        standard deviation of the Gaussian kernel of the Hessian Matrix whose
        determinant detected the blob.

    References
    ----------
    .. [1] http://en.wikipedia.org/wiki/Blob_detection#The_determinant_of_the_Hessian

    .. [2] Herbert Bay, Andreas Ess, Tinne Tuytelaars, Luc Van Gool,
           "SURF: Speeded Up Robust Features"
           ftp://ftp.vision.ee.ethz.ch/publications/articles/eth_biwi_00517.pdf

    Examples
    --------
    >>> from skimage import data, feature
    >>> img = data.coins()
    >>> feature.blob_doh(img)
    array([[121, 271,  30],
           [123,  44,  23],
           [123, 205,  20],
           [124, 336,  20],
           [126, 101,  20],
           [126, 153,  20],
           [156, 302,  30],
           [185, 348,  30],
           [192, 212,  23],
           [193, 275,  23],
           [195, 100,  23],
           [197,  44,  20],
           [197, 153,  20],
           [260, 173,  30],
           [262, 243,  23],
           [265, 113,  23],
           [270, 363,  30]])


    Notes
    -----
    The radius of each blob is approximately `sigma`.
    Computation of the Determinant of Hessians is independent of the
    standard deviation, so detecting larger blobs doesn't take more time.
    In methods like :py:meth:`blob_dog` and :py:meth:`blob_log`, the
    computation of Gaussians for larger `sigma` takes more time. The
    downside is that this method can't be used for detecting blobs of
    radius less than `3px` due to the box filters used in the
    approximation of the Hessian Determinant.
    """

    assert_nD(image, 2)

    image = img_as_float(image)
    image = integral_image(image)

    if log_scale:
        start, stop = log(min_sigma, 10), log(max_sigma, 10)
        sigma_list = np.logspace(start, stop, num_sigma)
    else:
        sigma_list = np.linspace(min_sigma, max_sigma, num_sigma)

    hessian_images = [_hessian_matrix_det(image, s) for s in sigma_list]
    image_cube = np.dstack(hessian_images)

    local_maxima = peak_local_max(image_cube, threshold_abs=threshold,
                                  footprint=np.ones((3, 3, 3)),
                                  threshold_rel=0.0,
                                  exclude_border=False)

    # Convert the last index to its corresponding scale value
    local_maxima[:, 2] = sigma_list[local_maxima[:, 2]]
    return _prune_blobs(local_maxima, overlap)
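
The sigma-independence claimed in the Notes comes from `integral_image`: once the integral image is computed, each box filter used in the Hessian approximation costs four array lookups per pixel, whatever the box size. A small sketch of that trick (the helper name is mine, not the library's):

import numpy as np


def box_sum(ii, r0, c0, r1, c1):
    # sum of img[r0:r1, c0:c1] in four lookups, regardless of box size;
    # `ii` is a zero-padded integral image: ii[r, c] == img[:r, :c].sum()
    return ii[r1, c1] - ii[r0, c1] - ii[r1, c0] + ii[r0, c0]


img = np.arange(25, dtype=float).reshape(5, 5)
ii = np.pad(img.cumsum(axis=0).cumsum(axis=1), ((1, 0), (1, 0)))
assert box_sum(ii, 1, 1, 4, 4) == img[1:4, 1:4].sum()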
Exemplo n.º 45
0
def blob_log(image, min_sigma=1, max_sigma=50, num_sigma=10, threshold=.2,
             overlap=.5, log_scale=False):
    """Finds blobs in the given grayscale image.

    Blobs are found using the Laplacian of Gaussian (LoG) method [1]_.
    For each blob found, the method returns its coordinates and the standard
    deviation of the Gaussian kernel that detected the blob.

    Parameters
    ----------
    image : ndarray
        Input grayscale image, blobs are assumed to be light on dark
        background (white on black).
    min_sigma : float, optional
        The minimum standard deviation for Gaussian Kernel. Keep this low to
        detect smaller blobs.
    max_sigma : float, optional
        The maximum standard deviation for Gaussian Kernel. Keep this high to
        detect larger blobs.
    num_sigma : int, optional
        The number of intermediate values of standard deviations to consider
        between `min_sigma` and `max_sigma`.
    threshold : float, optional
        The absolute lower bound for scale space maxima. Local maxima
        smaller than `threshold` are ignored. Reduce this to detect blobs
        with lower intensities.
    overlap : float, optional
        A value between 0 and 1. If the area of two blobs overlaps by a
        fraction greater than `overlap`, the smaller blob is eliminated.
    log_scale : bool, optional
        If set, intermediate values of standard deviations are interpolated
        using a logarithmic scale to the base `10`. If not, linear
        interpolation is used.

    Returns
    -------
    A : (n, 3) ndarray
        A 2d array with each row representing 3 values, ``(y,x,sigma)``
        where ``(y,x)`` are coordinates of the blob and ``sigma`` is the
        standard deviation of the Gaussian kernel which detected the blob.

    References
    ----------
    .. [1] http://en.wikipedia.org/wiki/Blob_detection#The_Laplacian_of_Gaussian

    Examples
    --------
    >>> from skimage import data, feature, exposure
    >>> img = data.coins()
    >>> img = exposure.equalize_hist(img)  # improves detection
    >>> feature.blob_log(img, threshold = .3)
    array([[113, 323,   1],
           [121, 272,  17],
           [124, 336,  11],
           [126,  46,  11],
           [126, 208,  11],
           [127, 102,  11],
           [128, 154,  11],
           [185, 344,  17],
           [194, 213,  17],
           [194, 276,  17],
           [197,  44,  11],
           [198, 103,  11],
           [198, 155,  11],
           [260, 174,  17],
           [263, 244,  17],
           [263, 302,  17],
           [266, 115,  11]])

    Notes
    -----
    The radius of each blob is approximately :math:`\sqrt{2}\sigma`.
    """

    assert_nD(image, 2)

    image = img_as_float(image)

    if log_scale:
        start, stop = log(min_sigma, 10), log(max_sigma, 10)
        sigma_list = np.logspace(start, stop, num_sigma)
    else:
        sigma_list = np.linspace(min_sigma, max_sigma, num_sigma)

    # computing gaussian laplace
    # s**2 provides scale invariance
    gl_images = [-gaussian_laplace(image, s) * s ** 2 for s in sigma_list]
    image_cube = np.dstack(gl_images)

    local_maxima = peak_local_max(image_cube, threshold_abs=threshold,
                                  footprint=np.ones((3, 3, 3)),
                                  threshold_rel=0.0,
                                  exclude_border=False)

    # Convert the last index to its corresponding scale value
    local_maxima[:, 2] = sigma_list[local_maxima[:, 2]]
    return _prune_blobs(local_maxima, overlap)
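
The `s ** 2` factor in the Laplacian-of-Gaussian responses above is the standard scale normalization: without it, responses shrink as `sigma` grows and maxima could not be compared across scales. A self-contained check on a synthetic disc (values here are illustrative):

import numpy as np
from scipy.ndimage import gaussian_laplace

# bright disc of radius 8 on a dark background
yy, xx = np.mgrid[:64, :64]
disc = ((yy - 32) ** 2 + (xx - 32) ** 2 <= 8 ** 2).astype(float)

sigmas = np.linspace(1, 10, 19)
# scale-normalized responses at the disc centre
responses = [(-gaussian_laplace(disc, s) * s ** 2)[32, 32] for s in sigmas]
best = sigmas[int(np.argmax(responses))]
print(best, 8 / np.sqrt(2))  # the best sigma is close to radius / sqrt(2)

This is also why the docstring reports the blob radius as approximately :math:`\sqrt{2}\sigma`.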
Exemplo n.º 46
0
def blob_dog(image, min_sigma=1, max_sigma=50, sigma_ratio=1.6, threshold=2.0,
             overlap=.5):
    """Finds blobs in the given grayscale image.

    Blobs are found using the Difference of Gaussian (DoG) method [1]_.
    For each blob found, the method returns its coordinates and the standard
    deviation of the Gaussian kernel that detected the blob.

    Parameters
    ----------
    image : ndarray
        Input grayscale image, blobs are assumed to be light on dark
        background (white on black).
    min_sigma : float, optional
        The minimum standard deviation for Gaussian Kernel. Keep this low to
        detect smaller blobs.
    max_sigma : float, optional
        The maximum standard deviation for Gaussian Kernel. Keep this high to
        detect larger blobs.
    sigma_ratio : float, optional
        The ratio between the standard deviations of successive Gaussian
        kernels used for computing the Difference of Gaussians.
    threshold : float, optional
        The absolute lower bound for scale space maxima. Local maxima
        smaller than `threshold` are ignored. Reduce this to detect blobs
        with lower intensities.
    overlap : float, optional
        A value between 0 and 1. If the area of two blobs overlaps by a
        fraction greater than `overlap`, the smaller blob is eliminated.

    Returns
    -------
    A : (n, 3) ndarray
        A 2d array with each row representing 3 values, ``(y,x,sigma)``
        where ``(y,x)`` are coordinates of the blob and ``sigma`` is the
        standard deviation of the Gaussian kernel which detected the blob.

    References
    ----------
    .. [1] http://en.wikipedia.org/wiki/Blob_detection#The_difference_of_Gaussians_approach

    Examples
    --------
    >>> from skimage import data, feature
    >>> feature.blob_dog(data.coins(), threshold=.5, max_sigma=40)
    array([[ 45, 336,  16],
           [ 52, 155,  16],
           [ 52, 216,  16],
           [ 54,  42,  16],
           [ 54, 276,  10],
           [ 58, 100,  10],
           [120, 272,  16],
           [124, 337,  10],
           [125,  45,  16],
           [125, 208,  10],
           [127, 102,  10],
           [128, 154,  10],
           [185, 347,  16],
           [193, 213,  16],
           [194, 277,  16],
           [195, 102,  16],
           [196,  43,  10],
           [198, 155,  10],
           [260,  46,  16],
           [261, 173,  16],
           [263, 245,  16],
           [263, 302,  16],
           [267, 115,  10],
           [267, 359,  16]])

    Notes
    -----
    The radius of each blob is approximately :math:`\sqrt{2}\sigma`.
    """
    assert_nD(image, 2)

    image = img_as_float(image)

    # k such that min_sigma*(sigma_ratio**k) > max_sigma
    k = int(log(float(max_sigma) / min_sigma, sigma_ratio)) + 1

    # a geometric progression of standard deviations for gaussian kernels
    sigma_list = np.array([min_sigma * (sigma_ratio ** i)
                          for i in range(k + 1)])

    gaussian_images = [gaussian_filter(image, s) for s in sigma_list]

    # computing difference between two successive Gaussian blurred images
    # multiplying with standard deviation provides scale invariance
    dog_images = [(gaussian_images[i] - gaussian_images[i + 1])
                  * sigma_list[i] for i in range(k)]
    image_cube = np.dstack(dog_images)

    local_maxima = peak_local_max(image_cube, threshold_abs=threshold,
                                  footprint=np.ones((3, 3, 3)),
                                  threshold_rel=0.0,
                                  exclude_border=False)

    # Convert the last index to its corresponding scale value
    local_maxima[:, 2] = sigma_list[local_maxima[:, 2]]
    return _prune_blobs(local_maxima, overlap)
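
The value of `k` above is chosen so that the geometric progression just covers `max_sigma`: it is the smallest integer with `min_sigma * sigma_ratio**k > max_sigma`, and `k + 1` Gaussians yield `k` difference images. A quick check with the default parameters:

from math import log

min_sigma, max_sigma, sigma_ratio = 1.0, 50.0, 1.6
k = int(log(max_sigma / min_sigma, sigma_ratio)) + 1
sigma_list = [min_sigma * sigma_ratio ** i for i in range(k + 1)]
assert sigma_list[-1] >= max_sigma   # progression reaches max_sigma
print(k, len(sigma_list))            # k DoG images from k + 1 Gaussians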
Exemplo n.º 47
0
def greycomatrix(image,
                 distances,
                 angles,
                 levels=256,
                 symmetric=False,
                 normed=False):
    """Calculate the grey-level co-occurrence matrix.

    A grey level co-occurrence matrix is a histogram of co-occurring
    greyscale values at a given offset over an image.

    Parameters
    ----------
    image : array_like of uint8
        Integer typed input image. The image will be cast to uint8, so
        the maximum value must be less than 256.
    distances : array_like
        List of pixel pair distance offsets.
    angles : array_like
        List of pixel pair angles in radians.
    levels : int, optional
        The input image should contain integers in [0, levels-1],
        where levels indicate the number of grey-levels counted
        (typically 256 for an 8-bit image). The maximum value is
        256.
    symmetric : bool, optional
        If True, the output matrix `P[:, :, d, theta]` is symmetric. This
        is accomplished by ignoring the order of value pairs, so both
        (i, j) and (j, i) are accumulated when (i, j) is encountered
        for a given offset. The default is False.
    normed : bool, optional
        If True, normalize each matrix `P[:, :, d, theta]` by dividing
        by the total number of accumulated co-occurrences for the given
        offset. The elements of the resulting matrix sum to 1. The
        default is False.

    Returns
    -------
    P : 4-D ndarray
        The grey-level co-occurrence histogram. The value
        `P[i,j,d,theta]` is the number of times that grey-level `j`
        occurs at a distance `d` and at an angle `theta` from
        grey-level `i`. If `normed` is `False`, the output is of
        type uint32, otherwise it is float64.

    References
    ----------
    .. [1] The GLCM Tutorial Home Page,
           http://www.fp.ucalgary.ca/mhallbey/tutorial.htm
    .. [2] Pattern Recognition Engineering, Morton Nadler & Eric P.
           Smith
    .. [3] Wikipedia, http://en.wikipedia.org/wiki/Co-occurrence_matrix


    Examples
    --------
    Compute 2 GLCMs: One for a 1-pixel offset to the right, and one
    for a 1-pixel offset upwards.

    >>> image = np.array([[0, 0, 1, 1],
    ...                   [0, 0, 1, 1],
    ...                   [0, 2, 2, 2],
    ...                   [2, 2, 3, 3]], dtype=np.uint8)
    >>> result = greycomatrix(image, [1], [0, np.pi/4, np.pi/2, 3*np.pi/4], levels=4)
    >>> result[:, :, 0, 0]
    array([[2, 2, 1, 0],
           [0, 2, 0, 0],
           [0, 0, 3, 1],
           [0, 0, 0, 1]], dtype=uint32)
    >>> result[:, :, 0, 1]
    array([[1, 1, 3, 0],
           [0, 1, 1, 0],
           [0, 0, 0, 2],
           [0, 0, 0, 0]], dtype=uint32)
    >>> result[:, :, 0, 2]
    array([[3, 0, 2, 0],
           [0, 2, 2, 0],
           [0, 0, 1, 2],
           [0, 0, 0, 0]], dtype=uint32)
    >>> result[:, :, 0, 3]
    array([[2, 0, 0, 0],
           [1, 1, 2, 0],
           [0, 0, 2, 1],
           [0, 0, 0, 0]], dtype=uint32)

    """
    assert_nD(image, 2)
    assert_nD(distances, 1, 'distances')
    assert_nD(angles, 1, 'angles')

    assert levels <= 256
    image = np.ascontiguousarray(image)
    assert image.min() >= 0
    assert image.max() < levels
    image = image.astype(np.uint8)
    distances = np.ascontiguousarray(distances, dtype=np.float64)
    angles = np.ascontiguousarray(angles, dtype=np.float64)

    P = np.zeros((levels, levels, len(distances), len(angles)),
                 dtype=np.uint32,
                 order='C')

    # count co-occurrences
    _glcm_loop(image, distances, angles, levels, P)

    # make each GLCM symmetric
    if symmetric:
        Pt = np.transpose(P, (1, 0, 2, 3))
        P = P + Pt

    # normalize each GLCM
    if normed:
        P = P.astype(np.float64)
        glcm_sums = np.apply_over_axes(np.sum, P, axes=(0, 1))
        glcm_sums[glcm_sums == 0] = 1
        P /= glcm_sums

    return P
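
The counting itself happens in the compiled helper `_glcm_loop`. Below is a simplified pure-Python sketch of what it computes; the offset convention is inferred from the doctest above, and the real implementation is optimized Cython:

import numpy as np


def glcm_loop_sketch(image, distances, angles, levels):
    rows, cols = image.shape
    P = np.zeros((levels, levels, len(distances), len(angles)),
                 dtype=np.uint32)
    for d_idx, d in enumerate(distances):
        for a_idx, a in enumerate(angles):
            # neighbour offset for this (distance, angle) pair
            dr = int(round(np.sin(a) * d))
            dc = int(round(np.cos(a) * d))
            for r in range(rows):
                for c in range(cols):
                    r2, c2 = r + dr, c + dc
                    if 0 <= r2 < rows and 0 <= c2 < cols:
                        P[image[r, c], image[r2, c2], d_idx, a_idx] += 1
    return P


image = np.array([[0, 0, 1, 1],
                  [0, 0, 1, 1],
                  [0, 2, 2, 2],
                  [2, 2, 3, 3]], dtype=np.uint8)
print(glcm_loop_sketch(image, [1], [0], levels=4)[:, :, 0, 0])
# matches the first matrix in the doctest above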
Exemplo n.º 48
0
def gabor_filter(image, frequency, theta=0, bandwidth=1, sigma_x=None,
                 sigma_y=None, n_stds=3, offset=0, mode='reflect', cval=0):
    """Return real and imaginary responses to Gabor filter.

    The real and imaginary parts of the Gabor filter kernel are applied to the
    image and the response is returned as a pair of arrays.

    Gabor filter is a linear filter with a Gaussian kernel which is modulated
    by a sinusoidal plane wave. Frequency and orientation representations of
    the Gabor filter are similar to those of the human visual system.
    Gabor filter banks are commonly used in computer vision and image
    processing. They are especially suitable for edge detection and texture
    classification.

    Parameters
    ----------
    image : 2-D array
        Input image.
    frequency : float
        Spatial frequency of the harmonic function. Specified in cycles
        per pixel.
    theta : float, optional
        Orientation in radians. If 0, the harmonic is in the x-direction.
    bandwidth : float, optional
        The bandwidth captured by the filter. For fixed bandwidth, `sigma_x`
        and `sigma_y` will decrease with increasing frequency. This value is
        ignored if `sigma_x` and `sigma_y` are set by the user.
    sigma_x, sigma_y : float, optional
        Standard deviation in x- and y-directions. These directions apply to
        the kernel *before* rotation. If `theta = pi/2`, then the kernel is
        rotated 90 degrees so that `sigma_x` controls the *vertical* direction.
    n_stds : scalar, optional
        The linear size of the kernel is n_stds (3 by default) standard
        deviations.
    offset : float, optional
        Phase offset of harmonic function in radians.
    mode : string, optional
        Mode used to convolve image with a kernel, passed to `ndimage.convolve`
    cval : scalar, optional
        Value to fill past edges of input if `mode` of convolution is
        'constant'. The parameter is passed to `ndimage.convolve`.

    Returns
    -------
    real, imag : arrays
        Filtered images using the real and imaginary parts of the Gabor filter
        kernel. Images are of the same dimensions as the input one.

    References
    ----------
    .. [1] http://en.wikipedia.org/wiki/Gabor_filter
    .. [2] http://mplab.ucsd.edu/tutorials/gabor.pdf

    Examples
    --------
    >>> from skimage.filter import gabor_filter
    >>> from skimage import data, io
    >>> from matplotlib import pyplot as plt  # doctest: +SKIP

    >>> image = data.coins()
    >>> # detecting edges in a coin image
    >>> filt_real, filt_imag = gabor_filter(image, frequency=0.6)
    >>> plt.figure()            # doctest: +SKIP
    >>> io.imshow(filt_real)    # doctest: +SKIP
    >>> io.show()               # doctest: +SKIP

    >>> # less sensitivity to finer details with the lower frequency kernel
    >>> filt_real, filt_imag = gabor_filter(image, frequency=0.1)
    >>> plt.figure()            # doctest: +SKIP
    >>> io.imshow(filt_real)    # doctest: +SKIP
    >>> io.show()               # doctest: +SKIP
    """
    assert_nD(image, 2)
    g = gabor_kernel(frequency, theta, bandwidth, sigma_x, sigma_y, n_stds,
                     offset)

    filtered_real = ndimage.convolve(image, np.real(g), mode=mode, cval=cval)
    filtered_imag = ndimage.convolve(image, np.imag(g), mode=mode, cval=cval)

    return filtered_real, filtered_imag
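
`gabor_kernel` itself is a library function; conceptually it multiplies a Gaussian envelope by a complex sinusoid, and the real and imaginary parts of the result are the even and odd filters convolved above. A schematic sketch under that assumption (the library version also derives `sigma_x`/`sigma_y` from `bandwidth` and normalizes the envelope, which this sketch skips):

import numpy as np


def gabor_kernel_sketch(frequency, theta=0, sigma_x=3.0, sigma_y=3.0,
                        n_stds=3, offset=0):
    half = int(np.ceil(n_stds * max(sigma_x, sigma_y)))
    y, x = np.mgrid[-half:half + 1, -half:half + 1]
    # rotate the coordinates so the harmonic runs along `theta`
    rotx = x * np.cos(theta) + y * np.sin(theta)
    roty = -x * np.sin(theta) + y * np.cos(theta)
    envelope = np.exp(-0.5 * ((rotx / sigma_x) ** 2 + (roty / sigma_y) ** 2))
    carrier = np.exp(1j * (2 * np.pi * frequency * rotx + offset))
    return envelope * carrier  # real part: even filter, imag part: odd filter


g = gabor_kernel_sketch(frequency=0.2)
print(g.shape, g.dtype)  # odd-sized complex kernel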
Exemplo n.º 49
0
def test_assert_nD():
    z = np.random.random(200**2).reshape((200, 200))
    x = z[10:30, 30:10]  # empty slice (stop < start): a (20, 0) array
    with testing.raises(ValueError):
        assert_nD(x, 2)
Exemplo n.º 51
0
    def detect(self, image):
        """Detect CENSURE keypoints along with the corresponding scale.

        Parameters
        ----------
        image : 2D ndarray
            Input image.

        """

        # (1) First we generate the required scales on the input grayscale
        # image using a bi-level filter and stack them up in `filter_response`.

        # (2) We then perform non-maximum suppression in a 3 x 3 x 3 window
        # on `filter_response` to suppress points that are neither minima
        # nor maxima in their 3 x 3 x 3 neighbourhood. We obtain a boolean
        # ndarray `feature_mask` in which all the minima and maxima of
        # `filter_response` are True.
        # (3) Then we suppress all the points in `feature_mask` for which
        # the corresponding point in the image at a particular scale has a
        # ratio of principal curvatures greater than `line_threshold`.
        # (4) Finally, we remove the border keypoints and return the
        # keypoints along with their corresponding scales.

        assert_nD(image, 2)

        num_scales = self.max_scale - self.min_scale

        image = np.ascontiguousarray(_prepare_grayscale_input_2D(image))

        # Generating all the scales
        filter_response = _filter_image(image, self.min_scale, self.max_scale,
                                        self.mode)

        # Suppressing points that are neither minima nor maxima in their
        # 3 x 3 x 3 neighborhood to zero
        minimas = minimum_filter(filter_response, (3, 3, 3)) == filter_response
        maximas = maximum_filter(filter_response, (3, 3, 3)) == filter_response

        feature_mask = minimas | maximas
        feature_mask[filter_response < self.non_max_threshold] = False

        for i in range(1, num_scales):
            # sigma = (window_size - 1) / 6.0, so the window covers > 99% of
            #                                  the kernel's distribution
            # window_size = 7 + 2 * (min_scale - 1 + i)
            # Hence sigma = 1 + (min_scale - 1 + i)/ 3.0
            _suppress_lines(feature_mask[:, :, i], image,
                            (1 + (self.min_scale + i - 1) / 3.0),
                            self.line_threshold)

        rows, cols, scales = np.nonzero(feature_mask[..., 1:num_scales])
        keypoints = np.column_stack([rows, cols])
        scales = scales + self.min_scale + 1

        if self.mode == 'dob':
            self.keypoints = keypoints
            self.scales = scales
            return

        cumulative_mask = np.zeros(keypoints.shape[0], dtype=bool)

        if self.mode == 'octagon':
            for i in range(self.min_scale + 1, self.max_scale):
                c = (OCTAGON_OUTER_SHAPE[i - 1][0] - 1) // 2 \
                    + OCTAGON_OUTER_SHAPE[i - 1][1]
                cumulative_mask |= (
                    _mask_border_keypoints(image.shape, keypoints, c)
                    & (scales == i))
        elif self.mode == 'star':
            for i in range(self.min_scale + 1, self.max_scale):
                c = STAR_SHAPE[STAR_FILTER_SHAPE[i - 1][0]] \
                    + STAR_SHAPE[STAR_FILTER_SHAPE[i - 1][0]] // 2
                cumulative_mask |= (
                    _mask_border_keypoints(image.shape, keypoints, c)
                    & (scales == i))

        self.keypoints = keypoints[cumulative_mask]
        self.scales = scales[cumulative_mask]
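
Step (3) above relies on the private helper `_suppress_lines`. The underlying test is the classic trace-squared-over-determinant criterion: when the ratio of principal curvatures is large, the response sits on an edge (a line) rather than a blob-like keypoint. A sketch of that criterion using the public `structure_tensor` (I am assuming the older three-tuple return convention; this is not the library's exact helper):

import numpy as np
from skimage.feature import structure_tensor


def suppress_lines_sketch(feature_mask, image, sigma, line_threshold):
    # structure tensor components smoothed at scale `sigma`
    Axx, Axy, Ayy = structure_tensor(image, sigma)
    # trace**2 / det grows with the ratio of principal curvatures,
    # so large values flag edge-like (line) responses
    feature_mask[(Axx + Ayy) ** 2
                 > line_threshold * (Axx * Ayy - Axy ** 2)] = False
    return feature_mask


rng = np.random.default_rng(1)
img = rng.random((32, 32))
mask = suppress_lines_sketch(np.ones_like(img, dtype=bool), img,
                             sigma=1.5, line_threshold=10)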
Exemplo n.º 52
0
def greycoprops(P, prop='contrast'):
    """Calculate texture properties of a GLCM.

    Compute a feature of a grey level co-occurrence matrix to serve as
    a compact summary of the matrix. The properties are computed as
    follows:

    - 'contrast': :math:`\\sum_{i,j=0}^{levels-1} P_{i,j}(i-j)^2`
    - 'dissimilarity': :math:`\\sum_{i,j=0}^{levels-1}P_{i,j}|i-j|`
    - 'homogeneity': :math:`\\sum_{i,j=0}^{levels-1}\\frac{P_{i,j}}{1+(i-j)^2}`
    - 'ASM': :math:`\\sum_{i,j=0}^{levels-1} P_{i,j}^2`
    - 'energy': :math:`\\sqrt{ASM}`
    - 'correlation':
        .. math:: \\sum_{i,j=0}^{levels-1} P_{i,j}\\left[\\frac{(i-\\mu_i) \\
                  (j-\\mu_j)}{\\sqrt{(\\sigma_i^2)(\\sigma_j^2)}}\\right]


    Parameters
    ----------
    P : ndarray
        Input array. `P` is the grey-level co-occurrence histogram
        for which to compute the specified property. The value
        `P[i,j,d,theta]` is the number of times that grey-level j
        occurs at a distance d and at an angle theta from
        grey-level i.
    prop : {'contrast', 'dissimilarity', 'homogeneity', 'energy', \
            'correlation', 'ASM'}, optional
        The property of the GLCM to compute. The default is 'contrast'.

    Returns
    -------
    results : 2-D ndarray
        2-dimensional array. `results[d, a]` is the property 'prop' for
        the d'th distance and the a'th angle.

    References
    ----------
    .. [1] The GLCM Tutorial Home Page,
           http://www.fp.ucalgary.ca/mhallbey/tutorial.htm

    Examples
    --------
    Compute the contrast for GLCMs with distances [1, 2] and angles
    [0 degrees, 90 degrees]

    >>> image = np.array([[0, 0, 1, 1],
    ...                   [0, 0, 1, 1],
    ...                   [0, 2, 2, 2],
    ...                   [2, 2, 3, 3]], dtype=np.uint8)
    >>> g = greycomatrix(image, [1, 2], [0, np.pi/2], levels=4,
    ...                  normed=True, symmetric=True)
    >>> contrast = greycoprops(g, 'contrast')
    >>> contrast
    array([[ 0.58333333,  1.        ],
           [ 1.25      ,  2.75      ]])

    """
    assert_nD(P, 4, 'P')

    (num_level, num_level2, num_dist, num_angle) = P.shape
    assert num_level == num_level2
    assert num_dist > 0
    assert num_angle > 0

    # create weights for specified property
    I, J = np.ogrid[0:num_level, 0:num_level]
    if prop == 'contrast':
        weights = (I - J) ** 2
    elif prop == 'dissimilarity':
        weights = np.abs(I - J)
    elif prop == 'homogeneity':
        weights = 1. / (1. + (I - J) ** 2)
    elif prop in ['ASM', 'energy', 'correlation']:
        pass
    else:
        raise ValueError('%s is an invalid property' % (prop))

    # compute property for each GLCM
    if prop == 'energy':
        asm = np.apply_over_axes(np.sum, (P ** 2), axes=(0, 1))[0, 0]
        results = np.sqrt(asm)
    elif prop == 'ASM':
        results = np.apply_over_axes(np.sum, (P ** 2), axes=(0, 1))[0, 0]
    elif prop == 'correlation':
        results = np.zeros((num_dist, num_angle), dtype=np.float64)
        I = np.arange(num_level).reshape((num_level, 1, 1, 1))
        J = np.arange(num_level).reshape((1, num_level, 1, 1))
        diff_i = I - np.apply_over_axes(np.sum, (I * P), axes=(0, 1))[0, 0]
        diff_j = J - np.apply_over_axes(np.sum, (J * P), axes=(0, 1))[0, 0]

        std_i = np.sqrt(np.apply_over_axes(np.sum, (P * (diff_i) ** 2),
                                           axes=(0, 1))[0, 0])
        std_j = np.sqrt(np.apply_over_axes(np.sum, (P * (diff_j) ** 2),
                                           axes=(0, 1))[0, 0])
        cov = np.apply_over_axes(np.sum, (P * (diff_i * diff_j)),
                                 axes=(0, 1))[0, 0]

        # handle the special case of standard deviations near zero
        mask_0 = std_i < 1e-15
        mask_0[std_j < 1e-15] = True
        results[mask_0] = 1

        # handle the standard case
        mask_1 = ~mask_0
        results[mask_1] = cov[mask_1] / (std_i[mask_1] * std_j[mask_1])
    elif prop in ['contrast', 'dissimilarity', 'homogeneity']:
        weights = weights.reshape((num_level, num_level, 1, 1))
        results = np.apply_over_axes(np.sum, (P * weights), axes=(0, 1))[0, 0]

    return results
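
For the weighted-sum properties ('contrast', 'dissimilarity', 'homogeneity'), the function is just an elementwise product with a weight plane followed by a sum over the `(i, j)` axes. Reproducing the doctest's contrast values by hand (import path assumed from this document's other examples):

import numpy as np
from skimage.feature import greycomatrix

image = np.array([[0, 0, 1, 1],
                  [0, 0, 1, 1],
                  [0, 2, 2, 2],
                  [2, 2, 3, 3]], dtype=np.uint8)
P = greycomatrix(image, [1, 2], [0, np.pi / 2], levels=4,
                 symmetric=True, normed=True)

i, j = np.ogrid[0:4, 0:4]
weights = ((i - j) ** 2)[..., np.newaxis, np.newaxis]  # shape (4, 4, 1, 1)
contrast = (P * weights).sum(axis=(0, 1))              # shape (2, 2)
print(contrast)  # [[0.58333333, 1.], [1.25, 2.75]], as in the doctest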