Example #1
def test_mask_border_keypoints():
    keypoints = np.array([[0, 0], [1, 1], [2, 2], [3, 3], [4, 4]])
    assert_equal(_mask_border_keypoints((10, 10), keypoints, 0),
                 [1, 1, 1, 1, 1])
    assert_equal(_mask_border_keypoints((10, 10), keypoints, 2),
                 [0, 0, 1, 1, 1])
    assert_equal(_mask_border_keypoints((4, 4), keypoints, 2), [0, 0, 1, 0, 0])
    assert_equal(_mask_border_keypoints((10, 10), keypoints, 5),
                 [0, 0, 0, 0, 0])
    assert_equal(_mask_border_keypoints((10, 10), keypoints, 4),
                 [0, 0, 0, 0, 1])
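These tests exercise the private scikit-image helper `_mask_border_keypoints`, which returns a boolean mask that is True for keypoints lying at least `distance` pixels away from every image border. Below is a minimal NumPy sketch consistent with the expected values above; the function name is hypothetical, and the exact boundary convention (distance <= coordinate <= size - distance) is inferred from the test, not copied from the library source.

import numpy as np

def mask_border_keypoints_sketch(image_shape, keypoints, distance):
    # True where a keypoint lies at least `distance` pixels from every border,
    # i.e. distance <= coordinate <= size - distance on both axes.
    rows, cols = image_shape
    r, c = keypoints[:, 0], keypoints[:, 1]
    return ((r >= distance) & (r <= rows - distance) &
            (c >= distance) & (c <= cols - distance))

keypoints = np.array([[0, 0], [1, 1], [2, 2], [3, 3], [4, 4]])
print(mask_border_keypoints_sketch((10, 10), keypoints, 2))
# expected, as in the test above: [False False  True  True  True]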
Example #2
def test_mask_border_keypoints():
    keypoints = np.array([[0, 0], [1, 1], [2, 2], [3, 3], [4, 4]])
    assert_equal(_mask_border_keypoints((10, 10), keypoints, 0),
                 [1, 1, 1, 1, 1])
    assert_equal(_mask_border_keypoints((10, 10), keypoints, 2),
                 [0, 0, 1, 1, 1])
    assert_equal(_mask_border_keypoints((4, 4), keypoints, 2),
                 [0, 0, 1, 0, 0])
    assert_equal(_mask_border_keypoints((10, 10), keypoints, 5),
                 [0, 0, 0, 0, 0])
    assert_equal(_mask_border_keypoints((10, 10), keypoints, 4),
                 [0, 0, 0, 0, 1])
Example #3
    def extract(self, image, keypoints):
        # patch size to build descriptor from
        patch_size = self.patch_size
        desc_size = self.descriptor_size
        # random = np.random.RandomState()
        # random.seed(self.sample_seed)
        ## why 8?
        # samples = np.array((patch_size / 5.0) * random.randn(desc_size * 8)).astype(np.int32)
        # hps2 = - (patch_size-2) // 2
        # samples = samples[(samples < hps) & (samples > hps2)]
        # d2 = desc_size*2
        # pos0 = samples[:d2].reshape(desc_size, 2)
        # pos1 = samples[d2:d2*2].reshape(desc_size, 2)

        # pos0 = np.ascontiguousarray(pos0)
        # pos1 = np.ascontiguousarray(pos1)
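        # Keep only keypoints at least half a patch away from every image
        # border, so that the full patch_size x patch_size window fits inside.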
        hps = patch_size // 2
        self.mask = _mask_border_keypoints(image.shape, keypoints, hps)

        self.keypoints = np.array(keypoints[self.mask, :], dtype=np.intp,
                                  order="C", copy=False)

        self.descriptors = []
        for nn in range(self.keypoints.shape[0]):
            kx, ky = self.keypoints[nn]
            patch = image[kx - hps : kx + hps, ky - hps : ky + hps]
            self.descriptors.append(zernike_moments(patch, 8.0, 12))
        self.descriptors = np.array(self.descriptors)
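The method above drops keypoints whose patch would spill over the image border and then describes each surviving keypoint by the Zernike moments of the surrounding patch. A self-contained sketch of the same idea; the function name is hypothetical, and `zernike_moments` is assumed to come from `mahotas.features`, an import the snippet itself does not show.

import numpy as np
from mahotas.features import zernike_moments   # assumed source of zernike_moments
from skimage import data
from skimage.color import rgb2gray
from skimage.feature import corner_harris, corner_peaks

def zernike_patch_descriptors(image, keypoints, patch_size=16):
    # Drop keypoints closer than half a patch to the border (the role played
    # by _mask_border_keypoints above), then describe each remaining keypoint
    # by the Zernike moments of its patch.
    hps = patch_size // 2
    rows, cols = image.shape
    r, c = keypoints[:, 0], keypoints[:, 1]
    keep = (r >= hps) & (r <= rows - hps) & (c >= hps) & (c <= cols - hps)
    kept = keypoints[keep]
    descriptors = [zernike_moments(image[x - hps:x + hps, y - hps:y + hps],
                                   8.0, 12)
                   for x, y in kept]
    return kept, np.array(descriptors)

# usage sketch: Harris corners as the keypoints to describe
image = rgb2gray(data.astronaut())
corners = corner_peaks(corner_harris(image), min_distance=5)
kept, descriptors = zernike_patch_descriptors(image, corners)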
Example #4
    def _extract_octave(self, octave_image, keypoints, orientations):
        mask = _mask_border_keypoints(octave_image.shape, keypoints,
                                      distance=20)
        keypoints = np.array(keypoints[mask], dtype=np.intp, order='C',
                             copy=False)
        orientations = np.array(orientations[mask], dtype=np.double, order='C',
                                copy=False)

        descriptors = _orb_loop(octave_image, keypoints, orientations)

        return descriptors, mask
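This `_extract_octave` method appears to be an internal helper of scikit-image's ORB feature class, with `_orb_loop` computing the binary descriptors for one pyramid octave. In user code the same work is normally reached through the public API, roughly as follows (`skimage.data.astronaut` is used only as a convenient sample image):

from skimage import data
from skimage.color import rgb2gray
from skimage.feature import ORB

image = rgb2gray(data.astronaut())

orb = ORB(n_keypoints=200, fast_threshold=0.08)
orb.detect_and_extract(image)   # detection plus per-octave extraction

print(orb.keypoints.shape)      # (N, 2) row/col coordinates
print(orb.descriptors.shape)    # (N, 256) binary (boolean) descriptors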
Example #5
    def _extract_octave(self, octave_image, keypoints, orientations):
        mask = _mask_border_keypoints(octave_image.shape,
                                      keypoints,
                                      distance=20)
        keypoints = np.array(keypoints[mask],
                             dtype=np.intp,
                             order='C',
                             copy=False)
        orientations = np.array(orientations[mask],
                                dtype=np.double,
                                order='C',
                                copy=False)

        descriptors = _orb_loop(octave_image, keypoints, orientations)

        return descriptors, mask
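Once descriptors have been extracted for two images they are usually paired with `skimage.feature.match_descriptors`. A brief sketch continuing the ORB usage above; the 15-degree rotation is just an arbitrary way to obtain a second view of the same scene.

from skimage import data, transform
from skimage.color import rgb2gray
from skimage.feature import ORB, match_descriptors

img1 = rgb2gray(data.astronaut())
img2 = transform.rotate(img1, 15)

orb1, orb2 = ORB(n_keypoints=200), ORB(n_keypoints=200)
orb1.detect_and_extract(img1)
orb2.detect_and_extract(img2)

# (M, 2) index pairs: column 0 indexes orb1.keypoints, column 1 orb2.keypoints
matches = match_descriptors(orb1.descriptors, orb2.descriptors,
                            cross_check=True)
print(matches.shape)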
Example #6
    def _detect_octave(self, octave_image):
        # Extract keypoints for current octave
        fast_response = corner_fast(octave_image, self.fast_n,
                                    self.fast_threshold)
        keypoints = corner_peaks(fast_response, min_distance=1)

        if len(keypoints) == 0:
            return (np.zeros((0, 2), dtype=np.double),
                    np.zeros((0, ), dtype=np.double),
                    np.zeros((0, ), dtype=np.double))

        mask = _mask_border_keypoints(octave_image.shape, keypoints,
                                      distance=16)
        keypoints = keypoints[mask]

        orientations = corner_orientations(octave_image, keypoints,
                                           OFAST_MASK)

        harris_response = corner_harris(octave_image, method='k',
                                        k=self.harris_k)
        responses = harris_response[keypoints[:, 0], keypoints[:, 1]]

        return keypoints, orientations, responses
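The detection step above chains FAST corners, a border mask, orientation estimation and a Harris response. The same pipeline can be sketched with scikit-image's public functions; `OFAST_MASK` is internal, so `skimage.morphology.octagon(3, 2)` is used here as a stand-in orientation footprint.

from skimage import data
from skimage.color import rgb2gray
from skimage.feature import (corner_fast, corner_peaks, corner_harris,
                             corner_orientations)
from skimage.morphology import octagon

image = rgb2gray(data.astronaut())

# FAST keypoints, as in _detect_octave
response = corner_fast(image, n=9, threshold=0.08)
keypoints = corner_peaks(response, min_distance=1)

# drop keypoints within 16 pixels of the border (the role of _mask_border_keypoints)
r, c = keypoints[:, 0], keypoints[:, 1]
inside = ((r >= 16) & (r <= image.shape[0] - 16) &
          (c >= 16) & (c <= image.shape[1] - 16))
keypoints = keypoints[inside]

# orientation from the intensity centroid; octagon(3, 2) stands in for OFAST_MASK
orientations = corner_orientations(image, keypoints, octagon(3, 2))

# Harris response sampled at the surviving keypoints
harris = corner_harris(image, method='k', k=0.04)
responses = harris[keypoints[:, 0], keypoints[:, 1]]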
Example #7
    def _detect_octave(self, octave_image):
        # Extract keypoints for current octave
        fast_response = corner_fast(octave_image, self.fast_n,
                                    self.fast_threshold)
        keypoints = corner_peaks(fast_response, min_distance=1)

        if len(keypoints) == 0:
            return (np.zeros((0, 2), dtype=np.double),
                    np.zeros((0, ), dtype=np.double),
                    np.zeros((0, ), dtype=np.double))

        mask = _mask_border_keypoints(octave_image.shape,
                                      keypoints,
                                      distance=16)
        keypoints = keypoints[mask]

        orientations = corner_orientations(octave_image, keypoints, OFAST_MASK)

        harris_response = corner_harris(octave_image,
                                        method='k',
                                        k=self.harris_k)
        responses = harris_response[keypoints[:, 0], keypoints[:, 1]]

        return keypoints, orientations, responses
Example #8
    def detect(self, image):
        """Detect CENSURE keypoints along with the corresponding scale.

        Parameters
        ----------
        image : 2D ndarray
            Input image.

        """

        # (1) First we generate the required scales on the input grayscale
        # image using a bi-level filter and stack them up in `filter_response`.

        # (2) We then perform Non-Maximal suppression in a 3 x 3 x 3 window on
        # the filter_response to suppress points that are neither minima nor
        # maxima in the 3 x 3 x 3 neighbourhood. We obtain a boolean ndarray
        # `feature_mask` containing all the minima and maxima in
        # `filter_response` as True.
        # (3) Then we suppress all the points in the `feature_mask` for which
        # the corresponding point in the image at a particular scale has a
        # ratio of principal curvatures greater than `line_threshold`.
        # (4) Finally, we remove the border keypoints and return the keypoints
        # along with their corresponding scales.

        num_scales = self.max_scale - self.min_scale

        image = np.ascontiguousarray(_prepare_grayscale_input_2D(image))

        # Generating all the scales
        filter_response = _filter_image(image, self.min_scale, self.max_scale,
                                        self.mode)

        # Suppressing points that are neither minima nor maxima in their
        # 3 x 3 x 3 neighborhood to zero
        minimas = minimum_filter(filter_response, (3, 3, 3)) == filter_response
        maximas = maximum_filter(filter_response, (3, 3, 3)) == filter_response

        feature_mask = minimas | maximas
        feature_mask[filter_response < self.non_max_threshold] = False

        for i in range(1, num_scales):
            # sigma = (window_size - 1) / 6.0, so the window covers > 99% of
            #                                  the kernel's distribution
            # window_size = 7 + 2 * (min_scale - 1 + i)
            # Hence sigma = 1 + (min_scale - 1 + i)/ 3.0
            _suppress_lines(feature_mask[:, :, i], image,
                            (1 + (self.min_scale + i - 1) / 3.0),
                            self.line_threshold)

        rows, cols, scales = np.nonzero(feature_mask[..., 1:num_scales])
        keypoints = np.column_stack([rows, cols])
        scales = scales + self.min_scale + 1

        if self.mode == 'dob':
            self.keypoints = keypoints
            self.scales = scales
            return

        cumulative_mask = np.zeros(keypoints.shape[0], dtype=bool)

        if self.mode == 'octagon':
            for i in range(self.min_scale + 1, self.max_scale):
                c = (OCTAGON_OUTER_SHAPE[i - 1][0] - 1) // 2 \
                    + OCTAGON_OUTER_SHAPE[i - 1][1]
                cumulative_mask |= (
                    _mask_border_keypoints(image.shape, keypoints, c)
                    & (scales == i))
        elif self.mode == 'star':
            for i in range(self.min_scale + 1, self.max_scale):
                c = STAR_SHAPE[STAR_FILTER_SHAPE[i - 1][0]] \
                    + STAR_SHAPE[STAR_FILTER_SHAPE[i - 1][0]] // 2
                cumulative_mask |= (
                    _mask_border_keypoints(image.shape, keypoints, c)
                    & (scales == i))

        self.keypoints = keypoints[cumulative_mask]
        self.scales = scales[cumulative_mask]
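This `detect` method appears to belong to scikit-image's CENSURE detector class, whose constructor takes the same parameters as the `keypoints_censure` function shown further below. A minimal usage sketch of the public interface:

from skimage import data
from skimage.color import rgb2gray
from skimage.feature import CENSURE

image = rgb2gray(data.astronaut())

detector = CENSURE(min_scale=1, max_scale=7, mode='DoB',
                   non_max_threshold=0.15, line_threshold=10)
detector.detect(image)

print(detector.keypoints.shape)   # (N, 2) row/col coordinates
print(detector.scales.shape)      # (N,) scale of each keypoint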
Example #9
    def detect(self, image):
        """Detect CENSURE keypoints along with the corresponding scale.

        Parameters
        ----------
        image : 2D ndarray
            Input image.

        """

        # (1) First we generate the required scales on the input grayscale
        # image using a bi-level filter and stack them up in `filter_response`.

        # (2) We then perform Non-Maximal suppression in a 3 x 3 x 3 window on
        # the filter_response to suppress points that are neither minima nor
        # maxima in the 3 x 3 x 3 neighbourhood. We obtain a boolean ndarray
        # `feature_mask` containing all the minima and maxima in
        # `filter_response` as True.
        # (3) Then we suppress all the points in the `feature_mask` for which
        # the corresponding point in the image at a particular scale has a
        # ratio of principal curvatures greater than `line_threshold`.
        # (4) Finally, we remove the border keypoints and return the keypoints
        # along with their corresponding scales.

        assert_nD(image, 2)

        num_scales = self.max_scale - self.min_scale

        image = np.ascontiguousarray(_prepare_grayscale_input_2D(image))

        # Generating all the scales
        filter_response = _filter_image(image, self.min_scale, self.max_scale,
                                        self.mode)

        # Suppressing points that are neither minima nor maxima in their
        # 3 x 3 x 3 neighborhood to zero
        minimas = minimum_filter(filter_response, (3, 3, 3)) == filter_response
        maximas = maximum_filter(filter_response, (3, 3, 3)) == filter_response

        feature_mask = minimas | maximas
        feature_mask[filter_response < self.non_max_threshold] = False

        for i in range(1, num_scales):
            # sigma = (window_size - 1) / 6.0, so the window covers > 99% of
            #                                  the kernel's distribution
            # window_size = 7 + 2 * (min_scale - 1 + i)
            # Hence sigma = 1 + (min_scale - 1 + i)/ 3.0
            _suppress_lines(feature_mask[:, :, i], image,
                            (1 + (self.min_scale + i - 1) / 3.0),
                            self.line_threshold)

        rows, cols, scales = np.nonzero(feature_mask[..., 1:num_scales])
        keypoints = np.column_stack([rows, cols])
        scales = scales + self.min_scale + 1

        if self.mode == 'dob':
            self.keypoints = keypoints
            self.scales = scales
            return

        cumulative_mask = np.zeros(keypoints.shape[0], dtype=bool)

        if self.mode == 'octagon':
            for i in range(self.min_scale + 1, self.max_scale):
                c = (OCTAGON_OUTER_SHAPE[i - 1][0] - 1) // 2 \
                    + OCTAGON_OUTER_SHAPE[i - 1][1]
                cumulative_mask |= (
                    _mask_border_keypoints(image.shape, keypoints, c)
                    & (scales == i))
        elif self.mode == 'star':
            for i in range(self.min_scale + 1, self.max_scale):
                c = STAR_SHAPE[STAR_FILTER_SHAPE[i - 1][0]] \
                    + STAR_SHAPE[STAR_FILTER_SHAPE[i - 1][0]] // 2
                cumulative_mask |= (
                    _mask_border_keypoints(image.shape, keypoints, c)
                    & (scales == i))

        self.keypoints = keypoints[cumulative_mask]
        self.scales = scales[cumulative_mask]
Example #10
def keypoints_censure(image, min_scale=1, max_scale=7, mode='DoB',
                      non_max_threshold=0.15, line_threshold=10):
    """**Experimental function**.

    Extracts CenSurE keypoints along with the corresponding scale using
    either Difference of Boxes, Octagon or STAR bi-level filter.

    Parameters
    ----------
    image : 2D ndarray
        Input image.
    min_scale : int
        Minimum scale to extract keypoints from.
    max_scale : int
        Maximum scale to extract keypoints from. The keypoints will be
        extracted from all the scales except the first and the last i.e.
        from the scales in the range [min_scale + 1, max_scale - 1].
    mode : {'DoB', 'Octagon', 'STAR'}
        Type of bi-level filter used to get the scales of the input image.
        Possible values are 'DoB', 'Octagon' and 'STAR'. The three modes
        represent the shape of the bi-level filters, i.e. box (square), octagon
        and star, respectively. For instance, a bi-level octagon filter consists
        of a smaller inner octagon and a larger outer octagon, with the filter
        weights being uniformly negative in the inner octagon and uniformly
        positive in the difference region. Use STAR and Octagon for better
        features and DoB for better performance.
    non_max_threshold : float
        Threshold value used to suppress maxima and minima with a weak
        magnitude response obtained after Non-Maximal Suppression.
    line_threshold : float
        Threshold for rejecting interest points whose ratio of principal
        curvatures is greater than this value.

    Returns
    -------
    keypoints : (N, 2) array
        Location of the extracted keypoints in the ``(row, col)`` format.
    scales : (N,) array
        The corresponding scales of the N extracted keypoints.

    References
    ----------
    .. [1] Motilal Agrawal, Kurt Konolige and Morten Rufus Blas
           "CenSurE: Center Surround Extremas for Realtime Feature
           Detection and Matching",
           http://link.springer.com/content/pdf/10.1007%2F978-3-540-88693-8_8.pdf

    .. [2] Adam Schmidt, Marek Kraft, Michal Fularz and Zuzanna Domagala
           "Comparative Assessment of Point Feature Detectors and
           Descriptors in the Context of Robot Navigation"
           http://www.jamris.org/01_2013/saveas.php?QUEST=JAMRIS_No01_2013_P_11-20.pdf

    """

    # (1) First we generate the required scales on the input grayscale image
    # using a bi-level filter and stack them up in `filter_response`.
    # (2) We then perform Non-Maximal suppression in a 3 x 3 x 3 window on the
    # filter_response to suppress points that are neither minima nor maxima in
    # the 3 x 3 x 3 neighbourhood. We obtain a boolean ndarray `feature_mask`
    # containing all the minima and maxima in `filter_response` as True.
    # (3) Then we suppress all the points in the `feature_mask` for which the
    # corresponding point in the image at a particular scale has a ratio of
    # principal curvatures greater than `line_threshold`.
    # (4) Finally, we remove the border keypoints and return the keypoints
    # along with their corresponding scales.

    image = np.squeeze(image)
    if image.ndim != 2:
        raise ValueError("Only 2-D gray-scale images supported.")

    mode = mode.lower()
    if mode not in ('dob', 'octagon', 'star'):
        raise ValueError('Mode must be one of "DoB", "Octagon", "STAR".')

    if min_scale < 1 or max_scale < 1 or max_scale - min_scale < 2:
        raise ValueError('The scales must be >= 1 and the number of scales '
                         'should be >= 3.')

    image = img_as_float(image)
    image = np.ascontiguousarray(image)

    # Generating all the scales
    filter_response = _filter_image(image, min_scale, max_scale, mode)

    # Suppressing points that are neither minima nor maxima in their 3 x 3 x 3
    # neighbourhood to zero
    minimas = minimum_filter(filter_response, (3, 3, 3)) == filter_response
    maximas = maximum_filter(filter_response, (3, 3, 3)) == filter_response

    feature_mask = minimas | maximas
    feature_mask[filter_response < non_max_threshold] = False

    for i in range(1, max_scale - min_scale):
        # sigma = (window_size - 1) / 6.0, so the window covers > 99% of the
        #                                  kernel's distribution
        # window_size = 7 + 2 * (min_scale - 1 + i)
        # Hence sigma = 1 + (min_scale - 1 + i)/ 3.0
        _suppress_lines(feature_mask[:, :, i], image,
                        (1 + (min_scale + i - 1) / 3.0), line_threshold)

    rows, cols, scales = np.nonzero(feature_mask[..., 1:max_scale - min_scale])
    keypoints = np.column_stack([rows, cols])
    scales = scales + min_scale + 1

    if mode == 'dob':
        return keypoints, scales

    cumulative_mask = np.zeros(keypoints.shape[0], dtype=bool)

    if mode == 'octagon':
        for i in range(min_scale + 1, max_scale):
            c = (OCTAGON_OUTER_SHAPE[i - 1][0] - 1) // 2 \
                + OCTAGON_OUTER_SHAPE[i - 1][1]
            cumulative_mask |= _mask_border_keypoints(image, keypoints, c) \
                               & (scales == i)
    elif mode == 'star':
        for i in range(min_scale + 1, max_scale):
            c = STAR_SHAPE[STAR_FILTER_SHAPE[i - 1][0]] \
                + STAR_SHAPE[STAR_FILTER_SHAPE[i - 1][0]] // 2
            cumulative_mask |= _mask_border_keypoints(image, keypoints, c) \
                               & (scales == i)

    return keypoints[cumulative_mask], scales[cumulative_mask]
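`keypoints_censure` is the older functional form of the same detector (its own docstring marks it as experimental), so calling it follows directly from the documented parameters. A usage sketch, assuming the definition above is importable from wherever it lives:

from skimage import data
from skimage.color import rgb2gray

image = rgb2gray(data.astronaut())

# keypoints_censure as defined above
keypoints, scales = keypoints_censure(image, min_scale=1, max_scale=7,
                                       mode='Octagon', non_max_threshold=0.15,
                                       line_threshold=10)
# keypoints: (N, 2) row/col positions
# scales:    values in [min_scale + 1, max_scale - 1], i.e. 2..6 here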
Example #11
def keypoints_censure(image,
                      min_scale=1,
                      max_scale=7,
                      mode='DoB',
                      non_max_threshold=0.15,
                      line_threshold=10):
    """**Experimental function**.

    Extracts CenSurE keypoints along with the corresponding scale using
    either Difference of Boxes, Octagon or STAR bi-level filter.

    Parameters
    ----------
    image : 2D ndarray
        Input image.
    min_scale : int
        Minimum scale to extract keypoints from.
    max_scale : int
        Maximum scale to extract keypoints from. The keypoints will be
        extracted from all the scales except the first and the last i.e.
        from the scales in the range [min_scale + 1, max_scale - 1].
    mode : {'DoB', 'Octagon', 'STAR'}
        Type of bi-level filter used to get the scales of the input image.
        Possible values are 'DoB', 'Octagon' and 'STAR'. The three modes
        represent the shape of the bi-level filters, i.e. box (square), octagon
        and star, respectively. For instance, a bi-level octagon filter consists
        of a smaller inner octagon and a larger outer octagon, with the filter
        weights being uniformly negative in the inner octagon and uniformly
        positive in the difference region. Use STAR and Octagon for better
        features and DoB for better performance.
    non_max_threshold : float
        Threshold value used to suppress maxima and minima with a weak
        magnitude response obtained after Non-Maximal Suppression.
    line_threshold : float
        Threshold for rejecting interest points whose ratio of principal
        curvatures is greater than this value.

    Returns
    -------
    keypoints : (N, 2) array
        Location of the extracted keypoints in the ``(row, col)`` format.
    scales : (N,) array
        The corresponding scales of the N extracted keypoints.

    References
    ----------
    .. [1] Motilal Agrawal, Kurt Konolige and Morten Rufus Blas
           "CenSurE: Center Surround Extremas for Realtime Feature
           Detection and Matching",
           http://link.springer.com/content/pdf/10.1007%2F978-3-540-88693-8_8.pdf

    .. [2] Adam Schmidt, Marek Kraft, Michal Fularz and Zuzanna Domagala
           "Comparative Assessment of Point Feature Detectors and
           Descriptors in the Context of Robot Navigation"
           http://www.jamris.org/01_2013/saveas.php?QUEST=JAMRIS_No01_2013_P_11-20.pdf

    """

    # (1) First we generate the required scales on the input grayscale image
    # using a bi-level filter and stack them up in `filter_response`.
    # (2) We then perform Non-Maximal suppression in a 3 x 3 x 3 window on the
    # filter_response to suppress points that are neither minima nor maxima in
    # the 3 x 3 x 3 neighbourhood. We obtain a boolean ndarray `feature_mask`
    # containing all the minima and maxima in `filter_response` as True.
    # (3) Then we suppress all the points in the `feature_mask` for which the
    # corresponding point in the image at a particular scale has a ratio of
    # principal curvatures greater than `line_threshold`.
    # (4) Finally, we remove the border keypoints and return the keypoints
    # along with their corresponding scales.

    image = np.squeeze(image)
    if image.ndim != 2:
        raise ValueError("Only 2-D gray-scale images supported.")

    mode = mode.lower()
    if mode not in ('dob', 'octagon', 'star'):
        raise ValueError('Mode must be one of "DoB", "Octagon", "STAR".')

    if min_scale < 1 or max_scale < 1 or max_scale - min_scale < 2:
        raise ValueError('The scales must be >= 1 and the number of scales '
                         'should be >= 3.')

    image = img_as_float(image)
    image = np.ascontiguousarray(image)

    # Generating all the scales
    filter_response = _filter_image(image, min_scale, max_scale, mode)

    # Suppressing points that are neither minima nor maxima in their 3 x 3 x 3
    # neighbourhood to zero
    minimas = minimum_filter(filter_response, (3, 3, 3)) == filter_response
    maximas = maximum_filter(filter_response, (3, 3, 3)) == filter_response

    feature_mask = minimas | maximas
    feature_mask[filter_response < non_max_threshold] = False

    for i in range(1, max_scale - min_scale):
        # sigma = (window_size - 1) / 6.0, so the window covers > 99% of the
        #                                  kernel's distribution
        # window_size = 7 + 2 * (min_scale - 1 + i)
        # Hence sigma = 1 + (min_scale - 1 + i)/ 3.0
        _suppress_lines(feature_mask[:, :, i], image,
                        (1 + (min_scale + i - 1) / 3.0), line_threshold)

    rows, cols, scales = np.nonzero(feature_mask[..., 1:max_scale - min_scale])
    keypoints = np.column_stack([rows, cols])
    scales = scales + min_scale + 1

    if mode == 'dob':
        return keypoints, scales

    cumulative_mask = np.zeros(keypoints.shape[0], dtype=bool)

    if mode == 'octagon':
        for i in range(min_scale + 1, max_scale):
            c = (OCTAGON_OUTER_SHAPE[i - 1][0] - 1) // 2 \
                + OCTAGON_OUTER_SHAPE[i - 1][1]
            cumulative_mask |= _mask_border_keypoints(image, keypoints, c) \
                               & (scales == i)
    elif mode == 'star':
        for i in range(min_scale + 1, max_scale):
            c = STAR_SHAPE[STAR_FILTER_SHAPE[i - 1][0]] \
                + STAR_SHAPE[STAR_FILTER_SHAPE[i - 1][0]] // 2
            cumulative_mask |= _mask_border_keypoints(image, keypoints, c) \
                               & (scales == i)

    return keypoints[cumulative_mask], scales[cumulative_mask]
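As the docstring states, keypoints are only reported for scales in [min_scale + 1, max_scale - 1]. That follows from the index arithmetic at the end of the function: `np.nonzero` runs over the slice `feature_mask[..., 1:max_scale - min_scale]`, so its scale indices range from 0 to max_scale - min_scale - 2, and the `+ min_scale + 1` shift maps them back to scale values. A tiny check under the default parameters:

import numpy as np

min_scale, max_scale = 1, 7
num_scales = max_scale - min_scale           # number of response slices

# scale indices that np.nonzero can return for feature_mask[..., 1:num_scales]
slice_indices = np.arange(num_scales - 1)    # 0, 1, 2, 3, 4
reported_scales = slice_indices + min_scale + 1

print(reported_scales)   # [2 3 4 5 6] == [min_scale + 1, ..., max_scale - 1]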