Example #1
    def initialize(self, image_data):
        """
        Initializes the bag of words descriptor and returns the mapped results of image_data

        :param image_data: ndarray [n, 3D image]
        :return: ndarray [n, num_clusters] of quantized (bag-of-words) image descriptors
        """
        termination_criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.1)
        bow_model = cv2.BOWKMeansTrainer(self._num_clusters, termination_criteria)

        key_point_tensor = {}
        for i in range(image_data.shape[0]):
            cv_image = image_data[i]
            # extract_features_descriptors returns SURF.compute()'s raw
            # (key_points, descriptors) tuple plus the grid key points,
            # hence the descriptors[1] indexing below
            descriptors, key_points = BagOfFeaturesTransform.extract_features_descriptors(cv_image, self._patch_size)

            key_point_tensor[i] = key_points
            bow_model.add(descriptors[1])

        self._clusters = bow_model.cluster()

        self._img_descriptor_mapper = cv2.BOWImgDescriptorExtractor(non_free.SURF_create(extended=True),
                                                                    cv2.FlannBasedMatcher_create())
        self._img_descriptor_mapper.setVocabulary(self._clusters)

        training_x = []
        for img_idx, img_key_points in key_point_tensor.items():
            image_quantized_descriptor = self._img_descriptor_mapper.compute(image_data[img_idx], img_key_points)
            training_x.append(image_quantized_descriptor)

        return np.vstack(training_x)
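
A minimal usage sketch for this method (the constructor arguments and image paths are hypothetical; it assumes a BagOfFeaturesTransform instance exposing the initialize method above):

import cv2
import numpy as np

transform = BagOfFeaturesTransform(num_clusters=500, patch_size=16)  # hypothetical ctor
image_data = np.stack([cv2.imread(p) for p in ['img_0.jpg', 'img_1.jpg']])  # hypothetical paths
training_x = transform.initialize(image_data)
print(training_x.shape)  # one num_clusters-wide histogram row per image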
Example #2
import os

import cv2
import numpy as np
from cv2 import imread
from cv2 import xfeatures2d
from matplotlib import pyplot as plt


def BoW(dir, VW, strategy, plot, H_threshold):
    surf = xfeatures2d.SURF_create(H_threshold)
    BoW_whole = []
    for category in ['Coast', 'Forest', 'Highway', 'Mountain', 'Suburb']:
        img_name = os.listdir(os.path.join(dir, category))
        for file in img_name:
            img_path = os.path.join(dir, category, file)
            img = imread(img_path, 0)  # load as grayscale
            _, descriptors = surf.detectAndCompute(img, None)
            if descriptors is None:
                print('No descriptor in {}'.format(img_path))
                continue
            descriptors = descriptors[:100]  # cap at 100 descriptors per image
            if strategy == 'Hard-Sum':
                BoW_image = HardSum(descriptors, VW)
            elif strategy == 'Soft-Sum':
                BoW_image = SoftSum(descriptors, VW)
            elif strategy == 'Soft-Max':
                BoW_image = SoftMax(descriptors, VW)
            else:
                raise ValueError('Unknown strategy: {}'.format(strategy))
            BoW_whole.append(BoW_image)
    BoW_whole = np.array(BoW_whole)
    print('BoW shape: {}'.format(BoW_whole.shape))

    if plot:
        for idx in [0, 10, 20, 30, 40]:
            plt.bar(np.arange(BoW_whole.shape[1]), BoW_whole[idx], width=1, edgecolor='b')
            #plt.ylim((0.0, 1.0))
            plt.title('BoW by {}'.format(strategy))
            plt.xlabel('')
            plt.ylabel('')
            plt.savefig(os.path.join(args.output_path, 'BoW_by_{}_{}'.format(strategy, idx)))
            #plt.show()
            plt.clf()

    return BoW_whole
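
HardSum, SoftSum and SoftMax are not shown in this excerpt; a plausible minimal version of the hard-assignment strategy, assuming VW is an (n_words, 64) array of visual-word centres (hypothetical implementation):

def HardSum(descriptors, VW):
    # Vote for the nearest visual word for each descriptor, then normalize
    hist = np.zeros(len(VW))
    for d in descriptors:
        distances = np.linalg.norm(VW - d, axis=1)  # L2 distance to each word
        hist[np.argmin(distances)] += 1
    return hist / max(hist.sum(), 1)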
Example #3
import cv2
import numpy as np
from cv2 import xfeatures2d as contrib  # alias assumed from the surrounding module


def get_features(pic, hessian_threshold=100, show_key_point=False):
    surf = contrib.SURF_create(hessian_threshold)
    key, des = surf.detectAndCompute(pic, None)
    if show_key_point:
        kp_image = np.zeros(pic.shape[:-1]).astype(np.uint8)  # blank canvas (assumes a colour input)
        kp_image = cv2.drawKeypoints(kp_image, key, None, [255, 255, 255])
        cv2.namedWindow("key point", cv2.WINDOW_NORMAL)
        cv2.imshow("key point", kp_image)
        cv2.waitKey()
    return key, des
Example #4
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        self._algos = [
            cv2.AKAZE_create(),
            cv2.BRISK_create(),
            cv2.KAZE_create(),
            cv2.ORB_create(),
            xfeatures2d.SIFT_create(),
            xfeatures2d.SURF_create(),
        ]

        self._times = OrderedDict()
        self._nkps = OrderedDict()

        self._images = [cv2.imread(image) for image in self.files]
Example #5
    def __setstate__(self, state):
        """
        Restores the state of the object; sets the image descriptor mapper. Note that the training set is not restored

        :param state: saved state dictionary
        """
        self.__dict__.update(state)
        self._img_descriptor_mapper = cv2.BOWImgDescriptorExtractor(
            non_free.SURF_create(extended=True),
            cv2.FlannBasedMatcher_create())
        self._img_descriptor_mapper.setVocabulary(self._clusters)
        self._X = None

        return
Example #6
    @staticmethod
    def extract_features_descriptors(image, patch_size=16):
        """
        Computes features based on the patch size

        :param image: input cv2 image
        :param patch_size: size of the patches in the grid
        :return: tuple of (SURF.compute() output, grid key points); note that
            SURF.compute() itself returns a (key_points, descriptors) pair
        """

        key_points = BagOfFeaturesTransform.extract_features(image, patch_size)

        surf = non_free.SURF_create(extended=True)
        descriptors = surf.compute(image, key_points)

        return descriptors, key_points
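
extract_features itself is not part of this excerpt; a plausible minimal version that lays one keypoint per patch_size cell of a dense grid (hypothetical implementation):

    @staticmethod
    def extract_features(image, patch_size=16):
        # One keypoint at the centre of each patch_size x patch_size grid cell
        key_points = []
        for y in range(patch_size // 2, image.shape[0], patch_size):
            for x in range(patch_size // 2, image.shape[1], patch_size):
                key_points.append(cv2.KeyPoint(float(x), float(y), patch_size))
        return key_points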
Example #7
    def create_descriptor(self, descriptor, detector):
        """ Create descriptor object.

        Parameters
        ----------
        descriptor : str
            The descriptor type to create.
        detector: str
            Detector name, to check if valid combination.
        """
        if descriptor == 'AKAZE':  # AKAZE only allows AKAZE or KAZE detectors
            if detector in ('AKAZE', 'KAZE'):
                desc = cv2.AKAZE_create()
            else:
                return None
        elif descriptor == 'BRISK':
            desc = cv2.BRISK_create()
        elif descriptor == 'FREAK':
            desc = xfeatures2d.FREAK_create()
        elif descriptor == 'KAZE':  # KAZE only allows KAZE or AKAZE detectors
            if detector in ('AKAZE', 'KAZE'):
                desc = cv2.KAZE_create()
            else:
                return None
        elif descriptor == 'ORB':
            desc = cv2.ORB_create()
        elif descriptor == 'BRIEF':
            desc = xfeatures2d.BriefDescriptorExtractor_create()
        elif descriptor == 'DAISY':
            desc = xfeatures2d.DAISY_create()
        elif descriptor == 'LATCH':
            desc = xfeatures2d.LATCH_create()
        elif descriptor == 'SIFT':
            desc = xfeatures2d.SIFT_create()
        elif descriptor == 'SURF':
            desc = xfeatures2d.SURF_create()
        else:
            raise ValueError("Unsupported descriptor")

        return desc
Example #8
def InterestPoints(dir, H_threshold):
    surf = xfeatures2d.SURF_create(H_threshold)
    first = True
    for category in ['Coast', 'Forest', 'Highway', 'Mountain', 'Suburb']:
        img_name = os.listdir(os.path.join(dir, category))
        for file in img_name:
            img_path = os.path.join(dir, category, file)
            img = imread(img_path, 0)
            _, descriptors = surf.detectAndCompute(img, None)
            if descriptors is None:
                print('No descriptor in {}'.format(img_path))
                continue
            #print(len(descriptors))
            descriptors = descriptors[:200]  # cap at 200 descriptors per image
            if first:
                interest_pts = descriptors
                first = False
            else:
                interest_pts = np.append(interest_pts, descriptors, axis=0)
    print('interest points shape: {}'.format(interest_pts.shape))
    return interest_pts
Example #9
    def create_detector(self, detector):
        """ Create detector object.

        Parameters
        ----------
        detector : str
            The detector type to create.
        """
        if detector == 'Agast':
            det = cv2.AgastFeatureDetector_create()
        elif detector == 'AKAZE':
            det = cv2.AKAZE_create()
        elif detector == 'BRISK':
            det = cv2.BRISK_create()
        elif detector == 'Fast':
            det = cv2.FastFeatureDetector_create()
        elif detector == 'GFTT':
            det = cv2.GFTTDetector_create()
        elif detector == 'KAZE':
            det = cv2.KAZE_create()
        elif detector == 'MSER':
            det = cv2.MSER_create()
        elif detector == 'ORB':
            det = cv2.ORB_create()

        elif detector == 'MSD':
            det = xfeatures2d.MSDDetector_create()
        elif detector == 'SIFT':
            det = xfeatures2d.SIFT_create()
        elif detector == 'SURF':
            det = xfeatures2d.SURF_create()
        elif detector == 'Star':
            det = xfeatures2d.StarDetector_create()
        else:
            raise ValueError("Unsupported detector")

        return det
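
A usage sketch pairing this factory with create_descriptor from Example #7 (the owning instance name and input image are hypothetical):

factory = FeatureFactory()                      # hypothetical owner of both methods
det = factory.create_detector('ORB')
desc = factory.create_descriptor('BRIEF', 'ORB')
key_points = det.detect(image, None)            # image: any loaded cv2 image
key_points, des = desc.compute(image, key_points)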
Example #10
import math
import os
import socket
import time

import cv2
import serial
from cv2 import xfeatures2d as xf  # alias assumed from the surrounding module
from picamera import PiCamera
from picamera.array import PiRGBArray

arduino = True
remote = True

if arduino:
    sr = serial.Serial('/dev/ttyACM0', 115200)
    time.sleep(3)

#cv2.ocl.setUseOpenCL(False)

detector = xf.SURF_create(10000, 10, 5, extended=True)  # hessianThreshold, nOctaves, nOctaveLayers
#detector = cv2.ORB_create()
# detector = cv2.FastFeatureDetector_create()
matcher = cv2.BFMatcher(cv2.NORM_L2)

compute = detector
#compute = xf.SURF_create()
# compute = cv2.ORB_create()

if remote:
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)

    get_ip = os.popen(
        r'ifconfig | grep -Eo "inet (addr:)?([0-9]*\.){3}[0-9]*" | grep -Eo "([0-9]*\.){3}[0-9]*" | grep -v "127.0.0.1"'
    ).read()
Example #11
# -*- coding: utf-8 -*-
__author__ = 'zBritva'

import cv2
import numpy as np
from cv2 import xfeatures2d

# camera
cap = cv2.VideoCapture(1)

# main method
surf = xfeatures2d.SURF_create()
#surf.setUpright(True)
# surf.extended = True #for expand descriptor to 128bits, default is 64

# image of land zone (H-mark, landmark)
land_zone = cv2.imread('Marks/H-helipad.png')

# keypoint and descriptors of landmark
landmark_keypoints, landmark_descriptors = surf.detectAndCompute(
    land_zone, None)
# convert to float32
landmark_descriptors = np.float32(landmark_descriptors)

# Configure FLANN
FLANN_INDEX_KDTREE = 0
FLANN_INDEX_LSH = 6
# index_params = dict(algorithm=FLANN_INDEX_LSH,
#                     table_number=6,  # 12
#                     key_size=12,  # 20
#                     multi_probe_level=1)  # 2
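
The excerpt stops before the matcher is built; for float SURF descriptors the usual continuation uses the KD-tree index (the commented LSH parameters apply to binary descriptors), as in Example #15:

index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
search_params = dict(checks=50)
flann = cv2.FlannBasedMatcher(index_params, search_params)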
Example #12
#!/usr/bin/env python3

import cv2
from cv2 import xfeatures2d

file = '../performance/boat/img1.pgm'
img = cv2.imread(file)

akaze = cv2.AKAZE_create()
points = akaze.detect(img)

descriptors = [akaze]
descriptors.append(cv2.BRISK_create())
descriptors.append(cv2.KAZE_create())
descriptors.append(cv2.ORB_create())
descriptors.append(xfeatures2d.BriefDescriptorExtractor_create())
descriptors.append(xfeatures2d.DAISY_create())
descriptors.append(xfeatures2d.FREAK_create())
descriptors.append(xfeatures2d.LATCH_create())
descriptors.append(xfeatures2d.LUCID_create(1, 1))
descriptors.append(xfeatures2d.SIFT_create())
descriptors.append(xfeatures2d.SURF_create())

for descriptor in descriptors:
    kps, des = descriptor.compute(img, points)
    print("Algorithm: {}, size: {}, type: {}".format(
        type(descriptor).__name__, des[0].size, des[0].dtype))
Example #13
    # Set up a parser for command line arguments
    parser = argparse.ArgumentParser("Detect object")
    parser.add_argument("object", default="id", nargs='?', help="The object to detect")

    args = parser.parse_args()

    path = 'trainImg/' + args.object + ".jpg"

    # Load training image as grayscale
    trainImg = cv2.imread(path,0)

    # Initiate camera feed (will need to be adapted for robot to keep stream alive)
    cam = cv2.VideoCapture(0)

    # Initiate SURF detector with initial hessian threshold value 
    surf = xf.SURF_create(500)

    # Detect keypoints and compute descriptors from train image using SURF algorithm
    orig_kp, orig_des = surf.detectAndCompute(trainImg,None)

    # Set up parameters for FLANN matching
    index_params = dict(algorithm=0, trees=8)   # Algorithm selection = Index K-D Tree
    # Specify number of times to recursively traverse index trees - higher = more accurate but slower
    search_params = dict(checks=70)

    # Initiate FLANN object with parameters
    flann = cv2.FlannBasedMatcher(index_params, search_params)
    frames = 0
    initiate = False
    total_duration = 0
    # Match and display output loop
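
The loop itself is cut off in this excerpt; a minimal sketch of a hypothetical continuation using the objects prepared above (ratio-test filtering per frame):

    while True:
        ok, frame = cam.read()
        if not ok:
            break
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        kp, des = surf.detectAndCompute(gray, None)
        if des is not None:
            matches = flann.knnMatch(orig_des, des, k=2)
            good = [m[0] for m in matches
                    if len(m) == 2 and m[0].distance < 0.7 * m[1].distance]
            frames += 1
        cv2.imshow('Detecting: ' + args.object, frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cam.release()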
Example #14
                     svm)
from pixel.detector import OpenCVDetector
from pixel.utility import (pixel_to_lonlat,
                           cut_keypoint,
                           write_image,
                           plot_image)

#
# Technique 1 -  Marker-based image segmentation using watershed algorithm
#

# Open image files and initialize SURF detector
scene = imread('/Users/tstavish/Data/surf2pixelSVM/harbor.tif',
               IMREAD_GRAYSCALE)
scene_3band = imread('/Users/tstavish/Data/harbor/harbor3.tif')
detector = xfeatures2d.SURF_create(400)
keypoints, descriptors = detector.detectAndCompute(scene, None)

# Loop through keypoints
no_of_kp = 10
keypoint_cut = []
keypoint_cut_3band = []
for kp in keypoints[:no_of_kp]:
    print('angle = ', kp.angle)
    print('class_id = ', kp.class_id)
    print('octave = ', kp.octave)
    print('x, y = ', kp.pt[0], kp.pt[1])
    print('response = ', kp.response)
    print('size = ', kp.size)
    keypoint_cut.append(cut_keypoint(int(kp.pt[0]), int(kp.pt[1]),
                        kp.size, scene, False))
Example #15
import numpy as np
import cv2.xfeatures2d as xf
import cv2
import paths

path = paths.path_to_imgs

img1 = cv2.imread('D:/Osborne_base/sello12.png', 0)    # trainImage
# img2 = cv2.imread('C:/Users/usuario/Desktop/documentos/1877-L119.M23_Tomas_Osborne_Bohl/'
#                   '1/1877-L119.M23_Tomas_Osborne_Bohl.I_1/IMG_0001.png', 0)  # queryImage

img2 = cv2.imread(path + '/1883-L119.M29/43/IMG_0001.png', 0)

# Initiate SURF detector
surf = xf.SURF_create()

# find the keypoints and descriptors with SURF
kp1, des1 = surf.detectAndCompute(img1, None)
kp2, des2 = surf.detectAndCompute(img2, None)

# FLANN parameters
FLANN_INDEX_KDTREE = 0
index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
search_params = dict(checks=50)   # or pass empty dictionary

flann = cv2.FlannBasedMatcher(index_params, search_params)

matches = flann.knnMatch(des1, des2, k=2)

# Need to draw only good matches, so create a mask
matchesMask = [[0, 0] for i in range(len(matches))]
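
The excerpt stops here; the standard OpenCV FLANN tutorial that this follows fills the mask with a ratio test and then draws the surviving matches:

# Apply Lowe's ratio test to mark good matches in the mask
for i, (m, n) in enumerate(matches):
    if m.distance < 0.7 * n.distance:
        matchesMask[i] = [1, 0]

draw_params = dict(matchColor=(0, 255, 0), singlePointColor=(255, 0, 0),
                   matchesMask=matchesMask, flags=0)
img3 = cv2.drawMatchesKnn(img1, kp1, img2, kp2, matches, None, **draw_params)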
Example #16
'''
Uses SURF to match two images.

Based on the sample code from opencv:
  samples/python2/find_obj.py

USAGE
  find_obj.py <image1> <image2>
'''

import numpy
import cv2
from cv2 import xfeatures2d as xf

import sys

detector = xf.SURF_create(400, 5, 5)
matcher = cv2.BFMatcher(cv2.NORM_L2)

###############################################################################
# Image Matching
###############################################################################


def match_images(kp1, desc1, img2):
    """Given one image's keypoints/descriptors and a second image, returns the matched keypoint pairs"""

    kp2, desc2 = detector.detectAndCompute(img2, None)
    #print 'img1 - %d features, img2 - %d features' % (len(kp1), len(kp2))

    raw_matches = matcher.knnMatch(desc1, trainDescriptors=desc2, k=2)
    kp_pairs = filter_matches(kp1, kp2, raw_matches)
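
filter_matches is not included in this excerpt; the find_obj.py sample this is based on applies Lowe's ratio test, roughly like this sketch:

def filter_matches(kp1, kp2, matches, ratio=0.75):
    # Keep a match only when the best candidate is clearly better than the
    # second best, and pair up the corresponding keypoints
    kp_pairs = []
    for m in matches:
        if len(m) == 2 and m[0].distance < ratio * m[1].distance:
            kp_pairs.append((kp1[m[0].queryIdx], kp2[m[0].trainIdx]))
    return kp_pairs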
Example #17
import cv2 as cv


def SURF(img1, img2):
    # SURF lives in the xfeatures2d (opencv-contrib) module, not in cv2 itself
    surf = cv.xfeatures2d.SURF_create()
    kp1, des1 = surf.detectAndCompute(img1, None)
    kp2, des2 = surf.detectAndCompute(img2, None)
    return des1, des2, kp1, kp2
Example #18
import os
import socket
import time

import cv2
import imutils
import serial
from cv2 import xfeatures2d as xf  # alias assumed from the surrounding module
from imutils.video import VideoStream
from picamera import PiCamera
from picamera.array import PiRGBArray
from pivideostream import PiVideoStream

arduino = False

if arduino:
    sr = serial.Serial('/dev/cu.usbmodem1411', 115200)
    time.sleep(3)

cv2.ocl.setUseOpenCL(False)

detector = xf.SURF_create(10000, 5, 5)
# detector = cv2.ORB_create(200)
# detector = cv2.FastFeatureDetector_create()
matcher = cv2.BFMatcher(cv2.NORM_L2)

compute = detector
# compute = xf.SURF_create()
# compute = cv2.ORB_create()

sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)

get_ip = os.popen(
    r'ifconfig | grep -Eo "inet (addr:)?([0-9]*\.){3}[0-9]*" | grep -Eo "([0-9]*\.){3}[0-9]*" | grep -v "127.0.0.1"'
).read()
Example #19
# ## Great they were converted!
#
# # Let's use SURF to extract features from these images!

# In[7]:

import cv2
import numpy as np
from cv2 import xfeatures2d
from matplotlib import pyplot as plt

img = cv2.imread(file_name1)  # file_name1 is defined in an earlier notebook cell
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

# Create SURF object. You can specify params here or later.
# Here I set Hessian Threshold to 400
surf = xfeatures2d.SURF_create(400)

kp, des = surf.detectAndCompute(img, None)
img = cv2.drawKeypoints(gray,
                        kp,
                        img,
                        flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)

cv2.imwrite('surf_keypoints.jpg', img)
plt.imshow(img)
plt.show()

Example #20
def setupMatch(obj_name, obj_path, alg_params):
    global match_feedback
    global neck_angles
    global robot

    match_feedback = dict([('left_counter', 0), ('right_counter', 0),
                           ('loc_counter', 0), ('last_centre', (0, 0)),
                           ('no_match', 0), ('object_located', False),
                           ('error', 0), ('last_turn', "Right"),
                           ('no_desc', 0)])

    # Path to object image
    path = 'trainImg/' + obj_path

    # Load training image as grayscale
    img1 = cv2.imread(path, 0)

    # Initiate camera feed (will need to be adapted for robot to keep stream alive)
    #cam = cv2.VideoCapture(0)

    # Initiate SURF detector with initial hessian value  (set by default or through UI)
    # Larger threshold should render fewer more salient points, smaller more but less salient points
    surf = xf.SURF_create(alg_params['hes_threshold'])

    # Set to use 128 descriptor size - not used with KD-tree ANN matching as it is known to give poor performance for high dimensionality descriptors
    #surf.setExtended(True)

    # Setting Upright flags means algorithm does not consider rotation - still good to about 15 degrees
    #surf.setUpright(True)

    # Detect keypoints and compute descriptors using SURF algorithm
    kp1, des1 = surf.detectAndCompute(img1, None)

    # Set up parameters for FLANN matching
    # Tell FLANN matcher to use k-dimensional index trees (8) - trees are randomised and searched in parallel
    index_params = dict(algorithm=0, trees=8)

    # Specify number of times to recursively traverse index trees
    search_params = dict(checks=100)

    # Initiate FLANN object with parameters
    flann = cv2.FlannBasedMatcher(index_params, search_params)

    # Establish connection to robot via Websockets
    robot = py_websockets_bot.WebsocketsBot("192.168.42.1")

    # Create mini driver sensor configuration
    # Used to configure the inputs on the mini driver board
    sensorConfig = py_websockets_bot.mini_driver.SensorConfiguration(
        configD12=py_websockets_bot.mini_driver.PIN_FUNC_ULTRASONIC_READ)

    robot_config = robot.get_robot_config()
    robot_config.miniDriverSensorConfiguration = sensorConfig
    robot.set_robot_config(robot_config)
    robot.start_streaming_camera_images(getLastImage)

    # Sets neck degrees to initial values (should centre neck if servos configured correctly)
    robot.set_neck_angles(pan_angle_degrees=neck_angles['pan'],
                          tilt_angle_degrees=neck_angles['tilt'])

    # Main loop processes current frame and matches features from original image
    while True:
        try:
            robot.update()

            if latest_camera_image is not None:
                img2 = latest_camera_image

                # Convert frame to grayscale (algorithm uses pixel gray intensities)
                grey = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)

                # Detect and compute keypoints/descripts for stream frame
                kp2, des2 = surf.detectAndCompute(grey, None)

                if des2 is None or len(des2) == 0:
                    print("No descriptors to match")
                    if match_feedback['no_desc'] > 30:
                        neck_angles, match_feedback = rc.lookAround(
                            robot, neck_angles, match_feedback)
                        match_feedback['no_desc'] = 0
                    match_feedback['no_desc'] += 1
                else:
                    # Calculate descriptor matches with FLANN
                    matches = flann.knnMatch(des1, des2, k=2)

                    # Send keypoints/matching descriptors to find location of object in image and return image with bounding box around image
                    img2 = matchAndBox(img1, kp1, img2, kp2, matches,
                                       alg_params)
                    #print match_feedback['left_counter']
                    #print match_feedback['right_counter']
                    #print match_feedback['last_centre']
                title = "Detecting: " + obj_name
                # Display frame
                cv2.imshow(title, img2)

            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        except cv2.error:
            if match_feedback['error'] > 20 and rc.minDistanceReached(
                    robot, 20):
                #robot.set_motor_speeds(35.0,-30.0)
                neck_angles, match_feedback = rc.lookAround(
                    robot, neck_angles, match_feedback)
                match_feedback['error'] = 0
            elif match_feedback['error'] > 30:
                robot.set_motor_speeds(-35.0, -35.0)
                match_feedback['error'] = 0
            match_feedback['error'] += 1
            #print "Please check camera feed - ensure it is not obscured"
    robot.disconnect()
    cv2.destroyAllWindows()
Example #21
    def __init__(self, training_images_dir, **kwargs):
        """
        This data provider utilizes the visual bag of words algorithm to map image files
        to feature vectors for the one-class SVM classifier.
        Process:
            - Partition each image into a grid and generate SURF descriptor from each patch
            - Compute K clusters from all of the features from all of the images (visual bag of words)
            - Construct normalized histogram for each image
            - Feature vector is then the values of the normalized histogram (vector quantization)

        :param training_images_dir: (string)
        :param kwargs:
            - num_clusters: (Integer) Size of the visual bag of words
            - resize_image: (tuple(x, y)) resize input image
            - patch_size: (Integer) size of patch to compute a descriptor
        """

        # note~ not much arg validation here...

        self._resize_image = kwargs.pop("resize_image", ())
        self._patch_size = kwargs.pop("patch_size", 16)

        termination_criteria = (cv2.TERM_CRITERIA_EPS +
                                cv2.TERM_CRITERIA_MAX_ITER, 30, 0.1)
        bow_model = cv2.BOWKMeansTrainer(kwargs.pop("num_clusters", 500),
                                         termination_criteria)

        key_point_tensor = {}
        training_counter = 0

        for root, sub_dirs, files in os.walk(training_images_dir):
            for image_file in files:
                if not image_file.endswith(".jpg"):
                    continue

                training_counter += 1
                if training_counter % 1000 == 0:
                    print(f"{training_counter} images completed")

                image_path = os.path.join(root, image_file)

                cv_image = DataProviderSURF.read_image(image_path,
                                                       self._resize_image)
                descriptors, key_points = DataProviderSURF.extract_features_descriptors(
                    cv_image, self._patch_size)

                key_point_tensor[image_file] = [cv_image, key_points]
                bow_model.add(descriptors[1])  # descriptors is SURF.compute()'s (kps, descs) tuple

        print(f"{training_counter} total number of images in training.")

        self._clusters = bow_model.cluster()

        self._img_descriptor_mapper = cv2.BOWImgDescriptorExtractor(
            non_free.SURF_create(extended=True),
            cv2.FlannBasedMatcher_create())
        self._img_descriptor_mapper.setVocabulary(self._clusters)

        training_x_list = []
        for img, img_data in key_point_tensor.items():
            image_descriptor = self._img_descriptor_mapper.compute(
                img_data[0], img_data[1])
            training_x_list.append(image_descriptor)

        self._X = np.vstack(training_x_list)

        return
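
A minimal usage sketch feeding the provider's feature matrix to a one-class SVM (scikit-learn assumed; the training directory is hypothetical, and _X is accessed directly only for illustration):

from sklearn.svm import OneClassSVM

provider = DataProviderSURF('training_images/', num_clusters=500, patch_size=16)
clf = OneClassSVM(nu=0.1, gamma='scale')
clf.fit(provider._X)  # one num_clusters-wide histogram row per training image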
Example #22
import numpy as np
import cv2
from cv2 import xfeatures2d as xf
from matplotlib import pyplot as plt

img1 = cv2.imread('trainImg/strep.jpg', 0)  # Load train image as greyscale
img2 = cv2.imread('trainImg/strep2.jpg', 1)  # Load query image in RGB
grey = cv2.cvtColor(
    img2, cv2.COLOR_BGR2GRAY
)  # Convert query image to greyscale (SURF uses greyscale pixel intensities)

surf = xf.SURF_create(0)  # Initialise surf object with minimum hessian threshold

# Detect keypoints and compute descriptors for both images
train_kp, train_des = surf.detectAndCompute(img1, None)
query_kp, query_des = surf.detectAndCompute(grey, None)

# Match descriptors with brute force matching
brute_force = cv2.BFMatcher()
matches = brute_force.knnMatch(train_des, query_des, k=2)

counter = 0
# Compare descriptor distance to find good matches
good_matches = []
for m, n in matches:
    if m.distance < 0.7 * n.distance:
        counter += 1
        good_matches.append([m])

print(counter)
Example #23
def create_detector():
    surf = xf.SURF_create(hessianThreshold=400, upright=True, extended=True)
    return surf
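
A brief usage sketch (the grayscale input image is hypothetical): extended=True doubles the SURF descriptor to 128 dimensions, and upright=True skips orientation estimation, which is faster but rotation-sensitive.

surf = create_detector()
key_points, descriptors = surf.detectAndCompute(gray_image, None)  # gray_image: hypothetical input
assert descriptors.shape[1] == 128  # extended SURF descriptors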