Exemple #1
0
def set_detector_by_name(name):
    """Create an OpenCV feature detector by its name.

    Parameters
    ----------
    name : str
        One of "Agast", "FAST", "GFTT", "HARRIS", "KAZE", "ORB",
        "STAR", "SURF", "SIFT", "AKAZE".

    Returns
    -------
    The created detector object, or None if the name is unknown.
    """
    det = None
    if name == "Agast":
        det = cv2.AgastFeatureDetector_create()
    elif name == "FAST":
        det = cv2.FastFeatureDetector_create()
    elif name == "GFTT":
        det = cv2.GFTTDetector_create(310)
    elif name == "HARRIS":
        # GFTT configured to use the Harris corner response.
        det = cv2.GFTTDetector_create(310,
                                      0.001,
                                      10,
                                      3,
                                      useHarrisDetector=True)
    elif name == "KAZE":
        det = cv2.KAZE_create(False, False, 0.001, 4, 4)
    elif name == "ORB":
        det = cv2.ORB_create()
    elif name == "STAR":
        det = cv2.xfeatures2d.StarDetector_create()
    elif name == "SURF":
        det = cv2.xfeatures2d.SURF_create()
    elif name == "SIFT":
        det = cv2.xfeatures2d.SIFT_create()
    elif name == "AKAZE":
        det = cv2.AKAZE_create()
    else:
        # Bug fix: the original used a Python 2 print statement, which is a
        # SyntaxError on Python 3.
        print("Not Implemented Error ...")
    return det
Exemple #2
0
    def __init__(self, config='GFTT-BRIEF'):
        """Populate SLAM parameters for the chosen detector/descriptor pair.

        Supported configs: 'GFTT-BRIEF', 'GFTT-BRISK', 'ORB-ORB'.
        Raises NotImplementedError for anything else.
        """
        super().__init__()

        if config == 'GFTT-BRIEF':
            self.feature_detector = cv2.GFTTDetector_create(
                maxCorners=1000, minDistance=12.0,
                qualityLevel=0.001, useHarrisDetector=False)
            self.descriptor_extractor = (
                cv2.xfeatures2d.BriefDescriptorExtractor_create(
                    bytes=32, use_orientation=False))
        elif config == 'GFTT-BRISK':
            self.feature_detector = cv2.GFTTDetector_create(
                maxCorners=2000, minDistance=15.0,
                qualityLevel=0.01, useHarrisDetector=False)
            self.descriptor_extractor = cv2.BRISK_create()
        elif config == 'ORB-ORB':
            # ORB serves as both detector and extractor.
            self.feature_detector = cv2.ORB_create(
                nfeatures=1000, scaleFactor=1.2,
                nlevels=1, edgeThreshold=31)
            self.descriptor_extractor = self.feature_detector
        else:
            raise NotImplementedError

        # All supported descriptors are binary, hence the Hamming norm;
        # crossCheck stays off so non-mutual candidate matches survive.
        self.descriptor_matcher = cv2.BFMatcher(
            cv2.NORM_HAMMING, crossCheck=False)

        # Matching parameters.
        self.matching_cell_size = 15  # pixels
        self.matching_neighborhood = 3
        self.matching_distance = 30

        # Viewing-frustum limits.
        self.frustum_near = 0.1  # meters
        self.frustum_far = 1000.0

        self.ground = True

        # Loop-closure thresholds.
        self.lc_max_inbetween_distance = 50
        self.lc_distance_threshold = 15
        self.lc_embedding_distance = 20.0

        # Viewer settings.
        self.view_image_width = 400
        self.view_image_height = 130
        self.view_camera_width = 0.75
        self.view_viewpoint_x = 0
        self.view_viewpoint_y = -500  # -10
        self.view_viewpoint_z = -100  # -0.1
        self.view_viewpoint_f = 2000

        # One BinaryDescriptor instance is reused for both line detection
        # and line description.
        self.line_detector = (
            cv2.line_descriptor.BinaryDescriptor_createBinaryDescriptor())
        self.line_extractor = self.line_detector
        self.line_matching_distance = 30
Exemple #3
0
    def __init__(self, config='GFTT-BRIEF'):
        """Set up the detector/descriptor pair and tracking parameters.

        config: one of 'GFTT-BRIEF', 'GFTT-BRISK' or 'ORB-ORB'; anything
        else raises NotImplementedError.
        """
        super().__init__()

        # GFTT detector paired with the BRIEF binary descriptor.
        if config == 'GFTT-BRIEF':
            self.feature_detector = cv2.GFTTDetector_create(
                maxCorners=1000, minDistance=12.0, 
                qualityLevel=0.001, useHarrisDetector=False)

            self.descriptor_extractor = cv2.xfeatures2d.BriefDescriptorExtractor_create(
                bytes=32, use_orientation=False)

        # Alternative pair: GFTT detector with the BRISK descriptor.
        # (The original comment wrongly said "ORB-BRIEF".)
        elif config == 'GFTT-BRISK':
            self.feature_detector = cv2.GFTTDetector_create(
                maxCorners=2000, minDistance=15.0, 
                qualityLevel=0.01, useHarrisDetector=False)

            self.descriptor_extractor = cv2.BRISK_create()

        elif config == 'ORB-ORB':
            self.feature_detector = cv2.ORB_create(
                nfeatures=1000, scaleFactor=1.2, nlevels=1, edgeThreshold=31)
            self.descriptor_extractor = self.feature_detector

        else:
            raise NotImplementedError

        # Brute-force matcher with Hamming distance; crossCheck=False so all
        # candidate matches are returned, not only mutual best matches.
        self.descriptor_matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=False)

        # Matching parameters.
        self.matching_cell_size = 15   # pixels
        self.matching_neighborhood = 3
        self.matching_distance = 30

        # Frustum projection of the map points around the predicted pose.
        self.frustum_near = 0.1    # meters
        self.frustum_far = 1000.0

        self.ground = True

        # Loop-closure distance thresholds.
        self.lc_max_inbetween_distance = 50
        self.lc_distance_threshold = 15
        self.lc_embedding_distance = 20.0

        # Viewer configuration.
        self.view_image_width = 400
        self.view_image_height = 130
        self.view_camera_width = 0.75
        self.view_viewpoint_x = 0
        self.view_viewpoint_y = -500   # -10
        self.view_viewpoint_z = -100   # -0.1
        self.view_viewpoint_f = 2000
Exemple #4
0
def detect(videoCapture):
    """Detect GFTT keypoints and compute BRIEF descriptors for every frame.

    Parameters
    ----------
    videoCapture : cv2.VideoCapture to read frames from. Released early if
        a frame fails to decode.

    Returns
    -------
    (kps, descriptors) : parallel lists with one entry per processed frame.
    """
    frameCount = int(videoCapture.get(cv2.CAP_PROP_FRAME_COUNT)) - 2
    kps = []
    descriptors = []

    # Hoisted out of the loop: the detector and extractor carry no per-frame
    # state, so constructing them once avoids redundant work every iteration.
    featDetector = cv2.GFTTDetector_create(500, 0.01, 1, 3, True, 0.04)
    brief = cv2.xfeatures2d.BriefDescriptorExtractor_create()

    i = 0
    while i <= frameCount:  # dropped the redundant "True and"
        showProgress(i, frameCount, "Detecting video:")

        i += 1

        videoCapture.grab()
        r, img = videoCapture.retrieve()
        if not r:
            videoCapture.release()
            break

        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

        features = featDetector.detect(gray)
        kp, des = brief.compute(gray, features)

        kps.append(kp)
        descriptors.append(des)
    sys.stdout.write("\n")
    return kps, descriptors
Exemple #5
0
 def __init__(self, ratio_matching = 0.5, ratio_pose = 0.5, detecteur = 'fast', descripteur = 'orb'):
     """Configure the SLAM front-end.

     Parameters
     ----------
     ratio_matching, ratio_pose : filtering ratios used by matching/pose steps.
     detecteur : detector key; one of the keys of self.detecteurs.
     descripteur : descriptor key; 'brisk' or 'orb'.

     Note: every detector/descriptor is constructed eagerly even though only
     one of each is selected — kept as-is to preserve behavior.
     """
     self.ratio_matching = ratio_matching
     self.ratio_pose = ratio_pose
     self.detecteur = detecteur
     self.descripteur = descripteur
     self.detecteurs = {  'akaze': cv2.AKAZE_create(),
                          'agast': cv2.AgastFeatureDetector_create(),
                          'brisk': cv2.BRISK_create(),
                          'fast' : cv2.FastFeatureDetector_create(),
                          'gftt' : cv2.GFTTDetector_create(),
                          'kaze' : cv2.KAZE_create(),
                          'mser' : cv2.MSER_create(),
                          'orb'  : cv2.ORB_create(),
                          'blob' : cv2.SimpleBlobDetector_create() }
     self.descripteurs = {'brisk': cv2.BRISK_create(),
                          'orb'  : cv2.ORB_create(),}
     self.detector =   self.detecteurs[self.detecteur]
     self.descriptor = self.descripteurs[self.descripteur]
     # crossCheck=True: keep only mutual best matches.
     self.matcheur = cv2.BFMatcher(normType = self.descriptor.defaultNorm(), crossCheck = True )
     self.KF = []
     self.current_kf = None
     self.traj = []
     self.current_time = 0
     self.prev_pts = None
     # Bug fix: the original used a Python 2 print statement, which is a
     # SyntaxError on Python 3. The printed text is kept byte-identical.
     print('construction du SLAM')
Exemple #6
0
    def feature_detector_create(self, detector_name, adaptation):
        """Create an OpenCV feature detector matching the installed version.

        Parameters
        ----------
        detector_name : a DetectorType member selecting which detector to build.
        adaptation : adapter prefix ("", "Grid", "Pyramid") for the legacy
            OpenCV 2.x string-based factory; on the 3+ path it is only passed
            to the ORB/BRISK constructors.

        Returns
        -------
        The constructed detector object.
        """
        if int(self.OPENCV_MAJOR) < 3:
            # OpenCV 2.x created detectors from a composed name string,
            # e.g. "GridFAST".
            name = adaptation + detector_name
            detector = cv2.FeatureDetector_create(name)
        else:
            if detector_name == DetectorType.ORB:
                # NOTE(review): cv2.ORB(...) is the 2.x-style constructor;
                # OpenCV 3+ normally exposes cv2.ORB_create() — confirm this
                # call works on the targeted build.
                detector = cv2.ORB(adaptation)
            elif detector_name == DetectorType.FAST:
                # noinspection PyUnresolvedReferences
                detector = cv2.FastFeatureDetector_create()
            elif detector_name == DetectorType.STAR:
                # noinspection PyUnresolvedReferences
                detector = cv2.xfeatures2d.StarDetector_create()
            elif detector_name == DetectorType.MSER:
                # noinspection PyUnresolvedReferences
                detector = cv2.MSER_create()
            elif detector_name == DetectorType.GFTT:
                # noinspection PyUnresolvedReferences
                detector = cv2.GFTTDetector_create()
            elif detector_name == DetectorType.HARRIS:
                # noinspection PyUnresolvedReferences
                detector = cv2.xfeatures2d.HarrisLaplaceFeatureDetector_create()
            elif detector_name == DetectorType.BLOB:
                # noinspection PyUnresolvedReferences
                detector = cv2.SimpleBlobDetector_create()
            else:  # detector.detector() == DetectorType.BRISK:
                # NOTE(review): same 2.x-style constructor concern as ORB above.
                detector = cv2.BRISK(adaptation)

        return detector
    def __init__(self, feature='GFTT'):
        """Stereo-tracking parameters for the chosen feature type.

        feature: 'GFTT' (GFTT corners + BRIEF descriptors) or 'ORB' (ORB as
        both detector and extractor). Raises NotImplementedError otherwise.
        """
        # Epipolar / depth limits.
        self.epipolar_range = 1.0  # pixel
        self.max_depth = 20  # meter
        self.min_depth = 0.01  # meter

        if feature == 'GFTT':
            self.detector = cv2.GFTTDetector_create(
                maxCorners=500, qualityLevel=0.01,
                minDistance=9, blockSize=9)
            self.extractor = (
                cv2.xfeatures2d.BriefDescriptorExtractor_create(
                    bytes=32, use_orientation=False))
        elif feature == 'ORB':
            # ORB doubles as its own descriptor extractor.
            self.detector = cv2.ORB_create(
                nfeatures=1000, scaleFactor=1.2, nlevels=8,
                edgeThreshold=31, patchSize=31)
            self.extractor = self.detector
        else:
            raise NotImplementedError

        # Binary descriptors -> Hamming norm; mutual-best matches only.
        self.matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)

        self.dist_std_scale = 0.4

        # Tracking thresholds.
        self.disparity_matches = 40
        self.min_good_matches = 40
        self.matching_distance = 25
        self.min_inliers = 40
        self.restart_tracking = 3

        self.max_update_time = 0.45

        self.cell_size = 15
    def __init__(self):
        """Feature/matching parameters: GFTT corners + BRIEF + Hamming BF."""
        # Feature pipeline.
        self.feature_detector = cv2.GFTTDetector_create(
            maxCorners=600, minDistance=15.0,
            qualityLevel=0.001, useHarrisDetector=False)
        self.descriptor_extractor = (
            cv2.xfeatures2d.BriefDescriptorExtractor_create(
                bytes=32, use_orientation=False))
        self.descriptor_matcher = cv2.BFMatcher(
            cv2.NORM_HAMMING, crossCheck=False)

        # Descriptor-matching thresholds.
        self.matching_distance = 30
        self.matching_distance_ratio = 0.8

        # Usable depth range.
        self.depth_near = 0.1
        self.depth_far = 10

        # Loop-closure acceptance criteria.
        self.lc_min_inbetween_keyframes = 2  # frames
        self.lc_max_inbetween_distance = 3  # meters
        self.lc_embedding_distance = 30
        self.lc_inliers_threshold = 13
        self.lc_inliers_ratio = 0.3

        # Viewer configuration.
        self.view_camera_width = 0.05
        self.view_viewpoint_x = 0
        self.view_viewpoint_y = -1
        self.view_viewpoint_z = -10
        self.view_viewpoint_f = 2000
        self.view_image_width = 320
        self.view_image_height = 240

        self.view_point_cloud = False
 def __init__(self):
     """GFTT detector + BRIEF descriptor + Hamming brute-force matcher."""
     # Shi-Tomasi corners (useHarrisDetector=False), at most 1000 per image.
     self.detector = cv2.GFTTDetector_create(
         maxCorners=1000, minDistance=15.0,
         qualityLevel=0.001, useHarrisDetector=False)
     # 32-byte BRIEF descriptors without orientation compensation.
     self.descriptor = cv2.xfeatures2d.BriefDescriptorExtractor_create(
         bytes=32, use_orientation=False)
     self.matcher = cv2.BFMatcher(cv2.NORM_HAMMING)
Exemple #10
0
def detectar(x):
    """Trackbar callback: detect features on the global ``input_img`` using
    the method selected in the global ``metodo`` and display the result.

    ``x`` is the (unused) value OpenCV passes to trackbar callbacks; the
    threshold is re-read from the 'Threshold' trackbar instead.
    """
    t = cv.getTrackbarPos('Threshold', 'Deteccion de Rasgos')

    # Create a detector object according to the selected method
    # and call it to detect the features.
    if metodo == '1':
        # Create a "Good Features to Track" object.
        gftt = cv.GFTTDetector_create(0, 0.01, t, 3)
        kp = gftt.detect(input_img, None)
    elif metodo == '2':
        # Create a FAST object.
        fast = cv.FastFeatureDetector_create(t)
        # Detect the features.
        kp = fast.detect(input_img, None)
    elif metodo == '3':
        # Create an AGAST object.
        agast = cv.AgastFeatureDetector_create(t)
        # Detect the features.
        kp = agast.detect(input_img, None)
    elif metodo == '4':
        # Create a STAR detector (paired with BRIEF elsewhere).
        star = cv.xfeatures2d.StarDetector_create(t)
        # Detect the features.
        kp = star.detect(input_img, None)
    elif metodo == '5':
        # Create an ORB object.
        orb = cv.ORB_create(t)
        # Detect the features.
        kp = orb.detect(input_img, None)
    elif metodo == '6':
        # Create an AKAZE object; this detector needs much lower thresholds.
        akaze = cv.AKAZE_create(threshold=0.001 * t)
        kp = akaze.detect(input_img, None)
        print(t)
    elif metodo == '7':
        # Create a BRISK object.
        brisk = cv.BRISK_create(t)
        kp = brisk.detect(input_img, None)
    elif metodo == '8':
        # Create a KAZE object; this detector needs much lower thresholds.
        kaze = cv.KAZE_create(threshold=0.001 * t)
        kp = kaze.detect(input_img, None)
    elif metodo == '9':
        # Create a SIFT object.
        sift = cv.xfeatures2d.SIFT_create(t)
        kp = sift.detect(input_img, None)
    elif metodo == '10':
        # Create a SURF object.
        surf = cv.xfeatures2d.SURF_create(t)
        kp = surf.detect(input_img, None)

    else:
        return

    # Draw the detected features over the original image.
    output_img = cv.drawKeypoints(input_img, kp, None, color=(255, 0, 0))
    cv.imshow('Deteccion de Rasgos', output_img)
Exemple #11
0
 def __init__(self,
              pixels_per_feature=500,
              min_match_n=20,
              grid_cells_cols=8):
     """Configure the grid-based GFTT feature source.

     pixels_per_feature: image pixels budgeted per detected feature.
     min_match_n: minimum number of matches considered usable.
     grid_cells_cols: number of grid columns used for bucketing features.
     """
     self.min_match_n = min_match_n
     self.pixels_per_feature = pixels_per_feature
     self.grid_cells_cols = grid_cells_cols
     # Deliberately large maxCorners cap; the per-image pixel budget above
     # is what actually limits the feature count.
     self.gftt = cv2.GFTTDetector_create(
         maxCorners=4096 * 4, qualityLevel=0.02,
         minDistance=9, blockSize=5)
Exemple #12
0
        def detect(self):
            """Run GFTT detection on the current image and draw keypoints.

            Fetches the image via self.get_opencv_image(); if an image is
            present, detects keypoints, draws them in red and stores the
            annotated image back, then refreshes the widget.
            """
            src_image = self.get_opencv_image()
            # Bug fix: the original tested `src_image.all() != None`, which is
            # always True (ndarray.all() returns a bool, never None) and also
            # reduces over pixel values. The intent is a plain None check.
            if src_image is not None:
                detector = cv2.GFTTDetector_create()

                keypoints = detector.detect(src_image)
                detected_image = cv2.drawKeypoints(
                    src_image,
                    keypoints,
                    None,
                    color=(0, 0, 255),
                )
                self.set_opencv_image(detected_image)
            self.update()
Exemple #13
0
    def __init__(self):
        """SLAM parameter set: GFTT + BRIEF features, Hamming matching."""
        # Feature pipeline: GFTT corners, BRIEF binary descriptors and a
        # brute-force Hamming matcher (crossCheck off so ratio tests apply).
        self.feature_detector = cv2.GFTTDetector_create(
            maxCorners=600, minDistance=15.0,
            qualityLevel=0.001, useHarrisDetector=False)
        self.descriptor_extractor = (
            cv2.xfeatures2d.BriefDescriptorExtractor_create(
                bytes=32, use_orientation=False))
        self.descriptor_matcher = cv2.BFMatcher(
            cv2.NORM_HAMMING, crossCheck=False)

        # Matching parameters.
        self.matching_cell_size = 15  # pixels
        self.matching_neighborhood = 2
        self.matching_distance = 30
        self.matching_distance_ratio = 0.8

        # Depth / frustum limits.
        self.virtual_baseline = 0.1  # meters
        self.depth_near = 0.1
        self.depth_far = 10
        self.frustum_near = 0.1
        self.frustum_far = 50.0

        # Pose estimation and map initialization.
        self.pnp_min_measurements = 30
        self.pnp_max_iterations = 10
        self.init_min_points = 30

        # Local bundle adjustment.
        self.local_window_size = 10
        self.keyframes_buffer_size = 5
        self.ba_max_iterations = 10

        # Tracking-health thresholds.
        self.min_tracked_points = 150
        self.min_tracked_points_ratio = 0.75

        # Loop closure.
        self.lc_min_inbetween_keyframes = 15  # frames
        self.lc_max_inbetween_distance = 3  # meters
        self.lc_embedding_distance = 30
        self.lc_inliers_threshold = 13
        self.lc_inliers_ratio = 0.3
        self.lc_distance_threshold = 1.5  # meters
        self.lc_max_iterations = 20

        # Viewer configuration.
        self.view_camera_width = 0.05
        self.view_viewpoint_x = 0
        self.view_viewpoint_y = -1
        self.view_viewpoint_z = -10
        self.view_viewpoint_f = 2000
        self.view_image_width = 400
        self.view_image_height = 250
Exemple #14
0
def gftt(detector, emparejador, opcion, nombre1, nombre2, norma):
    """Detect GFTT keypoints + BRIEF descriptors on an image pair, show and
    save the annotated images, then forward everything to the matching stage.

    opcion selects the image pair: 'd', 'db', 'dc' use bundled test images,
    'n' uses the paths nombre1/nombre2.

    NOTE(review): if ``opcion`` matches none of the four cases, img1/img2
    are never assigned and the code below raises NameError. ``tag1`` and
    ``tag2`` used in save() are globals defined elsewhere in the file.
    """
    # Load the image pair to analyse.
    if opcion == 'd':
        img1 = cv.imread('tpic5.png', cv.IMREAD_GRAYSCALE)
        img2 = cv.imread('tpic5_flipped.png', cv.IMREAD_GRAYSCALE)

    if opcion == 'db':
        img1 = cv.imread('thome.jpg', cv.IMREAD_GRAYSCALE)
        img2 = cv.imread('thome_escale.jpg', cv.IMREAD_GRAYSCALE)

    if opcion == 'dc':
        img1 = cv.imread('tgrafizq.png', cv.IMREAD_GRAYSCALE)
        img2 = cv.imread('tgrafder.png', cv.IMREAD_GRAYSCALE)

    if opcion == 'n':
        img1 = cv.imread(nombre1, cv.IMREAD_GRAYSCALE)
        img2 = cv.imread(nombre2, cv.IMREAD_GRAYSCALE)

    # Initialise the detector and the descriptor extractor.
    gftt = cv.GFTTDetector_create()
    brief = cv.xfeatures2d.BriefDescriptorExtractor_create()

    # Detect keypoints and compute the descriptors for each image.
    kpa = gftt.detect(img1, None)
    kp1, des1 = brief.compute(img1, kpa)

    kpb = gftt.detect(img2, None)
    kp2, des2 = brief.compute(img2, kpb)

    # Display the keypoints and save the corresponding images.
    imgx = cv.drawKeypoints(img1, kp1, None, color=(0, 255, 255))
    imgy = cv.drawKeypoints(img2, kp2, None, color=(0, 255, 255))
    window_namex = "Rasgos Caracteristicos imagen 1"
    window_namey = "Rasgos Caracteristicos imagen 2"
    cv.namedWindow(window_namex)
    cv.namedWindow(window_namey)
    cv.resizeWindow(window_namex, 500, 400)
    cv.resizeWindow(window_namey, 500, 400)
    cv.imshow(window_namex, imgx)
    cv.imshow(window_namey, imgy)
    save(imgx, emparejador, detector, norma, tag1)
    save(imgy, emparejador, detector, norma, tag2)

    # Hand the keypoints/descriptors over to the matching stage.
    menu_emparejamiento(emparejador, kp1, des1, kp2, des2, img1, img2, norma,
                        detector)
    def callback(self, ros_data):
        """Callback of the subscribed compressed-image topic.

        Decodes the incoming JPEG buffer, detects GFTT features, draws them
        on the image and republishes it as a compressed JPEG message.
        """
        if VERBOSE:
            # Bug fix: Python 2 print statements converted to print() calls;
            # message text unchanged.
            print('received image of type: "%s"' % ros_data.format)

        #### direct conversion to CV2 ####
        # np.fromstring is deprecated for binary buffers; frombuffer is the
        # supported replacement and avoids a copy.
        np_arr = np.frombuffer(ros_data.data, np.uint8)
        image_np = cv2.imdecode(np_arr, cv2.IMREAD_COLOR)  # OpenCV >= 3.0:

        #### Feature detectors using CV2 ####
        # "method" is only used in the log line below; the actual detector
        # is a plain GFTT detector.
        method = "GridFAST"
        feat_det = cv2.GFTTDetector_create()

        time1 = time.time()

        # convert np image to grayscale
        featPoints = feat_det.detect(cv2.cvtColor(image_np,
                                                  cv2.COLOR_BGR2GRAY))
        time2 = time.time()
        if VERBOSE:
            print('%s detector found: %s points in: %s sec.' % (
                method, len(featPoints), time2 - time1))

        # Mark each detected feature as a filled red circle.
        for featpoint in featPoints:
            x, y = featpoint.pt
            cv2.circle(image_np, (int(x), int(y)), 3, (0, 0, 255), -1)

        #### Create CompressedImage ####
        msg = CompressedImage()
        msg.header.stamp = rospy.Time.now()
        msg.format = "jpeg"
        # ndarray.tostring() is a deprecated alias of tobytes().
        msg.data = np.array(cv2.imencode('.jpg', image_np)[1]).tobytes()
        # Publish new image
        self.image_pub.publish(msg)
Exemple #16
0
def create_panorama(descriptor_name, detector_name, normalized_imgs,
                    output_path, img_name):
    """Stitch ``normalized_imgs`` into a panorama and save the result.

    descriptor_name selects the local descriptor ('sift', 'surf', 'brief',
    'orb', 'kaze', 'akaze'); detector_name selects the keypoint detector,
    which is only used by the 'brief' pipeline. The stitched image is
    written under ``output_path`` with ``img_name`` in the file name.
    """
    # (2) detect keypoints and extract local invariant descriptors
    # NOTE(review): the "GFFT" key is probably a typo for "GFTT" — it is a
    # runtime lookup key, so callers must use the same spelling; confirm.
    detector = {
        "MSER": lambda: cv.MSER_create(),
        "FAST": lambda: cv.FastFeatureDetector_create(),
        "AGAST": lambda: cv.AgastFeatureDetector_create(),
        "GFFT": lambda: cv.GFTTDetector_create(),
        "STAR": lambda: cv.xfeatures2d.StarDetector_create()
    }[detector_name]()
    descriptor = {
        "sift": lambda: cv.xfeatures2d.SIFT_create(),
        "surf": lambda: cv.xfeatures2d.SURF_create(),
        "brief": lambda: cv.xfeatures2d.BriefDescriptorExtractor_create(),
        "orb": lambda: cv.ORB_create(nfeatures=1500),
        "kaze": lambda: cv.KAZE_create(),
        "akaze": lambda: cv.AKAZE_create(),
    }[descriptor_name]()

    def default_describe(img):
        # Detector+descriptor in one call; used for every pipeline but BRIEF.
        return descriptor.detectAndCompute(img.astype('uint8'), None)

    # BRIEF is descriptor-only, so it needs the separate detector above.
    descriptor_apply_function = {
        "brief":
        lambda img: extract_and_describe_with_brief(img, detector, descriptor)
    }.get(descriptor_name, lambda img: default_describe(img))
    matcher = cv.BFMatcher()
    ratio = 0.75
    reproj_threshold = 4.
    # Tag used in output file names.
    alg = descriptor_name if descriptor_name in [
        "sift", "surf", "orb", "kaze", "akaze"
    ] else "{}_{}".format(descriptor_name, detector_name)

    def r_stitch(a, b):
        # Fold step: stitch the accumulated panorama with the next image.
        return stitch(a[1], b[1], ratio, descriptor_apply_function, matcher,
                      reproj_threshold, True, a[0] + 1, b[0] + 1, img_name,
                      alg)

    result = reduce(r_stitch, enumerate(normalized_imgs))
    save_img(result[1], "{}/{}_result_{}.jpg".format(output_path, img_name,
                                                     alg))
Exemple #17
0
 def setup(self):
     """Instantiate the OpenCV feature object matching self._feature_type.

     Keyword arguments stored in self._kwargs override the per-type
     defaults. For 'FREAK' (descriptor-only) a SURF detector is created
     as well. Raises ValueError for unknown feature types.
     """
     super(FeatureExtraction, self).setup()
     factories = {
         'ORB': (cv2.ORB_create, {'nfeatures': 10000}),
         'BRISK': (cv2.BRISK_create, {}),
         'SURF': (cv2.xfeatures2d.SURF_create, {}),
         'SIFT': (cv2.xfeatures2d.SIFT_create, {}),
         'KAZE': (cv2.KAZE_create, {}),
         'AKAZE': (cv2.AKAZE_create, {}),
         'FREAK': (cv2.xfeatures2d.FREAK_create, {}),
         'FAST': (cv2.FastFeatureDetector_create, {}),
         'GFTT': (cv2.GFTTDetector_create, {}),
     }
     try:
         factory, type_defaults = factories[self._feature_type]
     except KeyError:
         raise ValueError("Invalid feature type")
     params = dict(type_defaults)
     params.update(self._kwargs)
     self._descriptor = factory(**params)
     if self._feature_type == 'FREAK':
         # FREAK only computes descriptors; pair it with a SURF detector.
         self._detector = cv2.xfeatures2d.SURF_create()
Exemple #18
0
def gftt(detector, emparejador, opcion, nombre1, nombre2):
    """Detect GFTT keypoints + BRIEF descriptors on an image pair and hand
    them to the matching stage.

    opcion: 'd' uses the bundled graf1/graf3 test images, 'n' loads
    nombre1/nombre2 from disk.

    NOTE(review): img1/img2 stay unbound (NameError below) when opcion is
    neither 'd' nor 'n' — kept to preserve existing behavior.
    """
    if opcion == 'd':
        img1 = cv.imread('graf1.png', cv.IMREAD_GRAYSCALE)  # queryImage
        img2 = cv.imread('graf3.png', cv.IMREAD_GRAYSCALE)  # trainImage
    if opcion == 'n':
        img1 = cv.imread(nombre1, cv.IMREAD_GRAYSCALE)  # queryImage
        img2 = cv.imread(nombre2, cv.IMREAD_GRAYSCALE)  # trainImage

    gftt = cv.GFTTDetector_create()
    # Initiate BRIEF extractor
    brief = cv.xfeatures2d.BriefDescriptorExtractor_create()

    kpa = gftt.detect(img1, None)
    kp1, des1 = brief.compute(img1, kpa)

    # Bug fix: keypoints for the second image were detected on img1, so
    # descriptors for img2 were computed at the wrong locations.
    kpb = gftt.detect(img2, None)
    kp2, des2 = brief.compute(img2, kpb)

    menu_emparejamiento(emparejador, kp1, des1, kp2, des2, img1, img2)
Exemple #19
0
 def __init__(self, camera, annotations):
     """Visual-odometry state: feature pipeline plus pose accumulators.

     camera: object providing focal, cx and cy.
     annotations: path to a ground-truth file, read into a list of lines.
     """
     # Nothing processed yet: frame, keypoint, descriptor and match slots
     # all start empty.
     for attr in ('prev_frame', 'current_frame', 'prev_keypoints',
                  'current_keypoints', 'prev_des', 'current_des', 'matches'):
         setattr(self, attr, None)
     # GFTT corners + BRIEF descriptors, matched with mutual-best Hamming.
     self.detector = cv2.GFTTDetector_create(
         maxCorners=1000, minDistance=15.0,
         qualityLevel=0.001, useHarrisDetector=False)
     self.descriptor = cv2.xfeatures2d.BriefDescriptorExtractor_create(
         bytes=32, use_orientation=False)
     self.matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
     self.focal = camera.focal
     self.projection_center = (camera.cx, camera.cy)
     self.frame_number = 0
     # Pose starts at identity rotation / zero translation.
     self.R = np.eye(3)
     self.t = np.zeros((3, 1))
     self.trueX = self.trueY = self.trueZ = 0
     with open(annotations) as f:
         self.annotations = f.readlines()
def detectar(x):
    """Trackbar callback: detect features on the global ``input_img`` with
    the method in the global ``metodo`` (1=GFTT, 2=FAST, 3=AGAST).

    ``x`` is the (unused) trackbar value OpenCV passes to callbacks.
    """
    t = cv.getTrackbarPos('Threshold', 'Deteccion de Rasgos')
    # Create a detector object according to the selected method
    # and call it to detect the features.
    if metodo == '1':
        gftt = cv.GFTTDetector_create(0, 0.01, t, 3)
        kp = gftt.detect(input_img, None)
    elif metodo == '2':
        # Create a FAST object.
        fast = cv.FastFeatureDetector_create(t)
        # Detect the features.
        kp = fast.detect(input_img, None)
    elif metodo == '3':
        # Create an AGAST object.
        agast = cv.AgastFeatureDetector_create(t)
        # Detect the features.
        kp = agast.detect(input_img, None)
    else:
        return
    # Draw the detected features over the original image.
    output_img = cv.drawKeypoints(input_img, kp, None, color=(255, 0, 0))
    cv.imshow('Deteccion de Rasgos', output_img)
Exemple #21
0
    def create_detector(self, detector):
        """ Create detector object.

        Parameters
        ----------
        detector : str
            The detector type to create.

        Raises
        ------
        ValueError
            If the detector name is not supported.
        """
        # Bug fix: the original compared strings with `is`, which tests
        # object identity, not equality. That only works by accident of
        # CPython string interning and emits a SyntaxWarning on 3.8+.
        if detector == 'Agast':
            det = cv2.AgastFeatureDetector_create()
        elif detector == 'AKAZE':
            det = cv2.AKAZE_create()
        elif detector == 'BRISK':
            det = cv2.BRISK_create()
        elif detector == 'Fast':
            det = cv2.FastFeatureDetector_create()
        elif detector == 'GFTT':
            det = cv2.GFTTDetector_create()
        elif detector == 'KAZE':
            det = cv2.KAZE_create()
        elif detector == 'MSER':
            det = cv2.MSER_create()
        elif detector == 'ORB':
            det = cv2.ORB_create()

        # Detectors below live in the opencv-contrib xfeatures2d module.
        elif detector == 'MSD':
            det = xfeatures2d.MSDDetector_create()
        elif detector == 'SIFT':
            det = xfeatures2d.SIFT_create()
        elif detector == 'SURF':
            det = xfeatures2d.SURF_create()
        elif detector == 'Star':
            det = xfeatures2d.StarDetector_create()
        else:
            raise ValueError("Unsupported detector")

        return det
    def descriptors_matching(self,
                             datasetName,
                             dirPath,
                             csvTest,
                             Ns=500,
                             good=0.3):
        """Qualitatively compare network-based matching against GFTT+ORB.

        Picks one random RGB/IR pair from ``csvTest``, extracts ``Ns`` GFTT
        corners per image, describes the surrounding patches with the hybrid
        network and brute-force matches them by L2 distance; then repeats
        the matching with classic ORB descriptors. Both match sets are
        displayed with matplotlib.

        Parameters
        ----------
        datasetName : only "VEDAI" is supported; anything else exits.
        dirPath : root directory holding the dataset images.
        csvTest : CSV file with an 'rgb' column of image file names.
        Ns : number of corners requested per image.
        good : fraction of the lowest-distance matches to keep/draw.
        """

        df = pandas.read_csv(csvTest)
        # Pick one random row of the test split.
        IMAGE = np.random.randint(0, len(df))

        fileName = df['rgb'][IMAGE]

        if datasetName == "VEDAI":
            # VEDAI naming convention: <id>_co.png (color) / <id>_ir.png.
            imageRgbPath = dirPath + fileName[:-6] + "co.png"
            imageIrPath = dirPath + fileName[:-6] + "ir.png"

        else:
            sys.exit("ERROR: Unknown dataset")

        print("Loading: ", imageRgbPath)
        img_rgb = cv2.imread(imageRgbPath)
        print("[image RGB]: ", np.shape(img_rgb))

        print("Loading: ", imageIrPath)
        img_ir = cv2.imread(imageIrPath)
        print("[image IR]: ", np.shape(img_ir))

        if np.size(img_rgb) != np.size(img_ir):
            sys.exit("ERROR: Different images resolution")

        # Crop both images so the height is a multiple of the patch size.
        if np.size(img_rgb, 0) % self.patchSize != 0:
            img_rgb = img_rgb[0:np.size(img_rgb, 0) // self.patchSize *
                              self.patchSize, :]
            img_ir = img_ir[0:np.size(img_rgb, 0) // self.patchSize *
                            self.patchSize, :]
            print("[image RGB]: ", np.shape(img_rgb))
            print("[image IR]: ", np.shape(img_ir))

        # Same for the width.
        # NOTE(review): this branch crops columns using the *row* count
        # (np.size(img_rgb, 0)) — axis 1 was probably intended; confirm.
        if np.size(img_rgb, 1) % self.patchSize != 0:
            img_rgb = img_rgb[:, 0:np.size(img_rgb, 0) // self.patchSize *
                              self.patchSize]
            img_ir = img_ir[:, 0:np.size(img_rgb, 0) // self.patchSize *
                            self.patchSize]
            print("[image RGB]: ", np.shape(img_rgb))
            print("[image IR]: ", np.shape(img_ir))

        # Keep the untouched color images for visualisation.
        img_rgb_raw = img_rgb
        img_ir_raw = img_ir

        img_rgb = cv2.cvtColor(img_rgb, cv2.COLOR_RGB2GRAY)
        img_ir = cv2.cvtColor(img_ir, cv2.COLOR_RGB2GRAY)

        img_rgb_gray = img_rgb
        img_ir_gray = img_ir

        # Extract keypoints using GFTT; the mask keeps corners at least one
        # patch away from the border so complete patches can be cut later.
        mask = np.zeros(np.shape(img_rgb), np.uint8)
        mask[self.patchSize:np.size(img_rgb, 0) - self.patchSize - 1,
             self.patchSize:np.size(img_rgb, 1) - self.patchSize - 1] = 255
        rgb_corners = cv2.goodFeaturesToTrack(img_rgb, Ns, 0.01, 3, mask=mask)
        ir_corners = cv2.goodFeaturesToTrack(img_ir, Ns, 0.01, 3, mask=mask)

        if np.size(rgb_corners, 0) != Ns and np.size(ir_corners, 0) != Ns:
            sys.exit("Error: Not enough features detected")

        # Extract patches around keypoints (on zero-mean images).
        img_rgb = img_rgb - np.mean(img_rgb)
        img_ir = img_ir - np.mean(img_ir)

        img_rgb_input = np.empty(
            (np.size(rgb_corners, 0), self.patchSize, self.patchSize, 1))
        img_ir_input = np.empty(
            (np.size(ir_corners, 0), self.patchSize, self.patchSize, 1))

        # NOTE(review): patches start at the corner coordinate rather than
        # being centred on it — confirm this matches the training layout.
        for c in range(np.size(rgb_corners, 0)):
            x = int(rgb_corners[c, 0, 0])
            y = int(rgb_corners[c, 0, 1])
            img_rgb_input[c, :, :, 0] = img_rgb[y:y + self.patchSize,
                                                x:x + self.patchSize]

        for c in range(np.size(ir_corners, 0)):
            x = int(ir_corners[c, 0, 0])
            y = int(ir_corners[c, 0, 1])
            img_ir_input[c, :, :, 0] = img_ir[y:y + self.patchSize,
                                              x:x + self.patchSize]

        # Feed network
        x_representation, y_representation, Ws_x, Ws_y, Wx_x, Wy_y, siamese_features_x, siamese_features_y, asymmetric_features_x, asymmetric_features_y = self.hybridModel(
            [img_rgb_input, img_ir_input], training=False)

        # Brute Force Matcher solution, pick 2-norm distance minimum as a pair
        dis_list = []
        min_min_dis = float("inf")
        for current in range(Ns):

            min_dis = float("inf")
            min_idx = 0

            for candidate in range(Ns):

                dis = np.linalg.norm(x_representation[current, :] -
                                     y_representation[candidate, :])

                if dis < min_dis:
                    min_idx = candidate
                    min_dis = dis

            dis_list.append([current, min_idx, min_dis])

            if min_dis < min_min_dis:
                min_min_dis = min_dis

        # Keep only the best `good` fraction of matches by distance.
        dis_list = np.array(dis_list)
        sorted_idx = np.argsort(dis_list[:, 2])
        sorted_idx = sorted_idx[:int(good * len(sorted_idx))]
        matchesImage = np.hstack([img_rgb_raw, img_ir_raw])
        h, w = np.shape(img_rgb)

        # Draw each retained match as a random-colour line across the
        # side-by-side image (IR x-coordinates are shifted by the width).
        for f in range(len(sorted_idx)):

            rgb_x = int(rgb_corners[int(dis_list[f, 0]), 0, 0])
            rgb_y = int(rgb_corners[int(dis_list[f, 0]), 0, 1])

            ir_x = int(ir_corners[int(dis_list[f, 1]), 0, 0]) + w
            ir_y = int(ir_corners[int(dis_list[f, 1]), 0, 1])

            R = np.random.randint(0, 255)
            G = np.random.randint(0, 255)
            B = np.random.randint(0, 255)

            cv2.line(matchesImage, (rgb_x, rgb_y), (ir_x, ir_y), (B, G, R), 1)

        matchesImage = cv2.cvtColor(matchesImage, cv2.COLOR_BGR2RGB)

        plt.figure()
        plt.imshow(matchesImage)
        plt.title("Siamese Matching")
        plt.show()

        # GFTT + ORB Feature matching (classic baseline for comparison)
        gftt = cv2.GFTTDetector_create(Ns, 0.01, 3)
        rgb_kp = gftt.detect(img_rgb_gray, mask)
        ir_kp = gftt.detect(img_ir_gray, mask)

        orb = cv2.ORB_create()
        _, rgb_des = orb.compute(img_rgb_gray, rgb_kp)
        _, ir_des = orb.compute(img_ir_gray, ir_kp)

        bf = cv2.BFMatcher(cv2.NORM_HAMMING, False)

        # Match descriptors.
        matches = bf.match(rgb_des, ir_des)

        # Sort them in the order of their distance.
        matches = sorted(matches, key=lambda x: x.distance)

        # Draw first matches.
        img_rgb_raw = cv2.cvtColor(img_rgb_raw, cv2.COLOR_BGR2RGB)
        orb_img = cv2.drawMatches(img_rgb_raw, rgb_kp, img_ir_raw, ir_kp,
                                  matches[:len(sorted_idx)], None)

        plt.figure()
        plt.imshow(orb_img)
        plt.title("ORB Matching")
        plt.show()
Exemple #23
0
def detectFeatures(img, detector):
    """Detect keypoints in ``img`` with the named detector (or
    detector/descriptor combination) and return their coordinates.

    Parameters:
    img : grayscale input image
    detector : either a single algorithm name ('SIFT', 'SURF', 'ORB',
        'BRISK', 'KAZE', 'AKAZE'), a '<DETECTOR>_<DESCRIPTOR>' pair such
        as 'FAST_BRIEF', 'GFTT_DAISY' or 'SimpleBlobDetector_LATCH', or
        the special mode 'Tomasi_ORB' (goodFeaturesToTrack + ORB).

    Returns:
    Nx2 float32 array with the (x, y) coordinates of the detected
    keypoints (descriptors are computed but discarded, as before).

    Raises:
    ValueError : if the detector string is not recognised (previously an
        unknown name fell through and crashed with a NameError).
    """
    # Keypoint detector factories. SURF/ORB keep the original tuning
    # (hessianThreshold=1000 / nfeatures=1000) and FAST keeps threshold=50
    # with non-max suppression, exactly as in the hand-written branches.
    detector_factories = {
        'SimpleBlobDetector': cv2.SimpleBlobDetector_create,
        'FAST': lambda: cv2.FastFeatureDetector_create(threshold=50,
                                                       nonmaxSuppression=True),
        'GFTT': cv2.GFTTDetector_create,
        'AGAST': cv2.AgastFeatureDetector_create,
        'STAR': cv2.xfeatures2d.StarDetector_create,
        'MSER': cv2.MSER_create,
        'SIFT': cv2.xfeatures2d.SIFT_create,
        'SURF': lambda: cv2.xfeatures2d.SURF_create(1000),
        'ORB': lambda: cv2.ORB_create(1000),
        'BRISK': cv2.BRISK_create,
        'KAZE': cv2.KAZE_create,
        'AKAZE': cv2.AKAZE_create,
    }
    # Descriptor extractor factories used by the combined modes.
    extractor_factories = {
        'BRIEF': cv2.xfeatures2d.BriefDescriptorExtractor_create,
        'VGG': cv2.xfeatures2d.VGG_create,
        'BoostDesc': cv2.xfeatures2d.BoostDesc_create,
        'LATCH': cv2.xfeatures2d.LATCH_create,
        'DAISY': cv2.xfeatures2d.DAISY_create,
        'FREAK': cv2.xfeatures2d.FREAK_create,
        'LUCID': cv2.xfeatures2d.LUCID_create,
        'SIFT': cv2.xfeatures2d.SIFT_create,
        'SURF': lambda: cv2.xfeatures2d.SURF_create(1000),
        'ORB': lambda: cv2.ORB_create(1000),
        'BRISK': cv2.BRISK_create,
    }
    # Algorithms that detect and describe in a single call.
    detect_and_compute = ('SIFT', 'SURF', 'ORB', 'BRISK', 'KAZE', 'AKAZE')

    if detector == 'Tomasi_ORB':
        # Shi-Tomasi corners converted to KeyPoints, then described by ORB.
        orb = cv2.ORB_create()
        corners = cv2.goodFeaturesToTrack(img, 1000,
                                          qualityLevel=0.01,
                                          minDistance=7)
        # cv2.KeyPoint(x, y, size): the legacy `_size` keyword used here
        # before was removed from modern OpenCV Python bindings.
        keypoints = [cv2.KeyPoint(float(c[0][0]), float(c[0][1]), 20)
                     for c in corners]
        feature_pts, des = orb.compute(img, keypoints)

    elif detector in detect_and_compute:
        feature_pts, des = \
            detector_factories[detector]().detectAndCompute(img, None)

    elif '_' in detector:
        # Detector names contain no underscore, so splitting at the first
        # '_' cleanly separates detector from descriptor.
        det_name, _, desc_name = detector.partition('_')
        if (det_name not in detector_factories
                or desc_name not in extractor_factories):
            raise ValueError("Unsupported detector: %s" % detector)
        # Detect first, then describe. This also fixes the original
        # branches (e.g. FAST_SIFT, MSER_SURF, GFTT_ORB, AGAST_BRISK) that
        # silently used a StarDetector regardless of the requested
        # detector, and the *_VGG branches that never unpacked compute()'s
        # (keypoints, descriptors) tuple.
        keypoints = detector_factories[det_name]().detect(img, None)
        feature_pts, des = \
            extractor_factories[desc_name]().compute(img, keypoints)

    else:
        raise ValueError("Unsupported detector: %s" % detector)

    # Only the point coordinates are returned; descriptors are discarded
    # exactly as in the original implementation.
    return np.array([kp.pt for kp in feature_pts], dtype=np.float32)
Exemple #24
0
# 0905.py
import cv2
import numpy as np

src = cv2.imread('./data/chessBoard.jpg')
gray = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)

# 1: GFTT keypoint detector with default parameters (Shi-Tomasi corners)
##goodF = cv2.GFTTDetector.create()
detector = cv2.GFTTDetector_create()
keypoints = detector.detect(gray)
print('len(kp)=', len(keypoints))
vis = cv2.drawKeypoints(gray, keypoints, None, color=(0, 0, 255))
cv2.imshow('dst', vis)

# 2: Harris-based variant with tighter quality/distance constraints
harris_detector = cv2.GFTTDetector_create(maxCorners=50,
                                          qualityLevel=0.1,
                                          minDistance=10,
                                          useHarrisDetector=True)
keypoints2 = harris_detector.detect(gray)
print('len(kp2)=', len(keypoints2))
vis2 = cv2.drawKeypoints(gray, keypoints2, None, color=(0, 0, 255))
cv2.imshow('dst2', vis2)
cv2.waitKey()
cv2.destroyAllWindows()
Exemple #25
0
def fastDetect(img1L, img2L, feature_detector=0):
    """
    Feature detection + KLT tracking between two frames.

    Parameters:
    img1L : left image of the first frame (grayscale)
    img2L : left image of the second frame (grayscale)
    feature_detector : 0 = FAST (tiled), 1 = GFTT

    Returns:
    (features1_KLT_L, features2_KLT_L): coordinates of features matched
    between the two frames (tracked, error-thresholded, in-bounds).
    """
    H, W = img1L.shape
    TILE_H = 10
    TILE_W = 20
    kp = []

    if feature_detector == 0:
        # Run FAST per tile to spread features across the image; keep at
        # most the 10 strongest responses per tile.
        featureEngine = cv2.FastFeatureDetector_create()

        for y in range(0, H, TILE_H):
            for x in range(0, W, TILE_W):
                imPatch = img1L[y:y + TILE_H, x:x + TILE_W]
                keypoints = featureEngine.detect(imPatch)
                for pt in keypoints:
                    # shift patch-local coordinates back to image coordinates
                    pt.pt = (pt.pt[0] + x, pt.pt[1] + y)
                if len(keypoints) > 10:
                    # (renamed the sort key: the old lambda shadowed the
                    # tile loop variable `x`)
                    keypoints = sorted(keypoints,
                                       key=lambda k: -k.response)[0:10]
                for kpt in keypoints:
                    kp.append(kpt)

    if feature_detector == 1:
        featureEngine = cv2.GFTTDetector_create(maxCorners=4000, minDistance=8.0, qualityLevel=0.001, useHarrisDetector=False)
        keypoints = featureEngine.detect(img1L)

        for kpt in keypoints:
            kp.append(kpt)

    features1 = cv2.KeyPoint_convert(kp)
    features1 = np.expand_dims(features1, axis=1)

    # Parameters for Lucas-Kanade optical flow
    lk_params = dict( winSize  = (15,15),
                        maxLevel = 3,
                        criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 50, 0.03))

    # NOTE: the original passed flags=cv2.MOTION_AFFINE here. That constant
    # belongs to findTransformECC and aliases OPTFLOW_USE_INITIAL_FLOW,
    # which is invalid without an initial flow estimate, so it is dropped.
    features2, st, err = cv2.calcOpticalFlowPyrLK(img1L, img2L, features1, None, **lk_params)

    # separate points that were tracked successfully
    ptTrackable = np.where(st == 1, 1, 0).astype(bool)
    features1_KLT = features1[ptTrackable, ...]
    features2_KLT = features2[ptTrackable, ...]
    features2_KLT = np.around(features2_KLT)

    # among tracked points keep only those within the error measure
    error = 4
    errTrackablePoints = err[ptTrackable, ...]
    errThresholdedPoints = np.where(errTrackablePoints < error, 1, 0).astype(bool)
    features1_KLT = features1_KLT[errThresholdedPoints, ...]
    features2_KLT = features2_KLT[errThresholdedPoints, ...]

    # drop tracked points whose coordinates fell outside the image
    hPts = np.where(features2_KLT[:, 1] >= H)
    wPts = np.where(features2_KLT[:, 0] >= W)
    outTrackPts = hPts[0].tolist() + wPts[0].tolist()
    outDeletePts = list(set(outTrackPts))

    if len(outDeletePts) > 0:
        features1_KLT_L = np.delete(features1_KLT, outDeletePts, axis=0)
        features2_KLT_L = np.delete(features2_KLT, outDeletePts, axis=0)
    else:
        features1_KLT_L = features1_KLT
        features2_KLT_L = features2_KLT

    return features1_KLT_L, features2_KLT_L
"""
特征提取之关键点检测(GFTTDetector)
"""

import cv2 as cv

image = cv.imread("images/test4.jpg")
cv.imshow("input", image)

# 创建GFTT特征检测器
gftt = cv.GFTTDetector_create(100, 0.01, 1, 3, False, 0.04)
kp1 = gftt.detect(image, None)
for marker in kp1:
    result = cv.drawMarker(image,
                           tuple(int(i) for i in marker.pt),
                           color=(0, 255, 0))

cv.imshow("GFTT-Keypoint-Detect", result)
cv.waitKey(0)
cv.destroyAllWindows()
Exemple #27
0
LastEditTime : 2021-09-01 10:55:15
Description  : 特征提取之关键点检测 - GFTTDetector
    该方法是基于shi-tomas角点检测变化而来的一种特征提取方法,OpenCV创建该检测器的API与goodfeaturetotrack的API参数极其类似:
    Ptr<GFTTDetector> cv::GFTTDetector::create(
        int maxCorners = 1000,
        double qualityLevel = 0.01,
        double minDistance = 1,
        int blockSize = 3,
        bool useHarrisDetector = false,
        double k = 0.04 
    )
    唯一不同的,该方法返回一个指针。

    PS:
    需要注意的是该方法无法提取描述子,只支持提取关键点!
'''

import cv2 as cv

# Load the image and convert to grayscale for detection.
source = cv.imread("../data/images/test1.png")
gray = cv.cvtColor(source, cv.COLOR_BGR2GRAY)

# GFTT (Shi-Tomasi) supplies keypoints only -- no descriptors.
detector = cv.GFTTDetector_create()
kps = detector.detect(gray, None)

# Overlay the detected keypoints on the original colour image.
source = cv.drawKeypoints(source, kps, None)
cv.imshow("result", source)

cv.waitKey(0)
cv.destroyAllWindows()
class DetectorDescriptor:
    """Pair an OpenCV feature detector with a descriptor extractor.

    Both are looked up by name in the class-level tables below. Tables
    prefixed with ``x`` hold algorithms from the opencv-contrib
    ``xfeatures2d`` module.
    """

    # Detectors available in the core cv2 namespace.
    detectors = {
        'Agast': cv2.AgastFeatureDetector_create(),
        'AKAZE': cv2.AKAZE_create(),
        'BRISK': cv2.BRISK_create(),
        'Fast': cv2.FastFeatureDetector_create(),
        'GFTT': cv2.GFTTDetector_create(),
        'KAZE': cv2.KAZE_create(),
        'MSER': cv2.MSER_create(),
        'ORB': cv2.ORB_create()
    }
    # Extra detectors from opencv-contrib (xfeatures2d).
    xdetectors = {
        #        'Boost': xfeatures2d.BoostDesc_create(),
        'Harris': xfeatures2d.HarrisLaplaceFeatureDetector_create(),
        #        'PCT': xfeatures2d.PCTSignatures_create(),
        'Star': xfeatures2d.StarDetector_create()
    }

    # Descriptor extractors. AKAZE/KAZE are None placeholders: they can
    # only be instantiated once the detector is known (see
    # _create_kaze_descriptor).
    descriptors = {
        'AKAZE': None,
        'BRISK': cv2.BRISK_create(),
        'KAZE': None,
        'ORB': cv2.ORB_create(),
    }
    # Extra descriptor extractors from opencv-contrib (xfeatures2d).
    xdescriptors = {
        #        'Boost': xfeatures2d.BoostDesc_create(),
        'BRIEF': xfeatures2d.BriefDescriptorExtractor_create(),
        'DAISY': xfeatures2d.DAISY_create(),
        'FREAK': xfeatures2d.FREAK_create(),
        'LATCH': xfeatures2d.LATCH_create(),
        #        'LUCID': xfeatures2d.LUCID_create(),
        'VGG': xfeatures2d.VGG_create()
    }

    def __init__(self, det_s, des_s=None):
        """Select detector ``det_s`` and, optionally, descriptor ``des_s``.

        Raises:
            ValueError: if either name is missing from the lookup tables.
        """
        # Parses str(algorithm) output such as '<xfeatures2d_SIFT 0x...>'
        # into its class name; used by _stringify.
        self._string_re = re.compile(r'<([^\W_]+)_?(\w+)?')

        try:
            self.det = self.detectors[det_s]
        except KeyError:
            try:
                self.det = self.xdetectors[det_s]
            except KeyError:
                # `from None` hides the internal KeyError chain from callers
                raise ValueError("Unsupported detector") from None

        if des_s:
            try:
                self.desc = self.descriptors[des_s]
            except KeyError:
                try:
                    self.desc = self.xdescriptors[des_s]
                except KeyError:
                    raise ValueError("Unsupported descriptor") from None

            # AKAZE and KAZE special case: their table entries are None.
            if self.desc is None:
                self.desc = self._create_kaze_descriptor(des_s)
        else:
            self.desc = None

    def _create_kaze_descriptor(self, des_s):
        """AKAZE only allows AKAZE or KAZE detectors."""

        if isinstance(self.det, (cv2.AKAZE, cv2.KAZE)):
            if des_s == 'AKAZE':
                return cv2.AKAZE_create()
            return cv2.KAZE_create()
        return None

    def _stringify(self, obj):
        """Return the algorithm class name embedded in str(obj)."""
        match = self._string_re.match(str(obj))
        if match.group(2):  # In the case of 'xfeatures2d_SIFT' etc.
            return match.group(2)
        return match.group(1)

    @property
    def detector_s(self):
        """Name of the selected detector, e.g. 'GFTT'."""
        return self._stringify(self.det)

    @property
    def descriptor_s(self):
        """Name of the selected descriptor (raises AttributeError if none)."""
        return self._stringify(self.desc)

    def detect_and_compute(self, image):
        """Detect keypoints and compute descriptors in a single call."""
        return self.det.detectAndCompute(image, None)

    def detect(self, image):
        """Detect keypoints; return an empty list on any failure."""
        try:
            keypoints = self.det.detect(image)
        except Exception:  # was a bare except; keep best-effort behavior
            return []
        else:
            return keypoints

    def compute(self, image, keypoints):
        """Compute descriptors for the given keypoints.

        Returns ([], []) on any failure, including when no descriptor
        extractor was configured (self.desc is None).
        """
        try:
            (keypoints, descriptors) = self.desc.compute(image, keypoints)
        except Exception:  # was a bare except; also covers desc=None
            return [], []
        else:
            return keypoints, descriptors
Exemple #29
0
"""
This shows how to use different algorithms for the detector and the descriptor
in two steps, instead of a single detectAndCompute step.
"""

import cv2
import matplotlib.pyplot as plt
import time

# Two grayscale views of the scene.
img1 = cv2.imread(r'imgs/1.png', cv2.IMREAD_GRAYSCALE)
img2 = cv2.imread(r'imgs/6.png', cv2.IMREAD_GRAYSCALE)

t_start = time.time()

# Detection and description are decoupled: GFTT finds the keypoints,
# SIFT computes descriptors for them (alternatives left commented out).
# detector = cv2.AKAZE_create()
detector = cv2.GFTTDetector_create()

extractor = cv2.SIFT_create()
# extractor = cv2.KAZE_create()

# Step 1: detect keypoints in each image.
kps1 = detector.detect(img1)
kps2 = detector.detect(img2)

print(kps1)
print(kps2)

# Step 2: compute descriptors for the externally detected keypoints.
kps1, descs1 = extractor.compute(img1, kps1)
kps2, descs2 = extractor.compute(img2, kps2)
Exemple #30
0
            # compare f1 against every other keypoint (skip itself)
            for j, f2 in enumerate(kp):
                if i == j:
                    continue
                # NOTE(review): distance() is defined above this chunk;
                # presumably Euclidean distance between keypoint centers — confirm.
                # suppress f2 when it lies within distE of f1
                if distance(f1, f2)<distE:
                    mask[j] = False
    np_kp = np.array(kp)
    # keep only the keypoints that survived the distance filter
    return list(np_kp[mask])

# Load the chessboard test image and smooth it slightly before detection.
src = cv2.imread('Chessboard.jpg')
gray= cv2.cvtColor(src,cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (5, 5), 0.0)

# Several alternative detectors are created, but only fastF is used below.
fastF = cv2.FastFeatureDetector_create(threshold=30)
mserF = cv2.MSER_create(10)
blobF = cv2.SimpleBlobDetector_create()
goodF = cv2.GFTTDetector_create(maxCorners= 20,minDistance = 10)

kp= fastF.detect(gray)
print('len(kp)=', len(kp))

# NOTE(review): filteringByDistance is defined earlier in the file; it
# appears to drop keypoints within 10 px of another — confirm its contract.
filtered_kp = filteringByDistance(kp, 10)
print('len(filtered_kp)=', len(filtered_kp))
dst = cv2.drawKeypoints(gray, filtered_kp, None, color=(0,0,255))   
cv2.imshow('dst',  dst)

# BRISK computes binary descriptors for the FAST keypoints that
# survived the distance filter (it may also drop some keypoints).
briskF = cv2.BRISK_create()
filtered_kp, des = briskF.compute(gray, filtered_kp)
print('des.shape=', des.shape)
print('des=', des)

dst2 = cv2.drawKeypoints(gray, filtered_kp, None, color=(0,0,255))