Example #1
0
 def get_features(self, img):
     """Detect SURF keypoints in *img* and compute their descriptors.

     Returns a tuple ``(keypoints, descriptors)`` where *keypoints* is the
     raw ``cv.vector_KeyPoint`` container and *descriptors* is the SURF
     output wrapped in a numpy array.
     """
     # Final True flag requests extended (128-element) SURF descriptors.
     detector = cv.SURF(self.hessian_th, self.octaves, self.layers, True)
     kp = cv.vector_KeyPoint()
     raw_descriptors = detector(img, cv.Mat(), kp)
     return kp, np.array(raw_descriptors)
 def get_features(self, img):
     """Run SURF on *img*; return (keypoints, descriptor array)."""
     # Extended descriptors are enabled via the trailing True argument.
     extractor = cv.SURF(self.hessian_th, self.octaves, self.layers, True)
     found = cv.vector_KeyPoint()
     descriptors = np.array(extractor(img, cv.Mat(), found))
     return found, descriptors
Example #3
0
    # Convert the colour object image to grayscale into `object`.
    # NOTE(review): `object` shadows the Python builtin — presumably kept for
    # parity with the original C sample; rename if this code is revived.
    cv.cvtColor(object_color, object, cv.CV_BGR2GRAY)

    # corners
    # The four corners of the object image (top-left, top-right,
    # bottom-right, bottom-left). dst_corners will later receive their
    # projected positions in the scene image.
    src_corners = [
        cv.Point(0, 0),
        cv.Point(object.cols, 0),
        cv.Point(object.cols, object.rows),
        cv.Point(0, object.rows)
    ]
    # NOTE(review): list-multiply repeats the SAME Point object four times;
    # safe only if the entries are reassigned (not mutated in place) — verify
    # against the code that fills dst_corners.
    dst_corners = [cv.Point()] * 4

    # find keypoints on both images
    # SURF with hessian threshold 500, 4 octaves, 2 layers, extended
    # (128-dim) descriptors — parameter order presumed to match
    # cv.SURF(hessian_th, octaves, layers, extended); TODO confirm binding.
    surf = cv.SURF(500, 4, 2, True)
    # Empty mask: detect over the whole image.
    mask = cv.Mat()
    tt = float(cv.getTickCount())
    objectKeypoints = cv.vector_KeyPoint()
    objectDescriptors = surf(object, mask, objectKeypoints)
    print("Object Descriptors: %d\n" % len(objectKeypoints))
    imageKeypoints = cv.vector_KeyPoint()
    imageDescriptors = surf(image, mask, imageKeypoints)
    print("Image Descriptors: %d\n" % len(imageKeypoints))
    tt = float(cv.getTickCount()) - tt
    # NOTE(review): division by (freq * 1000) yields milliseconds only if
    # getTickFrequency follows the legacy C API (ticks per microsecond);
    # the modern API returns ticks per second — confirm which this binding
    # exposes.
    print("Extraction time = %gms\n" % (tt / (cv.getTickFrequency() * 1000.)))

    # create a correspond Mat
    # Tall single-channel canvas, zero-initialised: the object image will be
    # stacked above the scene image.
    correspond = cv.Mat(image.rows + object.rows, image.cols, cv.CV_8UC1,
                        cv.Scalar(0))

    # copy the images to correspond -- numpy way
    correspond[:object.rows, :object.cols] = object[:]
    correspond[object.rows:, :image.cols] = image[:]
Example #4
0
        # Input image(s) failed to load: report usage and bail out.
        print("Can not load %s and/or %s\n" \
            "Usage: find_obj [<object_filename> <scene_filename>]\n" \
            % (object_filename, scene_filename))
        exit(-1)
    # Allocate a single-channel Mat the same size as the colour object
    # image, then convert into it.
    # NOTE(review): `object` shadows the Python builtin — kept for parity
    # with the original C sample.
    object = cv.Mat(object_color.size(), cv.CV_8UC1)
    cv.cvtColor( object_color, object, cv.CV_BGR2GRAY )
    
    # corners
    # Four corners of the object image (TL, TR, BR, BL); dst_corners will
    # receive their projected positions in the scene.
    # NOTE(review): [cv.Point()]*4 aliases ONE Point four times; safe only
    # if entries are reassigned rather than mutated in place.
    src_corners = [cv.Point(0,0), cv.Point(object.cols, 0), cv.Point(object.cols, object.rows), cv.Point(0, object.rows)]
    dst_corners = [cv.Point()]*4

    # find keypoints on both images
    # SURF: hessian threshold 500, 4 octaves, 2 layers, extended (128-dim)
    # descriptors — presumed parameter order; TODO confirm against binding.
    surf = cv.SURF(500, 4, 2, True)
    # Empty mask: detect over the whole image.
    mask = cv.Mat()
    tt = float(cv.getTickCount())    
    objectKeypoints = cv.vector_KeyPoint()
    objectDescriptors = surf(object, mask, objectKeypoints)
    print("Object Descriptors: %d\n" % len(objectKeypoints))
    imageKeypoints = cv.vector_KeyPoint()
    imageDescriptors = surf(image, mask, imageKeypoints)
    print("Image Descriptors: %d\n" % len(imageKeypoints))
    tt = float(cv.getTickCount()) - tt
    # NOTE(review): (freq * 1000) gives ms only under the legacy tick API
    # (ticks per microsecond); confirm which semantics this binding uses.
    print("Extraction time = %gms\n" % (tt/(cv.getTickFrequency()*1000.)))
    
    # create a correspond Mat
    # Tall single-channel canvas (object stacked above scene), zeroed.
    correspond = cv.Mat(image.rows+object.rows, image.cols, cv.CV_8UC1, cv.Scalar(0))
    
    # copy the images to correspond -- numpy way
    correspond[:object.rows, :object.cols] = object[:]
    correspond[object.rows:, :image.cols] = image[:]