Example #1
# USAGE
# python extract_rootsift.py --image jp_01.png

# import the necessary packages
from __future__ import print_function
from pyimagesearch.descriptors import RootSIFT
import argparse
import cv2

# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required=True, help="Path to the image")
args = vars(ap.parse_args())

# initialize the keypoint detector and local invariant descriptor
# (cv2.FeatureDetector_create is the OpenCV 2.4-style factory; in
# OpenCV 3.x/4.x you would use cv2.SIFT_create() instead)
detector = cv2.FeatureDetector_create("SIFT")
extractor = RootSIFT()

# load the input image, convert it to grayscale, detect keypoints, and then
# extract local invariant descriptors
image = cv2.imread(args["image"])
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
kps = detector.detect(gray)
(kps, descs) = extractor.compute(gray, kps)

# show the shape of the keypoints and local invariant descriptors array
print("[INFO] # of keypoints detected: {}".format(len(kps)))
print("[INFO] feature vector shape: {}".format(descs.shape))
Example #2
if args["extractor"] == "RootSIFT":
    extractor = RootSIFT()

else:
    extractor = cv2.DescriptorExtractor_create(args["extractor"])

# load the two input images and convert them to grayscale
imageA = cv2.imread(args["first"])
imageB = cv2.imread(args["second"])
grayA = cv2.cvtColor(imageA, cv2.COLOR_BGR2GRAY)
grayB = cv2.cvtColor(imageB, cv2.COLOR_BGR2GRAY)

# detect keypoints in both grayscale images
kpsA = detector.detect(grayA)
kpsB = detector.detect(grayB)

# extract local invariant descriptors from each set of keypoints
(kpsA, featuresA) = extractor.compute(grayA, kpsA)
(kpsB, featuresB) = extractor.compute(grayB, kpsB)

# match the keypoints using the Euclidean distance and initialize
# the list of actual matches
rawMatches = matcher.knnMatch(featuresA, featuresB, 2)
matches = []

# loop over the raw matches
for m in rawMatches:
    # ensure the distance passes David Lowe's ratio test
    if len(m) == 2 and m[0].distance < m[1].distance * 0.8:
        matches.append((m[0].trainIdx, m[0].queryIdx))

# show some diagnostic information
print("# of keypoints from first image: {}".format(len(kpsA)))