# import the necessary packages (these examples use the OpenCV 2.4-era
# factory API such as cv2.FeatureDetector_create; RootSIFT and
# DetectAndDescribe follow the pyimagesearch package layout used below, and
# the Mapper helper is assumed to live in the project's Hadoop utilities)
from pyimagesearch.descriptors import DetectAndDescribe
from pyimagesearch.descriptors import RootSIFT
from pyimagesearch.hadoop.mapper import Mapper
import imutils
import sys
import cv2

def job():
    # initialize the keypoint detector, local invariant descriptor, and the
    # descriptor pipeline
    detector = cv2.FeatureDetector_create("SURF")
    descriptor = RootSIFT()
    dad = DetectAndDescribe(detector, descriptor)

    # loop over the lines of input
    for line in Mapper.parse_input(sys.stdin):
        # parse the line into the image ID, path, and image
        (imageID, path, image) = Mapper.handle_input(line.strip())

        # describe the image and initialize the output list
        image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        image = imutils.resize(image, width=320)
        (kps, descs) = dad.describe(image)
        output = []

        # loop over the keypoints and descriptors
        for (kp, vec) in zip(kps, descs):
            # update the output list as a 2-tuple of the keypoint (x, y)-coordinates
            # and the feature vector
            output.append((kp.tolist(), vec.tolist()))

        # output the row to the reducer
        Mapper.output_row(imageID, path, output, sep="\t")
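
# run the mapper when the file is executed by Hadoop Streaming (a minimal
# sketch; the invocation below is an illustration, not part of the original
# example, and the script/path names are assumptions)
#
#   hadoop jar hadoop-streaming.jar \
#       -mapper feature_mapper.py -reducer feature_reducer.py \
#       -input image_list.txt -output features/
if __name__ == "__main__":
    job()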
Example #2
# USAGE
# python extract_rootsift.py --image jp_01.png

# import the necessary packages
from __future__ import print_function
from pyimagesearch.descriptors import RootSIFT
import argparse
import cv2

# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required=True, help="Path to the image")
args = vars(ap.parse_args())

# initialize the keypoint detector and local invariant descriptor
detector = cv2.FeatureDetector_create("SIFT")
extractor = RootSIFT()

# load the input image, convert it to grayscale, detect keypoints, and then
# extract local invariant descriptors
image = cv2.imread(args["image"])
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
kps = detector.detect(gray)
(kps, descs) = extractor.compute(gray, kps)

# show the shape of the keypoints and local invariant descriptors array
print("[INFO] # of keypoints detected: {}".format(len(kps)))
print("[INFO] feature vector shape: {}".format(descs.shape))
Example #3

# import the necessary packages (the pyimagesearch module paths follow the
# layout used above; FeatureIndexer is assumed to ship with the same project)
from pyimagesearch.descriptors import DetectAndDescribe
from pyimagesearch.descriptors import RootSIFT
from pyimagesearch.indexer import FeatureIndexer
from imutils import paths
import argparse
import imutils
import random
import cv2

# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-d", "--dataset", required=True,
  help="Path to the directory that contains the images to be indexed")
ap.add_argument("-f", "--features-db", required=True,
  help="Path to where the features database will be stored")
ap.add_argument("-a", "--approx-images", type=int, default=250,
  help="Approximate # of images in the dataset")
ap.add_argument("-b", "--max-buffer-size", type=int, default=50000,
  help="Maximum buffer size for # of features to be stored in memory")
args = vars(ap.parse_args())

# initialize the keypoint detector, local invariant descriptor, and the descriptor
# pipeline
detector = cv2.FeatureDetector_create("GFTT")
descriptor = RootSIFT()
dad = DetectAndDescribe(detector, descriptor)

# initialize the feature indexer
fi = FeatureIndexer(args["features_db"], estNumImages=args["approx_images"],
  maxBufferSize=args["max_buffer_size"], verbose=True)

# grab the image paths and randomly shuffle them
imagePaths = list(paths.list_images(args["dataset"]))
random.shuffle(imagePaths)

# loop over the images in the dataset
for (i, imagePath) in enumerate(imagePaths):
    # check to see if progress should be displayed
    if i > 0 and i % 10 == 0:
        fi._debug("processed {} images".format(i), msgType="[PROGRESS]")
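
    # the rest of the loop body is cut off in this listing; a minimal sketch
    # of the per-image work, assuming the DetectAndDescribe pipeline above and
    # a FeatureIndexer with add/finish methods (an assumption)
    image = cv2.imread(imagePath)
    image = imutils.resize(image, width=320)
    image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    (kps, descs) = dad.describe(image)
    fi.add(imagePath, kps, descs)

# write any features still buffered in memory to disk
fi.finish()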
Example #4

# import the necessary packages (the top of this example is truncated in the
# original listing, so the imports and the start of the argument parser are
# reconstructed from the options referenced below; the short flags and
# defaults are assumptions)
from pyimagesearch.descriptors import RootSIFT
import argparse
import cv2

# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-f", "--first", required=True,
                help="Path to the first image")
ap.add_argument("-s", "--second", required=True,
                help="Path to the second image")
ap.add_argument("-d", "--detector", type=str, default="SIFT",
                help="Keypoint detector to use")
ap.add_argument("-e", "--extractor", type=str, default="RootSIFT",
                help="Feature extractor to use")
ap.add_argument("-m",
                "--matcher",
                type=str,
                default="BruteForce",
                help="Feature matcher to use")
ap.add_argument(
    "-v",
    "--visualize-each",
    type=int,
    default=-1,
    help="Whether or not each match should be visualized individually")
args = vars(ap.parse_args())

# initialize the keypoint detector and the descriptor matcher
detector = cv2.FeatureDetector_create(args["detector"])
matcher = cv2.DescriptorMatcher_create(args["matcher"])

if args["extractor"] == "RootSIFT":
    extractor = RootSIFT()

else:
    extractor = cv2.DescriptorExtractor_create(args["extractor"])

# load the two input images and convert them to grayscale
imageA = cv2.imread(args["first"])
imageB = cv2.imread(args["second"])
grayA = cv2.cvtColor(imageA, cv2.COLOR_BGR2GRAY)
grayB = cv2.cvtColor(imageB, cv2.COLOR_BGR2GRAY)

# detect keypoints in both images
kpsA = detector.detect(grayA)
kpsB = detector.detect(grayB)

# extract local invariant descriptors from the detected keypoints
(kpsA, featuresA) = extractor.compute(grayA, kpsA)
(kpsB, featuresB) = extractor.compute(grayB, kpsB)
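
# the matching step itself is cut off in this listing; a minimal sketch using
# the matcher created above, with Lowe's ratio test (the 0.8 threshold is an
# assumption, not part of the original example)
rawMatches = matcher.knnMatch(featuresA, featuresB, 2)
matches = []
for m in rawMatches:
    # keep a match only if the best distance is well below the second best
    if len(m) == 2 and m[0].distance < m[1].distance * 0.8:
        matches.append((m[0].trainIdx, m[0].queryIdx))
print("[INFO] # of matched keypoints: {}".format(len(matches)))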