Code Example #1
def job():
    # initialize the keypoint detector, local invariant descriptor, and the
    # descriptor pipeline
    detector = FeatureDetector_create("SURF")
    descriptor = DescriptorExtractor_create("RootSIFT")
    dad = DetectAndDescribe(detector, descriptor)

    # loop over the lines of input
    for line in Mapper.parse_input(sys.stdin):
        # parse the line into the image ID, path, and image
        imageID, path, image = Mapper.handle_input(line.strip())

        # describe the image and initialize the output list
        image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        image = imutils.resize(image, width=320)
        (kps, descs) = dad.describe(image)
        output = []

        # loop over the keypoints and descriptors
        for kp, vec in zip(kps, descs):
            # update the output list as a 2-tuple of the keypoint
            # (x,y)-coordinates and the feature vector
            output.append((kp.tolist(), vec.tolist()))

        # output the row to the reducer
        Mapper.output_row(imageID, path, output, sep="\t")
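
Since the mapper converts each keypoint and descriptor to plain Python lists before emitting them, the reducer side can rebuild NumPy arrays from every record it receives. A minimal sketch of that reconstruction, assuming a record arrives as the list of (coordinates, vector) 2-tuples built above:

import numpy as np

# rebuild the keypoint coordinates and feature vectors from one
# deserialized mapper record (here reusing the `output` list from above)
kps = np.array([coords for (coords, vec) in output])
descs = np.array([vec for (coords, vec) in output])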
Code Example #2
ap.add_argument("-a",
                "--approx-images",
                type=int,
                default=500,
                help="Approximate # of images in the dataset")
ap.add_argument(
    "-b",
    "--max-buffer-size",
    type=int,
    default=50000,
    help="Maximum buffer size for # of features to be stored in memory")
ap.add_argument("-z", "--feature-detector", default="GFTT")
ap.add_argument("-e", "--extractor", default="BRISK")
args = vars(ap.parse_args())

# initialize the keypoint detector, local invariant descriptor, and the descriptor
# pipeline
detector = FeatureDetector_create(args["feature_detector"])
descriptor = DescriptorExtractor_create(args["extractor"])
dad = DetectAndDescribe(detector, descriptor)

print("Using {} feature detector".format(args["feature_detector"]))
print("Using {} descriptor extractor".format(args["extractor"]))

# initialize the feature indexer, then grab the image paths and randomly shuffle
# them
fi = FeatureIndexer(args["features_db"],
                    estNumImages=args["approx_images"],
                    maxBufferSize=args["max_buffer_size"],
                    verbose=True)
imagePaths = list(paths.list_images(args["dataset"]))
random.shuffle(imagePaths)

# loop over the images in the dataset
Code Example #3
                help="Path to the bag-of-visual-words database")
ap.add_argument("-c", "--codebook", required=True, help="Path to the codebook")
ap.add_argument("-i",
                "--idf",
                type=str,
                help="Path to inverted document frequencies array")
ap.add_argument("-r",
                "--relevant",
                required=True,
                help="Path to relevant dictionary")
args = vars(ap.parse_args())

# Initialize the keypoint detector, local invariant descriptor, descriptor pipeline,
# distance metric, and inverted document frequency array
detector = FeatureDetector_create("SURF")
descriptor = DescriptorExtractor_create("RootSIFT")
dad = DetectAndDescribe(detector, descriptor)
distanceMetric = dists.chi2_distance
idf = None

# If the path to the inverted document frequency array was supplied, then load the
# idf array and update the distance metric
if args["idf"] is not None:
    idf = pickle.loads(open(args["idf"], "rb").read())
    distanceMetric = distance.cosine

# Load the codebook vocabulary and initialize the bag-of-visual-words transformer
vocab = pickle.loads(open(args["codebook"], "rb").read())
bovw = BagOfVisualWords(vocab)

# Connect to Redis and initialize the searcher
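
The excerpt ends at the comment above, so the actual connection code is not shown. A minimal sketch of that final step, assuming the redis-py client; the Searcher class and its constructor arguments are hypothetical stand-ins, not code from the original script:

import redis

# connect to the local Redis instance holding the inverted index
# (host, port, and db values are assumptions)
redisDB = redis.Redis(host="localhost", port=6379, db=0)

# initialize the searcher; `Searcher` and its signature are hypothetical
searcher = Searcher(redisDB, idf=idf, distanceMetric=distanceMetric)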
Code Example #4
ap.add_argument(
    "-b",
    "--max-buffer-size",
    type=int,
    default=50000,
    help="Maximum buffer size for # of features to be stored in memory")
args = vars(ap.parse_args())

##################
# CREATE KEYPOINT FEATURES
##################
print('\n\n STARTING FEATURE EXTRACTION \n\n')

# initialize the keypoint detector, local invariant descriptor, and the descriptor
# pipeline
detector = FeatureDetector_create('GFTT')

# RootSIFT was originally used in results presented in blog/README
# ORB was substituted since RootSIFT is not currently shipped with opencv-contrib-python
descriptor = DescriptorExtractor_create('ORB')
dad = DetectAndDescribe(detector, descriptor)

# initialize the feature indexer, then grab the image paths and sort
fi = FeatureIndexer('{}/features.hdf5'.format(args["output"]),
                    estNumImages=args["approx_images"],
                    maxBufferSize=args["max_buffer_size"],
                    verbose=True)
imagePaths = list(paths.list_images(args["dataset"]))
imagePaths.sort()

# loop over the images in the dataset
for (i, imagePath) in enumerate(imagePaths):
    # check to see if progress should be displayed
    if i > 0 and i % 50 == 0:
        fi._debug("processed {} images".format(i), msgType="[PROGRESS]")
Code Example #5
# author:    Adrian Rosebrock
# website:   http://www.pyimagesearch.com

# USAGE
# python feature_demo.py

# import the necessary packages
from imutils.feature import DescriptorExtractor_create
from imutils.feature import FeatureDetector_create
from imutils.feature import corners_to_keypoints

# ensure the keypoint detection and local invariant descriptors are
# working properly
detector = FeatureDetector_create("SIFT")
extractor = DescriptorExtractor_create("SIFT")
print(detector)
print(extractor)
print(corners_to_keypoints)
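
Beyond printing the objects, a quick way to verify the pipeline end to end is to run it on an actual image. A minimal sketch, assuming an image file at the hypothetical path example.jpg:

import cv2

# load an image, convert it to grayscale, then detect keypoints and
# compute their SIFT descriptors
image = cv2.imread("example.jpg")
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
kps = detector.detect(gray)
(kps, descs) = extractor.compute(gray, kps)
print("# of keypoints detected: {}".format(len(kps)))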
Code Example #6
    "-b",
    "--max-buffer-size",
    type=int,
    default=50000,
    help="Maximum buffer size for # of features to be stored in memory")
args = vars(ap.parse_args())

##################
# CREATE KEYPOINT FEATURES
##################
print('\n\n STARTING FEATURE EXTRACTION \n\n')

# initialize the keypoint detector, local invariant descriptor, and the descriptor
# pipeline
detector = FeatureDetector_create('GFTT')
descriptor = DescriptorExtractor_create('RootSIFT')
dad = DetectAndDescribe(detector, descriptor)

# initialize the feature indexer, then grab the image paths and sort
fi = FeatureIndexer('{}/features.hdf5'.format(args["output"]),
                    estNumImages=args["approx_images"],
                    maxBufferSize=args["max_buffer_size"],
                    verbose=True)
imagePaths = list(paths.list_images(args["dataset"]))
imagePaths.sort()

# loop over the images in the dataset
for (i, imagePath) in enumerate(imagePaths):
    # check to see if progress should be displayed
    if i > 0 and i % 50 == 0:
        fi._debug("processed {} images".format(i), msgType="[PROGRESS]")
Code Example #7
ap.add_argument("-a",
                "--approx-images",
                type=int,
                default=500,
                help="Approximate # of images in the dataset")
ap.add_argument(
    "-b",
    "--max-buffer-size",
    type=int,
    default=50000,
    help="Maximum buffer size for # of features to be stored in memory")
args = vars(ap.parse_args())

# Initialize the keypoint detector, local invariant descriptor, and the descriptor pipeline
detector = FeatureDetector_create("BRISK")
descriptor = DescriptorExtractor_create("BRISK")
dad = DetectAndDescribe(detector, descriptor)

# Initialize the feature indexer
fi = FeatureIndexer(args["features_db"],
                    estNumImages=args["approx_images"],
                    maxBufferSize=args["max_buffer_size"],
                    verbose=True)
# Loop over the images in the dataset
for (i, imagePath) in enumerate(sorted(paths.list_images(args["dataset"]))):
    # Check to see if progress should be displayed
    if i > 0 and i % 10 == 0:
        fi._debug("processed {} images".format(i), msgType="[PROGRESS]")

    # Extract the image filename (i.e. the unique image ID) from the image
    # path, then load the image itself
    filename = imagePath[imagePath.rfind("\\") + 1:]
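
The excerpt cuts off mid-loop. A plausible continuation, following the describe-then-index pattern from Example #1; the add() and finish() calls are assumptions about the FeatureIndexer API rather than code from the original script:

    # load the image and prepare it for description
    image = cv2.imread(imagePath)
    image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    image = imutils.resize(image, width=320)

    # describe the image
    (kps, descs) = dad.describe(image)

    # if no features were extracted, skip the image
    if kps is None or descs is None:
        continue

    # index the features (add() is an assumed method name)
    fi.add(filename, kps, descs)

# write any remaining buffered features to disk (finish() is an assumed
# method name)
fi.finish()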