			# sort the results, keeping only the best 16, then update the visualizations dictionary
			topResults = sorted(topResults, key=lambda r:r[0])[:16]
			vis[j] = topResults

	# update the progress bar
	pbar.update(i)

# finish the progress bar and close the features database
pbar.finish()
featuresDB.close()
print("[INFO] writing visualizations to file...")

# loop over the top results
for (vwID, results) in vis.items():
	# initialize the results montage
	montage = ResultsMontage((64, 64), 4, 16)

	# loop over the results
	for (_, (x, y), imageID) in results:
		# load the current image
		p = str(imageID)
		image = cv2.imread(p)
		image = imutils.resize(image, width=320)
		(h, w) = image.shape[:2]
		
		# extract a 32x32 region surrounding the keypoint
		(startX, endX) = (max(0, x - 16), min(w, x + 16))
		(startY, endY) = (max(0, y - 16), min(h, y + 16))
		roi = image[int(startY):int(endY), int(startX):int(endX)]
		
		# add the ROI to the montage
		montage.addResult(roi)
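
All of the snippets on this page rely on a ResultsMontage helper that is never defined here. Below is a minimal sketch of what such a class might look like; the constructor signature (imageSize, imagesPerRow, numResults) and the addResult(image, text, highlight) method are inferred purely from how they are called in these examples, not taken from the original project.

import cv2
import numpy as np

class ResultsMontage:
    def __init__(self, imageSize, imagesPerRow, numResults):
        # store the tile size as (height, width) and the grid geometry
        (self.tileH, self.tileW) = imageSize
        self.imagesPerRow = imagesPerRow
        numRows = int(np.ceil(numResults / float(imagesPerRow)))

        # allocate an empty canvas large enough to hold every tile
        self.montage = np.zeros((numRows * self.tileH,
                                 imagesPerRow * self.tileW, 3), dtype="uint8")
        self.counter = 0

    def addResult(self, image, text=None, highlight=False):
        # force the image to the tile size (cv2.resize expects (width, height))
        image = cv2.resize(image, (self.tileW, self.tileH))

        # draw the optional label and highlight border on the tile
        if text is not None:
            cv2.putText(image, text, (10, 25), cv2.FONT_HERSHEY_SIMPLEX,
                        0.7, (0, 255, 0), 2)
        if highlight:
            cv2.rectangle(image, (3, 3), (self.tileW - 4, self.tileH - 4),
                          (0, 255, 0), 3)

        # paste the tile into its grid cell, then advance the counter
        (row, col) = divmod(self.counter, self.imagesPerRow)
        (y, x) = (row * self.tileH, col * self.tileW)
        self.montage[y:y + self.tileH, x:x + self.tileW] = image
        self.counter += 1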
Example #2
# extract features from the query image and construct a bag-of-visual-words from it
(_, descs) = dad.describe(queryImage)
hist = bovw.describe(descs).tocoo()

# connect to redis and perform the search
redisDB = Redis(host="localhost", port=6379, db=0)
searcher = Searcher(redisDB,
                    args["bovw_db"],
                    args["features_db"],
                    idf=idf,
                    distanceMetric=distanceMetric)
sr = searcher.search(hist, numResults=20)
print("[INFO] search took: {:.2f}s".format(sr.search_time))

# initialize the results montage
montage = ResultsMontage((240, 320), 5, 20)

# loop over the individual results
for (i, (score, resultID, resultIdx)) in enumerate(sr.results):
    # load the result image and display it
    print("[RESULT] {result_num}. {result} - {score:.2f}".format(
        result_num=i + 1, result=resultID, score=score))
    result = cv2.imread("{}/{}".format(args["dataset"], resultID))
    montage.addResult(result,
                      text="#{}".format(i + 1),
                      highlight=resultID in queryRelevant)

# show the output image of results
cv2.imshow("Results", imutils.resize(montage.montage, height=700))
cv2.waitKey(0)
searcher.finish()
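
The Searcher class and the idf and distanceMetric arguments in Example #2 come from the surrounding project and are not defined in these snippets. As a rough, hypothetical sketch only, a chi-squared distance over dense bag-of-visual-words histograms is a common choice of metric for this kind of search (call .toarray() on the sparse COO vector from bovw.describe(descs).tocoo() before comparing):

import numpy as np

def chi2_distance(histA, histB, eps=1e-10):
    # chi-squared distance between two dense BOVW histograms;
    # eps avoids division by zero for bins that are empty in both
    return 0.5 * np.sum(((histA - histB) ** 2) / (histA + histB + eps))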
Example #3
print("[INFO] loading CALTECH Faces dataset...")
(training, testing, names) = load_caltech_faces(args["dataset"],
                                                min_faces=21,
                                                flatten=True,
                                                test_size=0.25)

# compute the PCA (eigenfaces) representation of the data, then project the training data
# onto the eigenfaces subspace
print("[INFO] creating eigenfaces...")
pca = RandomizedPCA(n_components=args["num_components"], whiten=True)
trainData = pca.fit_transform(training.data)

# check to see if the PCA components should be visualized
if args["visualize"] > 0:
    # initialize the montage for the components
    montage = ResultsMontage((62, 47), 4, 16)

    # loop over the first 16 individual components
    for (i, component) in enumerate(pca.components_[:16]):
        # reshape the component to a 2D matrix, then convert the data type to an unsigned
        # 8-bit integer so it can be displayed with OpenCV
        component = component.reshape((62, 47))
        component = exposure.rescale_intensity(component,
                                               out_range=(0,
                                                          255)).astype("uint8")
        component = np.dstack([component] * 3)
        montage.addResult(component)

    # show the mean image alongside the principal component visualizations
    mean = pca.mean_.reshape((62, 47))
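
One caveat with Example #3: RandomizedPCA was deprecated in scikit-learn 0.18 and removed in 0.20. On current scikit-learn versions the equivalent is the randomized solver of the plain PCA class:

from sklearn.decomposition import PCA

# modern equivalent of RandomizedPCA(n_components=..., whiten=True);
# the literal 150 stands in for args["num_components"] from Example #3
pca = PCA(n_components=150, whiten=True, svd_solver="randomized")
trainData = pca.fit_transform(training.data)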