# import the necessary packages -- the BagOfVisualWords import path below is
# an assumption; adjust it to wherever the class lives in your project
from pyimagesearch.ir import BagOfVisualWords
from sklearn.metrics import pairwise
import numpy as np

# randomly generate the vocabulary/cluster centers along with the feature
# vectors -- we'll generate 10 feature vectors containing 6 real-valued
# entries, along with a codebook containing 3 'visual words'
np.random.seed(42)
vocab = np.random.uniform(size=(3, 6))
features = np.random.uniform(size=(10, 6))
print("[INFO] vocabulary:\n{}\n".format(vocab))
print("[INFO] features:\n{}\n".format(features))

# initialize our bag of visual words histogram -- it will contain 3 entries,
# one for each of the possible visual words
hist = np.zeros((3,), dtype="int32")

# loop over the individual feature vectors
for (i, f) in enumerate(features):
	# compute the Euclidean distance between the current feature vector
	# and the 3 visual words; then, find the index of the visual word
	# with the smallest distance
	D = pairwise.euclidean_distances(f.reshape(1, -1), Y=vocab)
	j = np.argmin(D)

	print("[INFO] Closest visual word to feature #{}: {}".format(i, j))
	hist[j] += 1
	print("[INFO] Updated histogram: {}".format(hist))

# now apply our `BagOfVisualWords` class to make this process super
# speedy
bovw = BagOfVisualWords(vocab, sparse=False)
hist = bovw.describe(features)
print("[INFO] BOVW histogram: {}".format(hist))
Example #2
# initialize the keypoint detector, local invariant descriptor, descriptor
# pipeline, distance metric, and inverted document frequency array
detector = cv2.FeatureDetector_create("SURF")
descriptor = RootSIFT()
dad = DetectAndDescribe(detector, descriptor)
distanceMetric = dists.chi2_distance
idf = None

# if the path to the inverted document frequency array was supplied, then load the
# idf array and update the distance metric
if args["idf"] is not None:
    idf = cPickle.loads(open(args["idf"], "rb").read())
    distanceMetric = distance.cosine

# load the codebook vocabulary and initialize the bag-of-visual-words transformer
vocab = cPickle.loads(open(args["codebook"], "rb").read())
bovw = BagOfVisualWords(vocab)

# load the relevant queries dictionary and lookup the relevant results for the
# query image
relevant = json.loads(open(args["relevant"]).read())
queryFilename = args["query"][args["query"].rfind("/") + 1:]
queryRelevant = relevant[queryFilename]

# load the query image and process it
queryImage = cv2.imread(args["query"])
cv2.imshow("Query", imutils.resize(queryImage, width=320))
queryImage = imutils.resize(queryImage, width=320)
queryImage = cv2.cvtColor(queryImage, cv2.COLOR_BGR2GRAY)

# extract features from the query image and construct a bag-of-visual-words from it
(_, descs) = dad.describe(queryImage)
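
# for reference, the chi-squared metric assigned to `distanceMetric` above is
# commonly defined as follows -- a sketch of the standard formula, not
# necessarily the exact implementation inside the `dists` module
def chi2_distance(histA, histB, eps=1e-10):
    # accumulate the squared per-bin differences, normalized by the bin sums,
    # adding a small epsilon to prevent division by zero
    return 0.5 * np.sum(((histA - histB) ** 2) / (histA + histB + eps))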
Example #3
ap.add_argument("-c", "--codebook", required=True, help="Path to the codebook")
ap.add_argument("-i", "--idf", required=True, help="Path to inverted document frequencies array")
ap.add_argument("-r", "--relevant", required=True, help = "Path to relevant dictionary")
ap.add_argument("-q", "--query", required=True, help="Path to the query image")
args = vars(ap.parse_args())

# initialize the keypoint detector, local invariant descriptor, and descriptor
# pipeline
detector = cv2.FeatureDetector_create("SURF")
descriptor = RootSIFT()
dad = DetectAndDescribe(detector, descriptor)

# load the inverted document frequency array and codebook vocabulary, then
# initialize the bag-of-visual-words transformer
idf = cPickle.loads(open(args["idf"], "rb").read())
vocab = cPickle.loads(open(args["codebook"], "rb").read())
bovw = BagOfVisualWords(vocab)

# load the relevant queries dictionary and lookup the relevant results for the
# query image
relevant = json.loads(open(args["relevant"]).read())
queryFilename = args["query"][args["query"].rfind("/") + 1:]
queryRelevant = relevant[queryFilename]

# load the query image and process it
queryImage = cv2.imread(args["query"])
cv2.imshow("Query", imutils.resize(queryImage, width=320))
queryImage = imutils.resize(queryImage, width=320)
queryImage = cv2.cvtColor(queryImage, cv2.COLOR_BGR2GRAY)

# extract features from the query image and construct a bag-of-visual-words from it
(queryKps, queryDescs) = dad.describe(queryImage)
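
# a hypothetical next step, not shown in this excerpt: quantize the query
# descriptors into a BOVW histogram and apply the idf weighting before
# comparing against the database -- the sparse `.toarray()` call and treating
# `idf` as a per-visual-word weight vector are assumptions
queryHist = bovw.describe(queryDescs).toarray()[0] * idf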
Example #4
ap.add_argument(
    "-b",
    "--bovw-db",
    required=True,
    help="Path to where the bag-of-visual-words database will be stored")
ap.add_argument(
    "-s",
    "--max-buffer-size",
    type=int,
    default=500,
    help="Maximum buffer size for # of features to be stored in memory")
args = vars(ap.parse_args())

# load the codebook vocabulary and initialize the bag-of-visual-words transformer
vocab = cPickle.loads(open(args["codebook"], "rb").read())
bovw = BagOfVisualWords(vocab)

# open the features database and initialize the bag-of-visual-words indexer
featuresDB = h5py.File(args["features_db"], mode="r")
bi = BOVWIndexer(bovw.codebook.shape[0],
                 args["bovw_db"],
                 estNumImages=featuresDB["image_ids"].shape[0],
                 maxBufferSize=args["max_buffer_size"])

# loop over the image IDs and index
for (i, (imageID, offset)) in enumerate(
        zip(featuresDB["image_ids"], featuresDB["index"])):
    # check to see if progress should be displayed
    if i > 0 and i % 10 == 0:
        bi._debug("processed {} images".format(i), msgType="[PROGRESS]")
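
    # a hypothetical remainder of the loop body -- the (start, end) layout of
    # featuresDB["index"], the two leading (x, y) keypoint columns, and the
    # indexer's add() method are assumptions based on the names above
    (start, end) = (int(offset[0]), int(offset[1]))
    hist = bovw.describe(featuresDB["features"][start:end][:, 2:])
    bi.add(hist)

# once the loop completes, flush any buffered histograms and close the
# databases -- finish() is likewise an assumed method name
bi.finish()
featuresDB.close()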
Example #5
ap.add_argument("-l",
                "--levels",
                type=int,
                default=2,
                help="# of pyramid levels to generate")
ap.add_argument("-m", "--model", required=True, help="Path to the classifier")
args = vars(ap.parse_args())

# initialize the keypoint detector, local invariant descriptor, and the descriptor
# pipeline
detector = cv2.FeatureDetector_create("GFTT")
descriptor = RootSIFT()
dad = DetectAndDescribe(detector, descriptor)

# load the codebook vocabulary and initialize the bag-of-visual-words transformer
# and the pyramid of bag-of-visual-words descriptor
vocab = cPickle.loads(open(args["codebook"], "rb").read())
bovw = BagOfVisualWords(vocab)
pbow = PBOW(bovw, numLevels=args["levels"])

# load the classifier and grab the list of image paths
model = cPickle.loads(open(args["model"], "rb").read())
imagePaths = list(paths.list_images(args["images"]))

# initialize the list of true labels and predicted labels
print("[INFO] extracting features from testing data...")
trueLabels = []
predictedLabels = []

# loop over the image paths
for imagePath in imagePaths:
    # extract the true label from the image path and update the true labels list
    trueLabels.append(imagePath.split("/")[-2])
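
    # a hypothetical remainder of the loop -- the PBOW.describe signature and
    # the classifier's expected input shape are assumptions: load the image,
    # describe it, build the pyramid histogram, and classify
    image = cv2.cvtColor(cv2.imread(imagePath), cv2.COLOR_BGR2GRAY)
    (kps, descs) = dad.describe(image)
    hist = pbow.describe(image.shape[1], image.shape[0], kps, descs)
    predictedLabels.append(model.predict(hist)[0])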
Example #6
# load the histogram database -- each row is already a BOVW histogram
for hist in bovwDB["bovw"]:
    data.append(hist)
print("[INFO] histograms of dataset loaded...")

# initialize the detector and descriptor (same as index_features.py)
detector = cv2.xfeatures2d.SURF_create()
descriptor = RootSIFT()
dad = DetectAndDescribe(detector, descriptor)

print("[INFO] detector initialized...")

# load the codebook vocabulary and initialize the bag-of-visual-words transformer (same as extract_bovw)
vocab = cPickle.loads(open(codebook, "rb").read())
bovw = BagOfVisualWords(vocab)
print("[INFO] bovw created...")

# parse the arguments
if not DEBUG:
    # construct the argument parser and parse the arguments
    ap = argparse.ArgumentParser()
    ap.add_argument("-i",
                    "--image",
                    required=True,
                    help="Path to image to find")
    ap.add_argument("-b",
                    "--nb-bins",
                    required=True,
                    type=int,
                    help="Number of categories of watches")