def job():
    # initialize the keypoint detector, local invariant descriptor, and the
    # detect-and-describe pipeline
    detector = cv2.FeatureDetector_create("SURF")
    descriptor = RootSIFT()
    dad = DetectAndDescribe(detector, descriptor)

    # loop over the lines of input
    for line in Mapper.parse_input(sys.stdin):
        # parse the line into the image ID, path, and image
        (imageID, path, image) = Mapper.handle_input(line.strip())

        # describe the image and initialize the output list
        image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        image = imutils.resize(image, width=320)
        (kps, descs) = dad.describe(image)
        output = []

        # loop over the keypoints and descriptors
        for (kp, vec) in zip(kps, descs):
            # update the output list as a 2-tuple of the keypoint (x, y)-coordinates
            # and the feature vector
            output.append((kp.tolist(), vec.tolist()))

        # output the row to the reducer
        Mapper.output_row(imageID, path, output, sep="\t")
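
The snippet above assumes a RootSIFT extractor and a DetectAndDescribe wrapper defined elsewhere. A minimal sketch of what those two helpers might look like, matching the OpenCV 2.4-era API used above (both bodies are illustrative, not the original implementations):

import numpy as np
import cv2

class RootSIFT:
    # sketch: compute SIFT descriptors, L1-normalize each one, then take
    # the element-wise square root (the RootSIFT trick)
    def __init__(self):
        self.extractor = cv2.DescriptorExtractor_create("SIFT")

    def compute(self, image, kps, eps=1e-7):
        (kps, descs) = self.extractor.compute(image, kps)
        if len(kps) == 0:
            return ([], None)
        descs /= (descs.sum(axis=1, keepdims=True) + eps)
        descs = np.sqrt(descs)
        return (kps, descs)

class DetectAndDescribe:
    # sketch: detect keypoints, describe them, and return the keypoint
    # (x, y)-coordinates alongside the feature vectors
    def __init__(self, detector, descriptor):
        self.detector = detector
        self.descriptor = descriptor

    def describe(self, image):
        kps = self.detector.detect(image)
        if len(kps) == 0:
            return (None, None)
        (kps, descs) = self.descriptor.compute(image, kps)
        kps = np.float32([kp.pt for kp in kps])
        return (kps, descs)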
Example 2
fi = FeatureIndexer(args["features_db"], estNumImages=args["approx_images"],
    maxBufferSize=args["max_buffer_size"], verbose=True)

# grab the image paths and randomly shuffle them
imagePaths = list(paths.list_images(args["dataset"]))
random.shuffle(imagePaths)

# loop over the images in the dataset
for (i, imagePath) in enumerate(imagePaths):
    # check to see if progress should be displayed
    if i > 0 and i % 10 == 0:
        fi._debug("processed {} images".format(i), msgType="[PROGRESS]")
    # load the image and pre-process it
    image = cv2.imread(imagePath)
    image = imutils.resize(image, width=320)
    image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    # describe the image
    (kps, descs) = dad.describe(image)
    # if either the keypoints or descriptors are None, then ignore the image
    if kps is None or descs is None:
        continue

    # extract the image filename and label from the path, then index the features
    (label, filename) = imagePath.split("/")[-2:]
    k = "{}:{}".format(label, filename)
    fi.add(k, image.shape, kps, descs)

# finish the indexing process
fi.finish()
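
FeatureIndexer is also assumed above; its estNumImages/maxBufferSize arguments suggest that feature rows are buffered in memory and flushed to disk in batches. A rough sketch of that idea with h5py (the dataset names and layout are assumptions, not the original class):

import h5py
import numpy as np

class FeatureIndexer:
    # sketch: buffer (x, y, descriptor) rows per image and flush them to a
    # resizable HDF5 dataset once maxBufferSize rows accumulate
    def __init__(self, dbPath, estNumImages=500, maxBufferSize=50000,
                 verbose=True):
        # estNumImages could drive preallocation; this sketch ignores it
        self.db = h5py.File(dbPath, mode="w")
        self.imageIDs = []
        self.index = []
        self.buffer = []
        self.bufferSize = 0
        self.maxBufferSize = maxBufferSize
        self.totalFeatures = 0
        self.verbose = verbose
        self.featuresDB = None

    def add(self, imageID, shape, kps, descs):
        # stack the (x, y)-coordinates next to the descriptors and record
        # the [start, end) row offsets belonging to this image
        rows = np.hstack([kps, descs])
        self.imageIDs.append(imageID)
        self.index.append((self.totalFeatures, self.totalFeatures + len(rows)))
        self.totalFeatures += len(rows)
        self.buffer.append(rows)
        self.bufferSize += len(rows)
        if self.bufferSize >= self.maxBufferSize:
            self._flush()

    def _flush(self):
        # write the buffered rows, growing the dataset on later flushes
        rows = np.vstack(self.buffer)
        if self.featuresDB is None:
            self.featuresDB = self.db.create_dataset("features",
                rows.shape, maxshape=(None, rows.shape[1]), dtype="float")
            self.featuresDB[:] = rows
        else:
            offset = self.featuresDB.shape[0]
            self.featuresDB.resize(offset + len(rows), axis=0)
            self.featuresDB[offset:] = rows
        (self.buffer, self.bufferSize) = ([], 0)

    def _debug(self, msg, msgType="[INFO]"):
        if self.verbose:
            print("{} {}".format(msgType, msg))

    def finish(self):
        # flush any remaining rows and write the bookkeeping datasets
        if self.bufferSize > 0:
            self._flush()
        self.db.create_dataset("image_ids",
            data=np.array(self.imageIDs, dtype="S"))
        self.db.create_dataset("index", data=np.array(self.index))
        self.db.close()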
Example 3
bovw = BagOfVisualWords(vocab)

# load the relevant queries dictionary and lookup the relevant results for the
# query image
relevant = json.loads(open(args["relevant"]).read())
queryFilename = args["query"][args["query"].rfind("/") + 1:]
queryRelevant = relevant[queryFilename]

# load the query image and process it
queryImage = cv2.imread(args["query"])
cv2.imshow("Query", imutils.resize(queryImage, width=320))
queryImage = imutils.resize(queryImage, width=320)
queryImage = cv2.cvtColor(queryImage, cv2.COLOR_BGR2GRAY)

# extract features from the query image and construct a bag-of-visual-words from it
(_, descs) = dad.describe(queryImage)
hist = bovw.describe(descs).tocoo()

# connect to redis and perform the search
redisDB = Redis(host="localhost", port=6379, db=0)
searcher = Searcher(redisDB,
                    args["bovw_db"],
                    args["features_db"],
                    idf=idf,
                    distanceMetric=distanceMetric)
sr = searcher.search(hist, numResults=20)
print("[INFO] search took: {:.2f}s".format(sr.search_time))

# initialize the results montage
montage = ResultsMontage((240, 320), 5, 20)
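
A sketch of how the montage might then be filled, assuming each entry of sr.results is a (score, resultID) pair, that ResultsMontage exposes an addResult method and a montage image, and that args["dataset"] points at the image directory (all assumptions here):

# loop over the ranked results and add each to the montage
for (i, (score, resultID)) in enumerate(sr.results):
    # load the result image and flag whether it is a relevant hit
    result = cv2.imread("{}/{}".format(args["dataset"], resultID))
    montage.addResult(result, text="#{}".format(i + 1),
        highlight=resultID in queryRelevant)

# display the montage of results
cv2.imshow("Results", imutils.resize(montage.montage, height=700))
cv2.waitKey(0)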
Example 4
bovw = BagOfVisualWords(vocab)

# load the relevant queries dictionary and lookup the relevant results for the
# query image
relevant = json.loads(open(args["relevant"]).read())
queryFilename = args["query"][args["query"].rfind("/") + 1:]
queryRelevant = relevant[queryFilename]

# load the query image and process it
queryImage = cv2.imread(args["query"])
cv2.imshow("Query", imutils.resize(queryImage, width=320))
queryImage = imutils.resize(queryImage, width=320)
queryImage = cv2.cvtColor(queryImage, cv2.COLOR_BGR2GRAY)

# extract features from the query image and construct a bag-of-visual-words from it
(queryKps, queryDescs) = dad.describe(queryImage)
queryHist = bovw.describe(queryDescs).tocoo()

# connect to redis and perform the search
redisDB = Redis(host="localhost", port=6379, db=0)
searcher = Searcher(redisDB, args["bovw_db"], args["features_db"], idf=idf,
    distanceMetric=distance.cosine)
sr = searcher.search(queryHist, numResults=20)
print("[INFO] search took: {:.2f}s".format(sr.search_time))

# spatially verify the results
spatialVerifier = SpatialVerifier(args["features_db"], idf, vocab)
sv = spatialVerifier.rerank(queryKps, queryDescs, sr, numResults=20)
print("[INFO] spatial verification took: {:.2f}s".format(sv.search_time))

# initialize the results montage
montage = ResultsMontage((240, 320), 5, 20)
Example 5
    for (patch_id, (x, y, window_gray, window_hsv)) in enumerate(
            sliding_window_double(img_gray,
                                  img_hsv,
                                  stepSize=patch_size,
                                  windowSize=(win_size, win_size))):
        # Find x and y position in the patch grid
        patch_x = patch_id % patch_width
        patch_y = patch_id // patch_width

        # Ensure patch size
        if flag_resize_patch:
            window_gray = imutils.resize(window_gray, width=100)
            window_hsv = imutils.resize(window_hsv, width=100)

        # Describe gray patch
        (kps, descs) = dad.describe(window_gray)
        if kps is None or descs is None:
            continue
        hist = bovw.describe(descs)
        hist /= hist.sum()
        hist = hist.toarray()

        # Get lbp descriptions
        hist4 = desc4.describe(window_gray)
        hist8 = desc8.describe(window_gray)

        hist4 /= hist4.sum()
        hist8 /= hist8.sum()

        hist4 = hist4.reshape(1, -1)
        hist8 = hist8.reshape(1, -1)
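
sliding_window_double is assumed above; a minimal sketch, assuming it slides one window across two aligned images in lockstep and yields the top-left corner plus the patch from each image:

def sliding_window_double(image_a, image_b, stepSize, windowSize):
    # walk the grid row by row so patch_id % patch_width recovers the column
    for y in range(0, image_a.shape[0] - windowSize[1] + 1, stepSize):
        for x in range(0, image_a.shape[1] - windowSize[0] + 1, stepSize):
            yield (x, y,
                   image_a[y:y + windowSize[1], x:x + windowSize[0]],
                   image_b[y:y + windowSize[1], x:x + windowSize[0]])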
Example 6
print("[INFO] extracting features from testing data...")
trueLabels = []
predictedLabels = []

# loop over the image paths
for imagePath in imagePaths:
    # extract the true label from the image path and update the true labels list
    trueLabels.append(imagePath.split("/")[-2])

    # load the image and prepare it for description
    image = cv2.imread(imagePath)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    gray = imutils.resize(gray, width=min(320, image.shape[1]))

    # describe the image and classify it
    (kps, descs) = dad.describe(gray)
    hist = pbow.describe(gray.shape[1], gray.shape[0], kps, descs)
    prediction = model.predict(hist)[0]
    predictedLabels.append(prediction)

# show a classification report
print(classification_report(trueLabels, predictedLabels))

# loop over a sample of the testing images
for i in np.random.choice(np.arange(0, len(imagePaths)),
                          size=(20, ),
                          replace=False):
    # load the image and show the prediction
    image = cv2.imread(imagePaths[i])

    # show the prediction
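    # a plausible completion (assumption): annotate the image with its
    # predicted label and display it until a key is pressed
    cv2.putText(image, predictedLabels[i], (10, 30),
        cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 255, 0), 2)
    cv2.imshow("Image", image)
    cv2.waitKey(0)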
Example 7
#**********2.0 process the input image**********
#-----------------------------------------------

#load the image
queryWatch = cv2.imread(str(queryWatchFile))
print(str(queryWatchFile))

#normalize the image (same as in index_features)
queryWatch = imutils.resize(queryWatch, width=IMAGE_WIDTH)
cv2.imshow("Queried Watch", queryWatch)
cv2.waitKey(3)
queryWatch = cv2.cvtColor(queryWatch, cv2.COLOR_BGR2GRAY)

#describe the image
(kps, descs) = dad.describe(queryWatch)
#classify the descriptors into a histogram
histQueryWatch = bovw.describe(descs)
#grab the label corresponding to this histogram
labelQueryWatch = clt.predict(histQueryWatch)

#*********3.0 show similar watches**********
#-------------------------------------------
#show all similar watches found in the catalog
#grab all image paths that are assigned to the query watch label
labelPathsIDs = np.where(labels == labelQueryWatch)

# loop over the image paths that belong to the current label
for ID in labelPathsIDs[0]:
    # load the image and display it
    imageFile = basePathWatches + str(featuresDB["image_ids"][ID])
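    # a plausible completion (assumption): read the catalog image and show it
    similarWatch = cv2.imread(imageFile)
    cv2.imshow("Similar Watch", similarWatch)
    cv2.waitKey(0)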