# cv2, sys, and imutils are assumed imported at the top of the original file,
# along with the project's Mapper, RootSIFT, and DetectAndDescribe classes
def job():
    # initialize the keypoint detector, local invariant descriptor, and the
    # descriptor pipeline
    detector = cv2.FeatureDetector_create("SURF")
    descriptor = RootSIFT()
    dad = DetectAndDescribe(detector, descriptor)

    # loop over the lines of input
    for line in Mapper.parse_input(sys.stdin):
        # parse the line into the image ID, path, and image
        (imageID, path, image) = Mapper.handle_input(line.strip())

        # describe the image and initialize the output list
        image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        image = imutils.resize(image, width=320)
        (kps, descs) = dad.describe(image)
        output = []

        # loop over the keypoints and descriptors
        for (kp, vec) in zip(kps, descs):
            # update the output list as a 2-tuple of the keypoint (x, y)-coordinates
            # and the feature vector
            output.append((kp.tolist(), vec.tolist()))

        # output the row to the reducer
        Mapper.output_row(imageID, path, output, sep="\t")
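The RootSIFT descriptor used above comes from the project's own modules. For reference, a minimal sketch of the standard RootSIFT transform (Arandjelović and Zisserman): L1-normalize each SIFT descriptor, then take the element-wise square root. The class and method names below are illustrative, not the project's implementation.

import numpy as np

class RootSIFTSketch:
    def transform(self, descs, eps=1e-7):
        # L1-normalize each descriptor, then take the element-wise square root
        descs = descs / (descs.sum(axis=1, keepdims=True) + eps)
        return np.sqrt(descs)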
Example #2
ap = argparse.ArgumentParser()
ap.add_argument("-d", "--dataset", required=True,
  help="Path to the directory that contains the images to be indexed")
ap.add_argument("-f", "--features-db", required=True,
  help="Path to where the features database will be stored")
ap.add_argument("-a", "--approx-images", type=int, default=250,
  help="Approximate # of images in the dataset")
ap.add_argument("-b", "--max-buffer-size", type=int, default=50000,
  help="Maximum buffer size for # of features to be stored in memory")
args = vars(ap.parse_args())

# initialize the keypoint detector, local invariant descriptor, and the descriptor
# pipeline
detector = cv2.FeatureDetector_create("GFTT")
descriptor = RootSIFT()
dad = DetectAndDescribe(detector, descriptor)

# initialize the feature indexer
fi = FeatureIndexer(args["features_db"], estNumImages=args["approx_images"],
  maxBufferSize=args["max_buffer_size"], verbose=True)

# grab the image paths and randomly shuffle them
imagePaths = list(paths.list_images(args["dataset"]))
random.shuffle(imagePaths)

# loop over the images in the dataset
for (i, imagePath) in enumerate(imagePaths):
    # check to see if progress should be displayed
    if i > 0 and i % 10 == 0:
        fi._debug("processed {} images".format(i), msgType="[PROGRESS]")
    # load the image and pre-process it
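    # (the snippet is truncated here; based on the comment above and the
    # preprocessing in Example #1, the loop body presumably continues along
    # these lines; the fi.add call and image-ID derivation are assumptions)
    image = cv2.imread(imagePath)
    image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    image = imutils.resize(image, width=320)

    # describe the image and skip it if no features were extracted
    (kps, descs) = dad.describe(image)
    if kps is None or descs is None:
        continue

    # index the features, keyed on the image filename (assumed API)
    fi.add(imagePath[imagePath.rfind("/") + 1:], kps, descs)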
Example #3
ap.add_argument("-i",
                "--idf",
                type=str,
                help="Path to inverted document frequencies array")
ap.add_argument("-r",
                "--relevant",
                required=True,
                help="Path to relevant dictionary")
ap.add_argument("-q", "--query", required=True, help="Path to the query image")
args = vars(ap.parse_args())

# initialize the keypoint detector, local invariant descriptor, descriptor pipeline,
# distance metric, and inverted document frequency array
detector = cv2.FeatureDetector_create("SURF")
descriptor = RootSIFT()
dad = DetectAndDescribe(detector, descriptor)
distanceMetric = dists.chi2_distance
idf = None

# if the path to the inverted document frequency array was supplied, then load the
# idf array and update the distance metric
if args["idf"] is not None:
    idf = cPickle.loads(open(args["idf"]).read())
    distanceMetric = distance.cosine
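dists.chi2_distance above is the project's own helper; for reference, a minimal sketch of the symmetric chi-squared histogram distance it presumably implements:

import numpy as np

def chi2_distance(histA, histB, eps=1e-10):
    # symmetric chi-squared distance between two histograms
    return 0.5 * np.sum(((histA - histB) ** 2) / (histA + histB + eps))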

# load the codebook vocabulary and initialize the bag-of-visual-words transformer
vocab = cPickle.loads(open(args["codebook"]).read())
bovw = BagOfVisualWords(vocab)

# load the relevant queries dictionary and lookup the relevant results for the
# query image
Example #4

# kernel_size is defined earlier in the original file (truncated here);
# build the (x, y) patch-grid coordinates
dx = list(range(kernel_size)) * kernel_size
dy = []
for i in range(kernel_size):
    # repeat each row index once per column so dy pairs with dx
    for _ in range(kernel_size):
        dy.append(i)

win_size = 100
patch_size = win_size // kernel_size
print(win_size)
print(patch_size)
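For concreteness: with kernel_size = 3, the two lists pair up as a 3x3 grid of patch coordinates,

dx = [0, 1, 2, 0, 1, 2, 0, 1, 2]   # column index of each patch
dy = [0, 0, 0, 1, 1, 1, 2, 2, 2]   # row index of each patch

so zip(dx, dy) visits the patches left to right, top to bottom, and win_size // kernel_size gives 100 // 3 = 33-pixel patches.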

# Initialize the keypoint detector, local invariant descriptor, and the descriptor
# pipeline
detector = FeatureDetector_create("GFTT")
descriptor = DescriptorExtractor_create(args["extractor"])
dad = DetectAndDescribe(detector, descriptor)

# Load inverse document frequency file
idf = pickle.loads(open("model/idf.cpickle", "rb").read())

# Load the codebook vocabulary and initialize the bag-of-visual-words transformer
vocab = pickle.loads(open(args["codebook"], "rb").read())
bovw = BagOfVisualWords(vocab)

# Load the bovw classifier
model = pickle.loads(open(args["model"], "rb").read())

# Load hue and saturation data
db_hs = h5py.File('model/hs-db.hdf5', mode='r')
hue_set = db_hs['hue'][::]
sat_set = db_hs['sat'][::]
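The hue and saturation datasets are loaded from an HDF5 file built elsewhere in the project; a hedged sketch of how such channel histograms are commonly computed with OpenCV (the project's actual binning is an assumption):

import cv2

def hue_sat_histograms(imagePath):
    # histogram the hue and saturation channels of the HSV image
    hsv = cv2.cvtColor(cv2.imread(imagePath), cv2.COLOR_BGR2HSV)
    hue = cv2.calcHist([hsv], [0], None, [180], [0, 180]).flatten()
    sat = cv2.calcHist([hsv], [1], None, [256], [0, 256]).flatten()
    return (hue, sat)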
Example #5
bovwDB = h5py.File("bovw/watchesBovw.hdf5", mode="r")
codebook = "codebook/vocab.cpickle"
data = []
IMAGE_WIDTH = 320
basePathWatches = "datasets\\"

# load the histogram DB
for hist in bovwDB["bovw"]:
    data.append(hist)
print("[INFO] histograms of dataset loaded...")

# initialize the detector and descriptor (same as index_features.py)
detector = cv2.xfeatures2d.SURF_create()
descriptor = RootSIFT()
dad = DetectAndDescribe(detector, descriptor)

print("[INFO] detector initialized...")

# load the codebook vocabulary and initialize the bag-of-visual-words transformer (same as extract_bovw)
vocab = cPickle.loads(open(codebook, "rb").read())
bovw = BagOfVisualWords(vocab)
print("[INFO] bovw created...")

# parse the params (DEBUG is defined earlier in the original file)
if not DEBUG:
    # construct the argument parser and parse the arguments
    ap = argparse.ArgumentParser()
    ap.add_argument("-i",
                    "--image",
                    required=True,