Example #1
# import the necessary packages (FeatureDetector_create is assumed to come
# from imutils.feature, as in the later examples)
import argparse

import cv2
import numpy as np
from imutils.feature import FeatureDetector_create
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("-i", "--image", required=True, help="The image to show the keypoint for")
    parser.add_argument("-k", "--key-point_detector", default="FAST", help="The keypoint detector to use")
    parser.add_argument("-o", "--output", help="The output file to write the image to")
    args = vars(parser.parse_args())

    image = cv2.imread(args["image"])
    orig = image.copy()
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    detector = FeatureDetector_create(args["key_point_detector"])

    kps = detector.detect(gray)

    print("# of keypoints: {}".format(len(kps)))

    for kp in kps:
        r = int(0.5 * kp.size)
        (x, y) = (int(kp.pt[0]), int(kp.pt[1]))
        cv2.circle(image, (x, y), r, (0, 255, 255), 2)

    combined_image = np.hstack([orig, image])
    cv2.imshow("Images", combined_image)
    cv2.waitKey(0)

    output_filename = args.get("output", None)
    if output_filename:
        cv2.imwrite(output_filename, combined_image)


if __name__ == "__main__":
    main()
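For quick experiments outside a CLI script, the detection step reduces to a few lines; a minimal sketch, assuming imutils' FeatureDetector_create factory and a hypothetical image path:

import cv2
from imutils.feature import FeatureDetector_create

# load an image, convert it to grayscale, and detect keypoints; each
# cv2.KeyPoint exposes a pt (x, y) location and a size (diameter)
gray = cv2.cvtColor(cv2.imread("example.jpg"), cv2.COLOR_BGR2GRAY)
detector = FeatureDetector_create("FAST")
kps = detector.detect(gray)
print("# of keypoints: {}".format(len(kps)))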
Example #2
# import the necessary packages (DetectAndDescribe and Mapper are helpers
# defined elsewhere in the surrounding project)
import sys

import cv2
import imutils
from imutils.feature import DescriptorExtractor_create
from imutils.feature import FeatureDetector_create
def job():
    # initialize the keypoint detector, local invariant descriptor, and the descriptor pipeline
    detector = FeatureDetector_create("SURF")
    descriptor = DescriptorExtractor_create("RootSIFT")
    dad = DetectAndDescribe(detector, descriptor)

    # loop over the lines of input
    for line in Mapper.parse_input(sys.stdin):
        # parse the line into the image ID, path, and image
        imageID, path, image = Mapper.handle_input(line.strip())

        # describe the image and initialize the output list
        image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        image = imutils.resize(image, width=320)
        (kps, descs) = dad.describe(image)
        output = []

        # loop over the keypoints and descriptors
        for kp, vec in zip(kps, descs):
            # update the output list as a 2-tuple of the keypoint (x,y)-coordinates and the feature vector
            output.append((kp.tolist(), vec.tolist()))

        # output the row to the reducer
        Mapper.output_row(imageID, path, output, sep="\t")
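DetectAndDescribe is not a library class but a helper defined in the surrounding project; a minimal sketch of the idea (the method name and return format follow the usage above, the rest is an assumption):

import numpy as np

class DetectAndDescribe:
    def __init__(self, detector, descriptor):
        # store the keypoint detector and local invariant descriptor
        self.detector = detector
        self.descriptor = descriptor

    def describe(self, image):
        # detect keypoints, then compute a feature vector for each one
        kps = self.detector.detect(image)
        (kps, descs) = self.descriptor.compute(image, kps)

        # if no keypoints were found, return empty results
        if len(kps) == 0:
            return (None, None)

        # convert the keypoints to a NumPy array of (x, y)-coordinates
        kps = np.float32([kp.pt for kp in kps])
        return (kps, descs)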
Example #3
# import the necessary packages (DetectAndDescribe and FeatureIndexer are
# helpers defined elsewhere in the surrounding project)
import argparse
import random

from imutils import paths
from imutils.feature import DescriptorExtractor_create
from imutils.feature import FeatureDetector_create

# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-d",
                "--dataset",
                required=True,
                help="Path to the directory that contains the images to be indexed")
ap.add_argument("-f",
                "--features-db",
                required=True,
                help="Path to where the features database will be stored")
ap.add_argument("-a",
                "--approx-images",
                type=int,
                default=500,
                help="Approximate # of images in the dataset")
ap.add_argument(
    "-b",
    "--max-buffer-size",
    type=int,
    default=50000,
    help="Maximum buffer size for # of features to be stored in memory")
ap.add_argument("-z", "--feature-detector", default="GFTT")
ap.add_argument("-e", "--extractor", default="BRISK")
args = vars(ap.parse_args())

# initialize the keypoint detector, local invariant descriptor, and the descriptor
# pipeline
detector = FeatureDetector_create(args["feature_detector"])
descriptor = DescriptorExtractor_create(args["extractor"])
dad = DetectAndDescribe(detector, descriptor)

print("Using {} feature detector".format(args["feature_detector"]))
print("Using {} descriptor extractor".format(args["extractor"]))

# initialize the feature indexer, then grab the image paths and randomly shuffle
# them
fi = FeatureIndexer(args["features_db"],
                    estNumImages=args["approx_images"],
                    maxBufferSize=args["max_buffer_size"],
                    verbose=True)
imagePaths = list(paths.list_images(args["dataset"]))
random.shuffle(imagePaths)
Example #4
                required=True,
                help="Path to the bag-of-visual-words database")
ap.add_argument("-c", "--codebook", required=True, help="Path to the codebook")
ap.add_argument("-i",
                "--idf",
                type=str,
                help="Path to inverted document frequencies array")
ap.add_argument("-r",
                "--relevant",
                required=True,
                help="Path to relevant dictionary")
args = vars(ap.parse_args())

# Initialize the keypoint detector, local invariant descriptor, descriptor pipeline,
# distance metric, and inverted document frequency array
detector = FeatureDetector_create("SURF")
descriptor = DescriptorExtractor_create("RootSIFT")
dad = DetectAndDescribe(detector, descriptor)
distanceMetric = dists.chi2_distance
idf = None

# If the path to the inverted document frequency array was supplied, then load the
# idf array and update the distance metric
if args["idf"] is not None:
    idf = pickle.loads(open(args["idf"], "rb").read())
    distanceMetric = distance.cosine

# Load the codebook vocabulary and initialize the bag-of-visual-words transformer
vocab = pickle.loads(open(args["codebook"], "rb").read())
bovw = BagOfVisualWords(vocab)
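dists.chi2_distance is likewise project-local; the chi-squared distance it refers to is commonly implemented along these lines (a sketch, with eps guarding against division by zero):

import numpy as np

def chi2_distance(histA, histB, eps=1e-10):
    # compute (half) the chi-squared distance between two histograms
    return 0.5 * np.sum(((histA - histB) ** 2) / (histA + histB + eps))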
Example #5
# import the necessary packages (DetectAndDescribe and BagOfVisualWords are
# helpers defined elsewhere in the surrounding project)
import argparse
import pickle

import cv2
from imutils import paths
from imutils.feature import DescriptorExtractor_create
from imutils.feature import FeatureDetector_create
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i",
                "--images",
                required=True,
                help="Path to input images directory")
ap.add_argument("-c", "--codebook", required=True, help="Path to the codebook")
ap.add_argument("-m", "--model", required=True, help="Path to the classifier")

# sys.argv[1:] = '-i test_images -c output/vocab.cpickle -m output/model.cpickle'.split()
args = vars(ap.parse_args())

# initialize the keypoint detector, local invariant descriptor, and the descriptor
# pipeline
detector = FeatureDetector_create("GFTT")
descriptor = DescriptorExtractor_create("RootSIFT")
dad = DetectAndDescribe(detector, descriptor)

# load the codebook vocabulary and initialize the bag-of-visual-words transformer
vocab = pickle.loads(open(args["codebook"], "rb").read())
bovw = BagOfVisualWords(vocab)

# load the classifier
model = pickle.loads(open(args["model"], "rb").read())

# loop over the image paths
for imagePath in paths.list_images(args["images"]):
    # load the image and prepare it for description
    image = cv2.imread(imagePath)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
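BagOfVisualWords is another project-local helper. The transformation it applies, quantizing each feature vector to its nearest vocabulary word and counting how often each word occurs, might be sketched like this (class and method names are assumptions):

import numpy as np
from scipy.spatial import distance

class BagOfVisualWords:
    def __init__(self, codebook):
        # store the vocabulary of visual words (cluster centers)
        self.codebook = codebook

    def describe(self, features):
        # assign each feature vector to its nearest visual word, then
        # build a histogram counting the occurrences of each word
        D = distance.cdist(features, self.codebook)
        words = np.argmin(D, axis=1)
        (hist, _) = np.histogram(words,
                                 bins=np.arange(len(self.codebook) + 1))
        return hist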
Example #6
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument(
    "-b",
    "--max-buffer-size",
    type=int,
    default=50000,
    help="Maximum buffer size for # of features to be stored in memory")
args = vars(ap.parse_args())

##################
# CREATE KEYPOINT FEATURES
##################
print('\n\n STARTING FEATURE EXTRACTION \n\n')

# initialize the keypoint detector, local invariant descriptor, and the descriptor
# pipeline
detector = FeatureDetector_create('GFTT')

# RootSIFT was originally used in results presented in blog/README
# ORB was substituted since RootSIFT is not currently shipped with opencv-contrib-python
descriptor = DescriptorExtractor_create('ORB')
dad = DetectAndDescribe(detector, descriptor)

# initialize the feature indexer, then grab the image paths and sort
fi = FeatureIndexer('{}/features.hdf5'.format(args["output"]),
                    estNumImages=args["approx_images"],
                    maxBufferSize=args["max_buffer_size"],
                    verbose=True)
imagePaths = list(paths.list_images(args["dataset"]))
imagePaths.sort()

# loop over the images in the dataset
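# (a sketch of the loop the comment above introduces, assuming the same
# DetectAndDescribe/FeatureIndexer interfaces used in the other examples)
for (i, imagePath) in enumerate(imagePaths):
    # load the image and prepare it for description
    image = cv2.imread(imagePath)
    image = imutils.resize(image, width=320)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    # describe the image, skipping it if no keypoints were detected
    (kps, descs) = dad.describe(gray)
    if kps is None or descs is None:
        continue

    # use the filename as the unique image ID and index the features
    filename = imagePath[imagePath.rfind("/") + 1:]
    fi.add(filename, kps, descs)

# finish the indexing process
fi.finish()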
Example #7
db = h5py.File(OUTPUT_FILE, mode="r")

print(list(db.keys()))

print(db["image_ids"].shape)
print(db["image_ids"])
print(db["image_ids"].value)

imageid_db_list = db["image_ids"]
#print(db["image_ids"][8])
print(db["index"].shape)

print(db["features"].shape)

detector = FeatureDetector_create("ORB")
matcher = DescriptorMatcher_create("BruteForce")

train_data = pd.read_csv("train_file_with_dbindex.csv", index_col=0)

df = train_data.groupby('landmark_id')['id'].nunique().sort_values(
    ascending=False).reset_index(name='count')

train_data['nearID'] = None
train_data['nearValue'] = None
train_data['farId'] = None
train_data['farValue'] = None

df2 = pd.DataFrame(np.array(df), index=df.index)

df2.columns = ['landmark_id', 'count']
Example #8
# author:    Adrian Rosebrock
# website:   http://www.pyimagesearch.com

# USAGE
# python feature_demo.py

# import the necessary packages
from imutils.feature import DescriptorExtractor_create
from imutils.feature import FeatureDetector_create
from imutils.feature import corners_to_keypoints

# ensure the keypoint detection and local invariant descriptors are
# working properly
detector = FeatureDetector_create("SIFT")
extractor = DescriptorExtractor_create("SIFT")
print(detector)
print(extractor)
print(corners_to_keypoints)
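From there, detection and description typically chain as below; a minimal sketch with a hypothetical image path:

import cv2
from imutils.feature import DescriptorExtractor_create
from imutils.feature import FeatureDetector_create

detector = FeatureDetector_create("SIFT")
extractor = DescriptorExtractor_create("SIFT")

# load a grayscale image, detect keypoints, then compute a 128-dim
# SIFT descriptor for each keypoint
gray = cv2.imread("example.jpg", cv2.IMREAD_GRAYSCALE)
kps = detector.detect(gray)
(kps, descs) = extractor.compute(gray, kps)
print("{} keypoints, descriptors shape: {}".format(len(kps), descs.shape))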
                help="Path to the template file.")
ap.add_argument("-m",
                "--mask",
                required=True,
                help="Path to the output mask folder.")
ap.add_argument("-i", "--image", required=True, help="Path to the image file.")
args = vars(ap.parse_args())

# Extract template file name
(_, template_name) = os.path.split(args['template'])

# Mask file
mask_file = os.path.join(args['mask'], template_name)

# Initialize the keypoint detector, local invariant descriptor, and the descriptor pipeline
detector = FeatureDetector_create('SIFT')
descriptor = DescriptorExtractor_create('RootSIFT')
dad = DetectAndDescribe(detector, descriptor)
im = ImageMatcher(dad, glob.glob("templates/*.jpg"))

# Read image
image = cv2.imread(args['image'])
image = imutils.resize(image, width=800)

# Read template
template = cv2.imread(args['template'])

# Read template's mask
mask = cv2.imread(mask_file, cv2.IMREAD_GRAYSCALE)
(T, mask) = cv2.threshold(mask, 254, 255, cv2.THRESH_BINARY)
Example #10
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-d",
                "--dataset",
                required=True,
                help="Path to the directory that contains the images to be indexed")
ap.add_argument("-f",
                "--features-db",
                required=True,
                help="Path to where the features database will be stored")
ap.add_argument("-a",
                "--approx-images",
                type=int,
                default=500,
                help="Approximate # of images in the dataset")
ap.add_argument(
    "-b",
    "--max-buffer-size",
    type=int,
    default=50000,
    help="Maximum buffer size for # of features to be stored in memory")
args = vars(ap.parse_args())

# Initialize the keypoint detector, local invariant descriptor, and the descriptor pipeline
detector = FeatureDetector_create("BRISK")
descriptor = DescriptorExtractor_create("BRISK")
dad = DetectAndDescribe(detector, descriptor)

# Initialize the feature indexer
fi = FeatureIndexer(args["features_db"],
                    estNumImages=args["approx_images"],
                    maxBufferSize=args["max_buffer_size"],
                    verbose=True)
# Loop over the images in the dataset
for (i, imagePath) in enumerate(sorted(paths.list_images(args["dataset"]))):
    # Check to see if progress should be displayed
    if i > 0 and i % 10 == 0:
        fi._debug("processed {} images".format(i), msgType="[PROGRESS]")

    # Extract the image filename (i.e. the unique image ID) from the image path, then load the image itself
Example #11
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument('-a',
                '--approx-images',
                type=int,
                default=500,
                help='Approximate # of images in the dataset')
ap.add_argument(
    '-b',
    '--max-buffer-size',
    type=int,
    default=50000,
    help='Maximum buffer size for # of features to be stored in memory')
args = vars(ap.parse_args())

# initialize the keypoint detector, local invariant descriptor, and the
# descriptor pipeline
detector = FeatureDetector_create('SURF')
# detector = FeatureDetector_create("GFTT")
descriptor = DescriptorExtractor_create('RootSIFT')
dad = DetectAndDescribe(detector, descriptor)
"""
Lines 25-27 set up our keypoint detection and local invariant descriptor 
pipeline. In our CBIR lessons, we used the Fast Hessian (i.e. SURF) 
keypoint detector, but here we’ll use the GFTT detector instead. It’s 
very common to use either the GFTT or Harris detector when using the BOVW 
model for classification; however, you should perform experiments 
evaluating each keypoint detector and go with the detector that obtained
the best accuracy. In order to describe the region surrounding each 
keypoint, we’ll use the RootSIFT descriptor, which will produce a 
128-dim feature vector for each keypoint region.
"""