Example #1
import numpy as np

from sklearn.externals.joblib import Memory

from load import load_data
from descriptors import compute_boundary_desc, get_interest_points
from histograms import compute_visual_words

NUM_IMAGES = None

mem = Memory(cachedir='.')

vocabulary = np.load('./data/vocabulary.npy')
gen = load_data()
res = []

# FIXME: should look up the number of images instead of hard-coding it
postings = np.zeros((len(vocabulary), 3170))

for i, (im, mask) in enumerate(gen):
    if i % 10 == 0:
        print "computed %d images" % i
    if NUM_IMAGES is not None and i == NUM_IMAGES:
        break

    interest_points = mem.cache(get_interest_points)(mask)
    descriptor, coords = mem.cache(compute_boundary_desc)(
        im, mask, interest_points)
    vw = compute_visual_words(descriptor, vocabulary)
    if vw is not None:
        hist, _ = np.histogram(vw, bins=np.arange(len(vocabulary) + 1))
        postings[:, i] = hist

postings.dump('./data/postings.npy')
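A minimal sketch of the FIXME above, assuming the (im, mask) pairs returned by load_data() are few enough to hold in memory: materialize the generator once and size the postings matrix from the actual image count rather than a hard-coded value.

import numpy as np
from load import load_data

vocabulary = np.load('./data/vocabulary.npy')
images = list(load_data())                     # materialize the generator once
postings = np.zeros((len(vocabulary), len(images)))
for i, (im, mask) in enumerate(images):
    # ... same per-image descriptor / visual-word pipeline as above ...
    pass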
Example #2
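    # Fragment: body of the evaluation loop over ground-truth queries
    # (one iteration shown per query).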
    if test_query_index % 10 == 0:
        print "Computed %d images" % test_query_index
    if max_im and test_query_index == max_im:
        break

    query_file_name = query.keys()[0]
    positive_file_names = set(query.values()[0][0])
    ignore_file_names = set(query.values()[0][1])

    if query_file_name not in all_names:
        continue

    im, mask = load.get_image(query_file_name)
    interest_points = mem.cache(get_interest_points)(mask)
    desc, coords = mem.cache(compute_boundary_desc)(im, mask, interest_points)
    visual_words = compute_visual_words(desc, voc)
    if visual_words is None:
        continue

    #search_results = retrieval.search2(visual_words, postings, max_im=20)
    query_document, _ = np.histogram(visual_words,
                                     bins=np.arange(len(voc) + 1))
    search_results = retrieval2.search(query_document, max_im=20)
    #search_results2 = mem.cache(search2)(visual_words,postings,max_im=20)
    indices = search_results[:, 0].astype(int)
    search_file_names = all_names[indices]
    queries_total += 1

    ave_prec = average_precision(search_file_names, positive_file_names,
                                 ignore_file_names)
    mean_average_precision += ave_prec
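The loop accumulates mean average precision: average_precision scores one ranked result list against the query's positive set while skipping an "ignore" (junk) set, and the running sum is presumably divided by queries_total once the loop finishes. The project's actual implementation is not shown here; a hypothetical sketch of such a function could look like this.

def average_precision_sketch(ranked_names, positive_names, ignore_names):
    # Standard AP over a ranked list; "ignore" entries neither help nor hurt.
    hits, precision_sum, rank = 0, 0.0, 0
    for name in ranked_names:
        if name in ignore_names:
            continue
        rank += 1
        if name in positive_names:
            hits += 1
            precision_sum += float(hits) / rank  # precision at this recall point
    return precision_sum / len(positive_names) if positive_names else 0.0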
Example #3
import numpy as np

from sklearn.externals.joblib import Memory

from load import load_data
from descriptors import compute_boundary_desc, get_interest_points
from histograms import compute_visual_words
from retrieval import search, show_results

mem = Memory(cachedir='.')
postings = np.load('./data/postings.npy')
vocabulary = np.load('./data/vocabulary.npy')
image_names = np.load('./data/images.npy')

gen = load_data()
im, mask = gen.next()

interest_points = mem.cache(get_interest_points)(mask)
descriptors, _ = mem.cache(compute_boundary_desc)(im, mask, interest_points)
vw = compute_visual_words(descriptors, vocabulary)

results = search(vw, postings)

show_results(results, image_names)

# Awesome! We're finding the first image as the best result!
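retrieval.search ranks every indexed image against the query; its internals are not shown here, but a plausible sketch scores each postings column by cosine similarity to the query's visual-word histogram and returns rows of (image index, score), which matches the search_results[:, 0].astype(int) indexing used in Example #2. The _sketch name and the histogram-based signature are assumptions, not the library's actual API.

import numpy as np

def search_sketch(query_hist, postings, max_im=20):
    # Cosine similarity between the query histogram and each image column.
    q = query_hist.astype(float)
    norms = np.linalg.norm(postings, axis=0) * np.linalg.norm(q) + 1e-10
    scores = postings.T.dot(q) / norms
    order = np.argsort(scores)[::-1][:max_im]
    return np.column_stack((order, scores[order]))  # rows of [image index, score]

# Hypothetical usage with the variables defined above:
# query_hist, _ = np.histogram(vw, bins=np.arange(len(vocabulary) + 1))
# results = search_sketch(query_hist, postings, max_im=20)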
Example #4
import numpy as np

from sklearn.externals.joblib import Memory

from load import get_image
from descriptors import compute_boundary_desc, get_interest_points
from histograms import compute_visual_words
from ransac_homography import ransac
from retrieval import match_descriptors
from draw import show_matched_desc


mem = Memory(cachedir='.')
# FIXME: choose a couple of images and load them properly
vocabulary = np.load('./data/vocabulary.npy')
im1, mask1 = get_image('Henry_Moore_Cut_0006.jpg', test=True)
im2, mask2 = get_image('Henry_Moore_Cut_0034.jpg', test=True)

interest_points = mem.cache(get_interest_points)(mask1)
desc1, coords1 = mem.cache(compute_boundary_desc)(im1, mask1, interest_points)
voc1 = vocabulary[compute_visual_words(desc1, vocabulary)]

interest_points = mem.cache(get_interest_points)(mask2)
desc2, coords2 = mem.cache(compute_boundary_desc)(im2, mask2, interest_points)
voc2 = vocabulary[compute_visual_words(desc2, vocabulary)]

# As in SIFT matching, use a nearest-neighbour / second-nearest-neighbour
# ratio test to match descriptors.
A = match_descriptors(np.array(desc1), np.array(desc2),
                      np.array(coords1), np.array(coords2))
show_matched_desc(im1, im2, np.array(A))
print "found descriptors %d" % len(A)
el = ransac(np.array(A), max_iter=1000, tol=75, d_min=15)
show_matched_desc(im1, im2, np.array(el[0]))
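The ratio test mentioned in the comment above can be sketched as follows. match_descriptors' real signature and return format are not shown in this snippet, so this hypothetical version simply returns one row of matched coordinates (x1, y1, x2, y2) per accepted match, roughly the shape that ransac and show_matched_desc appear to consume.

import numpy as np

def match_descriptors_sketch(desc1, desc2, coords1, coords2, ratio=0.8):
    # Lowe-style ratio test: keep a putative match only when the nearest
    # neighbour is clearly closer than the second nearest.
    desc1, desc2 = np.asarray(desc1, float), np.asarray(desc2, float)
    matches = []
    for i, d in enumerate(desc1):
        dists = np.sqrt(((desc2 - d) ** 2).sum(axis=1))
        order = np.argsort(dists)
        if len(order) > 1 and dists[order[0]] < ratio * dists[order[1]]:
            matches.append(np.concatenate((coords1[i], coords2[order[0]])))
    return np.array(matches)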