def index(self, query=None):
    self.src = imagesearch.Searcher('test.db', self.voc)

    html = self.header
    html += """
        <br />
        Click an image to search. <a href='?query='>Random selection</a> of images.
        <br /><br />
        """
    if query:
        # query the database and get the top images
        res = self.src.query(query)[:self.maxres]
        for dist, ndx in res:
            imname = self.src.get_filename(ndx)
            html += "<a href='?query=" + imname + "'>"
            html += "<img src='" + imname + "' width='100' />"
            html += "</a>"
    else:
        # if there is no query, show a random selection of images
        random.shuffle(self.ndx)
        for i in self.ndx[:self.maxres]:
            imname = self.imlist[i]
            html += "<a href='?query=" + imname + "'>"
            html += "<img src='" + imname + "' width='100' />"
            html += "</a>"

    html += self.footer
    return html
def index(self, query=None):
    self.src = imagesearch.Searcher('test.db', self.voc)

    html = self.header
    html += """
        <br />
        Click an image to search.
        <a href='?query='> Random selection </a> of images.
        <br /><br />
        """
    if query:
        # query the database and get the top images
        res = self.src.query(query)[:self.maxres]
        for dist, ndx in res:
            imname = self.src.get_filename(ndx)
            html += "<a href='?query=" + imname + "'>"
            html += "<img src='" + imname + "' width='200' />"
            html += "</a>"
    else:
        # show a random selection if there is no query
        np.random.shuffle(self.ndx)
        for i in self.ndx[:self.maxres]:
            imname = self.imlist[i]
            html += "<a href='?query=" + imname + "'>"
            html += "<img src='" + imname + "' width='200' />"
            html += "</a>"

    html += self.footer
    return html
def index(self, query=None):
    database_name = 'test.db'
    self.src = imagesearch.Searcher(database_name, self.voc)

    html = self.header
    html += """
        <br />
        Click an image to search. <a href='?query='>Random selection</a> of images.
        <br /><br />
        """
    if query:
        # query the database and get the top images
        res = self.src.query(query)[:self.maxres]
        for dist, ndx in res:
            imname = self.src.get_filename(ndx)
            html += "<a href='?query=" + imname + "'>"
            html += "<img src='" + imname + "' width='100' />"
            html += "</a>"
    else:
        # if there is no query image, show a random selection of images
        random.shuffle(self.ndx)
        for i in self.ndx[:self.maxres]:
            imname = self.imlist[i]
            html += "<a href='?query=" + imname + "'>"
            html += "<img src='" + imname + "' width='100' />"
            html += "</a>"

    html += self.footer
    return html
import pickle

import sift
import imagesearch


def query_img(img):
    query_img_list = []

    # extract SIFT features for the query image
    sift.process_image(img, './ukbench/tmp.sift')

    # load the visual vocabulary
    with open('vocabulary.pkl', 'rb') as f:
        voc = pickle.load(f)

    # add the query image to the index
    index = imagesearch.Indexer('test.db', voc)
    locs, descr = sift.read_features_from_file('./ukbench/tmp.sift')
    index.add_to_index(img, descr)
    index.db_commit()

    # query the database and collect the three closest filenames
    src = imagesearch.Searcher('test.db', voc)
    res = src.query(img)[:3]
    for dist, ndx in res:
        imname = src.get_filename(ndx)
        query_img_list.append(imname)
    return query_img_list
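A minimal usage sketch for query_img; the query path is illustrative and assumes the ukbench images and the test.db index from the other snippets. Note that query_img indexes the query image itself before searching, so the first result is usually the query image.

if __name__ == '__main__':
    matches = query_img('./ukbench/ukbench00000.jpg')
    for imname in matches:
        print imname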
def index(self, query=None):
    self.searcher = imagesearch.Searcher('test.db', self.voc)

    html = self.header
    html += """\
    <br>
    Click an image to search. <a href="?query=">Random selection</a> of images.
    <br><br>"""
    if query:
        res = self.searcher.query(query)[:self.maxresults]
        for dist, ndx in res:
            imname = self.searcher.get_filename(ndx)
            html += '<a href="?query=%s">' % imname
            html += '<img src="/img/%s" width=100>' % os.path.basename(imname)
            html += '</a>'
    else:
        random.shuffle(self.ndx)
        for i in self.ndx[:self.maxresults]:
            imname = self.imlist[i]
            html += '<a href="?query=%s">' % imname
            html += '<img src="/img/%s" width=100>' % os.path.basename(imname)
            html += '</a>'

    html += self.footer
    return html
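For context, a sketch of how a handler like this might be wired up as a CherryPy application; the class name, attribute setup, and the /img/ static mapping are assumptions for illustration, not taken from the original.

import os
import pickle
import random
import cherrypy
import imagesearch
import imtools


class SearchDemo(object):

    def __init__(self):
        # image list, shuffle indices and vocabulary used by index()
        self.imlist = imtools.get_imlist('./ukbench/first1000')
        self.ndx = range(len(self.imlist))
        self.maxresults = 15
        with open('vocabulary.pkl', 'rb') as f:
            self.voc = pickle.load(f)
        self.header = '<html><head><title>Image search</title></head><body>'
        self.footer = '</body></html>'

    @cherrypy.expose
    def index(self, query=None):
        # the index() handler shown above goes here
        pass


# map /img/ to the image directory so the <img src="/img/..."> tags resolve
cherrypy.quickstart(SearchDemo(), '/', config={
    '/img': {
        'tools.staticdir.on': True,
        'tools.staticdir.dir': os.path.abspath('./ukbench/first1000'),
    },
})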
import numpy as np
import csv
import random
import math
from scipy.stats import norm
from copy import deepcopy

# loaddata.py is expected to define imagesearch, voc (and Searcher) used below
execfile('loaddata.py')

path = 'nail_book03'
train_csvfile = 'train_labels04.csv'
test_csvfile = 'test_labels04.csv'
csvfile = 'train_labels.csv'
HSV_fname = 'fHSV_hist.txt'
hsv_flag_fname = 'hsv_flag.csv'

src = imagesearch.Searcher('nail_image500.db', voc)


def main_boost():
    train_ratio = 0.5
    src = Searcher('nail_image500.db')

    # get histogram
    HSV_hist = np.loadtxt(HSV_fname)  # load the fHSV histograms

    # min-max normalize each HSV histogram
    HSV_max = np.max(HSV_hist, axis=1)
    HSV_min = np.min(HSV_hist, axis=1)
    HSV_rang = (HSV_max - HSV_min).reshape((len(HSV_max), 1))
    HSV_hist_norm = [(HSV_hist[i] - HSV_min[i]) / HSV_rang[i] for i in xrange(len(HSV_rang))]
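The per-row normalization loop above can also be written as a single vectorized NumPy expression; a small equivalent sketch (the helper name is ours):

def minmax_normalize_rows(hist):
    """Min-max normalize each row of a 2-D histogram array to [0, 1]."""
    hmin = np.min(hist, axis=1, keepdims=True)
    hmax = np.max(hist, axis=1, keepdims=True)
    return (hist - hmin) / (hmax - hmin)

# e.g. HSV_hist_norm = minmax_normalize_rows(HSV_hist)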
import pickle

import homography
import imtools
import sift
import imagesearch

"""After ch07_buildindex.py has built an index in test.db, this program
queries it, and fits a homography to improve query results.
"""

imlist = imtools.get_imlist('/Users/thakis/Downloads/ukbench/first1000')[:100]
imcount = len(imlist)
featlist = [imlist[i][:-3] + 'sift' for i in range(imcount)]

with open('vocabulary.pkl', 'rb') as f:
    voc = pickle.load(f)

searcher = imagesearch.Searcher('test.db', voc)

query_imid = 50
res_count = 20

# regular bag-of-words query
res = [w[1] for w in searcher.query(imlist[query_imid])[:res_count]]
print 'regular results for query %d:' % query_imid, res

# Rerank by trying to fit a homography.
q_locs, q_descr = sift.read_features_from_file(featlist[query_imid])
fp = homography.make_homog(q_locs[:, :2].T)
model = homography.RansacModel()

rank = {}
for ndx in res[1:]:
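    # The loop body below is a sketch of how the reranking step usually
    # continues, assuming PCV's sift.match() and homography.H_from_ransac();
    # it is not part of the original listing above.
    locs, descr = sift.read_features_from_file(featlist[ndx])

    # match the query descriptors against this candidate image
    matches = sift.match(q_descr, descr)
    ind = matches.nonzero()[0]
    ind2 = matches[ind].flatten().astype('int')
    tp = homography.make_homog(locs[:, :2].T)

    # fit a homography with RANSAC and count inliers; candidates with too
    # few matches simply get an empty inlier set
    try:
        H, inliers = homography.H_from_ransac(fp[:, ind], tp[:, ind2], model)
    except Exception:
        inliers = []
    rank[ndx] = len(inliers)

# sort by inlier count (most inliers first), keeping the query image on top
sorted_rank = sorted(rank.items(), key=lambda t: t[1], reverse=True)
res_geom = [res[0]] + [r[0] for r in sorted_rank]
print 'homography results for query %d:' % query_imid, res_geom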
# assumes imlist, featlist, nbr_images and a vocabulary object `voc`
# have been set up earlier (see ch07_buildindex.py)
import pickle

import sift
import imagesearch

# train the vocabulary on the feature files and save it
voc.train(featlist, 1000, 10)
with open('vocabulary.pkl', 'wb') as f:
    pickle.dump(voc, f)
print 'vocabulary is:', voc.name, voc.nbr_words

# load vocabulary
with open('vocabulary.pkl', 'rb') as f:
    voc = pickle.load(f)

# create indexer
indx = imagesearch.Indexer('test.db', voc)
indx.create_tables()

# go through all images, project features on vocabulary and insert
for i in range(nbr_images)[:100]:
    locs, descr = sift.read_features_from_file(featlist[i])
    indx.add_to_index(imlist[i], descr)

# commit to database
indx.db_commit()

# inspect the database contents directly
from pysqlite2 import dbapi2 as sqlite
con = sqlite.connect('test.db')
print con.execute('select count (filename) from imlist').fetchone()
# example output: (1000,)
print con.execute('select * from imlist').fetchone()
# example output: (u'ukbench00000.jpg',)

# candidate search using a visual-word histogram
src = imagesearch.Searcher('test.db', voc)
locs, descr = sift.read_features_from_file(featlist[0])
iw = voc.project(descr)
print 'ask using a histogram...'
print src.candidates_from_histogram(iw)[:10]

# full query: candidates ranked by weighted word-histogram distance
src = imagesearch.Searcher('test.db', voc)
print 'try a query...'
print src.query(imlist[0])[:10]
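As a quick sanity check on the index, PCV's imagesearch module also provides compute_ukbench_score and plot_results; a short sketch, with illustrative slice sizes:

# average number of correct images among the top four results, over the
# first 100 queries (4.0 would be a perfect score on ukbench)
print imagesearch.compute_ukbench_score(src, imlist[:100])

# visualize the query image and its closest matches
nbr_results = 6
res = [w[1] for w in src.query(imlist[0])[:nbr_results]]
imagesearch.plot_results(src, res)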