Example #1
# __init__ of a CBIR helper class (the enclosing class definition is not shown here)
def __init__(self):
    self.image_path = IMAGE_PATH  # IMAGE_PATH and METHOD_ID are module-level constants
    self.vocabulary_path = []
    self.method = METHOD_ID
    self.bof_rearrange = 0
    # load image list and vocabulary
    self.imlist = get_imlist(self.image_path)
    self.image_num = len(self.imlist)
    # load feature list: one .sift file per image (same basename, .sift extension)
    self.featlist = [
        self.imlist[i][:-3] + 'sift' for i in range(self.image_num)
    ]

Example #2

import time
import numpy as np
from PIL import Image
from skimage import img_as_float
from skimage.filters import gabor_kernel
from tools.imtools import get_imlist

# prepare filter bank kernels
kernels = []
for theta in range(4):
    theta = theta / 4. * np.pi
    for sigma in (1, 3):
        for frequency in (0.05, 0.25):
            kernel = np.real(
                gabor_kernel(frequency,
                             theta=theta,
                             sigma_x=sigma,
                             sigma_y=sigma))
            kernels.append(kernel)
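# the bank holds 4 orientations x 2 sigmas x 2 frequencies = 16 kernels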

imlist = get_imlist('./allInOne/')
#imlist = get_imlist('./leatherImgs/')

shrink = (slice(0, None, 3), slice(0, None, 3))
#brick = img_as_float(data.load('brick.png'))[shrink]  # numpy.ndarray
#grass = img_as_float(data.load('grass.png'))[shrink]  # img_as_float rescales by 255 into [0, 1]
#wall = img_as_float(data.load('rough-wall.png'))[shrink]
#image_names = ('brick', 'grass', 'wall')
#images = (brick, grass, wall) # tuple类型
images = ()
start_readingTime = time.time()
for index, imName in enumerate(imlist):
    print("processing %s" % imName)
    img = img_as_float(np.asarray(Image.open(imName).convert('L')))[shrink]
    #img = img_as_float(io.imread(imName, as_grey=True))[shrink]
    images = images + (img, )
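Later examples reference a compute_feats helper (see the commented-out call in Example #4 and the inlined version in Example #6). A minimal sketch, following the scikit-image Gabor filter-bank example this code mirrors:

import numpy as np
from scipy import ndimage as nd

def compute_feats(image, kernels):
    # mean and variance of each Gabor response form the texture descriptor
    feats = np.zeros((len(kernels), 2), dtype=np.double)
    for k, kernel in enumerate(kernels):
        filtered = nd.convolve(image, kernel, mode='wrap')
        feats[k, 0] = filtered.mean()
        feats[k, 1] = filtered.var()
    return feats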
Example #3

import pickle
from numpy import *
from imagesearch import imagesearch
from localdescriptors import sift
from sqlite3 import dbapi2 as sqlite
from tools.imtools import get_imlist

imlist = get_imlist('./first500/')
nbr_images = len(imlist)
featlist = [imlist[i][:-3]+'sift' for i in range(nbr_images)]


with open('./vocabulary.pkl', 'rb') as f:
    voc = pickle.load(f)

src = imagesearch.Searcher('web.db', voc)
locs, descr = sift.read_features_from_file(featlist[0])
iw = voc.project(array(descr))

print('ask using a histogram...')
print(src.candidates_from_histogram(iw)[:10])

src = imagesearch.Searcher('web.db', voc)
print('try a query...')

nbr_results = 12
res = [w[1] for w in src.query(imlist[12])[:nbr_results]]
imagesearch.plot_results(src,res)
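The query above assumes web.db has already been populated. For reference, a sketch of the PCV-style indexing step that builds it (assuming the book's Indexer API with create_tables, add_to_index, and db_commit):

indx = imagesearch.Indexer('web.db', voc)
indx.create_tables()
for i in range(nbr_images):
    locs, descr = sift.read_features_from_file(featlist[i])
    indx.add_to_index(imlist[i], descr)
indx.db_commit()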
Example #4

import math
import pickle
import numpy as np
from PIL import Image
from skimage import img_as_float
from skimage.filters import gabor_kernel
from tools.imtools import get_imlist

# prepare filter bank kernels
kernels = []
for theta in range(4):
    theta = theta / 4. * np.pi
    for sigma in (1, 3):
        for frequency in (0.05, 0.25):
            kernel = np.real(
                gabor_kernel(frequency,
                             theta=theta,
                             sigma_x=sigma,
                             sigma_y=sigma))
            kernels.append(kernel)

imlist = get_imlist('./leatherImgs/')

inputFeature = open('gaborFeature.pkl', 'rb')
ref_feats = pickle.load(inputFeature)
inputFeature.close()

img = img_as_float(np.asarray(Image.open(imlist[0]).convert('L')))

#feat = compute_feats(img, kernels)
#rankRes = rank(ref_feats[0], ref_feats)

dis = []
for i in range(ref_feats.shape[0]):
    # Euclidean distance between the query features and each reference image;
    # sqrt() is monotonically increasing, so it does not change the ranking
    #error = math.sqrt(np.sum((feats - ref_feats[i, :])**2))
    error = math.sqrt(np.sum((ref_feats[0, :] - ref_feats[i, :])**2))
    dis.append(error)
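The commented-out rank() call above presumably orders the references by this distance. A minimal sketch, assuming rank() simply sorts indices by ascending Euclidean distance:

def rank(query_feats, all_feats):
    # hypothetical helper: indices of reference images, nearest first
    dists = np.sqrt(np.sum((all_feats - query_feats)**2, axis=1))
    return np.argsort(dists)

rankRes = rank(ref_feats[0], ref_feats)
print(rankRes[:10])  # the ten closest matches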
Example #5
from PIL import Image
from numpy import *
from pylab import *
import pickle
import os
import parentpath
from tools import pca
from tools import imtools

# directory containing font 'a' images
im_dir = os.path.join(parentpath.DATA_DIR, 'a_thumbs')
imlist = imtools.get_imlist(im_dir) # get a list of file names
im = array(Image.open(imlist[0])) # open one image to get size
m,n = im.shape[:2] # get the size of the image
imlist_size = len(imlist) # get the number of images

# store flattened images in a matrix
immatrix = array([array(Image.open(imname)).flatten()
    for imname in imlist], 'f')

# perform PCA
V, S, immean = pca.pca(immatrix)

# show the mean image and the first 15 principal components
figure()
gray()
axis('off')
subplot(4, 4, 1)
imshow(immean.reshape(m, n))
for i in range(15):
    subplot(4, 4, i + 2)
    imshow(V[i].reshape(m, n))  # i-th principal component rendered as an image
show()
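To use the learned basis, project a flattened image onto the leading components; a minimal sketch, assuming pca.pca() follows the PCV convention of returning the projection matrix V, the variances S, and the mean image:

# coordinates of the first image in the 15-dimensional eigenspace
proj = dot(V[:15], immatrix[0] - immean)
print(proj.shape)  # (15,)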
Example #6
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
from scipy import ndimage as nd
from skimage import img_as_float
from skimage.filters import gabor_kernel
from tools.imtools import get_imlist

# prepare filter bank kernels
kernels = []
for theta in range(4):
    theta = theta / 4. * np.pi
    for sigma in (1, 3):
        for frequency in (0.05, 0.25):
            kernel = np.real(
                gabor_kernel(frequency,
                             theta=theta,
                             sigma_x=sigma,
                             sigma_y=sigma))
            kernels.append(kernel)

#imlist = get_imlist('./leatherImgs/')
imlist = get_imlist('./corel1k-thumbnails/')

shrink = (slice(0, None, 3), slice(0, None, 3))

imName = imlist[0]

print("processing %s" % imName)
img = img_as_float(np.asarray(Image.open(imName).convert('L')))[shrink]
#img = img_as_float(io.imread(imName, as_grey=True))[shrink]
feats = np.zeros((len(kernels), 2), dtype=np.double)
for k, kernel in enumerate(kernels):
    filtered = nd.convolve(img, kernel, mode='wrap')
    plt.imshow(filtered)  # each iteration redraws; only the last response remains on screen
    plt.gray()
    feats[k, 0] = filtered.mean()
    feats[k, 1] = filtered.var()
plt.show()
Example #7

# -*- coding: utf-8 -*-
import pickle
from imagesearch import vocabulary
from tools.imtools import get_imlist
from localdescriptors import sift

imlist = get_imlist('./first500/')
nbr_images = len(imlist)
featlist = [imlist[i][:-3] + 'sift' for i in range(nbr_images)]

#for i in range(nbr_images):
#    sift.process_image(imlist[i], featlist[i])

voc = vocabulary.Vocabulary('ukbench')
voc.train(featlist, 10000, 10)
# saving vocabulary
with open('vocabulary.pkl', 'wb') as f:
    pickle.dump(voc, f)
print('vocabulary is:', voc.name, voc.nbr_words)
Example #8
File: bof.py Project: xukechun/CBIR
# -*- coding: utf-8 -*-
# This script generates bof vocabulary and database
# Created on June 14th, 2020

import pickle
from imagesearch import vocabulary
from tools.imtools import get_imlist
from localdescriptors import sift
from imagesearch import imagesearch
from sqlite3 import dbapi2 as sqlite

IMAGE_PATH = 'ukbench500/'

# get image list
image_list = get_imlist(IMAGE_PATH)
images_num = len(image_list)
# get feature
feature_list = [image_list[i][:-3] + 'sift' for i in range(images_num)]


class Bof:
    def sift_process(self):
        for i in range(images_num):
            sift.process_image(image_list[i], feature_list[i])

    def get_vocabulary(self):
        # generate vocabularies
        voc = vocabulary.Vocabulary('ukbenchtest')
        voc.train(feature_list, 1000, 10)

        # saving vocabulary (same pattern as the standalone script in Example #7)
        with open('vocabulary.pkl', 'wb') as f:
            pickle.dump(voc, f)
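A usage sketch for the class above (the method names come from the source; running sift_process before get_vocabulary is an assumption about the intended order):

bof = Bof()
bof.sift_process()      # extract SIFT descriptors for every image in IMAGE_PATH
bof.get_vocabulary()    # train the visual vocabulary and save it to disk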