コード例 #1
0
    def train(self, featurefiles, k=100, subsampling=10):
        """ Train a vocabulary from features in files listed 
            in featurefiles using k-means with k number of words. 
            Subsampling of training data can be used for speedup. """

        nbr_images = len(featurefiles)
        # read the features from file
        descr = []
        descr.append(sift.read_features_from_file(featurefiles[0])[1])
        descriptors = descr[0]  #stack all features for k-means

        for i in arange(1, nbr_images):
            descr.append(sift.read_features_from_file(featurefiles[i])[1])
            descriptors = vstack((descriptors, descr[i]))

        # k-means: last number determines number of runs
        self.voc, distortion = kmeans(descriptors[::subsampling, :], k, 1)
        # each cluster center is one visual word
        self.nbr_words = self.voc.shape[0]

        # go through all training images and project on vocabulary
        imwords = zeros((nbr_images, self.nbr_words))
        for i in range(nbr_images):
            imwords[i] = self.project(descr[i])

        # number of images each word occurs in (for the idf weighting)
        nbr_occurences = sum((imwords > 0) * 1, axis=0)

        # inverse document frequency; the +1 avoids division by zero
        self.idf = log((1.0 * nbr_images) / (1.0 * nbr_occurences + 1))
        self.trainingdata = featurefiles
コード例 #2
0
ファイル: vocabulary.py プロジェクト: Adon-m/PCV
 def train(self,featurefiles,k=100,subsampling=10):
     """ Train a vocabulary from features in files listed 
         in featurefiles using k-means with k number of words. 
         Subsampling of training data can be used for speedup. """
     
     nbr_images = len(featurefiles)
     # read the features from file
     descr = []
     descr.append(sift.read_features_from_file(featurefiles[0])[1])
     descriptors = descr[0] #stack all features for k-means
     for i in arange(1,nbr_images):
         descr.append(sift.read_features_from_file(featurefiles[i])[1])
         descriptors = vstack((descriptors,descr[i]))
         
     # k-means: last number determines number of runs
     self.voc,distortion = kmeans(descriptors[::subsampling,:],k,1)
     # each cluster center is one visual word
     self.nbr_words = self.voc.shape[0]
     
     # go through all training images and project on vocabulary
     imwords = zeros((nbr_images,self.nbr_words))
     for i in range( nbr_images ):
         imwords[i] = self.project(descr[i])
     
     # number of images each word occurs in (for idf weighting)
     nbr_occurences = sum( (imwords > 0)*1 ,axis=0)
     
     # inverse document frequency; the +1 avoids division by zero
     self.idf = log( (1.0*nbr_images) / (1.0*nbr_occurences+1) )
     self.trainingdata = featurefiles
コード例 #3
0
ファイル: vocabulary2.py プロジェクト: ta-oyama/PCV
 def train(self,featurefiles,k=100,subsampling=10):
     """ Read features from the files listed in featurefiles and train a
         vocabulary with k-means using k visual words.  The training data
         can be thinned out with subsampling for speed. """
     
     nbr_images = len(featurefiles)
     # read the features from file
     descr = []
     descr.append(sift.read_features_from_file(featurefiles[0])[1])
     descriptors = descr[0] #stack all features for k-means
     for i in arange(1,nbr_images):
         descr.append(sift.read_features_from_file(featurefiles[i])[1])
         descriptors = vstack((descriptors,descr[i]))
         
     # k-means: the last argument sets the number of runs
     self.voc,distortion = kmeans(descriptors[::subsampling,:],k,1)
     self.nbr_words = self.voc.shape[0]
     
     # project each training image onto the vocabulary in turn
     imwords = zeros((nbr_images,self.nbr_words))
     for i in range( nbr_images ):
         imwords[i] = self.project(descr[i])
     
     # number of images each word occurs in
     nbr_occurences = sum( (imwords > 0)*1 ,axis=0)
     
     # inverse document frequency weights; the +1 avoids division by zero
     self.idf = log( (1.0*nbr_images) / (1.0*nbr_occurences+1) )
     self.trainingdata = featurefiles
コード例 #4
0
def simg():
    """Project the first feature file onto the vocabulary and print the
    histogram-based candidate images (Python 2 demo endpoint).

    Relies on module-level featlist, voc and candidates_from_histogram.
    """
    locs, descr = sift.read_features_from_file(featlist[0])
    iw = voc.project(descr)

    print iw

    result = candidates_from_histogram(iw)

    print result
    return "search image"
コード例 #5
0
    def train(self, featurefiles, k=100, subsampling=10):
        """Build a visual-word vocabulary from the given feature files.

        Descriptors from every file are stacked, k-means (with k words) is
        run on every `subsampling`-th descriptor, and each training image
        is projected onto the resulting vocabulary to derive idf weights.
        """
        nbr_images = len(featurefiles)

        # read the first file, then append the rest while stacking all
        # descriptors into one array for k-means
        descr = [sift.read_features_from_file(featurefiles[0])[1]]
        descriptors = descr[0]
        for fname in featurefiles[1:]:
            feats = sift.read_features_from_file(fname)[1]
            descr.append(feats)
            descriptors = vstack((descriptors, feats))

        # cluster a subsample of the descriptors; the cluster centers
        # become the visual words (last argument = number of k-means runs)
        self.voc, distortion = kmeans(descriptors[::subsampling, :], k, 1)
        self.nbr_words = self.voc.shape[0]

        # project every training image onto the vocabulary
        imwords = zeros((nbr_images, self.nbr_words))
        for idx, feats in enumerate(descr):
            imwords[idx] = self.project(feats)

        # per-word image counts feed the idf weights (+1 avoids div by zero)
        nbr_occurences = sum((imwords > 0) * 1, axis=0)
        self.idf = log((1.0 * nbr_images) / (1.0 * nbr_occurences + 1))
        self.trainingdata = featurefiles
コード例 #6
0
def read_gesture_features_labels(path):
    """Read dense-SIFT features and class labels for all gesture images in *path*.

    Each '.dsift' file contributes one flattened descriptor row; the label
    is the first character of the file name (the gesture class).

    Returns a tuple (features, labels) of numpy arrays.
    """
    # create list of all files ending in .dsift
    featlist = [os.path.join(path,f) for f in os.listdir(path) if f.endswith('.dsift')]
    # read the features
    features = []
    for featfile in featlist:
        l,d = sift.read_features_from_file(featfile)
        features.append(d.flatten())
    features = array(features)
    # create labels
    # BUG FIX: split('/') assumed POSIX separators and broke on Windows
    # paths; os.path.basename is separator-agnostic and equivalent on POSIX.
    labels = [os.path.basename(featfile)[0] for featfile in featlist]
    return features,array(labels)
コード例 #7
0
def read_gesture_features_labels(path):
    """Collect dense-SIFT features and one-character labels for every
    '.dsift' file found in *path*.

    Returns (features, labels) as numpy arrays; each feature row is a
    flattened descriptor array and the label is the first character of the
    corresponding file name.
    """
    # all dense-SIFT feature files in the directory
    featlist = [os.path.join(path, name)
                for name in os.listdir(path) if name.endswith('.dsift')]

    # one flattened descriptor row per feature file
    rows = []
    for featfile in featlist:
        _, desc = sift.read_features_from_file(featfile)
        rows.append(desc.flatten())

    # label = first character of the file name
    labels = [featfile.split('/')[-1][0] for featfile in featlist]
    return array(rows), array(labels)
コード例 #8
0
def get_matching_pts_sift(im_path_1, im_path_2):
    """
    Gets the matching points in the two images, using SIFT features.

    Runs the SIFT extractor on both images (writing '<image>.sift' files
    next to them), matches descriptors symmetrically, and returns the two
    matched point sets (pts1, pts2) via the module-level get_matches helper.
    """
    # Process and save features to file
    params = "--edge-thresh 10 --peak-thresh 5 --verbose"
    sift.process_image(im_path_1, im_path_1 + '.sift', params=params)
    sift.process_image(im_path_2, im_path_2 + '.sift', params=params)

    # Read features from the two images.
    l1, d1 = sift.read_features_from_file(im_path_1 + '.sift')
    l2, d2 = sift.read_features_from_file(im_path_2 + '.sift')

    # matchscores will have an entry for each feature in im1.
    # The entry will be 0 if there is not a match.
    # If there is a match, the entry will be the index of the matching feature in im2.
    matchscores = sift.match_twosided(d1, d2)

    pts1, pts2 = get_matches(l1, l2, matchscores)

    return pts1, pts2
コード例 #9
0
ファイル: search_images.py プロジェクト: mgduarte/homework
def search_image(imagename):
    """Query the image database with *imagename* and print the top matches.

    Loads the pickled vocabulary, projects the image's SIFT descriptors
    onto it, then prints both histogram-based candidates and the top
    results of a full Searcher query.  (Python 2 print syntax.)
    """
    with open(VOC_FILE, 'rb') as f:
        voc = cPickle.load(f)

    # project the query image's SIFT descriptors onto the vocabulary
    locs, descr = sift.read_features_from_file(get_sift_filename(imagename))
    iw = voc.project(descr)

    src = Searcher(DB_FILE, voc)
    print 'ask using a histogram ...'
    print src.candidates_from_histogram(iw)[:5]

    print 'try a query ...'
    for dist, imid in src.query(imagename)[:5]:
        # map the database rowid back to a filename for display
        cur = src.con.execute('select filename from imlist where rowid=%d' % imid)
        print '%s(%d)' % (cur.fetchone()[0], dist)
コード例 #10
0
def add_to_index():
    """Index up to the first 1000 images: project each image's SIFT
    descriptors onto the vocabulary and persist the visual words plus the
    full word histogram.  Already-indexed images are skipped."""
    for i in range(nbr_images)[:1000]:
        locs, descr = sift.read_features_from_file(featlist[i])

        # skip images that are already in the index
        if Imlist.is_indexed(imlist[i]):
            continue
        imid = Imlist.get_id(imlist[i])
        imwords = voc.project(descr)
        # one stored row per visual-word entry of the projection
        for widx in range(imwords.shape[0]):
            Imwords.add(imid, imwords[widx], voc.name)
        # store the whole histogram, pickled, for histogram queries
        ImHistograms.add(imid, pickle.dumps(imwords), voc.name)

    return "add_to_index"
コード例 #11
0
def candidates_from_histogram(iw):
    """Return up to 10 candidate image ids ranked by shared visual words.

    NOTE(review): the *iw* argument is immediately overwritten below with
    the projection of the first feature file, so the parameter is
    effectively ignored — confirm whether that is intentional.
    """
    locs, descr = sift.read_features_from_file(featlist[0])
    iw = voc.project(descr)

    # indices of the visual words present in the image
    words = iw.nonzero()[0]

    # find candidate images containing the same words
    candidates = []
    for word in words:
        c = Imwords.qdistinctid(word)
        candidates += c

    # rank candidates by shared-word count (Python 2 cmp-style sort)
    tmp = [(w, candidates.count(w)) for w in set(candidates)]
    tmp.sort(cmp=lambda x, y: cmp(x[1], y[1]))
    tmp.reverse()
    return [w[0] for w in tmp][:10]
コード例 #12
0
ファイル: 05.py プロジェクト: VRER1997/python_CV
def read_gesture_features_labels(path):
    """Read dense-SIFT features and labels for all '.dsift' files in *path*.

    The label is the file-name prefix before the first '-'
    (e.g. 'A-uniform01.dsift' -> 'A').  (Python 2 print syntax.)
    """
    featlist = [
        os.path.join(path, f) for f in os.listdir(path) if f.endswith('.dsift')
    ]
    print len(featlist)

    # one flattened descriptor row per feature file
    features = []
    for featfile in featlist:
        l, d = sift.read_features_from_file(featfile)
        features.append(d.flatten())

    features = np.array(features)

    # NOTE(review): split('/') assumes POSIX path separators
    label = [featfile.split('/')[-1].split('-')[0] for featfile in featlist]
    print label[:5]
    return features, np.array(label)
コード例 #13
0
ファイル: search_images.py プロジェクト: mgduarte/homework
def index_images(imlist):
    """Index every image in *imlist*: project its SIFT features onto the
    pickled vocabulary and insert them into the search database."""
    featlist = [ get_sift_filename(i) for i in imlist ]

    # load vocabulary
    with open(VOC_FILE, 'rb') as f:
        voc = cPickle.load(f)

    # create indexer
    indx = Indexer(DB_FILE, voc)
    indx.create_tables()

    # go through all images, project features on vocabulary and insert
    for imagefile, siftfile in zip(imlist, featlist):
        locs, descr = sift.read_features_from_file(siftfile)
        indx.add_to_index(imagefile, descr)

    # persist everything in one commit
    indx.db_commit()
コード例 #14
0
# -*- coding: utf-8 -*-
# Compare SIFT features with Harris corners on the same image
# (three side-by-side subplots).
from PIL import Image
from pylab import *
from PCV.localdescriptors import sift
from PCV.localdescriptors import harris

# add Chinese font support for the plot titles below
from matplotlib.font_manager import FontProperties
font = FontProperties(fname=r"c:\windows\fonts\SimSun.ttc", size=14)

imname = '../data/empire.jpg'
im = array(Image.open(imname).convert('L'))
sift.process_image(imname, 'empire.sift')
l1, d1 = sift.read_features_from_file('empire.sift')

figure()
gray()
subplot(131)
sift.plot_features(im, l1, circle=False)
title(u'SIFT特征', fontproperties=font)
subplot(132)
sift.plot_features(im, l1, circle=True)
title(u'用圆圈表示SIFT特征尺度', fontproperties=font)

# detect Harris corners
harrisim = harris.compute_harris_response(im)

subplot(133)
filtered_coords = harris.get_harris_points(harrisim, 6, 0.1)
imshow(im)
plot([p[1] for p in filtered_coords], [p[0] for p in filtered_coords], '*')
コード例 #15
0
# Build the image-search index for the training images.
from PCV.imagesearch import imagesearch
from PCV.localdescriptors import sift
import pickle  # BUG FIX: pickle.load is used below but was never imported
import sqlite3
from PCV.tools.imtools import get_imlist

# get the list of training images
imlist = get_imlist('training/')
nbr_images = len(imlist)

# matching feature files: same basename with a 'sift' extension
featlist = [imlist[i][:-3] + 'sift' for i in range(nbr_images)]

# load vocabulary
with open('training/vocabulary.pkl', 'rb') as f:
    voc = pickle.load(f)

# create the indexer backed by a SQLite database
indx = imagesearch.Indexer('testImaAdd_training.db', voc)
indx.create_tables()

# go through all images, project features on vocabulary and insert
for i in range(nbr_images):
    locs, descr = sift.read_features_from_file(featlist[i])
    indx.add_to_index(imlist[i], descr)

# commit to database and sanity-check the contents
indx.db_commit()
con = sqlite3.connect('testImaAdd_training.db')
print(con.execute('select count (filename) from imlist').fetchone())
print(con.execute('select * from imlist').fetchone())
from PCV.geometry import camera
from numpy import array, loadtxt, genfromtxt, ones, vstack, dot
from PIL import Image

# calibration
from PCV.localdescriptors import sift

K = array([[2394, 0, 932], [0, 2398, 628], [0, 0, 1]])

print "computing sift.."
# load images and compute features
im1 = array(Image.open('../dataset_alcatraz/alcatraz1.jpg'))
sift.process_image('../dataset_alcatraz/alcatraz1.jpg',
                   '../dataset_alcatraz/alcatraz1.sift')
l1, d1 = sift.read_features_from_file('dataset_alcatraz/alcatraz1.sift')

im2 = array(Image.open('../dataset_alcatraz/alcatraz2.jpg'))
sift.process_image('../dataset_alcatraz/alcatraz2.jpg',
                   '../dataset_alcatraz/alcatraz2.sift')
l2, d2 = sift.read_features_from_file('../dataset_alcatraz/alcatraz2.sift')

print "sift.. done"

print "match features..."
# match features
print "\tsearching matches"
matches = sift.match_twosided(d1, d2)
ndx = matches.nonzero()[0]
# make homogeneous and normalize with inv(K)
print "\tmake homogeneous and normalize with inv(K)"
コード例 #17
0
from PCV.tools import imtools

"""
This will process all hand gesture images with the dense SIFT descriptor.

Assumes you downloaded the hand images to ..data/hand_gesture.
The plot at the end generates one of the images of Figure 8-3.
"""

# collect all .ppm training images
path = '../data/hand_gesture/train/'
imlist = []
for filename in os.listdir(path):
    if os.path.splitext(filename)[1] == '.ppm':
        imlist.append(path+filename)


# process images at fixed size (50,50)
for filename in imlist:
    featfile = filename[:-3]+'dsift'
    dsift.process_image_dsift(filename, featfile, 10, 5, resize=(50,50))


# show an image with features (featfile/filename are the last ones processed)
l,d = sift.read_features_from_file(featfile)
im = array(Image.open(filename).resize((50,50)))
print im.shape

figure()
sift.plot_features(im, l, True)
show()
# -*- coding: utf-8 -*-
# Dense SIFT demo: compute features for one image and plot them.
from PCV.localdescriptors import sift, dsift
from pylab import *
from PIL import Image

dsift.process_image_dsift('gesture/empire.jpg', 'empire.dsift', 90, 40, True)
l, d = sift.read_features_from_file('empire.dsift')
im = array(Image.open('gesture/empire.jpg'))
sift.plot_features(im, l, True)
title('dense SIFT')
show()
コード例 #19
0
def my_calibration(sz):
    """
    Return the calibration matrix K for the iPhone4 camera in this example.

    The reference focal lengths (2555, 2586) were measured on a 2592x1936
    image; they are rescaled to the requested size *sz* = (rows, cols) and
    the principal point is placed at the image center.
    """
    rows, cols = sz
    focal_x = 2555 * cols / 2592
    focal_y = 2586 * rows / 1936
    K = diag([focal_x, focal_y, 1])
    K[0, 2] = 0.5 * cols
    K[1, 2] = 0.5 * rows
    return K


# compute features for the frontal and perspective book images
sift.process_image('../data/book_frontal.JPG', 'im0.sift')
l0, d0 = sift.read_features_from_file('im0.sift')

sift.process_image('../data/book_perspective.JPG', 'im1.sift')
l1, d1 = sift.read_features_from_file('im1.sift')

# match features and estimate homography
matches = sift.match_twosided(d0, d1)
ndx = matches.nonzero()[0]
# matched points of the frontal image, in homogeneous coordinates
fp = homography.make_homog(l0[ndx, :2].T)
ndx2 = [int(matches[i]) for i in ndx]
# corresponding points in the perspective image
tp = homography.make_homog(l1[ndx2, :2].T)

# robustly fit a homography between the two point sets
model = homography.RansacModel()
H, inliers = homography.H_from_ransac(fp, tp, model)

# camera calibration
コード例 #20
0
ファイル: ch8_dsift.py プロジェクト: CharlieGit/PCV
from PIL import Image
from pylab import *
from numpy import *
from PCV.localdescriptors import dsift, sift
"""
This is the dense SIFT illustration, it will reproduce the plot
in Figure 8-2.
"""
# compute dense SIFT features and plot them on the image
dsift.process_image_dsift('../data/empire.jpg', 'empire.sift', 90, 40, True)
l,d = sift.read_features_from_file('empire.sift')
im = array(Image.open('../data/empire.jpg'))
sift.plot_features(im, l, True)
show()
コード例 #21
0

if len(sys.argv) >= 3:
  im1f, im2f = sys.argv[1], sys.argv[2]
else:
  im1f = '../data/sf_view1.jpg'
  im2f = '../data/sf_view2.jpg'
#  im1f = '../data/crans_1_small.jpg'
#  im2f = '../data/crans_2_small.jpg'
#  im1f = '../data/climbing_1_small.jpg'
#  im2f = '../data/climbing_2_small.jpg'
im1 = array(Image.open(im1f))
im2 = array(Image.open(im2f))

#sift.process_image(im1f, 'out_sift_1.txt')
l1, d1 = sift.read_features_from_file('out_sift_1.txt')
figure()
gray()
subplot(121)
sift.plot_features(im1, l1, circle=False)

#sift.process_image(im2f, 'out_sift_2.txt')
l2, d2 = sift.read_features_from_file('out_sift_2.txt')
subplot(122)
sift.plot_features(im2, l2, circle=False)

#matches = sift.match(d1, d2)
matches = sift.match_twosided(d1, d2)
print '{} matches'.format(len(matches.nonzero()[0]))

figure()
コード例 #22
0
# Rerank regular (bag-of-words) query results with a RANSAC homography fit.
src = imagesearch.Searcher('testImaAdd.db', voc)

# index of query image and number of results to return
q_ind = 0
nbr_results = 20

# regular query (results sorted by Euclidean distance)
res_reg = [w[1] for w in src.query(imlist[q_ind])[:nbr_results]]
print('top matches (regular):', res_reg)

# load image features for query image
q_locs, q_descr = sift.read_features_from_file(featlist[q_ind])
fp = homography.make_homog(q_locs[:, :2].T)

# RANSAC model for homography fitting
model = homography.RansacModel()
rank = {}

# load image features for the candidate results
for ndx in res_reg[1:]:
    locs, descr = sift.read_features_from_file(
        featlist[ndx])  # because 'ndx' is a rowid of the DB that starts at 1
    # get matches
    matches = sift.match(q_descr, descr)
    ind = matches.nonzero()[0]
コード例 #23
0
# compute features for the two photos (earlier test inputs kept, commented)
# sift.process_image('./data/simage10.jpg','./data/simage10.sift')
# l0,d0 = sift.read_features_from_file('./data/simage10.sift')
#
# sift.process_image('./data/simage11.jpg','./data/simage11.sift')
# l1,d1 = sift.read_features_from_file('./data/simage11.sift')

# sift.process_image('./data/image1.JPG', './data/im2.sift')
# l0,d0 = sift.read_features_from_file('./data/im2.sift')
#
# sift.process_image('./data/image2.JPG', './data/im3.sift')
# l1,d1 = sift.read_features_from_file('./data/im3.sift')

sift.process_image('./data/s20160720_113416.JPG', './data/im2.sift')
l0,d0 = sift.read_features_from_file('./data/im2.sift')

sift.process_image('./data/s20160720_113436.JPG', './data/im3.sift')
l1,d1 = sift.read_features_from_file('./data/im3.sift')

# match features and estimate homography
matches = sift.match_twosided(d0,d1)

# plotting of the matches is currently disabled
# im0 = array(Image.open('./data/book_frontal.JPG'))
# im1 = array(Image.open('./data/book_perspective.JPG'))
# im0 = array(Image.open('./data/simage10.JPG'))
# im1 = array(Image.open('./data/simage11.JPG'))
# figure()
# sift.plot_matches(im0,im1,l0,l1, matches, show_below=True)
# show()
コード例 #24
0
ファイル: main.py プロジェクト: onaries/Python-Exercise
# Compute, match (two-sided) and plot SIFT features for the crans pair.
from pylab import *
from numpy import *
from PIL import Image

from PCV.localdescriptors import sift

"""
This is the twosided SIFT feature matching example from Section 2.2 (p 44).
"""

imname1 = './data/crans_1_small.jpg'
imname2 = './data/crans_2_small.jpg'

# process and save features to file
sift.process_image(imname1, imname1+'.sift')
sift.process_image(imname2, imname2+'.sift')

# read features and match
l1,d1 = sift.read_features_from_file(imname1+'.sift')
l2,d2 = sift.read_features_from_file(imname2+'.sift')
matchscores = sift.match_twosided(d1, d2)

# load images and plot
im1 = array(Image.open(imname1))
im2 = array(Image.open(imname2))

sift.plot_matches(im1,im2,l1,l2,matchscores,show_below=True)
show()
コード例 #25
0
from PCV.localdescriptors import sift
import importlib

# force re-import of possibly stale modules
camera = importlib.reload(camera)
homography = importlib.reload(homography)
sfm = importlib.reload(sfm)
sift = importlib.reload(sift)

# extract features -- note the read order of the two images!
im1 = array(Image.open('D:/study/machine_learning/images/yosemite2.jpg'))
sift.process_image('D:/study/machine_learning/images/yosemite2.jpg', 'im1.sift')

im2 = array(Image.open('D:/study/machine_learning/images/yosemite1.jpg'))
sift.process_image('D:/study/machine_learning/images/yosemite1.jpg', 'im2.sift')

l1, d1 = sift.read_features_from_file('im1.sift')
l2, d2 = sift.read_features_from_file('im2.sift')

matches = sift.match_twosided(d1, d2)

ndx = matches.nonzero()[0]
x1 = homography.make_homog(l1[ndx, :2].T)  # convert the point set to homogeneous coordinates
ndx2 = [int(matches[i]) for i in ndx]
x2 = homography.make_homog(l2[ndx2, :2].T)  # convert the point set to homogeneous coordinates

# keep the matched descriptors and copies of the matched points
d1n = d1[ndx]
d2n = d2[ndx2]
x1n = x1.copy()
x2n = x2.copy()

figure(figsize=(16,16))
コード例 #26
0
ファイル: ch8_dsift.py プロジェクト: LeeC20/PCV_Python3
from PIL import Image
from pylab import *
from numpy import *
from PCV.localdescriptors import dsift, sift
"""
This is the dense SIFT illustration, it will reproduce the plot
in Figure 8-2.
"""
# compute dense SIFT features and plot them on the image
dsift.process_image_dsift('../data/empire.jpg', 'empire.sift', 90, 40, True)
l, d = sift.read_features_from_file('empire.sift')
im = array(Image.open('../data/empire.jpg'))
sift.plot_features(im, l, True)
show()

コード例 #27
0
# -*- coding: utf-8 -*-
# Show dense SIFT features for one sample image per gesture class.
import os
from PCV.localdescriptors import sift, dsift
from pylab import *
from PIL import Image

imlist = [
    '../data/gesture/train/A-uniform01.ppm',
    '../data/gesture/train/B-uniform01.ppm',
    '../data/gesture/train/C-uniform01.ppm',
    '../data/gesture/train/Five-uniform01.ppm',
    '../data/gesture/train/Point-uniform01.ppm',
    '../data/gesture/train/V-uniform01.ppm'
]

figure()
for i, im in enumerate(imlist):
    # BUG FIX: features were written to '<name>..dsift' (im[:-3] keeps the
    # trailing dot) but read back from '<name>.dsift'; write and read the
    # same file now.
    featfile = im[:-3] + 'dsift'
    dsift.process_image_dsift(im, featfile, 90, 40, True)
    l, d = sift.read_features_from_file(featfile)
    dirpath, filename = os.path.split(im)
    im = array(Image.open(im))
    # use the gesture name (filename prefix) as the subplot title
    titlename = filename[:-14]
    subplot(2, 3, i + 1)
    sift.plot_features(im, l, True)
    title(titlename)
show()
コード例 #28
0
ファイル: ch02_sift.py プロジェクト: CBIR-LL/pcv-book-code
# -*- coding: utf-8 -*-
from PIL import Image
from pylab import *
from PCV.localdescriptors import sift
from PCV.localdescriptors import harris

# 添加中文字体支持
from matplotlib.font_manager import FontProperties
font = FontProperties(fname=r"c:\windows\fonts\SimSun.ttc", size=14)

imname = '../data/empire.jpg'
im = array(Image.open(imname).convert('L'))
sift.process_image(imname, 'empire.sift')
l1, d1 = sift.read_features_from_file('empire.sift')

figure()
gray()
subplot(131)
sift.plot_features(im, l1, circle=False)
title(u'SIFT特征',fontproperties=font)
subplot(132)
sift.plot_features(im, l1, circle=True)
title(u'用圆圈表示SIFT特征尺度',fontproperties=font)

# 检测harris角点
harrisim = harris.compute_harris_response(im)

subplot(133)
filtered_coords = harris.get_harris_points(harrisim, 6, 0.1)
imshow(im)
plot([p[1] for p in filtered_coords], [p[0] for p in filtered_coords], '*')
コード例 #29
0
ファイル: 03.py プロジェクト: VRER1997/python_CV
path = "F:\\BaiduNet\\ukbench\\full\\"
imlist = [os.path.join(path, f) for f in os.listdir(path)]
imnbr = len(imlist)
featlist = [imlist[i].split('\\')[-1][:-3] + 'sift' for i in range(imnbr)]
with open('vocabulary.pkl', 'rb') as f:
    voc = pickle.load(f)

src = imagesearch.Searcher('testImgAdd.db', voc)

q_ind = 0
nbr_results = 20

res_reg = [w[1] for w in src.query(imlist[q_ind])[:nbr_results]]
print 'top matches (regular) : ', res_reg

q_locs, q_descr = sift.read_features_from_file(featlist[q_ind])
fp = homography.make_homog(q_locs[:, :2].T)

model = homography.RansacModel()
rank = {}

for ndx in res_reg[1:]:
    locs, descr = sift.read_features_from_file(featlist[ndx])
    matches = sift.match(q_descr, descr)
    ind = matches.nonzero()[0]
    ind2 = matches[ind]
    tp = homography.make_homog(locs[:, :2].T)
    try:
        H, inliners = homography.H_from_ransac(fp[:, ind],
                                               tp[:, ind2],
                                               model,
コード例 #30
0
# Two-view SIFT matching for the Alcatraz pair, with timing (Python 2).
from PIL import Image
from pylab import *
import sfm
from PCV.geometry import camera, homography
from PCV.localdescriptors import sift
import tic

# calibration matrix of the camera used for these images
K = array([[2394,0,932],[0,2398,628],[0,0,1]])

tic.k('start')
# load images and compute features
im1 = array(Image.open('./images/salcatraz1.jpg'))
sift.process_image('./images/salcatraz1.jpg','./images/salcatraz1.sift')
l1,d1 = sift.read_features_from_file('./images/salcatraz1.sift')

im2 = array(Image.open('./images/salcatraz2.jpg'))
sift.process_image('./images/salcatraz2.jpg','./images/salcatraz2.sift')
l2,d2 = sift.read_features_from_file('./images/salcatraz2.sift')

tic.k('loadd sifts')
print '{} / {} features'.format(len(d1), len(d2))

# match features symmetrically; ndx holds indices of matched features
matches = sift.match_twosided(d1,d2)
ndx = matches.nonzero()[0]

tic.k('matched')

# make homogeneous and normalize with inv(K)
コード例 #31
0
# Two-sided SIFT matching for the climbing pair (features saved locally).
from pylab import *
from PIL import Image

from PCV.localdescriptors import sift
"""
This is the twosided SIFT feature matching example from Section 2.2 (p 44).
"""

imname1 = '../data/climbing_1_small.jpg'
imname2 = '../data/climbing_2_small.jpg'

# process and save features to file (written to the current directory)
sift.process_image(imname1, './climbing_1_small.sift')
sift.process_image(imname2, './climbing_2_small.sift')

# sift.process_image(imname1, imname1+'.sift')
# sift.process_image(imname2, imname2+'.sift')

# read features and match (two-sided match keeps only symmetric matches)
l1, d1 = sift.read_features_from_file('./climbing_1_small.sift')
l2, d2 = sift.read_features_from_file('./climbing_2_small.sift')
#matchscores = sift.match(d1, d2)
matchscores = sift.match_twosided(d1, d2)

# load images and plot
im1 = array(Image.open(imname1))
im2 = array(Image.open(imname2))

sift.plot_matches(im1, im2, l1, l2, matchscores, show_below=True)
show()
コード例 #32
0
# Two-sided SIFT matching for two views given on the command line
# (falls back to the sf_view pair).  Python 2 print syntax.
from PCV.localdescriptors import sift

if len(sys.argv) >= 3:
    im1f, im2f = sys.argv[1], sys.argv[2]
else:
    im1f = '../data/sf_view1.jpg'
    im2f = '../data/sf_view2.jpg'
#  im1f = '../data/crans_1_small.jpg'
#  im2f = '../data/crans_2_small.jpg'
#  im1f = '../data/climbing_1_small.jpg'
#  im2f = '../data/climbing_2_small.jpg'
im1 = array(Image.open(im1f))
im2 = array(Image.open(im2f))

# features are assumed precomputed in out_sift_*.txt
#sift.process_image(im1f, 'out_sift_1.txt')
l1, d1 = sift.read_features_from_file('out_sift_1.txt')
figure()
gray()
subplot(121)
sift.plot_features(im1, l1, circle=False)

#sift.process_image(im2f, 'out_sift_2.txt')
l2, d2 = sift.read_features_from_file('out_sift_2.txt')
subplot(122)
sift.plot_features(im2, l2, circle=False)

#matches = sift.match(d1, d2)
matches = sift.match_twosided(d1, d2)
print '{} matches'.format(len(matches.nonzero()[0]))

figure()
コード例 #33
0
# -*- coding: utf-8 -*-
# Dense SIFT demo: compute and plot dense SIFT features for one image.
from PCV.localdescriptors import sift, dsift
from pylab import  *
from PIL import Image

dsift.process_image_dsift('../data/empire.jpg','empire.dsift',90,40,True)
l,d = sift.read_features_from_file('empire.dsift')
im = array(Image.open('../data/empire.jpg'))
sift.plot_features(im,l,True)
title('dense SIFT')
show()
コード例 #34
0
# Process all hand-gesture training images with dense SIFT and show one.
import os

from PCV.localdescriptors import dsift, sift
from PCV.tools import imtools
"""
This will process all hand gesture images with the dense SIFT descriptor.

Assumes you downloaded the hand images to ..data/hand_gesture.
The plot at the end generates one of the images of Figure 8-3.
"""

# collect all .ppm training images
path = '../data/hand_gesture/train/'
imlist = []
for filename in os.listdir(path):
    if os.path.splitext(filename)[1] == '.ppm':
        imlist.append(path + filename)

# process images at fixed size (50,50)
for filename in imlist:
    featfile = filename[:-3] + 'dsift'
    dsift.process_image_dsift(filename, featfile, 10, 5, resize=(50, 50))

# show an image with features (uses the last file from the loop above)
l, d = sift.read_features_from_file(featfile)
im = array(Image.open(filename).resize((50, 50)))
print(im.shape)

figure()
sift.plot_features(im, l, True)
show()
コード例 #35
0
# Build the image-search index for the first 500 images (Python 2).
import pickle
from PCV.imagesearch import imagesearch
from PCV.localdescriptors import sift
from sqlite3 import dbapi2 as sqlite
from PCV.tools.imtools import get_imlist

imlist = get_imlist("./first500/")
nbr_images = len(imlist)
# matching feature files: same basename with a 'sift' extension
featlist = [imlist[i][:-3] + "sift" for i in range(nbr_images)]
# load vocabulary
with open("./vocabulary.pkl", "rb") as f:
    voc = pickle.load(f)
# create indexer
indx = imagesearch.Indexer("web.db", voc)
indx.create_tables()
# go through all images, project features on vocabulary and insert
for i in range(nbr_images)[:500]:
    locs, descr = sift.read_features_from_file(featlist[i])
    indx.add_to_index(imlist[i], descr)
# commit to database
indx.db_commit()

# sanity-check the database contents
con = sqlite.connect("web.db")
print con.execute("select count (filename) from imlist").fetchone()
print con.execute("select * from imlist").fetchone()
コード例 #36
0
# Compute, match (two-sided) and plot SIFT features for the climbing pair.
from pylab import *
from numpy import *
from PIL import Image

from PCV.localdescriptors import sift
"""
This is the twosided SIFT feature matching example from Section 2.2 (p 44).
"""

imname1 = 'data/climbing_1_small.jpg'
imname2 = 'data/climbing_2_small.jpg'

# process and save features to file
sift.process_image(imname1, imname1 + '.sift')
sift.process_image(imname2, imname2 + '.sift')

# read features and match
l1, d1 = sift.read_features_from_file(imname1 + '.sift')
l2, d2 = sift.read_features_from_file(imname2 + '.sift')
matchscores = sift.match_twosided(d1, d2)

# load images and plot
im1 = array(Image.open(imname1))
im2 = array(Image.open(imname2))

sift.plot_matches(im1, im2, l1, l2, matchscores, show_below=True)
show()
コード例 #37
0
ファイル: ch3_panorama.py プロジェクト: BelmonduS/PCV
from PCV.localdescriptors import sift

"""
This is the panorama example from section 3.3.
"""

# set paths to data folder
featname = ['../data/Univ'+str(i+1)+'.sift' for i in range(5)] 
imname = ['../data/Univ'+str(i+1)+'.jpg' for i in range(5)]

# extract features and match (feature files are assumed precomputed)
l = {}
d = {}
for i in range(5): 
    # sift.process_image(imname[i],featname[i])
    l[i],d[i] = sift.read_features_from_file(featname[i])

# match each image against the next one in the sequence
matches = {}
for i in range(4):
    matches[i] = sift.match(d[i+1],d[i])

# visualize the matches (Figure 3-11 in the book)
for i in range(4):
    im1 = array(Image.open(imname[i]))
    im2 = array(Image.open(imname[i+1]))
    figure()
    sift.plot_matches(im2,im1,l[i+1],l[i],matches[i],show_below=True)

# function to convert the matches to hom. points
def convert_points(j):
コード例 #38
0
# Build a pairwise SIFT match-count matrix over all downloaded images,
# for the similarity-graph example (Python 2 print syntax).
# list of downloaded filenames
imlist = imtools.get_imlist(download_path)
nbr_images = len(imlist)

# extract features
featlist = [imname[:-3] + 'sift' for imname in imlist]
for i, imname in enumerate(imlist):
    sift.process_image(imname, featlist[i])

matchscores = zeros((nbr_images, nbr_images))

for i in range(nbr_images):
    for j in range(i, nbr_images):  # only compute upper triangle
        print 'comparing ', imlist[i], imlist[j]
        l1, d1 = sift.read_features_from_file(featlist[i])
        l2, d2 = sift.read_features_from_file(featlist[j])
        matches = sift.match_twosided(d1, d2)
        nbr_matches = sum(matches > 0)
        print 'number of matches = ', nbr_matches
        matchscores[i, j] = nbr_matches

# copy values to the lower triangle to make the matrix symmetric
for i in range(nbr_images):
    for j in range(i + 1, nbr_images):  # no need to copy diagonal
        matchscores[j, i] = matchscores[i, j]

threshold = 2  # min number of matches needed to create link

g = pydot.Dot(graph_type='graph')  # don't want the default directed graph
コード例 #39
0
# Pairwise SIFT match-count matrix (feature files assumed precomputed).
# Python 2 print syntax.
# list of downloaded filenames
imlist = imtools.get_imlist(download_path)
nbr_images = len(imlist)

# extract features
featlist = [imname[:-3] + 'sift' for imname in imlist]
#for i, imname in enumerate(imlist):
#    sift.process_image(imname, featlist[i])

matchscores = zeros((nbr_images, nbr_images))

for i in range(nbr_images):
    for j in range(i, nbr_images):  # only compute upper triangle
        print 'comparing ', imlist[i], imlist[j]
        l1, d1 = sift.read_features_from_file(featlist[i])
        l2, d2 = sift.read_features_from_file(featlist[j])
        matches = sift.match_twosided(d1, d2)
        nbr_matches = sum(matches > 0)
        print 'number of matches = ', nbr_matches
        matchscores[i, j] = nbr_matches
print "The match scores is: %d", matchscores

#np.savetxt(("../data/panoimages/panoramio_matches.txt",matchscores)

# copy values to the lower triangle to make the matrix symmetric
for i in range(nbr_images):
    for j in range(i + 1, nbr_images):  # no need to copy diagonal
        matchscores[j, i] = matchscores[i, j]

threshold = 2  # min number of matches needed to create link
コード例 #40
0
ファイル: process_images.py プロジェクト: Inflane/homework
def load(filename):
    """Return (image, locations, descriptors) for *filename*.

    Runs/locates SIFT feature extraction via process_image and reads the
    resulting feature file.
    """
    feature_path = process_image(filename)
    image = np.array(get_im(filename))
    locations, descriptors = sift.read_features_from_file(feature_path)
    return image, locations, descriptors
コード例 #41
0
ファイル: ch4_ar_cube.py プロジェクト: CBIR-LL/pcv-book-code
    """
    Calibration function for the camera (iPhone4) used in this example.
    """
    row, col = sz
    fx = 2555*col/2592
    fy = 2586*row/1936
    K = diag([fx, fy, 1])
    K[0, 2] = 0.5*col
    K[1, 2] = 0.5*row
    return K



# compute features for the frontal and perspective book images
sift.process_image('../data/book_frontal.JPG', 'im0.sift')
l0, d0 = sift.read_features_from_file('im0.sift')

sift.process_image('../data/book_perspective.JPG', 'im1.sift')
l1, d1 = sift.read_features_from_file('im1.sift')


# match features and estimate homography
matches = sift.match_twosided(d0, d1)
ndx = matches.nonzero()[0]
# matched points in homogeneous coordinates
fp = homography.make_homog(l0[ndx, :2].T)
ndx2 = [int(matches[i]) for i in ndx]
tp = homography.make_homog(l1[ndx2, :2].T)

# robustly estimate the homography with RANSAC
model = homography.RansacModel()
H, inliers = homography.H_from_ransac(fp, tp, model)
コード例 #42
0
# Rerank bag-of-words query results with a homography fit (Python 2).
src = imagesearch.Searcher('web.db',voc)

# index of query image and number of results to return
q_ind = 0
nbr_results = 20

# regular query
res_reg = [w[1] for w in src.query(imlist[q_ind])[:nbr_results]]
print 'top matches (regular):', res_reg  # res_reg holds the candidate images (Euclidean distance)

# load image features for query image
q_locs,q_descr = sift.read_features_from_file(featlist[q_ind])
fp = homography.make_homog(q_locs[:,:2].T)

# RANSAC model for homography fitting
model = homography.RansacModel()

rank = {}
# load image features for result
for ndx in res_reg[1:]:
    locs,descr = sift.read_features_from_file(featlist[ndx-1])  # because 'ndx' is a rowid of the DB that starts at 1.
    # locs,descr = sift.read_features_from_file(featlist[ndx])
    # get matches
    matches = sift.match(q_descr,descr)
    ind = matches.nonzero()[0]
    ind2 = matches[ind]
    tp = homography.make_homog(locs[:,:2].T)
コード例 #43
0
# Two-sided SIFT matching for the climbing pair (features saved locally).
from pylab import *
from PIL import Image

from PCV.localdescriptors import sift

"""
This is the twosided SIFT feature matching example from Section 2.2 (p 44).
"""

imname1 = '../data/climbing_1_small.jpg'
imname2 = '../data/climbing_2_small.jpg'

# process and save features to file (written to the current directory)
sift.process_image(imname1, 'climbing_1_small.sift')
sift.process_image(imname2, 'climbing_2_small.sift')

#sift.process_image(imname1, imname1+'.sift')
#sift.process_image(imname2, imname2+'.sift')

# read features and match
l1, d1 = sift.read_features_from_file('climbing_1_small.sift')
l2, d2 = sift.read_features_from_file('climbing_2_small.sift')
matchscores = sift.match_twosided(d1, d2)

# load images and plot
im1 = array(Image.open(imname1))
im2 = array(Image.open(imname2))

sift.plot_matches(im1, im2, l1, l2, matchscores, show_below=True)
show()
コード例 #44
0
def load(filename):
    """Compute (or locate) SIFT features for *filename* and return the
    tuple (image_array, feature_locations, descriptors)."""
    sift_filename = process_image(filename)
    im = np.array(get_im(filename))
    locs, desc = sift.read_features_from_file(sift_filename)
    return im, locs, desc
コード例 #45
0
ファイル: testComplie.py プロジェクト: guchengxi1994/homework
# Panorama matching example (section 3.3) on the 'wanren' image set.
import os
root = "D:\\homework\\homework\\house\\11\\"
"""
This is the panorama example from section 3.3.
"""

# set paths to data folder
featname = ['../data/wanren/uu' + str(i + 1) + '.sift' for i in range(5)]
imname = ['../data/wanren/uu' + str(i + 1) + '.jpg' for i in range(5)]

# extract features and match
# NOTE(review): features are written to root + featname[i] but read back
# from featname[i] alone -- confirm the intended file locations.
l = {}
d = {}
for i in range(5):
    sift.process_image(root + imname[i], root + featname[i])
    l[i], d[i] = sift.read_features_from_file(featname[i])

# match each image against the next one in the sequence
matches = {}
for i in range(4):
    matches[i] = sift.match(d[i + 1], d[i])

# visualize the matches (Figure 3-11 in the book)
for i in range(4):
    im1 = array(Image.open(imname[i]))
    im2 = array(Image.open(imname[i + 1]))
    figure()
    sift.plot_matches(im2, im1, l[i + 1], l[i], matches[i], show_below=True)


# function to convert the matches to hom. points
def convert_points(j):
コード例 #46
0
# -*- coding: utf-8 -*-
# Show dense SIFT features for one sample image per gesture class.
import os
from PCV.localdescriptors import sift, dsift
from pylab import  *
from PIL import Image

imlist=['../data/gesture/train/A-uniform01.ppm','../data/gesture/train/B-uniform01.ppm',
        '../data/gesture/train/C-uniform01.ppm','../data/gesture/train/Five-uniform01.ppm',
        '../data/gesture/train/Point-uniform01.ppm','../data/gesture/train/V-uniform01.ppm']

figure()
for i, im in enumerate(imlist):
    # BUG FIX: features were written to '<name>..dsift' (im[:-3] keeps the
    # trailing dot) but read back from '<name>.dsift'; write and read the
    # same file now.
    featfile = im[:-3]+'dsift'
    dsift.process_image_dsift(im, featfile, 90, 40, True)
    l,d = sift.read_features_from_file(featfile)
    dirpath, filename=os.path.split(im)
    im = array(Image.open(im))
    # use the gesture name (filename prefix) as the subplot title
    titlename=filename[:-14]
    subplot(2,3,i+1)
    sift.plot_features(im,l,True)
    title(titlename)
show()