Code example #1
def inverse_pressure_images():
    """
    Invert the values of the pressure images to generate binary masks
    """
    root = "../data/framed"
    names = imt.get_imlist(root,ext ='.png')
    for name in names:
        if "pressure" in name:
#             print name.split('_')[-5]
            im = cv2.cvtColor(cv2.imread(name), cv2.COLOR_RGB2GRAY)
            mask = np.zeros(im.shape, dtype=im.dtype)
            cv2.imshow('input', im)
            crop,_ = imt.cropMask(im, None, th=128)
            mask = np.zeros(crop.shape, dtype=crop.dtype)
            mask[crop>0]=1
            mask[mask!=0]=0
            mask = np.invert(mask)
            #diff[diff!=1]=0
            #diff =diff*255
            crop = cv2.medianBlur(crop,3)
            
            output = mask*crop
            output = frameImage(output)
#             var = mask
#             print "mask: ",type(var), var.dtype, len(np.unique(var)), var.min(), var.max()
#             cv2.imshow('mask', mask*255)
            cv2.imwrite(name,output)

#             var = output
#             print "output: ",type(var), var.dtype, len(np.unique(var)), var.min(), var.max()
            cv2.imshow('output', output)
            
            cv2.waitKey(5)
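Every example on this page revolves around imtools.get_imlist (imported as imt in several snippets). The helper itself is not shown here; the following is a minimal sketch of what such a function typically looks like, assuming an optional ext parameter because some callers pass one and others do not:

import os

def get_imlist(path, ext='.jpg'):
    """ Return full paths of all images in path that end with ext (sketch, not the original helper). """
    return [os.path.join(path, f) for f in os.listdir(path) if f.endswith(ext)]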
Code example #2
def resize_rotate_and_frame():
    print "Rotate and Frame Images"
    srcpp = "../data/dataset"
    dstpp = "../data/framed"
    imnames = imt.get_imlist(srcpp, ext = ".png")
    s=320
    i=0
    for imname in imnames:
        filename = imname.split('\\')[-1]
        ## === OPERATORS ===
        if 'dept' in imname:
            im = cv2.imread(imname, -1)
        else:
            im = cv2.imread(imname)

        b = imt.shrinkIm(im, s)
        
        #rotate the image:
        if 'right' in imname:
            b = np.fliplr(cv2.transpose(b))            
        elif 'head' in imname:
            b = cv2.flip(b, -1)
        #frame the image
        a = imt.frameImage(b)

        dst = ("/").join([dstpp,filename])
        print i, dst, b.shape
        i+=1
        cv2.imshow('rotated', a)            
        cv2.waitKey(5)
Code example #3
def replicate_and_rename_pressure_images():
    
    mode = "pressure"
    pp = "../data/preprocess"
    
    actors = ["carlos", "chris", "victor", "lakshman", "norma"]
        
    poses  = ["bgn", "soldierU", "fallerU", "soldierD", "fallerD",
                     "logR", "yearnerR", "fetalR", "logL", "yearnerL", "fetalL"]
    views  = ["bottom"]#, "right", "head"]    
    
    
    B = {'ideal': ["medium","dark", "blanket"],
         'pillow': ["blanketpillow"]
        }
    
    for actor in actors[:1]:
        for srcscene in B.keys():
            for dstscene in B[srcscene]:
                for view in views:
                    for pose in poses [1:2]:
                        srcpath = ("/").join([pp,actor,mode,srcscene,view, pose])
                        print "path: ", srcpath
                        imlist = imt.get_imlist(srcpath, ext='.png')
                        if len(imlist)>0:
                            for src in imlist:
                                dst = src.replace(srcscene, dstscene)
                                print dst
                                shutil.copy(src,  dst)
Code example #4
def adjust_dataset_names():
    """
    Use this script to adjust the names of the images in the dataset.
    old format: <actor>_<pose>_<scene>_<view>_<modality>_<idx>.png
    ** change** : <scene> :: <light>_<condition>
    new format: <actor>_<pose>_<light>_<condition>_<view>_<modality>_<idx>.png
    """
    
    root = "../data/framed"
    imnames = imt.get_imlist(root,ext ='.png')
    for src in imnames:
        dst= src
        if "ideal" in src:
            dst = src.replace("ideal", "bright_clear")
        elif "medium" in src:
            dst = src.replace("medium", "medium_clear")
        elif "dark" in src:
            dst = src.replace("dark", "dark_clear")

        elif "blanket" in src:
            dst = src.replace("blanket", "medium_blanket")
        elif "pillow" in src: 
            dst = src.replace("pillow", "medium_pillow")
        elif "blanketpillow" in src:
            dst = src.replace("blanketpillow", "medium_blanketpillow")
            
#         if "carlos" in src:
#             print "Old name: {} \t || New adjusted name: {}".format(src, dst)
        print "Old name: {} \t || New adjusted name: {}".format(src, dst)            
        os.rename(src, dst)
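For illustration, the substitutions above turn an old-format name into the new format like this (the filename is hypothetical, assembled from actor/pose/scene tokens that appear elsewhere on this page):

src = "carlos_soldierU_ideal_bottom_pressure_0.png"
dst = src.replace("ideal", "bright_clear")
# dst == "carlos_soldierU_bright_clear_bottom_pressure_0.png"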
Code example #5
 def __init__(self, path, feature_type, vocab_size=800):
     self.path = path
     self.im_list = imtools.get_imlist(path)
     self.vocab_size = vocab_size
     self.feature_type = feature_type
     self.logger = logging.getLogger(__name__)
     self.load_model()
Code example #6
File: d_tfidf.py Project: klintan/styleio-training
 def __init__(self, path):
     self.path = path
     self.histograms = None
     self.im_list = imtools.get_imlist(path)
     self.all_scores=[]
     self.logger = logging.getLogger(__name__)
     self.load_histograms()
Code example #7
 def __init__(self, path, feature_type, img_type='test', vocab_size=800):
     self.im_list = imtools.get_imlist(path)
     self.img_type = img_type
     self.path = path
     self.vocab_size = vocab_size
     self.feature_type = feature_type
     self.vocab_model = None
     self.logger = logging.getLogger(__name__)
Code example #8
def add_tag_to_filename(tag = "_framed.png"):
    newpp = "../data/framed"
    imnames = imt.get_imlist(newpp, ext = ".png")
    s=320
    i=0
    for src in imnames:
        dst = src.split('.')[0]
        dst = dst+tag
        os.rename(src, dst)
Code example #9
 def __init__(self):
     self.config = yaml.safe_load(open("../config.yml"))['preprocessing']
     self.logger = logging.getLogger(__name__)
     self.path = self.config['path']
     self.experiment_name = self.config['experiment_name']
     self.im_list = imtools.get_imlist(self.config['path'])
     self.all_images_data = []
     self.size = self.config['img_size']
     self.datagen = ImageDataGenerator(featurewise_center=True,
                                       featurewise_std_normalization=False)
Code example #10
def test4():
    imlist = imtools.get_imlist(data_path)

    averageim = imtools.compute_average(imlist)

    figure()

    imshow(averageim)

    show()
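imtools.compute_average is used here but not defined on this page. A minimal sketch of such a helper, assuming equally sized images averaged pixel-wise (the imports belong to the sketch, not the original snippet):

import numpy as np
from PIL import Image

def compute_average(imlist):
    """ Average a list of equally sized images (sketch). """
    avg = np.array(Image.open(imlist[0]), 'f')
    for imname in imlist[1:]:
        avg += np.array(Image.open(imname), 'f')
    avg /= len(imlist)
    return np.array(avg, 'uint8')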
Code example #11
def binarize_images_under_folder():
    root = "../data/framed"
    names = imt.get_imlist(root,ext ='.png')
    for name in names:
        if "binary" in name:
            im = cv2.imread(name)
            if len(np.unique(im)) > 2:
                print "image: {} had {} values".format(name, len(np.unique(im) ))
                im[im>0]=255
                cv2.imwrite(name,im)
Code example #12
    def __init__(self, path, distance_metric='chisquared'):
        self.path = path
        self.histograms = None
        self.im_list = imtools.get_imlist(path)
        self.all_scores = []
        self.all_dist = []
        self.logger = logging.getLogger(__name__)
        self.load_histograms()
        self.load_tfidf()

        self.dist_function = dist.distance(distance_metric)
Code example #13
 def __init__(self):
     self.model = VGG16(weights='imagenet', include_top=True)
     self.config = yaml.safe_load(
         open("../config.yml"))['deep_feature_extraction']
     self.logger = logging.getLogger(__name__)
     self.path = self.config['path']
     self.experiment_name = self.config['experiment_name']
     self.im_list = imtools.get_imlist(self.config['path'])
     self.all_images_data = []
     self.size = self.config['img_size']
     self.save_features = self.config['save_features']
     self.datagen = ImageDataGenerator(featurewise_center=True,
                                       featurewise_std_normalization=False)
Code example #14
def load_data(path):
    """ 加载数据 """

    imlist = imtools.get_imlist(path)

    labels = [int(imfile.split('/')[-1][-5]) for imfile in imlist]

    # create features from the images
    features = []
    for imname in imlist:
        im = np.array(Image.open(imname).convert('L'))
        features.append(compute_feature(im))
    return np.array(features), labels
Code example #15
File: GetData.py Project: Wilbeibi/csdn_c
def Getdata():
    global fe_arr
    Set_dir = os.curdir + os.sep + "Set"
    for chr in os.listdir(Set_dir):
        img_list = imtools.get_imlist(Set_dir + os.sep + chr)
        ExtractFt(img_list)
        
        #fe_arr.save('f_data.npy')
        #label_arr.save('l_data.npy')
    fe_arr = fe_arr[1:]
    #print fe_arr.size
    #print label_arr.size   
    np.save('f_data.npy', fe_arr)
    np.save('l_data.npy', label_arr)
    return fe_arr,label_arr
Code example #16
 def __init__(self):
     self.config = yaml.safe_load(open("../config.yml"))['html']
     self.path = self.config['path']
     self.histograms = None
     self.im_list = imtools.get_imlist(self.config['path'])
     self.all_scores=[]
     self.logger = logging.getLogger(__name__)
     self.html= None
     self.all_dist = []
     self.all_data = []
     #jinja 2
     template_dir = os.path.dirname(os.path.abspath(__file__))
     env = Environment(loader=FileSystemLoader(template_dir + "/templates"),trim_blocks=True, autoescape=select_autoescape(enabled_extensions=('html', 'xml'), default_for_string=True))
     self.template = env.get_template('annotate.html')
     self.load_comparison()
Code example #17
File: test.py Project: qianrenzhan/QTCTD
def load_data(path):
    """ 加载数据 """

    imlist = imtools.get_imlist(path)
    # imlist = [
    #     os.path.join(path, f) for f in os.listdir(path) if f.endswith('.bmp')
    # ]

    labels = [int(imfile.split('/')[-1][-5]) for imfile in imlist]

    # create features from the images
    features = []
    for imname in imlist:
        im = np.array(Image.open(imname).convert('L'))
        features.append(compute_feature(im))
    return np.array(features), labels
Code example #18
File: searchdemo.py Project: EJHortala/books-2
  def __init__(self):
    self.imlist = imtools.get_imlist(
        '/Users/thakis/Downloads/ukbench/first1000')[:100]
    self.ndx = range(len(self.imlist))

    with open('vocabulary.pkl', 'rb') as f:
      self.voc = pickle.load(f)

    self.maxresults = 15

    self.header = """\
<!doctype html>
<html>
<head><title>Image search example</title></head>
<body>"""

    self.footer = """\
Code example #19
def generate_synthetic_dark_bright_depth_pressure_scenes():
    """
    Copy with new label (Illumination invariant) scenes
    """
    root = "../data/framed"
    modality = "depth" #"pressure"
    imnames = imt.get_imlist(root,ext ='.png')
    for src in imnames:
        if("medium" in src) and ("binary" in src) and not ("clear" in src):

            dst_bri = src.replace("medium", "bright")
            
            dst_drk = src.replace("medium", "dark")
            
#             med = cv2.cvtColor(med, cv2.COLOR_RGB2GRAY)
            print "NAMES medium:{} \t brigh:{} \t dark:{}".format(src, dst_bri, dst_drk)            
            shutil.copy(src,  dst_bri)
            shutil.copy(src,  dst_drk)
Code example #20
def correct_rotation_of_right_view(dstpp="../data/framed"):
    print "Correct Rotation of Right View"
    imnames = imt.get_imlist(dstpp, ext = ".png")
    s=320
    i=0
    for imname in imnames:
        filename = imname.split('\\')[-1]
        if 'right' in filename: 
            ## === OPERATORS ===
            if 'dept' in imname:
                im = cv2.imread(imname, -1)
            else:
                im = cv2.imread(imname)           
            #rotate the image:
            b = np.fliplr(im)            

            print i, imname, b.shape
            i+=1
            cv2.imwrite(imname, b)
            cv2.imshow('rotated', b)
            cv2.waitKey(5)
Code example #21
def resize_and_copy_images():
    """Resize all images and copy to new location """
    print "Resize and Copy Images"
    pp = "../data/mm_sleeping_poses_dataset"
    newpp = "../data/framed"
    imnames = imt.get_imlist(pp, ext = ".png")
        
    for imname in imnames:
        ## === OPERATORS ===
        ## copy all files to single location
        filename = imname.split('\\')[-1] #.split('.')[0]
            
            
        #filename = filename+'_resized.png'
        im = cv2.imread(imname, -1)
        r_im = imt.shrinkIm(im, 320)  # resize before copying (same helper as in resize_rotate_and_frame)

        dst = ("/").join([newpp,filename])
        cv2.imwrite(dst, r_im)
        
#         shutil.copy(imname,   dst)
        print "\n src:{} \n dst:{} \n name: {}".format(imname, dst,filename)
Code example #22
File: 0424.py Project: ta-oyama/PCV
import matplotlib.pyplot as plt
import imtools
import pca
import pickle
from scipy.ndimage import filters, measurements, morphology
import scipy.io
from scipy.misc import imsave
from numpy import random
import rof

# reload(imtools)


### 1.3.6 Principal component analysis of images ###
# create pca.py
imlist = imtools.get_imlist("./a_thumbs")  # using a relative path

im = np.array(Image.open(imlist[0]))
# open one image to get the size
m, n = im.shape[0:2]  # get the image size
imnbr = len(imlist)  # get the number of images

# create a matrix to store all flattened images
immatrix = np.array([np.array(Image.open(img)).flatten() for img in imlist], "f")

# perform PCA
V, S, immean = pca.pca(immatrix)


### 1.3.7 The pickle module ###
# save the mean and the principal components
Code example #23
File: pca_ex.py Project: arcrin/Image_Recognition
from PIL import Image
from numpy import *
from pylab import *
from imtools import pca, get_imlist


imlist = get_imlist('../data/a_thumbs')
print(imlist)

im = array(Image.open(imlist[0]))
m, n = im.shape[0:2]
imnbr = len(imlist)

immatrix = array([array(Image.open(im)).flatten() for im in imlist], 'f')

V, S, immean = pca(immatrix)
figure()
gray()
subplot(2, 4, 1)
imshow(immean.reshape(m, n))
for i in range(7):
    subplot(2, 4, i+2)
    imshow(V[i].reshape(m, n))

show()
Code example #24
File: pca-view.py Project: zuqqhi2/samples
from PIL import Image
from numpy import *
import imtools
import pca
import os

imlist = imtools.get_imlist("data/a_thumbs")
im = array(Image.open(imlist[0]))
m,n = im.shape[0:2]
imnbr = len(imlist)

immatrix = array([array(Image.open(im)).flatten() for im in imlist], 'f')

V,S,immean = pca.pca(immatrix)

pil_im = Image.fromarray(uint8(immean.reshape(m,n)))
try:
    pil_im.save("./pca.jpg")
except IOError:
    print "cannot save", "pca.jpg"

for i in range(7):
    tmp = (V[i] - min(V[i])) / (max(V[i]) - min(V[i])) * 255
    pil_im = Image.fromarray(uint8(tmp.reshape(m,n)))
    
    print min(V[i]), max(V[i])
    print tmp.reshape(m,n)
    try:
        pil_im.save("./pca-" + str(i) + ".jpg")
    except IOError:
        print "cannnot save", "pca-" + str(i) + ".jpg"
Code example #25
File: ex_spectran.py Project: rayjim/python_proj
# -*- coding: utf-8 -*-
"""
Created on Sat Apr 19 22:57:50 2014

@author: ray
"""

import imtools
from PIL import Image 
import pca
from numpy import *
from pylab import *

close('all')
# generate data
imlist = imtools.get_imlist('../data/a_selected_thumbs/') 
imnbr = len(imlist)
immatrix = array([array(Image.open(im)).flatten() for im in imlist],'f')
V,S,immean = pca.pca(immatrix)
immean = immean.flatten()
projected = array([dot(V[0:40],immatrix[i]-immean) for i in range(imnbr)])

from scipy.cluster.vq import * 


n = len(projected)
# compute distance matrix
S = array([[ sqrt(sum((projected[i]-projected[j])**2)) for i in range(n) ] for j in range(n)], 'f')
# create Laplacian matrix
rowsum = sum(S,axis=0)
D = diag(1 / sqrt(rowsum))
I = identity(n)
L = I - dot(D,dot(S,D))
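The snippet above stops right after building the Laplacian matrix. A minimal sketch of the usual next steps, assuming k clusters and reusing the wildcard numpy/scipy.cluster.vq imports already present in the snippet (this continuation is not part of the original file):

k = 5
# eigendecomposition of the Laplacian; rows of Vh are eigenvectors of L
U, sigma, Vh = linalg.svd(L)
# use the first k eigenvectors as features and cluster them with k-means
eig_features = whiten(array(Vh[:k]).T)
centroids, distortion = kmeans(eig_features, k)
code, distance = vq(eig_features, centroids)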
Code example #26
# Using the Pickle module
# Saving results or data for later use.

# Pickle can take almost any Python object and convert it to a string representation, and back (unpickling)

from PIL import Image
from numpy import *
from pylab import *
import imtools
import pca
import pickle

imlist = imtools.get_imlist("data/fontimages/", "jpg")

im = array(Image.open(imlist[0]))  # open one image to get size
m, n = im.shape[0:2]  # get the size of the images
imnbr = len(imlist)  # get the number of images

# create matrix to store all flattened images
immatrix = array([array(Image.open(im)).flatten() for im in imlist], 'f')

# perform PCA
V, S, immean = pca.pca(immatrix)

# show some images (mean and 7 first modes)
figure()
gray()
subplot(2, 4, 1)
imshow(immean.reshape(m, n))
for i in range(7):
    subplot(2, 4, i + 2)
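The comment at the top of this example promises saving results with pickle, but the snippet is cut off before that point. A minimal sketch of the usual save step (the file name font_pca_modes.pkl is borrowed from other examples on this page, so treat it as an assumption):

# save the mean image and the principal modes for later use
with open('font_pca_modes.pkl', 'wb') as f:
    pickle.dump(immean, f)
    pickle.dump(V, f)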
Code example #27
File: countzero.py Project: yaomingshr/DogvsCat
import imtools
import os

train_feat_path = '../trainfeature/'
test_feat_path = '../testfeature/'

trainlist = imtools.get_imlist(train_feat_path)
testlist = imtools.get_imlist(test_feat_path)

zcount = 0

for fp in trainlist:
    if os.path.getsize(fp) == 0:
        zcount = zcount + 1

for fp in testlist:
    if os.path.getsize(fp) == 0:
        zcount = zcount + 1

print zcount
Code example #28
#!/usr/bin/python
# -*- coding: utf-8 -*-
import sys

sys.path.append('C:/workPyth/Mypython/cv')
from PIL import Image
from numpy import *
from pylab import *
import sift
import imtools
import os
from os.path import basename, splitext

im_path = os.getcwd()

imlist = imtools.get_imlist('nail_book')
featlist = []
for imname in imlist:
    name, ext = splitext(basename(imname))
    sname = name + '.sift'
    sift.process_image(imname, sname)
    featlist.append(sname)

nbr_images = len(imlist)

matchscores = zeros((nbr_images, nbr_images))

for i in xrange(nbr_images):
    for j in xrange(i, nbr_images):  # compute only the upper-triangular entries
        print 'comparing ', imlist[i], imlist[j]
Code example #29
File: 4_tfidf.py Project: sunshiding/py-visual-bow
import Image
import sys
sys.path.append('utils')
import imtools
import os
import pickle

from scipy.cluster.vq import *

f = open(sys.argv[1]+'_histograms.pickle', 'r')
histograms = pickle.load(f)
f.close()

path = sys.argv[1]

nbr_images = imtools.get_imlist(path)

#word, the index of 200 in the histogram
#im, the histogram of the specific image
#histograms, the list of histograms

#tf(word, histogram) computes "term frequency" which is the number of times a word appears in a image
def tf(word, im):
    return float(word) / sum(im)


#n_containing(word, histograms) returns the number of documents containing word.
def n_containing(wordidx, word, histograms):
    noOfContainingIms=[]
    count=0
    for idx,im in enumerate(histograms):
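The snippet is cut off inside n_containing. A minimal sketch of how tf and an idf helper are typically combined into a tf-idf score, assuming histograms is the list of per-image bag-of-words count vectors loaded above (this continuation is illustrative, not the original file):

import math

def idf(wordidx, histograms):
    # number of images whose histogram contains this visual word at least once
    n_docs_with_word = sum(1 for im in histograms if im[wordidx] > 0)
    return math.log(len(histograms) / (1.0 + n_docs_with_word))

def tfidf(wordidx, im, histograms):
    # term frequency of the word in this image times its inverse document frequency
    return tf(im[wordidx], im) * idf(wordidx, histograms)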
Code example #30
from PIL import Image
import numpy as np
import pandas as pd
import posixpath
import matplotlib.pyplot as plt
import imtools

parsed_coord = open(posixpath.abspath('parsed_data/params_res.txt'))
parsed_coord = parsed_coord.readlines()

res = []
for line in parsed_coord:
    res.append(line.split())
df = pd.DataFrame(res)

image_names = imtools.get_imlist(posixpath.abspath('parsed_data/imgs'))

image = Image.open(image_names[2])
image_arr = np.array(image)

plt.figure(1)
plt.imshow(image_arr)
plt.plot(df[df['Filename'] == image_names[2]].CenX,
         df[df['Filename'] == image_names[2]].CenY, 'r*')
plt.show()
Code example #31
File: image_test1.py Project: Miura55/python_study
# -*- coding: utf-8 -*-
import imtools
import pickle
from scipy.cluster.vq import *

if __name__ == '__main__':
    # get the list of images
    imlist = imtools.get_imlist('/usr/local/git_local/python_study/oreilly_computer_vision/data/PIXLEE-computer-vision-clustering/data/a_selected_thumbs')
    imnbr = len(imlist)
    print imnbr

    # load the model file
    with open('')
Code example #32
File: ch07_query_hom.py Project: EJHortala/books-2
import cPickle as pickle

import homography
import imtools
import sift
import imagesearch
"""After ch07_buildindex.py has built an index in test.db, this program
queries it, and fits a homography to improve query results.
"""

imlist = imtools.get_imlist('/Users/thakis/Downloads/ukbench/first1000')[:100]
imcount = len(imlist)
featlist = [imlist[i][:-3] + 'sift' for i in range(imcount)]

with open('vocabulary.pkl', 'rb') as f:
    voc = pickle.load(f)

searcher = imagesearch.Searcher('test.db', voc)

query_imid = 50
res_count = 20

res = [w[1] for w in searcher.query(imlist[query_imid])[:res_count]]
print 'regular results for query %d:' % query_imid, res

# Rerank by trying to fit a homography.
q_locs, q_descr = sift.read_features_from_file(featlist[query_imid])
fp = homography.make_homog(q_locs[:, :2].T)

model = homography.RansacModel()
Code example #33
File: ch1_1.py Project: rayjim/python_proj
subplot(223)
imshow(im3,cmap=cm.Greys_r)
subplot(224)
imshow(im4,cmap=cm.Greys_r)
figure(4)
im5,cdf = imtools.histeq(pil_img)
subplot(221)
imshow(pil_img,cmap=cm.Greys_r)
subplot(222)
imshow(im5,cmap=cm.Greys_r)
subplot(223)
plot(cdf)
###########################################################
close("all")
# pca
imlist = imtools.get_imlist('gwb_cropped')
im = array(Image.open(imlist[0]))
m,n = im.shape[0:2]
imnbr = len(imlist)

immatrix = array([array(Image.open(im)).flatten() for im in imlist],'f')
# perform PCA
V,S,immean= imtools.pca(immatrix)


figure()

subplot(2,4,1)
imshow(immean.reshape(m,n))
for i in range(7):
    subplot(2,4,i+2)
Code example #34
			value = myarray[x][k]
			if value == 0:
				row.append(0)
			else:
				row.append(value/value_max)
			
		new_array.append(row)
		row =[]
	new_array = asarray(new_array)

	return new_array



# get list of images
imlist = imtools.get_imlist('../img/img_test/') 
nbr_images = len(imlist)

con = sqlite.connect('test.db')

matchscores = []
row = []
a = 0

for i in range(nbr_images):
	filename1 = imlist[i]
	imid1 = con.execute("select rowid from imlist where filename='%s'" % filename1 ).fetchone()
	cand_h1 = con.execute("select histogram from imhistograms where rowid='%d'" % imid1).fetchone()
	cand_h1 = pickle.loads(str(cand_h1[0]))

	for j in range(nbr_images):
Code example #35
#!/usr/bin/python
# -*- coding: utf-8 -*-

import imtools
imlist = sorted(imtools.get_imlist('first1000'))
#imlist = imlist[:100]  # for small memory

import pickle
import vocabulary

nbr_images = len(imlist)
featlist = [ imlist[i][:-3]+'sift' for i in range(nbr_images) ]

voc = vocabulary.Vocabulary('ukbenchtest')
voc.train(featlist,1000,10)

# save the vocabulary
with open('vocabulary.pkl', 'wb') as f:
  pickle.dump(voc,f)
print 'vocabulary is:', voc.name, voc.nbr_words
Code example #36
File: 0602.py Project: ta-oyama/PCV
centroids, variance = kmeans(features,2)

code, distance = vq(features,centroids)

plt.figure()
ndx = np.where(code==0)[0]
plt.plot(features[ndx,0],features[ndx,1],'*')
ndx = np.where(code==1)[0]
plt.plot(features[ndx,0],features[ndx,1],'r.')
plt.plot(centroids[:,0],centroids[:,1],'go')
plt.axis('off')

#6.1.2
# get the list of images
imlist = imtools.get_imlist('selected_fontimages/')
imnbr = len(imlist)

# load the model file
with open('font_pca_modes.pkl','rb') as f:
    immean = pickle.load(f)
    V = pickle.load(f)
    
# create a matrix to store the flattened images
immatrix = np.array([np.array(Image.open(im)).flatten() 
                        for im in imlist],'f')                        
# project onto the first 40 principal components
immean = immean.flatten()
projected = np.array([np.dot(V[:40],immatrix[i]-immean)
                        for i in range(imnbr)])
Code example #37
File: pcatest.py Project: hvva/imageClassifiers
from PIL import Image
from numpy import *
from pylab import *
import pca, imtools

imlist = imtools.get_imlist('corpus')

im = array(Image.open(imlist[0]))
m,n = im.shape[0:2]
imnbr = len(imlist)

tmpArr = []
for i in range(imnbr):
  tmpArr.append(array(Image.open(imlist[i])).flatten())

immatrix = array(tmpArr, 'f')

V,S,immean = pca.pca(immatrix)


Code example #38
from PIL import Image
import os
from imtools import get_imlist

pil_im = Image.open('empire.jpg')
pil_im_contr = Image.open('empire.jpg').convert('L')

pil_im.show()
pil_im_contr.show()
#Save usage with jpg
pil_im_contr.save("empire_grayscale.jpg", "jpeg")

#Copy image
pil_im_cp = pil_im.copy()

#Thumbnail usage
pil_im_contr.thumbnail((128, 128))
pil_im_contr.show()

#List all jpg images in directory
l_images = get_imlist(os.getcwd())

#Crop the image
rect = (100, 100, 400, 400)
rect_region = pil_im.crop(rect)
rect_region.show()

#rotate image, paste it
rect_region = rect_region.transpose(Image.ROTATE_180)
rect_region.show()
pil_im.paste(rect_region, rect)
pil_im.show()

#Resize and rotate
pil_im_resized = pil_im_cp.resize((256, 256))
pil_im_resized.show("Resized")
Code example #39
# coding: utf-8

import imtools

raw_name_list = imtools.get_imlist("../animals/") # build the list of source image names
#processed_name_list = ["../processed_pictures/" + raw_name for raw_name in range(193)] # build the list of processed image names
processed_name_list = []
for i in range(len(raw_name_list)):
    processed_name_list.append("../animals/" + str(i) + ".jpg")

print processed_name_list

for raw_picture_name, processed_picture_name in zip(raw_name_list, processed_name_list):
    print 'a'
    imtools.resize(raw_picture_name, processed_picture_name, 500, 500) # resize the image
Code example #40
File: BackProject.py Project: shimaXX/workspace
            vis_roi = vis[y0:y1, x0:x1]
            cv2.bitwise_not(vis_roi, vis_roi) # invert the colors
            vis[mask == 0] = 0

            prob = cv2.calcBackProject([hsv], [0], self.hist, [0, 180], 1)
            prob &= mask
            term_crit = ( cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1 )
            #track_box, self.track_window = cv2.CamShift(prob, self.track_window, term_crit)
           
            if self.show_backproj:
                vis[:] = prob[...,np.newaxis]
            """
            try: cv2.ellipse(vis, track_box, (0, 0, 255), 2)
            except: print track_box
            """
               
            cv2.imshow('camshift', vis)

            ch = cv2.waitKey(5)
            #cv2.destroyAllWindows()
            if ch == 27:
                break
            if ch == ord('b'):
                self.show_backproj = not self.show_backproj


if __name__ == '__main__':
    import sys
    import imtools
    imlist = imtools.get_imlist('nail_book03')
    App(imlist).run()
Code example #41
import imtools
import matplotlib.pyplot as plt
import matplotlib.image as mpimg

filelist = imtools.get_imlist('../stp_log/image/', 'jpg')
for fname in filelist:
    img=mpimg.imread(fname) #image to array
    plt.imshow(img) #array to 2Dfigure

    plt.show()
Code example #42
# -*- coding: utf-8 -*-
import imagesearch
import imtools

imlist = sorted(imtools.get_imlist('nail_book03'))
from svm import *
from svmutil import *
import os
import cv2
import cv2.cv as cv
import sqlite3 as sqlite
import pickle
import numpy as np
import csv
import random
import math
from scipy.stats import norm
from copy import deepcopy

execfile('loaddata.py')
path = 'nail_book03'
train_csvfile = 'train_labels04.csv'
test_csvfile = 'test_labels04.csv'
csvfile = 'train_labels.csv'
HSV_fname = 'fHSV_hist.txt'
hsv_flag_fname = 'hsv_flag.csv'

src = imagesearch.Searcher('nail_image500.db', voc)


def main_boost():
Code example #43
import hcluster
import imtools
from PIL import Image
from pylab import *

imlist = imtools.get_imlist('/Users/thakis/Downloads/data/flickr-sunsets-small')

# extract histogram as feature vector (8 bins per color channel)
features = zeros([len(imlist), 512])
for i, f in enumerate(imlist):
  im = array(Image.open(f))

  h, edges = histogramdd(im.reshape(-1, 3), 8, normed=True,
                         range=[(0,255), (0, 255), (0, 255)])
  features[i] = h.flatten()

tree = hcluster.hcluster(features)
hcluster.draw_dendrogram(tree, imlist, filename='out_sunset.png')

# visualize clusters
clusters = tree.extract_clusters(dist=0.23 * tree.distance)
for c in clusters:
  elements = c.get_cluster_elements()
  if len(elements) > 3:
    figure()
    for p in range(minimum(len(elements), 20)):
      subplot(4, 5, p + 1)
      im = array(Image.open(imlist[elements[p]]))
      imshow(im)
      axis('off')
show()
Code example #44
from PIL import Image
# from PCV.localdescriptors import sift
# from PCV.tools import imtools
import sift
import imtools
import pydot
import os
""" This is the example graph illustration of matching images from Figure 2-10.
To download the images, see ch2_download_panoramio.py."""

parent_path = os.path.abspath(
    os.path.join(os.path.dirname("__file__"), os.path.pardir))
download_path = parent_path + '/data/panoimages/'  # set this to the path where you downloaded the panoramio images
path = parent_path + '/data/panoimages/thumbnails/'  # path to save thumbnails (pydot needs the full system path)
# list of downloaded filenames
imlist = imtools.get_imlist(download_path)
nbr_images = len(imlist)

# extract features
featlist = [imname[:-3] + 'sift' for imname in imlist]

matchscores = zeros((nbr_images, nbr_images))
for i in range(nbr_images):
    for j in range(i, nbr_images):  # only compute upper triangle
        # print('comparing ', imlist[i],imlist[j])
        # process and save features to file
        sift.process_image(imlist[i], featlist[i])
        l1, d1 = sift.read_features_from_file(featlist[i])
        sift.process_image(imlist[j], featlist[j])
        l2, d2 = sift.read_features_from_file(featlist[j])
        matches = sift.match_twosided(d1, d2)
Code example #45
File: ch07_query_hom.py Project: geosky/cvbook
import cPickle as pickle

import homography
import imtools
import sift
import imagesearch

"""After ch07_buildindex.py has built an index in test.db, this program
queries it, and fits a homography to improve query results.
"""

imlist = imtools.get_imlist("/Users/thakis/Downloads/ukbench/first1000")[:100]
imcount = len(imlist)
featlist = [imlist[i][:-3] + "sift" for i in range(imcount)]

with open("vocabulary.pkl", "rb") as f:
    voc = pickle.load(f)

searcher = imagesearch.Searcher("test.db", voc)

query_imid = 50
res_count = 20

res = [w[1] for w in searcher.query(imlist[query_imid])[:res_count]]
print "regular results for query %d:" % query_imid, res

# Rerank by trying to fit a homography.
q_locs, q_descr = sift.read_features_from_file(featlist[query_imid])
fp = homography.make_homog(q_locs[:, :2].T)

model = homography.RansacModel()
Code example #46
import imtools
import pca
from PIL import Image
from pylab import *
from scipy.cluster.vq import *

# PCA on all images.
imlist = imtools.get_imlist('/Users/thakis/Downloads/data/a_thumbs')
imcount = len(imlist)
immatrix = array([array(Image.open(im)).flatten() for im in imlist], 'f')
V, S, immean = pca.pca(immatrix)

# Visualize only selected images.
imlist = imtools.get_imlist('/Users/thakis/Downloads/data/a_selected_thumbs')
imcount = len(imlist)
immatrix = array([array(Image.open(im)).flatten() for im in imlist], 'f')

# Project on 40 first PCs.
projected = array([dot(V[:40], immatrix[i] - immean) for i in range(imcount)])

# spectral clustering
n = len(projected)

# compute distance matrix
S = array([[sqrt(sum((projected[i] - projected[j])**2)) for i in range(n)]
           for j in range(n)], 'f')

# create Laplacian matrix
# (See "A Comparison of Spectral Clustering Algorithms", NJW algorithm
rowsum = sum(S, axis=0)
D = diag(1 / sqrt(rowsum))
Code example #47
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 02 19:26:16 2016

@author: user
"""
import sys

sys.path.append('../ch1/')
import imtools
from numpy import *
from PIL import Image
import pickle
from scipy.cluster.vq import *

imlist = imtools.get_imlist(
    '../pcv_data/data/selectedfontimages/a_selected_thumbs/')
imnbr = len(imlist)
# load model file
with open('a_pca_modes.pkl', 'rb') as f:
    immean = pickle.load(f)
    V = pickle.load(f)
# create matrix to store all flattened images
immatrix = array([array(Image.open(im)).flatten() for im in imlist], 'f')
# project on the 40 first PCs
immean = immean.flatten()
projected = array([dot(V[[0, 1]], immatrix[i] - immean) for i in range(imnbr)])

n = len(projected)

# compute distance matrix
S = array([[sqrt(sum((projected[i] - projected[j])**2)) for i in range(n)]
           for j in range(n)], 'f')
Code example #48
import imtools
import pickle
from PIL import Image
from scipy.cluster.vq import *
from pylab import *

# get the image filenames under the selected-fontimages folder and store them in a list
imlist = imtools.get_imlist('../data/fontimages/a_thumbs/')
imnbr = len(imlist)

# load the model file
# with open('a_pca_modes.pkl','rb') as f:
with open('../data/fontimages/font_pca_modes.pkl', 'rb') as f:
    immean = pickle.load(f)
    V = pickle.load(f)

# create a matrix to store all flattened images
immatrix = array([array(Image.open(im)).flatten() for im in imlist], 'f')

# project onto the first 40 principal components
immean = immean.flatten()
projected = array([dot(V[:40], immatrix[i] - immean) for i in range(imnbr)])

# perform k-means clustering
projected = whiten(projected)
centroids, distortion = kmeans(projected, 4)

code, distance = vq(projected, centroids)

# plot the clusters
for k in range(4):
Code example #49
File: kmeans.py Project: xiebinlin/image-process
from scipy.cluster.vq import *
import numpy as np
from pylab import *
from PIL import Image
import imtools,pca
import pickle

# get the image filenames under the selected-fontimages folder and store them in a list
imlist = imtools.get_imlist('C:\\Users\\msi\\Desktop\\selectedfontimages\\a_selected_thumbs\\')

imnbr = len(imlist)
# load the model file

# create a matrix to store all flattened images
immatrix = np.array([np.array(Image.open(im)).flatten() for im in imlist],'f')
V, S, immean = pca.pca(immatrix)

# project onto the first two principal components
projected = np.array([np.dot(V[[0,1]],immatrix[i]-immean) for i in range(imnbr)])

# projected = whiten(projected)
# centroids,distortion = kmeans(projected,2)
# code,distance = vq(projected,centroids)

# for k in range(2):
    # ind = where(code==k)[0]
    # figure()
    # gray()
    # for i in range(minimum(len(ind),40)):
        # subplot(4,10,i+1)
        # imshow(immatrix[ind[i]].reshape((25,25)))
Code example #50
import sys
import pickle

import numpy as np
from PIL import Image
from skimage.color import rgb2gray
from skimage.feature import daisy
from sklearn.cluster import KMeans
from sklearn.cluster import MiniBatchKMeans

import imtools


class vocabulary:
    def __init__(self, img_type, feature, vocab_size=800):
        self.img_type = img_type
        self.vocab_size = vocab_size
        self.feature = feature


if __name__ == '__main__':
    path = sys.argv[1]
    vocab_size = 800
    init_size = vocab_size * 3
    im_list = imtools.get_imlist(path)

    all_features = []
    for idx, image_name in enumerate(im_list):
        gray_img = rgb2gray(np.array(Image.open(image_name)))
        features = daisy(gray_img, step=8)
        all_features.append(features.reshape(-1, 200))

    X = np.vstack(all_features)

    km = MiniBatchKMeans(n_clusters=vocab_size, max_iter=200, verbose=1)
    km.fit(X)

    f = open(sys.argv[1] + '_vocab.pickle', 'w')
    pickle.dump(km, f)
    f.close()
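A short sketch of how the pickled MiniBatchKMeans vocabulary could later be used to quantize one image's DAISY descriptors into a bag-of-words histogram; the function name and flow here are illustrative assumptions, not part of the original project:

def image_histogram(image_name, km, vocab_size=800):
    # extract DAISY descriptors and assign each to its nearest visual word
    gray_img = rgb2gray(np.array(Image.open(image_name)))
    descriptors = daisy(gray_img, step=8).reshape(-1, 200)
    words = km.predict(descriptors)
    # count word occurrences to form the bag-of-words histogram
    hist, _ = np.histogram(words, bins=np.arange(vocab_size + 1))
    return hist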
Code example #51
File: grayscale.py Project: briceran/vision
from PIL import Image
import os
import imtools

im_paths = imtools.get_imlist(os.getcwd())

im1 = Image.open(im_paths[0])
im2 = Image.open(im_paths[1])

#grayscale

im1_gs = im1.convert('L')
im2_gs = im2.convert('L')

for infile in im_paths:
    outfile = os.path.splitext(infile)[0] + ".jpg"
    if infile != outfile:
        try:
            Image.open(infile).save(outfile)
        except IOError:
            print("can't convert", infile)
Code example #52
# coding: utf-8

import imtools

raw_name_list = imtools.get_imlist("../raw_pictures/") # build the list of source image names
processed_name_list = ["../processed_pictures/" + raw_name.split('/')[2] for raw_name in raw_name_list] # build the list of processed image names

for raw_picture_name, processed_picture_name in zip(raw_name_list, processed_name_list):
    imtools.resize(raw_picture_name, processed_picture_name, 50, 50) # resize the image
Code example #53
    return num_flag


def draw_image(filename, wrfname):
    image_res = 200 * 200
    imagebit = read_target(filename, "camcap:camcap_1|data_in_R[7..0]")
    imagebit = hex_dec(imagebit)
    trigger = read_target(filename, "camcap:camcap_1|trig_0_R")
    start_flag = detect_trigger(trigger)
    if len(start_flag) == 0:
        print("can't find flag\nexit.........")
        exit(1)

    frame = imagebit[start_flag[0] + 1:start_flag[0] + image_res + 1]
    frame = np.array(frame)
    frame = frame.reshape((200, 200))
    print(frame)
    pilout = Image.fromarray(np.uint8(frame))
    pilout.save(wrfname)


# def cap_imageSequences(filename, wrfname):

if __name__ == '__main__':
    filelist = imtools.get_imlist('../stp_log/', 'csv')
    dir_path, fname = os.path.split(filelist[-1])
    name, ext = os.path.splitext(fname)
    wrfname = dir_path + "/image/" + name + ".jpg"
    print(wrfname)
    draw_image(filelist[-1], wrfname)
Code example #54
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 31 16:19:28 2016

@author: user
"""

from PIL import Image
from pylab import *
from numpy import *
import sys
sys.path.append('../ch1/')
import imtools, pca

# Get list of images and their size
imlist = imtools.get_imlist(
    '../pcv_data/data/selectedfontimages/a_selected_thumbs/'
)  # fontimages.zip is part of the book data set
im = array(Image.open(imlist[0]))  # open one image to get the size
m, n = im.shape[:2]

# Create matrix to store all flattened images
immatrix = array([array(Image.open(imname)).flatten() for imname in imlist],
                 'f')

# Perform PCA
V, S, immean = pca.pca(immatrix)

# Show the images (mean and 7 first modes)
# This gives figure 1-8 (p15) in the book.
figure()
gray()
Code example #55
def main():
    imlist = it.get_imlist(PATH)
    for img_path in imlist:
        original = preparing_img(img_path)
        detecting(original)
Code example #56
#!/usr/bin/python
# -*- coding: utf-8 -*-

from pylab import *
import imtools

for dir in ('jkfaces2008_small', 'jkfaces2008_small/aligned'):
  imlist = imtools.get_imlist(dir)
  avgimg = imtools.compute_average(sorted(imlist)[:150])
  figure()
  imshow(avgimg)
  gray()
  axis('off')
  title(dir)

show()
Code example #57
import pickle 
import vocabulary
import imtools
import sift


# get list of images
imlist = imtools.get_imlist('../img/sunsets/treino/') 
nbr_images = len(imlist)
featlist = [ imlist[i][:-3]+'sift' for i in range(nbr_images) ]

#create vocabulary
voc = vocabulary.Vocabulary('test')
voc.train(featlist,1000,10)

# saving vocabulary
with open('vocabulary.pkl', 'wb') as f:
  pickle.dump(voc,f)
print 'vocabulary is:', voc.name, voc.nbr_words
Code example #58
File: 0515.py Project: ta-oyama/PCV
import warp
#reload(sift)
reload(homography)
reload(imtools)

"""前回のプログラム"""
#3.2.3 
xmlFileName = 'jkfaces2008_small/jkfaces.xml'
points = imregistration.read_points_from_xml(xmlFileName)

# perform the alignment
imregistration.rigid_alignment(points,'jkfaces2008_small/')
"""   """

# compare the average images
imlist = imtools.get_imlist('jkfaces2008_small/')
avgim = imtools.compute_average(imlist)
plt.subplot(1,2,1)
plt.imshow(avgim)
plt.axis('off')
imlist = imtools.get_imlist('jkfaces2008_small/aligned/')
avgim_aligned = imtools.compute_average(imlist)
plt.subplot(1,2,2)
plt.imshow(avgim_aligned)
plt.axis('off')

# create the mask
imlist = imtools.get_imlist('jkfaces2008_small/')
im = np.array(Image.open(imlist[0]))
m,n = im.shape[0:2]
    #im.shape[0:1] gives (400L,), which stops at the comma, so [0:2] is used instead