Example #1
def getPhowFeatures(imagedata, phowOpts):
    im = standardizeImage(imagedata)
    frames, descrs = vl_phow(im,
                             verbose=phowOpts.Verbose,
                             sizes=phowOpts.Sizes,
                             step=phowOpts.Step)
    return frames, descrs
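A minimal driver sketch for the function above, assuming the local vl_phow module and the standardizeImage helper used by these examples are importable; the SimplePhowOpts class and its default size/step values are assumptions, not part of the original code:

import numpy as np

class SimplePhowOpts:
    # Hypothetical options holder; it only carries the three attributes
    # getPhowFeatures actually reads (Verbose, Sizes, Step).
    def __init__(self, verbose=False, sizes=(4, 6, 8, 10), step=3):
        self.Verbose = verbose
        self.Sizes = sizes
        self.Step = step

image = np.random.rand(480, 640).astype(np.float32)  # stand-in for a real image
frames, descrs = getPhowFeatures(image, SimplePhowOpts(step=5))
print(frames.shape, descrs.shape)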
Example #2
def getPhowFeatures(imagedata, phowOpts):
    im = standarizeImage(imagedata)
    frames, descrs = vl_phow(im,
                             verbose=phowOpts.Verbose,
                             sizes=phowOpts.Sizes,
                             step=phowOpts.Step)
    return frames, descrs
Example #3
def getImageDescriptor(model, im):
    import numpy as np
    from vl_phow import vl_phow
    im = standarizeImage(im, resize=200)
    width = im.shape[1]
    height = im.shape[0]
    numWords = model.vocab.shape[1]
    model.phowOpts['verbose'] = False

    # get PHOW features
    frames, descrs = vl_phow(im, **model.phowOpts)

    # quantize local descriptors into visual words
    if model.quantizer == 'kdtree':
        # index of the nearest visual word for each descriptor
        binsa = model.kdtree.query(descrs.T.astype(np.float32))[1]
    else:
        raise ValueError('unknown quantizer: {}'.format(model.quantizer))

    hists = []
    for i in range(len(model.numSpatialX)):
        binsx = np.digitize(
            frames[1], np.linspace(0, width, model.numSpatialX[i] + 1)) - 1
        binsy = np.digitize(
            frames[0], np.linspace(0, height, model.numSpatialY[i] + 1)) - 1

        # combined quantization
        bins = np.ravel_multi_index(
            (binsy, binsx, binsa),
            [model.numSpatialY[i], model.numSpatialX[i], numWords])
        hist = np.bincount(
            bins,
            minlength=model.numSpatialY[i] * model.numSpatialX[i] * numWords
        ).astype(np.float32)
        hists.append(hist / np.sum(hist))
    hist = np.concatenate(hists, axis=0)
    hist = hist / np.sum(hist)
    return hist
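To make the combined quantization above concrete, here is a small self-contained illustration (synthetic numbers, not from the original code) of how np.ravel_multi_index fuses a spatial cell and a visual word into a single bin index, and how a fixed-length histogram is built from those indices:

import numpy as np

# Illustrative only: 2x2 spatial grid, 5 visual words, 4 descriptors.
numSpatialY, numSpatialX, numWords = 2, 2, 5
binsy = np.array([0, 0, 1, 1])   # spatial row of each descriptor
binsx = np.array([0, 1, 0, 1])   # spatial column of each descriptor
binsa = np.array([3, 1, 4, 4])   # visual word of each descriptor

# Fuse (row, column, word) into one flat index per descriptor.
bins = np.ravel_multi_index((binsy, binsx, binsa),
                            (numSpatialY, numSpatialX, numWords))
# Fixed-length histogram, independent of how many descriptors fall in each cell.
hist = np.bincount(bins, minlength=numSpatialY * numSpatialX * numWords).astype(np.float32)
hist /= hist.sum()
print(hist.reshape(numSpatialY, numSpatialX, numWords))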
Example #4
def getPhowFeatures(imagedata, phowOpts):  # extracts features from an image
    im = standardizeImage(imagedata)  # scale image to 640x480
    frames, descrs = vl_phow(im,
                             verbose=phowOpts.Verbose,
                             sizes=phowOpts.Sizes,
                             step=phowOpts.Step)
    return frames, descrs
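None of these excerpts define the standardizeImage / standarizeImage helper they call; a minimal sketch of what it could look like, based only on the "640x480" comment above and using scikit-image as an assumed dependency:

import numpy as np
from skimage.transform import resize
from skimage.util import img_as_float32

def standardizeImage(imagedata):
    # Hypothetical sketch: convert to float32 in [0, 1] and shrink images taller
    # than 480 pixels, preserving aspect ratio (roughly 640x480 for 4:3 input).
    im = img_as_float32(imagedata)
    if im.shape[0] > 480:
        new_width = int(round(im.shape[1] * 480.0 / im.shape[0]))
        im = resize(im, (480, new_width), anti_aliasing=True).astype(np.float32)
    return im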
Example #5
#!/usr/bin/python
import numpy as np
from scipy.io import loadmat
from vl_phow import vl_phow

# Run PHOW on the Lena test image and dump frames and descriptors as text,
# e.g. for comparison against another implementation's output.
I = loadmat('lena.mat')['lena_gray']
I = np.asfortranarray(I, dtype=np.float32)

f, d = vl_phow(I, step=50, floatDescriptors=True, verbose=True)
np.savetxt('phow_f_python.txt', f.T, fmt='%.2f')
np.savetxt('phow_d_python.txt', d.T, fmt='%.2f')
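The "_python" suffix suggests these dumps are meant to be checked against another implementation's output; a minimal sketch of such a check, where the phow_f_matlab.txt / phow_d_matlab.txt file names are hypothetical:

import numpy as np

# Hypothetical comparison step; the *_matlab.txt file names are assumptions.
f_py = np.loadtxt('phow_f_python.txt')
d_py = np.loadtxt('phow_d_python.txt')
f_ref = np.loadtxt('phow_f_matlab.txt')
d_ref = np.loadtxt('phow_d_matlab.txt')

print('frames match:', np.allclose(f_py, f_ref, atol=1e-2))
print('descriptors match:', np.allclose(d_py, d_ref, atol=1e-2))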
Example #6
    model.b = []
    model.classify = classify

    # --------------------------------------------------------------------
    #                     Train vocabulary
    # --------------------------------------------------------------------

    if not os.path.isfile(conf.vocabPath):

        # Get some PHOW descriptors to train the dictionary
        selTrainFeats = random_subset(selTrain, 30)
        descrs = []
        for ii in range(len(selTrainFeats)):
            im = imageio.imread(images[selTrainFeats[ii]])
            im = standarizeImage(im)
            descrs.append(vl_phow(im, **model.phowOpts)[1])

        descrs = np.concatenate(descrs, axis=1).T
        descrs = random_subset(descrs.tolist(), int(40e4))
        descrs = np.array(descrs).T.astype(np.float32)

        # Quantize the descriptors to get the visual words
        # (.copy(order='C') is needed because the Cython kmeans expects C-contiguous data)
        vocab = kmeans(descrs.T.copy(order='C'),
                       conf.numWords,
                       verbose=True,
                       algorithm='ELKAN',
                       max_num_iterations=100)
        vocab = vocab.T
        save(conf.vocabPath, vocab)
    else: