예제 #1
0
def generate_X_y(non_lens_glob, lens_glob):
    """Build image features and binary labels from two glob patterns.

    Files matched by ``non_lens_glob`` are labelled 0, files matched by
    ``lens_glob`` are labelled 1. Returns (X, y, filenames) with the
    label list parallel to ``filenames``.
    """
    negatives = glob.glob(non_lens_glob)
    positives = glob.glob(lens_glob)

    # Negatives come first; the label list below mirrors this ordering.
    filenames = negatives + positives
    y = [0] * len(negatives) + [1] * len(positives)
    X = image_processing.load_images(filenames)

    return X, y, filenames
예제 #2
0
def generate_X_y(non_lens_glob, lens_glob):
    """Collect filenames from both glob patterns and load them as images.

    Returns (X, y, filenames): loaded image data, labels (0 for files
    from ``non_lens_glob``, 1 for files from ``lens_glob``), and the
    matched file names in the same order.
    """
    filenames = []
    y = []
    # Expand each pattern and tag its matches with the class label.
    for pattern, label in ((non_lens_glob, 0), (lens_glob, 1)):
        matched = glob.glob(pattern)
        filenames.extend(matched)
        y.extend([label] * len(matched))

    X = image_processing.load_images(filenames)

    return X, y, filenames
예제 #3
0
def handle_query(directory, palette, canvas):
    """Rank the images in *directory* by how well they match *canvas*.

    Parameters
        directory: folder containing images
        palette: maps color ids to functions
        canvas: list of color ids

    Return value
        list of strings (file names)
    """
    from image_processing import load_images, sortImagesByMatchRevised

    # Fixed grid dimension used when comparing image regions —
    # presumably a 4x4 split; confirm against sortImagesByMatchRevised.
    split_dim = 4

    loaded = load_images(directory)
    ranked = sortImagesByMatchRevised(loaded, canvas, split_dim, palette)

    return ranked
예제 #4
0

# NOTE(review): this snippet is Python 2 (`print` statements) and is
# truncated mid-expression at the end — it is a scraped fragment, not
# a runnable script as shown.
if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser()
    # Shell-style glob patterns selecting the two image sets.
    parser.add_argument('non_lens_glob')
    parser.add_argument('lens_glob')

    args = vars(parser.parse_args())

    # Load the data. X is a list of numpy arrays
    # which are the images.
    non_lens_filenames = glob.glob(args['non_lens_glob'])
    lens_filenames = glob.glob(args['lens_glob'])
    filenames = non_lens_filenames + lens_filenames
    X = image_processing.load_images(filenames)
    # Labels parallel `filenames`: 0 = non-lens, 1 = lens.
    y = [0] * len(non_lens_filenames) + [1] * len(lens_filenames)

    # Train/test split (80% train; split is random unless the caller
    # seeds it elsewhere — not visible here).
    X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.8)
    print "len(X_train) =", len(X_train)
    print "len(y_train) =", len(y_train)
    print "len(X_test) =", len(X_test)
    print "len(y_test) =", len(y_test)
    print

    # Create the pipeline which consists of image
    # processing and a classifier
    image_processors = [('median_smooth', image_processing.MedianSmooth(5)),
                        ('hog',
                         image_processing.HOG(orientations=8,
예제 #5
0

# NOTE(review): Python 2 snippet (`print` statements), truncated
# mid-call at the end; near-duplicate of the previous example.
if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser()
    # Shell-style glob patterns selecting the two image sets.
    parser.add_argument('non_lens_glob')
    parser.add_argument('lens_glob')

    args = vars(parser.parse_args())
    
    # Load the data. X is a list of numpy arrays
    # which are the images.
    non_lens_filenames = glob.glob(args['non_lens_glob'])
    lens_filenames = glob.glob(args['lens_glob'])
    filenames = non_lens_filenames + lens_filenames
    X = image_processing.load_images(filenames)
    # Labels parallel `filenames`: 0 = non-lens, 1 = lens.
    y = [0] * len(non_lens_filenames) + [1] * len(lens_filenames)

    # Train/test split (80% train).
    X_train, X_test, y_train, y_test = train_test_split(X, y, train_size = 0.8)
    print "len(X_train) =", len(X_train)
    print "len(y_train) =", len(y_train)
    print "len(X_test) =", len(X_test)
    print "len(y_test) =", len(y_test)
    print

    # Create the pipeline which consists of image
    # processing and a classifier
    image_processors = [('median_smooth', image_processing.MedianSmooth(5)),
                        ('hog', image_processing.HOG(orientations = 8,
                                                     pixels_per_cell = (16, 16),
예제 #6
0
# Script: extract visual features for COCO images with a pretrained
# VGG19 network. (The batch loop that fills V continues past this
# fragment.)
import time

import numpy as np
from keras.applications import vgg19
from keras.optimizers import SGD
from pycocotools.coco import COCO

from image_processing import load_images, categories, ann_file
from vgg import compute_nn_features
from text_processing import create_caption_dataframe
from word2vec import compute_textual_features

# COCO annotation index built from the project-configured annotation file.
coco = COCO(ann_file)

# Load all images for the selected categories and cache them to disk.
X_visual, _, visual_img_ids = load_images(categories, coco=coco)
np.save('X_visual.npy', X_visual)

# Reload from the cache and keep only the first half of the samples —
# presumably to limit memory use; TODO confirm intent.
X_visual = np.load('X_visual.npy')
X_visual = X_visual[:X_visual.shape[0] // 2]

# Pretrained VGG19 compiled with SGD. NOTE(review): `lr=` is the
# legacy Keras optimizer argument name (newer Keras uses
# `learning_rate=`) — this pins the snippet to an older Keras.
net = vgg19.VGG19()
sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
net.compile(optimizer=sgd, loss='categorical_crossentropy')

from tqdm import tqdm

# One 4096-dimensional feature vector per image; filled in batches by
# the (truncated) loop that follows. NOTE(review): 4096 presumably
# matches a VGG19 fully-connected layer width — confirm.
V = np.zeros((X_visual.shape[0], 4096))
for i in tqdm(range(X_visual.shape[0] // 10 + 1)):
    start_index = (i) * 10
    end_index = (i + 1) * 10