Example #1
def load_image(path):
    '''Load an image and preprocess it into a (1, 224, 224, 3) batch.'''
    img = image.load_img(path, target_size=(224, 224))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)
    return x
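Since load_image already returns a batched, preprocessed array, its output can be fed straight to a pretrained network. A minimal usage sketch, assuming preprocess_input above comes from keras.applications.resnet50 (the snippet does not show its imports) and 'elephant.jpg' is a placeholder path:

from keras.applications.resnet50 import ResNet50, preprocess_input, decode_predictions
from keras.preprocessing import image
import numpy as np

model = ResNet50(weights='imagenet')
batch = load_image('elephant.jpg')          # (1, 224, 224, 3), already preprocessed
preds = model.predict(batch)
print(decode_predictions(preds, top=3)[0])  # top-3 ImageNet labels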
Example #2
def Standard(path, size):
    img = image.load_img(path, target_size=(size, size))
    x = image.img_to_array(img)
    # scale pixel values from [0, 255] to [-1, 1]
    x /= 255.0
    x -= 0.5
    x *= 2.0
    return x
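Standard maps 8-bit pixel values into [-1, 1], the same convention as Keras' 'tf'-mode preprocess_input. A quick check of the endpoints:

# 0 maps to -1.0, 127.5 to 0.0 and 255 to 1.0 under the scale/shift above
for p in (0.0, 127.5, 255.0):
    print((p / 255.0 - 0.5) * 2.0)   # -1.0, 0.0, 1.0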
Example #3
def Standard(path, size):
    img = image.load_img(path, target_size=(size, size))
    x = image.img_to_array(img)
    x /= 255.0
    x -= 0.5
    x *= 2.0
    return x
Example #4
def Normalize(path, size=224, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]):
    img = image.load_img(path, target_size=(size, size))
    x = image.img_to_array(img)
    # scale to [0, 1], then apply the per-channel ImageNet mean/std normalization
    x /= 255.0
    for i in range(0, 3):
        x[..., i] -= mean[i]
        x[..., i] /= std[i]
    return x
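Normalize returns a single (size, size, 3) array, so a batch dimension still has to be added before prediction. A minimal sketch; 'sample.jpg' and model are placeholders for any network trained with this torchvision-style mean/std preprocessing:

import numpy as np

x = Normalize('sample.jpg')        # (224, 224, 3), scaled to [0, 1] then mean/std normalized
x = np.expand_dims(x, axis=0)      # (1, 224, 224, 3) batch
# preds = model.predict(x)         # 'model' is a placeholder, not defined in this example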
Example #5
def read_and_prepare_images(img_paths, img_height=img_size, img_width=img_size):
    # load each image with load_img
    imgs = [load_img(img_path, target_size=(img_height, img_width)) for img_path in img_paths]
    # convert each image to a 3-D array with img_to_array and stack them into one batch
    img_array = np.array([img_to_array(img) for img in imgs])
    # preprocess_input rescales the pixel values so they lie between -1 and 1
    output = preprocess_input(img_array)
    return output
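A usage sketch for read_and_prepare_images; img_size is taken from the enclosing module in the original, and the file paths here are placeholders:

img_size = 224                                  # assumed value for the module-level default
paths = ['images/cat.jpg', 'images/dog.jpg']    # placeholder paths
batch = read_and_prepare_images(paths)
print(batch.shape)                              # (2, 224, 224, 3), ready for model.predict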
Example #6
def Normalize(path, size=224, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]):
    img = image.load_img(path, target_size=(size, size))
    x = image.img_to_array(img)
    x /= 255.0
    for i in range(0, 3):
        x[..., i] -= mean[i]
        x[..., i] /= std[i]
    return x
Example #7
def ZeroCenter(path, size, BGRTranspose=False):
    img = image.load_img(path, target_size=(size, size))
    x = image.img_to_array(img)
    if BGRTranspose:
        x = x[..., ::-1]
    # subtract the ImageNet per-channel means (constants listed in B, G, R order)
    x[..., 0] -= 103.939
    x[..., 1] -= 116.779
    x[..., 2] -= 123.68
    return x
Example #8
def ZeroCenter(path, size, BGRTranspose=False):
    img = image.load_img(path, target_size=(size, size))
    x = image.img_to_array(img)
    if BGRTranspose:
        x = x[..., ::-1]
    x[..., 0] -= 103.939
    x[..., 1] -= 116.779
    x[..., 2] -= 123.68
    return x
Example #9
def Standard(path, size, BGRTranspose=False):
    img = image.load_img(path, target_size=(size, size))
    x = image.img_to_array(img)
    x /= 255.0
    x -= 0.5
    x *= 2.0
    if BGRTranspose:
        x = x[..., ::-1]
    return x
Example #10
def Normalize(path, size=224, scale=0.00392156863, mean=[-0.485, -0.456, -0.406], std=[0.229, 0.224, 0.225], BGRTranspose=False):
    img = image.load_img(path, target_size=(size, size))
    x = image.img_to_array(img)
    # scale is 1/255, and the means are stored negated so they can be added
    x *= scale
    for i in range(0, 3):
        x[..., i] += mean[i]
        x[..., i] /= std[i]
    if BGRTranspose:
        x = x[..., ::-1]
    return x
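Because scale is 1/255 and the means are stored negated, x * scale + mean[i] is just another way of writing x / 255 - 0.485 (and likewise for the other channels), so this variant agrees with Example #4. A quick check of that equivalence on one pixel value:

pixel = 128.0
a = pixel * 0.00392156863 - 0.485      # form used in this example (scale, then add the negated mean)
b = pixel / 255.0 - 0.485              # form used in Example #4
print(abs(a - b) < 1e-6)               # True: the two normalizations agree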
Example #11
def preprocess(img_path):
    # Preprocessing
    # resize while loading (replaces the removed scipy.misc.imresize call);
    # img_h and img_w are the TensorFlow-backend target dimensions
    img = load_img(img_path, target_size=(img_h, img_w))
    img = img_to_array(img)
    img = img.astype('float64')
    # Batch dimension
    img = np.expand_dims(img, axis=0)
    img = vgg19.preprocess_input(img)
    return img
Example #12
def preprocess(img_path):
    # Preprocessing to make the style transfer possible
    # resize while loading (replaces the removed scipy.misc.imresize call);
    # img_h and img_w are the dimensions expected by the TensorFlow backend
    img = load_img(img_path, target_size=(img_h, img_w))
    img = img_to_array(img)
    img = img.astype('float64')
    # Add the batch dimension
    img = np.expand_dims(img, axis=0)
    img = vgg19.preprocess_input(img)
    return img
Example #13
def ZeroCenter(path, size, BGRTranspose=False):
    img = image.load_img(path, target_size=(size, size))
    x = image.img_to_array(img)

    # Reference: 1) Keras image preprocess: https://github.com/keras-team/keras/blob/master/keras/applications/imagenet_utils.py
    #            2) tensorflow github issue: https://github.com/tensorflow/models/issues/517
    # R-G-B for Imagenet === [123.68, 116.78, 103.94]

    x[..., 0] -= 123.68
    x[..., 1] -= 116.779
    x[..., 2] -= 103.939

    if BGRTranspose:
        x = x[..., ::-1]

    return x
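The constants above are the per-channel ImageNet means quoted in the reference comment, so with BGRTranspose=True the result should match Keras' own 'caffe'-style preprocessing. A minimal cross-check sketch, assuming keras.applications.vgg16 and keras.preprocessing.image are importable and 'sample.jpg' is a placeholder path:

import numpy as np
from keras.preprocessing import image
from keras.applications.vgg16 import preprocess_input

x = ZeroCenter('sample.jpg', 224, BGRTranspose=True)     # subtract RGB means, then flip to BGR
img = image.load_img('sample.jpg', target_size=(224, 224))
y = preprocess_input(image.img_to_array(img))            # Keras: flip to BGR, then subtract BGR means
print(np.allclose(x, y))                                  # expected True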
Example #14
def __data_label__(path):
    datalist = []
    labellist = []
    for i in range(0, 7500):
        imgname = path + str('%04d' % i) + ".jpg"
        # each .pts file holds one "x,y" coordinate pair per line
        with open(path + str('%04d' % i) + ".pts", "r") as f:
            new_label = []
            for line in f.readlines():
                a = line.replace("\n", "")
                b = a.split(",")
                new_label.append(b[0])
                new_label.append(b[1])
        labellist.append(new_label)
        img = load_img(imgname)
        datalist.append(img_to_array(img))
    img_data = np.array(datalist)
    img_data /= 255
    label = np.array(labellist, dtype='float32')
    print(img_data)
    return img_data, label
Example #15
from tensorflow.contrib.keras.api.keras.preprocessing import image
import tensorflow as tf

assert tf.__version__ == "1.8.0"
tf.set_random_seed(20180130)

img_path = 'sample.jpg'

import numpy as np

x = image.load_img(img_path, target_size=(250, 250))

x = image.img_to_array(x)
x_expanded = np.expand_dims(x, axis=0)                     # add the batch dimension
x_expanded_trans = np.transpose(x_expanded, [0, 3, 1, 2])  # NHWC -> NCHW

# the placeholder uses NCHW to match the transposed array above
X = tf.placeholder(tf.float32, [None, 3, 250, 250])
sess = tf.Session()
sess.run(tf.global_variables_initializer())
print(sess.run(X, feed_dict={X: x_expanded_trans}))
Example #16
                         validation_steps=2000 / batch_size,
                         workers=12,
                         max_q_size=100,
                         callbacks=[history])

# Serialize Model
ModelSerializer.serialize_model_json(classifier, 'loss_history',
                                     'loss_history_weights')

# Predict single cases
test_image_1 = image.load_img('dataset/single_prediction/cat_or_dog_1.jpg',
                              target_size=input_size)
test_image_2 = image.load_img('dataset/single_prediction/cat_or_dog_2.jpg',
                              target_size=input_size)

test_image_1 = image.img_to_array(test_image_1)
test_image_2 = image.img_to_array(test_image_2)

# add a 4th (batch) dimension for the predict method:
# the model only accepts batches, even if the batch holds a single image
test_image_1 = np.expand_dims(test_image_1, axis=0)
test_image_2 = np.expand_dims(test_image_2, axis=0)

predict_1 = classifier.predict(test_image_1)
predict_2 = classifier.predict(test_image_2)

# class_indices shows the label mapping: cats are 0, dogs are 1
training_set.class_indices

if predict_1[0][0] == 1:
    prediction_1 = 'dog'
else:
    prediction_1 = 'cat'
Example #17
import os

import numpy as np
# assumed imports for this snippet: standalone Keras' model loading and image utilities
from keras.models import load_model
from keras.preprocessing import image

script_dir = os.path.dirname(__file__)
# Load pre-trained model
model_backup_path = os.path.join(script_dir, '../dataset/cat_or_dogs_model.h5')
test_set_path = os.path.join(script_dir, '../dataset/single_prediction')

classifier = load_model(model_backup_path)

input_size = (128, 128)

test_images_path = [
    test_set_path + '/' + filename for filename in os.listdir(test_set_path)
]
test_images = np.array([
    image.img_to_array(image.load_img(test_image_name, target_size=input_size))
    for test_image_name in test_images_path
])

# No need to rescale the images here... why?

predictions = classifier.predict(test_images)

for prediction, image_path in zip(predictions, test_images_path):
    if prediction == 1:
        prediction = 'dog'
    else:
        prediction = 'cat'
    print("Predicted {} for file {}".format(prediction,
                                            image_path.split("/")[-1]))
Example #18
        self.losses = losses

        return loss


if __name__ == "__main__":
    import sys
    from tensorflow.contrib.keras.api.keras.preprocessing import (
            image)

    s = tf.Session()

    img_path = sys.argv[1]
    img = image.load_img(img_path, target_size=(128, 128))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = x / 255.0 * 2.0 - 1.0
    print(x.shape, np.min(x), np.max(x))
    x = tf.constant(x)

    feature_layers = [
            "input_1", "block1_conv1", "block1_conv2", "block1_pool", "block2_conv2",
            "block3_conv2", "block4_conv2", "block5_conv2"]
    vgg19 = VGG19Features(feature_layers)
    fmaps = vgg19.make_feature_ops(x)

    for i in range(len(fmaps)):
        print(i)
        f = fmaps[i].eval(session=s)
        print(f.shape)
Example #19
def Identity(path, size, BGRTranspose=False):
    img = image.load_img(path, target_size=(size, size))
    x = image.img_to_array(img)
    if BGRTranspose:
        x = x[..., ::-1]
    return x
Example #20
def Identity(path, size, BGRTranspose=False):
    img = image.load_img(path, target_size=(size, size))
    x = image.img_to_array(img)
    if BGRTranspose:
        x = x[..., ::-1]
    return x
Example #21
def generate_example(path, group):
    img_path = ['{}_group{}_index{}.jpg'.format(path, group, idx) for idx in range(time_step)]
    imgs = [image.img_to_array(image.load_img(x)) for x in img_path]
    return imgs
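generate_example returns a plain list of per-frame arrays; for a recurrent or 3D-convolution model they would normally be stacked into one tensor. A minimal sketch, assuming time_step is defined at module level (as in the original), all frames share one resolution, and the path/group values are placeholders:

import numpy as np

time_step = 8                                    # assumed module-level value
frames = generate_example('data/clip', group=3)  # list of time_step (H, W, 3) arrays
sequence = np.stack(frames)                      # (time_step, H, W, 3)
batch = np.expand_dims(sequence, axis=0)         # (1, time_step, H, W, 3) for model input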