Code example #1
import numpy as np
from keras.preprocessing import image
from keras.applications.inception_v3 import preprocess_input as inception_v3_preprocess
from keras.applications.resnet50 import preprocess_input as resnet50_preprocess
from keras.applications.vgg16 import preprocess_input as vgg16_preprocess

# Assumed (not shown in the snippet): input sizes expected by each model's ImageNet weights.
target_sizes = {'InceptionV3': (299, 299), 'ResNet50': (224, 224), 'VGG16': (224, 224)}

def prepare_img(img_path, model_name='InceptionV3'):
    """Load an image at the model's expected size, apply its preprocessing, and return a 4D array."""
    img = image.load_img(img_path, target_size=target_sizes[model_name])
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)  # add batch dimension
    if model_name == 'InceptionV3':
        return inception_v3_preprocess(x)
    if model_name == 'ResNet50':
        return resnet50_preprocess(x)
    if model_name == 'VGG16':
        return vgg16_preprocess(x)
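Not part of the original snippet: a minimal usage sketch, assuming the imports above, that feeds the prepared array to a full ResNet50 classifier ('elephant.jpg' is a placeholder path).

# Hypothetical usage of prepare_img; the file name is a placeholder.
from keras.applications.resnet50 import ResNet50, decode_predictions

model = ResNet50(weights='imagenet')
x = prepare_img('elephant.jpg', model_name='ResNet50')  # shape (1, 224, 224, 3)
print(decode_predictions(model.predict(x), top=3)[0])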
Code example #2
from keras.applications.vgg16 import preprocess_input as vgg16_preprocess


def preprocess_vgg16(img_path):
    """
    Load an image, apply VGG16 preprocessing, and return a 4D array.

    Parameters
    ----------
    img_path: str
        Path to the image.

    Returns
    -------
    np.array
    """
    return vgg16_preprocess(path_to_tensor(img_path, scale=False))
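path_to_tensor is not shown in this snippet; the sketch below is a plausible stand-in consistent with the call above (a 224x224 load that returns a (1, 224, 224, 3) array and only rescales when scale=True), not the original helper.

import numpy as np
from keras.preprocessing import image

def path_to_tensor(img_path, scale=True):
    # Assumed helper, not from the original: load a 224x224 RGB image and add a batch axis.
    img = image.load_img(img_path, target_size=(224, 224))
    x = np.expand_dims(image.img_to_array(img), axis=0)
    return x / 255 if scale else x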
Code example #3
import cv2
import numpy as np
from keras.preprocessing import image
from keras.applications.mobilenet import preprocess_input as mobile_preprocess
from keras.applications.vgg16 import preprocess_input as vgg16_preprocess

def get_image_value(path, dim, bw, model_type):
    '''Read an image, resize it to `dim`, optionally convert it to grayscale (`bw`),
    and apply the preprocessing that matches `model_type` ('MOBILENET', 'VGG16',
    or anything else for plain 0-1 scaling).'''
    img = image.load_img(path, target_size=dim)
    img = image.img_to_array(img)
    if bw:
        # Keras loads images as RGB, so convert from RGB (not BGR) to grayscale.
        img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
        if model_type.upper() != 'NORMAL':
            # Pretrained models expect 3 channels, so stack the grayscale plane.
            img = np.stack((img, ) * 3, axis=-1)
        else:
            img = img.reshape(img.shape[0], img.shape[1], 1)

    if model_type.upper() == 'MOBILENET':
        return mobile_preprocess(img)
    elif model_type.upper() == 'VGG16':
        return vgg16_preprocess(img)
    return img / 255
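A small usage sketch (the file name is a placeholder): a grayscale frame routed through the VGG16 branch comes back stacked to three channels and VGG16-preprocessed.

# Hypothetical call to get_image_value; 'frame_0001.jpg' is a placeholder path.
arr = get_image_value('frame_0001.jpg', dim=(224, 224), bw=True, model_type='vgg16')
print(arr.shape)  # expected: (224, 224, 3)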
Code example #4
import os
import numpy as np
from keras.models import Model
from keras.preprocessing import image as kerasImage
from keras.applications.vgg16 import VGG16, preprocess_input as vgg16_preprocess
from keras.applications.inception_v3 import InceptionV3, preprocess_input as inceptionv3_preprocess
from keras.applications.resnet50 import ResNet50, preprocess_input as resnet50_preprocess

# jpg_dir is assumed to be defined elsewhere (root directory of the extracted frames).
movie_list = os.listdir(jpg_dir)

vgg16_model = VGG16(weights='imagenet')
vgg16_layer = Model(inputs=vgg16_model.input, outputs=vgg16_model.get_layer('fc2').output)
inceptionv3_model = InceptionV3(weights='imagenet')
inceptionv3_layer = Model(inputs=inceptionv3_model.input, outputs=inceptionv3_model.get_layer('avg_pool').output)
resnet50_model = ResNet50(weights='imagenet')
resnet50_layer = Model(inputs=resnet50_model.input, outputs=resnet50_model.get_layer('avg_pool').output)

for movie in movie_list:
    clip_list = os.listdir(os.path.join(jpg_dir, movie))
    for clip in clip_list:
        print("Processing %s" % clip)
        jpg_list = os.listdir(os.path.join(jpg_dir, movie, clip))
        vgg16_list = []
        inceptionv3_list = []
        resnet50_list = []
        for jpg in sorted(jpg_list):
            jpg_path = os.path.join(jpg_dir, movie, clip, jpg)
            keras_image_224 = kerasImage.load_img(jpg_path, target_size=(224, 224))
            keras_image_299 = kerasImage.load_img(jpg_path, target_size=(299, 299))
            keras_image_arr_224 = np.expand_dims(kerasImage.img_to_array(keras_image_224), axis=0)
            keras_image_arr_299 = np.expand_dims(kerasImage.img_to_array(keras_image_299), axis=0)

            vgg16_list.append(vgg16_layer.predict(vgg16_preprocess(keras_image_arr_224)))
            inceptionv3_list.append(inceptionv3_layer.predict(inceptionv3_preprocess(keras_image_arr_299)))
            resnet50_list.append(resnet50_layer.predict(resnet50_preprocess(keras_image_arr_224)))
        np.save(os.path.join(jpg_dir, movie, clip+"_vgg16.npy"), np.array(vgg16_list))
        np.save(os.path.join(jpg_dir, movie, clip+"_inceptionv3.npy"), np.array(inceptionv3_list))
        np.save(os.path.join(jpg_dir, movie, clip+"_resnet50.npy"), np.array(resnet50_list))
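The arrays written above can be read back per clip with np.load; a short sketch, assuming 'some_movie' and 'some_clip' name an existing movie/clip pair under jpg_dir.

# Hypothetical reload of the per-frame VGG16 fc2 features saved for one clip.
feats = np.load(os.path.join(jpg_dir, 'some_movie', 'some_clip_vgg16.npy'))
print(feats.shape)  # (num_frames, 1, 4096): one fc2 vector per frame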
Code example #5
import sys

from VGG16FeatureGenerator import VGG16FeatureGenerator
from keras.applications.vgg16 import VGG16, preprocess_input as vgg16_preprocess
from keras.preprocessing import image
import matplotlib.pyplot as plt
import numpy as np

if __name__ == '__main__':
    if len(sys.argv) < 2:  # require an image path argument
        sys.exit(1)
    img_path = sys.argv[1]

    model = VGG16(weights='imagenet', include_top=False)

    img = image.load_img(img_path, target_size=(224, 224))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = vgg16_preprocess(x)

    features = model.predict(x)

    print(features.shape)
    features = np.squeeze(features, axis=0)  # drop the batch dimension
    print(features)
    print(features.shape)
    print(features[:, :, -1])
    print(features[:, :, -1].shape)
    plt.imshow(features[:, :, -1])  # visualize the last channel of the feature map
    plt.show()


Code example #6
def extract_VGG16(tensor) -> np.ndarray:
    return VGG16(**args_NN).predict(vgg16_preprocess(tensor))


def extract_VGG19(tensor) -> np.ndarray:
    return VGG19(**args_NN).predict(vgg19_preprocess(tensor))
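Neither args_NN nor the imports appear in this snippet; the sketch below supplies assumed values (weights='imagenet', include_top=False, pooling='avg') and a placeholder file name, then calls extract_VGG16 on a single image.

import numpy as np
from keras.preprocessing import image
from keras.applications.vgg16 import VGG16, preprocess_input as vgg16_preprocess
from keras.applications.vgg19 import VGG19, preprocess_input as vgg19_preprocess

# Assumed constructor kwargs; the original args_NN is not shown above.
args_NN = dict(weights='imagenet', include_top=False, pooling='avg')

img = image.load_img('dog.jpg', target_size=(224, 224))  # placeholder file name
tensor = np.expand_dims(image.img_to_array(img), axis=0)
print(extract_VGG16(tensor).shape)  # (1, 512) with global average pooling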