Example #1
from keras.applications import vgg16
from keras.layers import concatenate, UpSampling2D, Conv2D
from keras.models import Model


def get_VGGunet_bnd_v3(input_shape=(416, 416, 3),
                       dropout_val=0.1,
                       batch_norm=False,
                       n_filters=32,
                       classes=2):
    vgg_model = vgg16.VGG16(include_top=False,
                            weights='imagenet',
                            input_tensor=None,
                            input_shape=input_shape,
                            pooling=None,
                            classes=1)  # ignored when include_top=False
    for l in vgg_model.layers:
        l.trainable = False

    conv1 = vgg_model.get_layer("block1_conv2").output
    conv2 = vgg_model.get_layer("block2_conv2").output
    conv3 = vgg_model.get_layer("block3_conv3").output
    conv4 = vgg_model.get_layer("block4_conv3").output
    conv5 = vgg_model.get_layer("block5_conv3").output

    up1 = concatenate([UpSampling2D(size=(2, 2))(conv5), conv4], axis=-1)
    conv6 = double_conv_layer(up1, n_filters * 8 * 3, dropout_val, batch_norm)

    up2 = concatenate([UpSampling2D(size=(2, 2))(conv6), conv3], axis=-1)
    conv7 = double_conv_layer(up2, n_filters * 8 * 2, dropout_val, batch_norm)

    up3 = concatenate([UpSampling2D(size=(2, 2))(conv7), conv2], axis=-1)
    conv8 = double_conv_layer(up3, n_filters * 8 * 1, dropout_val, batch_norm)

    up4 = concatenate([UpSampling2D(size=(2, 2))(conv8), conv1], axis=-1)
    conv9 = double_conv_layer(up4, n_filters * 4, 0, batch_norm)

    out = Conv2D(classes, (1, 1), activation='sigmoid', name='output')(conv9)

    model = Model(inputs=vgg_model.input, outputs=out)

    return model
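The double_conv_layer helper is not defined in this snippet. A minimal sketch consistent with how it is called above (input tensor, filter count, dropout rate, batch-norm flag) might look like this; the original implementation may differ:

from keras.layers import Conv2D, BatchNormalization, Activation, Dropout

def double_conv_layer(x, n_filters, dropout_val, batch_norm):
    # two 3x3 convolutions, each optionally batch-normalized and ReLU-activated
    for _ in range(2):
        x = Conv2D(n_filters, (3, 3), padding='same')(x)
        if batch_norm:
            x = BatchNormalization()(x)
        x = Activation('relu')(x)
    # optional dropout at the end of the block
    if dropout_val > 0:
        x = Dropout(dropout_val)(x)
    return x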
Example #2
import numpy as np
import cv2
from keras import backend as K
from keras.applications import vgg16
from keras.applications.vgg16 import preprocess_input
from keras.preprocessing import image


def visualize_class_activation_map(img_path, output_path):
  model = vgg16.VGG16(weights='imagenet')
  img = image.load_img(img_path, target_size=(224, 224))
  x = image.img_to_array(img)
  x = np.expand_dims(x, axis=0)   # now of shape (1, 224, 224, 3)
  x = preprocess_input(x)
  preds = model.predict(x)  # predict on the preprocessed image, not random noise

  # score of the top predicted class (not necessarily an elephant)
  top_class_output = model.output[:, np.argmax(preds[0])]

  last_conv_layer = model.get_layer('block5_conv3')
  grads = K.gradients(top_class_output, last_conv_layer.output)[0]
  
  pooled_grads = K.mean(grads, axis=(0, 1, 2))
 
  
  iterate = K.function([model.input], [pooled_grads, last_conv_layer.output[0]])
  pooled_grads_value, conv_layer_output_value = iterate([x])
  print(pooled_grads_value.shape)
  print(conv_layer_output_value.shape)
  for i in range(512):
    conv_layer_output_value[:, :, i] *= pooled_grads_value[i]

  heatmap = np.mean(conv_layer_output_value, axis=-1)
  heatmap = np.maximum(heatmap, 0)
  heatmap /= np.max(heatmap)
  import matplotlib.pyplot as plt
  plt.imshow(heatmap)
  plt.show()

  img = cv2.imread(img_path)
  heatmap = cv2.resize(heatmap, (img.shape[1], img.shape[0]))
  heatmap = np.uint8(255 * heatmap)
  heatmap = cv2.applyColorMap(heatmap, cv2.COLORMAP_JET)
  superimposed_img = heatmap * 0.4 + img
  cv2.imwrite(output_path, superimposed_img)
Example #3
from keras import models, layers
from keras.applications import vgg16


def defineModel():
    """
    1. load VGG16 model without top
    2. (optionally) freeze all layers except the last 4 - left commented out below
    3. add dense layers
    """
    # load the VGG model
    vgg_conv = vgg16.VGG16(weights='imagenet',
                           include_top=False,
                           input_shape=(224, 224, 3))

    # # freeze the layers except the last 4 layers
    # for layer in vgg_conv.layers[:-4]:
    #    layer.trainable = False

    # # check the trainable status of the individual layers
    # for layer in vgg_conv.layers:
    #     print(layer, layer.trainable)

    # create the model
    model = models.Sequential()

    # add the vgg convolutional base model
    model.add(vgg_conv)

    # add new layers
    model.add(layers.Flatten())
    model.add(layers.Dense(512, activation='relu'))
    model.add(layers.Dropout(0.2))
    model.add(layers.Dense(512, activation='relu'))
    model.add(layers.Dropout(0.2))
    model.add(layers.Dense(1, activation='sigmoid'))

    # show a summary of the model. Check the number of trainable parameters
    model.summary()

    return model
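A possible way to use this model (a sketch, not from the source): since the head ends in a single sigmoid unit, it would be compiled with a binary cross-entropy loss.

model = defineModel()
model.compile(optimizer='rmsprop',
              loss='binary_crossentropy',
              metrics=['accuracy'])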
Example #4
# import names assume a recent Keras; application module paths vary across versions
from keras.applications import vgg16, vgg19, xception, nasnet
from keras.applications import resnet
from keras.applications import inception_resnet_v2 as inception_resnet
from keras.models import Model, load_model


def create_model(feature_extraction_method, path_cnn_pre_trained, input_size):

    if (feature_extraction_method == 'pretrained_lenet'):
        model = load_model(path_cnn_pre_trained)
        input_image = input_size
    elif (feature_extraction_method == 'pretrained_vgg16'):
        model = vgg16.VGG16(weights='imagenet', include_top=True)
        #layer_name = 'fc2'
        input_image = 224
    elif (feature_extraction_method == 'pretrained_vgg19'):
        model = vgg19.VGG19(weights='imagenet', include_top=True)
        #layer_name = 'fc2'
        input_image = 224
    elif (feature_extraction_method == 'pretrained_xception'):
        model = xception.Xception(weights='imagenet', include_top=True)
        #layer_name = 'avg_pool'
        input_image = 299
    elif (feature_extraction_method == 'pretrained_resnet'):
        model = resnet.ResNet50(weights='imagenet', include_top=True)
        #layer_name = 'avg_pool'
        input_image = 224
    elif (feature_extraction_method == 'pretrained_inception_resnet'):
        model = inception_resnet.InceptionResNetV2(weights='imagenet',
                                                   include_top=True)
        #layer_name = 'avg_pool'
        input_image = 299
    elif (feature_extraction_method == 'pretrained_nasnet'):
        model = nasnet.NASNetLarge(weights='imagenet', include_top=True)
        #layer_name = 'global_average_pooling2d_1'
        input_image = 331
    else:
        raise ValueError('unknown feature_extraction_method: %s'
                         % feature_extraction_method)

    # expose the penultimate layer (e.g. fc2 / avg_pool) as the feature output
    intermediate_layer_model = Model(inputs=model.input,
                                     outputs=model.layers[-2].output)

    model.summary()

    return intermediate_layer_model, input_image
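A hedged usage sketch (not in the source): extract a feature vector for one image with the returned intermediate model, resizing to the input size it reports. The file name is a placeholder.

import numpy as np
from keras.preprocessing import image
from keras.applications.imagenet_utils import preprocess_input

extractor, size = create_model('pretrained_vgg16', None, 224)
img = image.load_img('sample.jpg', target_size=(size, size))
x = preprocess_input(np.expand_dims(image.img_to_array(img), axis=0))
features = extractor.predict(x)  # fc2 activations, shape (1, 4096)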
Example #5
    def load_pie_intent(self,
                        model_path='data/pie/intention/context_loc_pretrained'
                        ):
        # Load PIE model
        with open(os.path.join(model_path, 'configs.pkl'), 'rb') as fid:
            try:
                configs = pickle.load(fid)
            except UnicodeDecodeError:
                # pickle written under Python 2
                configs = pickle.load(fid, encoding='bytes')
        self.train_params = configs[1]
        self.load_model_config(configs[0])

        try:
            test_model = load_model(os.path.join(model_path, 'model.h5'))
        except Exception:
            # fall back to rebuilding the architecture and loading weights only
            test_model = self.get_model(self.train_params['model'])
            test_model.load_weights(os.path.join(model_path, 'model.h5'))
        #test_model.summary()
        self.pie_model = test_model

        # Create context model
        self.context_model = vgg16.VGG16(input_shape=(224, 224, 3),
                                         include_top=False,
                                         weights='imagenet')
Example #6
    def _build_architecture(self, input_shape):
        original_vgg16 = vgg16.VGG16(
            weights=self.cache_m.fileLocation('vgg16_weights_notop.h5'),
            include_top=False,
            input_shape=input_shape)

        #Freeze initial layers, except for the last 2:
        #for layer in original_vgg16.layers[:-2]:
        #    layer.trainable = False

        model = Sequential()
        model.add(original_vgg16)
        model.add(
            Convolution2D(4096, (7, 7),
                          strides=1,
                          padding='valid',
                          kernel_initializer='he_normal'))
        model.add(Activation('relu'))
        model.add(Dropout(0.75))
        model.add(
            Convolution2D(4096, (1, 1),
                          strides=1,
                          padding='valid',
                          kernel_initializer='he_normal'))
        model.add(Activation('relu'))
        model.add(Dropout(0.75))
        model.add(
            Convolution2D(self._ds.nclasses, (1, 1),
                          strides=1,
                          padding='valid',
                          kernel_initializer='he_normal'))
        model.add(Flatten())
        model.add(Dense(self._ds.nclasses))
        model.add(Activation('softmax'))

        return model
Example #7
    def getBaseModelInstance(self, modelChoose, weights_name):
        if (modelChoose == BaseModel.NA):
            return None, "Invalid model choice"
        if (weights_name == ""):
            weights_name = None
        if modelChoose == BaseModel.Resnet50:
            resnet50Model = resnet50.ResNet50(weights=weights_name,
                                              include_top=False,
                                              pooling='max',
                                              input_shape=(224, 224, 3))
            resnet50Model.summary()
            return resnet50Model
        else:
            vgg16Model = vgg16.VGG16(weights=None,
                                     include_top=False,
                                     pooling=None,
                                     input_shape=(224, 224, 3))
            vgg16Model.summary()
            model = Sequential()
            for layer in vgg16Model.layers[:-1]:
                model.add(layer)

            model.summary()
            return model
Example #8
    def get_train_val_data(self, data, data_type, seq_length, overlap):
        tracks, images, bboxes, ped_ids = self.get_tracks(
            data, data_type, seq_length, overlap)

        encoder_input = self.concat_data(tracks,
                                         data_type['encoder_input_type'])
        decoder_input = self.concat_data(tracks,
                                         data_type['decoder_input_type'])
        output = self.concat_data(tracks, data_type['output_type'])

        if len(decoder_input) == 0:
            decoder_input = np.zeros(shape=np.array(bboxes).shape)
        self.context_model = vgg16.VGG16(input_shape=(224, 224, 3),
                                         include_top=False,
                                         weights='imagenet')

        return {
            'images': images,
            'bboxes': bboxes,
            'ped_ids': ped_ids,
            'encoder_input': encoder_input,
            'decoder_input': decoder_input,
            'output': output
        }
Example #9
def vg16(input_shape, Layer_Trainable=()):

    vgg = vgg16.VGG16(include_top=False,
                      weights='imagenet',
                      input_shape=input_shape)

    output = vgg.layers[-1].output
    output = keras.layers.Flatten()(output)
    vgg_model = Model(vgg.input, output)

    # unfreeze everything from the first layer named in Layer_Trainable onwards
    set_trainable = False
    for layer in vgg_model.layers:
        if layer.name in Layer_Trainable:
            set_trainable = True
        if set_trainable:
            layer.trainable = True
        else:
            layer.trainable = False

    layers = [(layer, layer.name, layer.trainable)
              for layer in vgg_model.layers]
    y = pd.DataFrame(layers,
                     columns=['Layer Type', 'Layer Name', 'Layer Trainable'])
    return y
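A usage sketch (assumed, not from the source): unfreeze everything from block5_conv1 onward and inspect the resulting trainable flags.

df = vg16((224, 224, 3), Layer_Trainable=['block5_conv1'])
print(df)  # one row per layer: type, name, trainable flag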
Example #10
 def __init__(self, model='vgg16'):
     self.name = model
     if self.name == 'vgg16':
         self.model = vgg16.VGG16(weights='imagenet',
                                  include_top=False,
                                  input_shape=(256, 256, 3))
     elif self.name == 'resnet50':
         self.model = resnet50.ResNet50(weights='imagenet',
                                        include_top=False,
                                        input_shape=(256, 256, 3))
     elif self.name == 'inception_v3':
         self.model = inception_v3.InceptionV3(weights='imagenet',
                                               include_top=False,
                                               input_shape=(256, 256, 3))
     else:
         self.model = mobilenet.MobileNet(weights='imagenet',
                                          include_top=False,
                                          input_shape=(256, 256, 3))
     self.list_path_normal = listings()[0] + listings()[1]  # train plus validation
     with open("C:/Users/RLOCAL/Desktop/DataChallenge/youtput.txt",
               "rb") as fp:  # Unpickling
         self.labels = pickle.load(fp)
     self.mode = 'tf'
Example #11
import os
import numpy as np
from tqdm import tqdm


def getCnnDescriptors(ficheros, output_folder, cnn_network='vgg16'):
    descriptors_dict = {}
    if cnn_network == 'vgg16':
        from keras.applications import vgg16
        from keras.models import Model
        from keras.applications.vgg16 import preprocess_input
        from keras.preprocessing import image

        model = vgg16.VGG16(weights='imagenet', include_top=True)
        model = Model(inputs=model.input,
                      outputs=model.get_layer('fc1').output)

    DeleteFiles(output_folder)

    for fichero in tqdm(ficheros):
        img = image.load_img(fichero, target_size=(224, 224))
        x = image.img_to_array(img)
        x = np.expand_dims(x, axis=0)
        x = preprocess_input(x)
        descriptor = model.predict(x)
        fid = os.path.basename(fichero).split('.')[0] + '.npy'  # renamed from id to avoid shadowing the builtin
        name = os.path.join(output_folder, fid)
        np.save(name, descriptor)
    return 'SUCCEED'
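A sketch of how this might be called (folder names are placeholders, and the DeleteFiles helper from the original module is assumed to be available):

import glob

files = glob.glob('images/*.jpg')
getCnnDescriptors(files, 'descriptors', cnn_network='vgg16')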
Example #12
import numpy as np
import cv2
from keras import backend as K
from keras.applications import vgg16


def visualize_class_activation_map(img_path, output_path):
    model = vgg16.VGG16(weights='imagenet')
    original_img = cv2.imread(img_path, 1)
    original_img = cv2.resize(original_img, (224, 224))
    import matplotlib.pyplot as plt

    print("original_img shape:", original_img.shape)
    width, height, _ = original_img.shape

    # Reshape to the network input shape (3, w, h).
    # img = np.array([np.transpose(np.float32(original_img), (2, 0, 1))])
    img = np.array([original_img])
    print("IMG shape:", img.shape)
    # Get the 512 input weights to the softmax.
    class_weights = model.layers[-1].get_weights()[0]
    print("class_weights", class_weights.shape)
    final_conv_layer = get_output_layer(model, "block5_conv3")
    get_output = K.function([model.layers[0].input], [final_conv_layer.output])
    [conv_outputs] = get_output([img])
    print(conv_outputs.shape)
    conv_outputs = conv_outputs[0, :, :, :]
    print(conv_outputs.shape)
    print(class_weights.shape)
    # Create the class activation map for class index 2.
    cam = np.zeros(dtype=np.float32, shape=conv_outputs.shape[0:2])
    for i, w in enumerate(class_weights[:512, 2]):
        cam += w * conv_outputs[:, :, i]
    # print("predictions", predictions)
    cam /= np.max(cam)
    plt.imshow(cam)
    plt.show()
    cam = cv2.resize(cam, (height, width))
    heatmap = cv2.applyColorMap(np.uint8(255 * cam), cv2.COLORMAP_JET)
    heatmap[np.where(cam < 0.2)] = 0
    img = heatmap * 0.5 + original_img
    cv2.imwrite(output_path, img)
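The get_output_layer helper is not defined in this snippet; a minimal version consistent with how it is called would simply look a layer up by name:

def get_output_layer(model, layer_name):
    # map layer names to layers and return the requested one
    layer_dict = dict([(layer.name, layer) for layer in model.layers])
    return layer_dict[layer_name]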
Example #13
import numpy as np
from keras.preprocessing import image
from keras.applications import vgg16

model = vgg16.VGG16()

img = image.load_img('aero.jpg', target_size=(224, 224))

x = image.img_to_array(img)

x = np.expand_dims(x, axis=0)

x = vgg16.preprocess_input(x)

predict = model.predict(x)

predicted_classes = vgg16.decode_predictions(predict, top=4)

print('Predictions for this image')

for imagenet_id, name, likelihood in predicted_classes[0]:
    print('Prediction: {} - {:.2f}'.format(name, likelihood))
Example #14
def train_vgg16(train_data_path, test_data_path, num_classes):
    print(num_classes)
    # Load train set
    data = action_image_dataloader(train_data_path)
    train_inputs, train_labels = data.get_data()
    print(train_inputs.shape)
    print(train_labels.shape)
    # Load test set
    test_data = action_image_dataloader(test_data_path, mode='Test')
    test_inputs, test_labels = test_data.get_data()
    print(test_inputs.shape)
    print(test_labels.shape)
    # Create ImageDataGenerator object for data-augmentation
    datagen = ImageDataGenerator(rotation_range=40,
                                 width_shift_range=0.2,
                                 height_shift_range=0.2,
                                 shear_range=0.2,
                                 zoom_range=0.2,
                                 horizontal_flip=True,
                                 fill_mode='nearest')
    # Create PlotLosses object for plotting training loss
    plot_losses = PlotLosses()
    # Specify batch-size
    batch_size = 100
    # Specify number of epochs
    num_epochs = 50
    # Create data-generator
    generator = datagen.flow(train_inputs, train_labels, batch_size=batch_size)
    # Load base-model VGG16
    base_model = vgg16.VGG16(weights='imagenet', include_top=False)
    # Define the final layers of the model
    x = base_model.output
    x = Dropout(rate=0.5)(x)
    x = GlobalAveragePooling2D()(x)
    x = Dense(1024, activation='relu')(x)
    predictions = Dense(num_classes, activation='softmax')(x)
    # Create object for the new model
    model = Model(inputs=base_model.input, outputs=predictions)
    # Freeze all the layers of the base-model
    # Only the newly defined final layers will be trainable
    for layer in base_model.layers:
        layer.trainable = False
    # Compile the model
    model.compile(optimizer='adam',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    # Train the final layers of the model
    model.fit(x=train_inputs,
              y=train_labels,
              batch_size=batch_size,
              epochs=num_epochs,
              verbose=1,
              shuffle=True,
              validation_data=(test_inputs, test_labels),
              callbacks=[plot_losses] +
              callbacks('face_classifier_mdl_best_1.h5'))
    # model.fit_generator(generator, epochs=num_epochs) #, verbose=1, shuffle=True, validation_data=(test_inputs, test_labels)), callbacks=[plot_losses]+callbacks('face_classifier_mdl_best_1.h5'))
    # Save the model as a json file
    model_json = model.to_json()
    with open("face_classifier_model.json", "w") as json_file:
        json_file.write(model_json)
    # Function call for fine-tuning a previous layer of the model
    fine_tune_vgg16(generator, train_inputs, train_labels, test_inputs,
                    test_labels, num_epochs, batch_size, plot_losses)
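The callbacks(...) helper used above is not shown. A plausible sketch that returns a checkpoint callback for the given filename (an assumption; the original may do more):

from keras.callbacks import ModelCheckpoint

def callbacks(checkpoint_name):
    # save the best model seen so far under the given filename
    return [ModelCheckpoint(checkpoint_name, monitor='val_loss',
                            save_best_only=True, verbose=1)]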
Example #15
# -*- coding: utf-8 -*-
"""
Validation accuracy:   64 %
Created on Wed Jan 23 01:56:10 2019

@author: ramah
"""
from keras.applications import vgg16
from keras.applications.vgg16 import preprocess_input, decode_predictions
from keras.callbacks import ModelCheckpoint

conv_base = vgg16.VGG16(weights='imagenet', include_top=False, input_shape=(224, 224, 3))

import numpy as np
import os
from keras.preprocessing.image import ImageDataGenerator
base_dir ='..../DME_NORMAL'
train_dir = os.path.join(base_dir, 'train')
validation_dir = os.path.join(base_dir,'validation')
test_dir = os.path.join(base_dir, 'test')

from keras import models
from keras import layers

x = conv_base.output
x = layers.GlobalAveragePooling2D()(x)
x = layers.Dense(256, activation='relu')(x)
Example #16
import keras
import numpy as np
from keras.applications import vgg16
from keras.applications.imagenet_utils import decode_predictions
from keras.preprocessing.image import load_img, img_to_array
import matplotlib.pyplot as plt
import os

__BASE_DIR__ = os.path.dirname(os.path.realpath(__file__))

vgg = vgg16.VGG16(weights='imagenet')

file = __BASE_DIR__ + '/src/puppy.jpg'
org = load_img(file, target_size=(224, 224))
image = img_to_array(org)

# plt.imshow(np.uint8(image))
# plt.show()

x = np.expand_dims(image, axis=0)
x = vgg16.preprocess_input(x)

pred = vgg.predict(x)

label = decode_predictions(pred)

# print(label)

"""
[[('n02085936', 'Maltese_dog', 0.7064417),
('n02098286', 'West_Highland_white_terrier', 0.1227724),
...]]
"""
Example #17
import numpy as np
from keras.applications import vgg16
from keras.preprocessing.image import load_img
from keras.preprocessing.image import img_to_array
from keras.applications.imagenet_utils import decode_predictions


# classify image with model
def predict_image(filename, model):
    original = load_img(filename, target_size=(224, 224))
    numpy_image = img_to_array(original)
    image_batch = np.expand_dims(numpy_image, axis=0)
    processed_image = vgg16.preprocess_input(image_batch.copy())
    return decode_predictions(vgg_model.predict(processed_image))[0][0][1]


# load vgg model
vgg_model = vgg16.VGG16(weights='imagenet')

# flower classes
flowers = ['daisy', 'pot']

# bird classes
birds = [
    'goldfinch',
    'European_gallinule',
    'brambling',
    'peacock',
    'indigo_bunting',
    'lorikeet',
    'bulbul',
    'great_grey_owl',
    'hen',
Example #18
x = Flatten(name='flatten')(x)
for i in range(cfg.fclayers):
    x = Dense(cfg.fclayersize, activation='relu',
              kernel_regularizer=l1_l2(cfg.l1, cfg.l2))(x)
x = Dense(len(obj_classes), activation='softmax', name='predictions')(x)

inputs = img_input
model = Model(inputs, x, name='vgg16')
model.compile(loss='categorical_crossentropy',
              optimizer=optimizer, metrics=['accuracy'])
model.summary()
#%% Transfer weights
from keras.applications import vgg16
import keras.layers.convolutional
vgg16model = vgg16.VGG16(include_top=False)
modelconv = [l for l in model.layers
             if type(l) == keras.layers.convolutional.Conv2D]
vgg16conv = [l for l in vgg16model.layers
             if type(l) == keras.layers.convolutional.Conv2D]

for i, l in enumerate(modelconv):
    if i > cfg.xferlearning:
        continue  # transfer only the first n layers
    print('**** Transferring layer %d: %s from VGG ****' % (i, l.name))
    weights = vgg16conv[i].get_weights()
    modelconv[i].set_weights(weights)
    if cfg.freeze_conv:
        l.trainable = False
#%% Visualization code
Example #19
#!/usr/bin/env python

# -*- coding: utf-8 -*-

from keras.applications import vgg16
from keras.layers import Dense, GlobalAveragePooling2D

img_rows, img_cols = 224, 224

vgg16 = vgg16.VGG16(weights='imagenet',
                    include_top=False,
                    input_shape=(img_rows, img_cols, 3))

for layer in vgg16.layers:
    layer.trainable = False

for (i, layer) in enumerate(vgg16.layers):
    print(str(i) + " " + layer.__class__.__name__, layer.trainable)


def head(bottom_model, num_classes):
    """creates the top or head of the model that will be
    placed on top of the bottom layers"""

    top_model = bottom_model.output
    top_model = GlobalAveragePooling2D()(top_model)
    top_model = Dense(1024, activation='relu')(top_model)
    top_model = Dense(1024, activation='relu')(top_model)
    top_model = Dense(512, activation='relu')(top_model)
    top_model = Dense(num_classes, activation='softmax')(top_model)
    return top_model
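To assemble the full model from this head (a sketch, not part of the source; num_classes is a placeholder):

from keras.models import Model

num_classes = 10
model = Model(inputs=vgg16.input, outputs=head(vgg16, num_classes))
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])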
Example #20
import numpy as np
from keras import backend as K
from keras.layers import Input
from keras.applications import vgg16


def eval_loss_and_grads(x):
    x = x.reshape((1,) + img_size)
    outs = f_outputs([x])
    loss_value = outs[0]
    if len(outs[1:]) == 1:
        grad_values = outs[1].flatten().astype('float64')
    else:
        grad_values = np.array(outs[1:]).flatten().astype('float64')
    return loss_value, grad_values

img_size = (img_height, img_width, 3)
# this will contain the generated image
dream = Input(batch_shape=(1,) + img_size)

# load the vgg16 model with pretrained weights
model = vgg16.VGG16(input_tensor=dream, weights='imagenet', include_top=False)
print('Model loaded.')

# get the symbolic output of each "key" layer
layer_dict = dict([(layer.name, layer) for layer in model.layers])

# define the loss
loss = K.variable(0.)

for layer_name in settings['features']:
    # add the L2 norm of the features of a layer to the loss
    assert layer_name in layer_dict.keys(), 'Layer ' + layer_name + ' not found in model.'
    coeff = settings['features'][layer_name]
    x = layer_dict[layer_name].output
    shape = layer_dict[layer_name].output_shape
    # avoid border artifacts by only involving non-border pixels in the loss
Example #21
#path_test = '../input/test/'
trainG = vggGenerator(train_ids, train_path, pred_size=4, batch_size=256)
testG = vggGenerator(test_ids, train_path, pred_size=4, batch_size=32)
save_str = 'vgg_model-tgs-salt-var_.1_left_right-' + '-acc-{val_competitionMetric2:.2f}-epoch-{epoch:02d}.h5'
callbacks = [
    #EarlyStopping(patience=10, verbose=1), #was patience=3 for LRNon
    ReduceLROnPlateau(patience=5, verbose=1),  #was patience=3
    ModelCheckpoint(save_str,
                    verbose=1,
                    save_best_only=False,
                    period=10,
                    save_weights_only=True,
                    monitor='val_competitionMetric2')
]

model = vgg16.VGG16(include_top=False, input_shape=(48, 48, 3))
model = Sequential(model.layers)
for layer in model.layers:
    layer.trainable = False
model.add(Flatten())
model.add(Dense(4048, activation='relu'))
#model.add(Activation('softmax'))
model.add(Dense(1024, activation='relu'))
#model.add(Activation('softmax'))
model.add(Dense(1024, activation='relu'))
#model.add(Activation('relu'))
model.add(Dense(512, activation='relu'))
#model.add(Activation('softmax'))
model.add(Dense(16))
model.add(Reshape((4, 4)))
model.add(Activation('relu'))
Example #22
from keras.applications import vgg16

output_pre = "models/vgg16"

model = vgg16.VGG16(include_top=True, weights='imagenet')
with open(output_pre + '.json', 'w') as jsonf:
    jsonf.write(model.to_json())
model.save_weights(output_pre + ".h5")
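The saved architecture and weights can be restored later; a brief sketch:

from keras.models import model_from_json

with open(output_pre + '.json') as jsonf:
    restored = model_from_json(jsonf.read())
restored.load_weights(output_pre + '.h5')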
Example #23
 def run(self):
     import keras.applications.resnet50 as resnet50
     import keras.applications.vgg16 as vgg16
     resnet50.ResNet50(weights='imagenet')
     vgg16.VGG16(weights='imagenet', include_top=False)
     develop.run(self)
Example #24
def main():
    WEIGHTS_PATH = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.1/vgg16_weights_tf_dim_ordering_tf_kernels.h5'
    WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.1/vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5'

    parser = myArgumentParser(description='Run a prediction experiment using pretrained VGG16, specified on the deepstreet DataSet.',
            fromfile_prefix_chars='@')
    parser.add_argument('--gpu', type=int, default=0, help='GPU Device (default: %(default)s)')
    parser.add_argument('--output_dir', type=str, default="./experiment_output/",help='Output directory')
    parser.add_argument('--input_dir', type=str, default="./",help='Input directory')
    parser.add_argument('--debug', type=bool, default=False, help='Debug mode')

    args = parser.parse_args()
    GPU = args.gpu
    OUTDIR = args.output_dir+"/"
    INDIR = args.input_dir+"/"
    DEBUG = args.debug

    if not os.path.exists(OUTDIR):
        os.makedirs(OUTDIR)


    if DEBUG:
        validation_data_dir = INDIR + "small_dataset/val/"
    else:
        #validation_data_dir = "dataset/val/"
        validation_data_dir = INDIR + "val/"

    # validation_data_dir already includes INDIR, so do not prefix it again
    if os.path.exists(validation_data_dir + ".DS_Store"):
        os.remove(validation_data_dir + ".DS_Store")

    #set dimensions of the images
    img_rows, img_cols = 224, 224

    if K.image_data_format() == 'channels_first':
        shape_ord = (3, img_rows, img_cols)
    else:  # channel_last
        shape_ord = (img_rows, img_cols, 3)

    vgg16_model = vgg16.VGG16(weights=None, include_top=False, input_tensor=Input(shape_ord))
    vgg16_model.summary()

    #add last fully-connected layers
    x = Flatten()(vgg16_model.output)
    x = Dense(4096, activation='relu', name='ft_fc1')(x)
    x = Dropout(0.5)(x)
    x = BatchNormalization()(x)
    predictions = Dense(43, activation='softmax')(x)

    model = Model(inputs=vgg16_model.input, outputs=predictions)

    #compile the model
    model.compile(optimizer=optimizers.SGD(lr=1e-4, momentum=0.9),
                loss='categorical_crossentropy', metrics=['accuracy'])


    #load validation images and create labels list
    validation_filenames = os.listdir(validation_data_dir)
    validation_filenames.sort()
    validation_images = []
    validation_labels = []

    for name in list(validation_filenames):  # iterate over a copy; removing items while iterating skips entries
        if name.endswith(".ppm"):
            validation_images.append(validation_data_dir + name)
            label = name.split("_")[0]
            label_int = int(label)
            labels_array = [0]*43
            labels_array[label_int] = 1
            validation_labels.append(labels_array)
        else:
            validation_filenames.remove(name)

    print("Validation Filenames loaded.")


    validation = np.array(load_im2(validation_images, img_cols, img_rows))
    print("Validation images loaded.")

    model.load_weights("experiment_output/vgg16_deepstreet_training1.h5")

    predicted_labels = model.predict(validation)
    print("Labels predicted.")

    #write summary file
    prediction_summary = open(OUTDIR + "vgg16_deepstreet_t_prediction_summary_deepstreet_v.txt", "w")
    prediction_summary.write("\t".join(['FILENAME', 'REAL_LABEL', 'PREDICTED_LABELS']) + '\n')

    predicted_labels_linear = []
    validation_labels_linear = []

    #make linear labels list
    for lbl in validation_labels:
        for i,val in enumerate(lbl):
            if val == 1:
                validation_labels_linear.append(i)


    for i in range(len(predicted_labels)):
        cls_prob = predicted_labels[i]     #class probabilities for image i

        predicted_label_index = np.argmax(cls_prob) #get the index of the class with higher probability
        line = [validation_images[i], str(validation_labels_linear[i]), str(predicted_label_index), str(round(cls_prob[predicted_label_index],3))]

        s = ""
        for j in range(42):  # j, not i: do not clobber the outer loop index
            s += "{}:{}; ".format(j,round(cls_prob[j],3))
            #s += str(j) + ":" + str(round(cls_prob[j],3)) + "; "
        s += "42:{}".format(round(cls_prob[42],3))
        #s += "42:" + str(round(cls_prob[42],3))

        line.append(s)

        predicted_labels_linear.append(np.argmax(cls_prob))
        prediction_summary.write(";".join(line) + "\n")
        prediction_summary.flush()


    validation_labels_linear = np.array(validation_labels_linear)
    predicted_labels_linear = np.array(predicted_labels_linear)

    #calculate MCC
    MCC = multimcc(validation_labels_linear, predicted_labels_linear)
    print(MCC)

    prediction_summary.write("MCC = {}".format(MCC))
    prediction_summary.flush()
    prediction_summary.close()

    #compute confusion matrix and save the image
    conf_matrix = confusion_matrix(validation_labels_linear,predicted_labels_linear)[0]
    plt.matshow(conf_matrix)
    plt.colorbar()
    plt.savefig("confusion_matrix.png")

    end = timer()
    print("Total time: ", end - start)
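The load_im2 helper above is not included in the snippet. A hypothetical reconstruction consistent with its call site (read each image and resize it to img_cols x img_rows) might be:

import cv2

def load_im2(paths, img_cols, img_rows):
    # read each image from disk and resize to the network input size
    return [cv2.resize(cv2.imread(p), (img_cols, img_rows)) for p in paths]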
Example #25
    x *= 0.1

    # clip to [0, 1]
    x += 0.5
    x = np.clip(x, 0, 1)

    # convert to RGB array
    x *= 255
    if K.image_data_format() == 'channels_first':
        x = x.transpose((1, 2, 0))
    x = np.clip(x, 0, 255).astype('uint8')
    return x


# build the VGG16 network with ImageNet weights
model = vgg16.VGG16(weights='imagenet', include_top=False)
print('Model loaded.')

model.summary()

# this is the placeholder for the input images
input_img = model.input

# get the symbolic outputs of each "key" layer (we gave them unique names).
layer_dict = dict([(layer.name, layer) for layer in model.layers[1:]])


def normalize(x):
    # utility function to normalize a tensor by its L2 norm
    return x / (K.sqrt(K.mean(K.square(x))) + K.epsilon())
Example #26
def play_annotated_video():
    def on_click(event, x, y, flags, param):

        if event != cv2.EVENT_LBUTTONUP:
            return

        rval, frame = vc.read()
        if not rval:
            return

        frame = resize_frame(frame)

        key_points = [(kp.pt[1], kp.pt[0]) for kp in sift.detect(frame, None)]

        loc = (y - stride / 2.0, x - stride / 2.0)
        key_points = list(
            filter(lambda p: is_in_rad_grid_loc(p, loc), key_points))

        if len(key_points) == 0:
            print("no keypoints there")
            return

        random.shuffle(key_points)

        windows = np.empty((len(key_points), window_size, window_size, 3))

        for i in range(len(key_points)):
            windows[i] = extract_window(frame, key_points[i])

        print("windows.shape", windows.shape)

        # extract cnn features from windows

        windows = preprocess_input(windows)
        feats = model.predict(windows)[:, 3, 3, :]

        ids, distances = memory_graph.knn_query(feats, k=1)

        observation_id = None

        print("distances.shape", distances.shape)

        for i in range(distances.shape[0]):
            print("distances[i][0]", distances[i][0])
            if distances[i][0] < 0.1:
                random_keypoint = key_points[i]
                observation_id = ids[i][0]
                print("distances[i][0]", distances[i][0])
                break

        if observation_id is None:
            print("no close observation found")
            return

        counts, node_ids = memory_graph.random_walk(observation_id, 100, 1000)

        n = 0
        for i in range(len(counts)):
            count = counts[i]
            if count < 200:
                break
            n += 1

        nodes = memory_graph.get_observations(node_ids[:n])

        for i in range(n):
            node = nodes[i]
            x = node["x"]
            y = node["y"]
            t = node["t"]
            cv2.circle(frame, (int(round(x)), int(round(y))), 3, colors[0],
                       cv2.FILLED)

        cv2.circle(
            frame,
            (int(round(random_keypoint[1])), int(round(random_keypoint[0]))),
            7, colors[2], cv2.FILLED)

        cv2.imshow("preview", frame)

        key = cv2.waitKey(0)

        # if not vc.isOpened():
        #     vc.release()
        #     cv2.destroyWindow("preview")

    # Video
    cv2.namedWindow("preview")
    cv2.setMouseCallback("preview", on_click)

    vc = cv2.VideoCapture(video_file)

    # CNN
    model = vgg16.VGG16(weights="imagenet",
                        include_top=False,
                        input_shape=(224, 224, 3))

    # initialize SIFT
    sift = cv2.xfeatures2d.SIFT_create()

    memory_graph = MemoryGraph(graph_path=graph_file,
                               index_path=index_file,
                               space='cosine',
                               dim=512)

    while vc.isOpened():
        rval, frame = vc.read()
        if not rval:
            break
        cv2.imshow("preview", frame)
        key = cv2.waitKey(0)
Example #27
    content_image = K.zeros(shape=shape)

images = K.concatenate([style_image, target_image, content_image], axis=0)

# Create tensor variables for masks
raw_style_mask, raw_target_mask = load_mask_labels()
style_mask = K.variable(raw_style_mask.astype("float32"))
target_mask = K.variable(raw_target_mask.astype("float32"))
masks = K.concatenate([style_mask, target_mask], axis=0)

# index constants for images and tasks variables
STYLE, TARGET, CONTENT = 0, 1, 2

# Build image model, mask model and use layer outputs as features
# image model as VGG16
image_model = vgg16.VGG16(include_top=False, input_tensor=images)

# mask model as a series of pooling
mask_input = Input(tensor=masks, shape=(None, None, None), name="mask_input")
x = mask_input
for layer in image_model.layers[1:]:
    name = 'mask_%s' % layer.name
    if 'conv' in layer.name:
        x = AveragePooling2D((3, 3),
                             strides=(1, 1),
                             name=name,
                             padding="same")(x)
    elif 'pool' in layer.name:
        x = AveragePooling2D((2, 2), name=name)(x)
mask_model = Model(mask_input, x)
Example #28
def build_graph():

    print("Starting...")

    # initialize SIFT
    sift = cv2.xfeatures2d.SIFT_create()

    # initialize VGG16
    model = vgg16.VGG16(weights="imagenet",
                        include_top=False,
                        input_shape=(32, 32, 3))

    model.summary()  # summary() prints directly and returns None

    memory_graph = MemoryGraph(space='cosine', dim=512)
    memory_graph_walker = MemoryGraphWalker(memory_graph,
                                            distance_threshold=0.15,
                                            identical_distance=0.015)

    total_frame_count = 0

    # for each run though the video
    for r in range(runs):

        print("Run", r)

        # open video file for a run though
        cap = cv2.VideoCapture(video_file)

        # select a random starting position
        pos = [None for _ in range(walker_count)]

        done = False

        # for each frame
        for t in range(max_frames):
            if done:
                break

            ret, frame = cap.read()

            if ret == False:
                done = True
                break

            frame = resize_frame(frame)

            for i in range(walker_count):
                if pos[i] is None:
                    pos[i] = (frame.shape[0] * random.random(),
                              frame.shape[1] * random.random())

            key_points = [(kp.pt[1], kp.pt[0])
                          for kp in sift.detect(frame, None)]

            for i in range(walker_count):
                pos[i] = next_pos(key_points, pos[i], frame.shape)

            windows = extract_windows(frame, pos)

            # extract cnn features from windows
            windows = preprocess_input(windows)  # keep the returned array; not guaranteed in-place
            feats = model.predict(windows)
            print("feats.shape", feats.shape)

            ids = memory_graph_walker.add_parrelell_observations(t, pos, feats)

            if save_windows:
                for i in range(walker_count):
                    cv2.imwrite('./output/testing' + str(ids[i]) + '.jpg',
                                windows[i])

            total_frame_count += 1

        cap.release()
        cv2.destroyAllWindows()

    memory_graph.save_graph(graph_file)
    memory_graph.save_index(index_file)

    print("Done")
Example #29
def initialize_neural_network():
    model = vgg16.VGG16(include_top=True, weights='imagenet')
    # drop the softmax and final fully-connected layer to expose features;
    # layers.pop() only edits the layer list, so outputs is reset explicitly
    model.layers.pop()
    model.layers.pop()
    model.outputs = [model.layers[-1].output]
    return model


images = []
labels = []

# load cat images (the start of this loop was truncated in the source;
# reconstructed by symmetry with the dog loop below; cat_path is hypothetical)
for img in cat_path.glob("*.png"):
    img = image.load_img(img)
    image_array = image.img_to_array(img)
    images.append(image_array)
    # expected value should be 0
    labels.append(0)

# load dog
for img in dog_path.glob("*.png"):
    img = image.load_img(img)
    image_array = image.img_to_array(img)
    images.append(image_array)
    # expected value should be 1
    labels.append(1)

# all the training images we load
x_train = np.array(images)
# convert labels to array
y_train = np.array(labels)

# use vgg16 model pretrained on the image net dataset.
# Normalize image data
x_train = vgg16.preprocess_input(x_train)

# Load a pre-trained NN (on imagenet) to use as a feature extractor
# so, chop off the last layer w/ include_top
pretrained_nn = vgg16.VGG16(weights='imagenet', include_top=False,
                            input_shape=(64, 64, 3))

# Extract features for each image
features_x = pretrained_nn.predict(x_train)
# Save extracted features to file
joblib.dump(features_x, "x_train.dat")
# Save the matching array of expected values to a file
joblib.dump(y_train, "y_train.dat")
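A typical follow-up (not part of the source) is to train a small classifier on the saved features; a sketch under the assumption of a binary cat/dog label:

import joblib
from keras.models import Sequential
from keras.layers import Flatten, Dense

x_train = joblib.load("x_train.dat")
y_train = joblib.load("y_train.dat")

clf = Sequential()
clf.add(Flatten(input_shape=x_train.shape[1:]))  # (2, 2, 512) for 64x64 inputs
clf.add(Dense(256, activation='relu'))
clf.add(Dense(1, activation='sigmoid'))
clf.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
clf.fit(x_train, y_train, epochs=10)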