Example #1
def create_my_model_without_dropout():
    #Get back the convolutional part of a VGG16 network trained on Places365
    img_input = Input(shape=(224,224,3), name = 'image_input')
    my_model = VGG16_Places365(weights= 'places', input_tensor = img_input, include_top = False)
    
    #Add the fully-connected layers 
    output_base_model = my_model.layers[-1].output
    x = Flatten(name='flatten')(output_base_model)
    
    x = BatchNormalization(name = 'bn_flatten')(x)
    x = Dense(64, activation='relu', name='fc1')(x)
    x = BatchNormalization(name='bn1')(x)
    x = Dense(64, activation='relu', name='fc2')(x)
    x = BatchNormalization(name='bn2')(x)
    x = Dense(64, activation='relu', name='fc3')(x)
    x = BatchNormalization(name='bn3')(x)
    
    fc_pose_utmx_utmy = Dense(2, name = 'fc_pose_utmx_utmy')(x)
    
    #Create your own model
    my_model = Model(inputs=img_input, outputs=fc_pose_utmx_utmy)

    #Freeze every layer: this test-time variant performs no training
    for layer in my_model.layers:
        layer.trainable = False

    #In the summary all layers show as non-trainable
    my_model.summary()
    plot_model(my_model, to_file='model_test.png', show_shapes = True)
    return my_model
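Since every layer is frozen and the plot goes to model_test.png, this reads as an inference-time variant of the training model in Example #5. A hypothetical test-time sketch; the weights file name and dummy input are assumptions, not part of the original:

import numpy as np

test_model = create_my_model_without_dropout()
test_model.load_weights('pose_weights.h5')  # hypothetical checkpoint from training
dummy_batch = np.zeros((1, 224, 224, 3), dtype=np.float32)
utmx, utmy = test_model.predict(dummy_batch)[0]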
Example #2
def vgg16_places365(nb_layers_removable=0):
    places = VGG16_Places365()
    # load vgg16 as sequential model
    # (weights were trained on rgb, requires 3 channels)
    model = vgg16_sequential(365, 3)
    # copy places weights to sequential model
    model.set_weights(places.get_weights())
    # remove specified layers
    for _ in range(nb_layers_removable):
        model.pop()

    return model
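For reference, each pop() strips one layer off the end of the sequential copy, so removing two layers swaps the 365-way softmax output for the fc1 activations. A usage sketch, assuming vgg16_sequential mirrors the canonical VGG16 layer order (flatten, fc1, fc2, predictions):

# Full 365-way classifier
classifier = vgg16_places365()

# Drop the softmax and fc2 layers to use fc1 as a 4096-d feature extractor
feature_extractor = vgg16_places365(nb_layers_removable=2)
feature_extractor.summary()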
Example #3
def pre_trained_model(index):
    """
    Models on which fine-tuning was performed
    """
    if index == 1:
        return [
            "VGG16_Places365",
            VGG16_Places365(include_top=False,
                            weights='places',
                            input_shape=(224, 224, 3)),
            pi  # presumably a preprocessing function, e.g. places_utils.preprocess_input imported as pi
        ]
    elif index == 0:
        return [
            "ResNet50",
            ResNet50(weights='imagenet', include_top=False),
            app.resnet50.preprocess_input
        ]
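A sketch of how the returned (name, model, preprocess) triple might be consumed; the unpacking names and the dummy batch are illustrative only:

import numpy as np

name, base_model, preprocess = pre_trained_model(1)
dummy_batch = np.zeros((1, 224, 224, 3), dtype=np.float32)  # stand-in input
features = base_model.predict(preprocess(dummy_batch))
print(name, features.shape)  # (1, 7, 7, 512) for the VGG16 base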
Example #4
def create_model():
    # load the pretrained model ('weights' accepts a local weights-file path;
    # 'weights_path' is not a valid keyword)
    vgg16 = VGG16_Places365(weights=VGG_weights_path)
    model = Sequential()
    # add all layers except the last one that does classification
    for layer in vgg16.layers[:-1]:
        model.add(layer)

    #freeze all the layers before the last (5th) convolutional block
    for layer in model.layers:
        if "block5" in layer.name:
            break
        else:
            layer.trainable = False

    # add final dense prediction layer
    model.add(Dense(num_classes, activation='softmax', name="predictions"))
    model.summary()
    return model
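With everything before block5 frozen, fine-tuning is usually paired with a small learning rate so the unfrozen block5 weights move slowly. A hedged compile sketch; the optimizer settings are assumptions:

from keras.optimizers import SGD

model = create_model()
model.compile(optimizer=SGD(lr=1e-4, momentum=0.9),
              loss='categorical_crossentropy',
              metrics=['accuracy'])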
Example #5
def create_my_model():
    #Get back the convolutional part of a VGG16 network trained on Places365
    img_input = Input(shape=(224, 224, 3), name='image_input')
    # 'classes' only applies when include_top=True, so it is omitted here
    my_model = VGG16_Places365(weights='places',
                               input_tensor=img_input,
                               include_top=False)

    #Freeze the VGG16 convolutional weights: the base serves as a fixed feature extractor

    for layer in my_model.layers:
        layer.trainable = False

    #Add the fully-connected layers
    output_base_model = my_model.layers[-1].output
    x = Flatten(name='flatten')(output_base_model)

    x = BatchNormalization(name='bn_flatten')(x)
    #x = Dropout(0.4, name='drop_flatten')(x)
    x = Dense(128,
              activation='relu',
              kernel_initializer='he_normal',
              name='fc1')(x)
    x = BatchNormalization(momentum=0.9, name='bn1')(x)
    #x = Dropout(0.4, name='drop_fc1')(x)
    x = Dense(128,
              activation='relu',
              kernel_initializer='he_normal',
              name='fc2')(x)
    x = BatchNormalization(momentum=0.9, name='bn2')(x)
    #x = Dropout(0.4, name='drop_fc2')(x)

    predicted_img_class = Dense(classes,
                                activation='softmax',
                                name='predictions')(x)

    #Create your own model
    my_model = Model(inputs=img_input, outputs=predicted_img_class)

    #In the summary the VGG layers show as non-trainable; only the new head is fit during training
    my_model.summary()
    plot_model(my_model, to_file='model_train.png', show_shapes=True)
    return my_model
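Only the new head trains here, and note that classes must be defined at module level before the function runs. A minimal compile sketch; the class count is hypothetical:

classes = 10  # hypothetical; must match the dataset's label count
model = create_my_model()
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])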
Example #6
    def __init__(self):

        # started
        self.logger = logging.getLogger(
            'vsi_application.scenesetting.SceneSettingDetector')
        self.logger.info('__init__(): started')

        ### init
        self.model = VGG16_Places365(weights='places')
        file_name = 'categories_places365.txt'
        if not os.access(file_name, os.W_OK):
            synset_url = 'https://raw.githubusercontent.com/csailvision/places365/master/categories_places365.txt'
            #os.system('wget ' + synset_url)
        classes = list()
        with open(file_name) as class_file:
            for line in class_file:
                classes.append(line.strip().split(' ')[0][3:])
        self.categories = tuple(classes)

        # finished
        self.logger.info('__init__(): finished')
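The snippet stops after the constructor; a companion prediction method is the obvious next piece. A hypothetical sketch consistent with the fields initialized above (numpy as np is an assumed import):

    def detect(self, image):
        # Hypothetical companion method, not in the original: top-1 Places365
        # category for a preprocessed (1, 224, 224, 3) image batch
        preds = self.model.predict(image)[0]
        return self.categories[int(np.argmax(preds))]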
Example #7
def extractor(path_file):

    # Initializing the model
    model = VGG16_Places365(weights='places', include_top=True)
    intermediate_layer_model = Model(inputs=model.input,
                                     outputs=model.get_layer('fc2').output)

    # Counting the number of lines in path_file
    pb = progressbar.ProgressBar(count_lines(path_file))

    dir_to_save = abspath(path_file).split(basename(path_file))[0]
    fc2 = dir_to_save + splitext(basename(path_file))[0] + '_fc2.txt'
    softmax = dir_to_save + splitext(basename(path_file))[0] + '_softmax.txt'

    # Creating the files
    file_fc2 = open(fc2, 'w')
    file_softmax = open(softmax, 'w')

    # Logging...
    logging.info('Saving... ' + fc2)
    logging.info('Saving... ' + softmax)

    with open(path_file) as f:
        for line in f:

            img_path = line.split()[0]
            label = line.split()[1]
            image = Image.open(img_path)
            image = np.array(image, dtype=np.uint8)
            image = resize(image, (224, 224))
            image = np.expand_dims(image, 0)

            fc2_output = intermediate_layer_model.predict(image)
            softmax_output = model.predict(image)

            # Writing features into the files
            write_features(file_fc2, label, img_path, fc2_output)
            write_features(file_softmax, label, img_path, softmax_output)

            pb.update()

    # Close the feature files once every line has been processed
    file_fc2.close()
    file_softmax.close()
Example #8
def locationDetect(image):
    image = np.array(image, dtype=np.uint8)
    image = resize(image, (224, 224))
    image = np.expand_dims(image, 0)

    model = VGG16_Places365(weights='places')
    predictions_to_return = 5
    preds = model.predict(image)[0]
    top_preds = np.argsort(preds)[::-1][0:predictions_to_return]

    # load the class labels, fetching them into PLACES_PATH if missing
    file_name = os.path.join(PLACES_PATH, 'categories_places365.txt')
    if not os.access(file_name, os.W_OK):
        synset_url = 'https://raw.githubusercontent.com/csailvision/places365/master/categories_places365.txt'
        os.system('wget -P ' + PLACES_PATH + ' ' + synset_url)  # download into PLACES_PATH, not the cwd
    classes = list()
    with open(file_name) as class_file:
        for line in class_file:
            classes.append(line.strip().split(' ')[0][3:])
    classes = tuple(classes)

    # output the prediction
    return classes[top_preds[0]]
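A usage sketch; the file path is hypothetical, and since the model is rebuilt on every call it would be worth hoisting the VGG16_Places365 construction out of the function for batch use:

from PIL import Image

img = Image.open('photo.jpg')  # hypothetical path
print(locationDetect(img))  # prints the top-1 scene category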
Example #9
def NewPlaceModel(classes):
    base_model = VGG16_Places365(include_top=True,
                                 weights='places',
                                 input_tensor=None,
                                 input_shape=None,
                                 pooling=None,
                                 classes=365)

    for i in range(len(base_model.layers[:-6])):
        base_model.layers[i].trainable = False

    train_layer = Flatten(name='flatten')(base_model.layers[-7].output)
    train_layer = Dense(4096, activation='relu', name='fc1')(train_layer)
    train_layer = Dropout(0.5, name='drop_fc1')(train_layer)

    train_layer = Dense(2048, activation='relu', name='fc2')(train_layer)
    train_layer = Dropout(0.5, name='drop_fc2')(train_layer)

    train_layer = Dense(classes, activation='softmax',
                        name="predictions")(train_layer)

    model = keras.models.Model(inputs=base_model.input, outputs=train_layer)

    return model
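A compile sketch for the rebuilt head; the class count and optimizer are illustrative assumptions:

model = NewPlaceModel(classes=15)  # hypothetical class count
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
model.summary()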
Example #10
        images_copy[:, :, 1] = image.copy()
        images_copy[:, :, 2] = image.copy()
        image = images_copy

    image = np.expand_dims(image, 0)
    return image


TEST_IMAGE_URL = 'http://places2.csail.mit.edu/imgs/demo/6.jpg'

image = Image.open(urllib2.urlopen(TEST_IMAGE_URL))
image = np.array(image, dtype=np.uint8)
image = resize(image, (224, 224))
image = np.expand_dims(image, 0)

base_model = VGG16_Places365(weights='places', include_top=False)
# print(models_name[model_id])
x = base_model.output
x = GlobalAveragePooling2D()(x)
model = Model(inputs=base_model.input, outputs=x)

path_to_data = 'd:\\dane\\HARRISON\\'
csv_ok = pd.read_csv('d:\\dane\\HARRISON\\data_list.txt', header=None)

models_name = ['VGG16_Places365']

model_id = 0

for a in range(csv_ok.shape[0]):
    if a % 100 == 0:
Example #11
import os
from urllib.request import urlopen
import numpy as np
from PIL import Image
from cv2 import resize
from vgg16_places_365 import VGG16_Places365
LABELS_URL = 'https://raw.githubusercontent.com/csailvision/places365/master/categories_places365.txt'
LABELS = np.array(urlopen(LABELS_URL).read().splitlines())
model = VGG16_Places365()
# Redis initialize


def predict(file_name):
    image = Image.open(file_name)
    image = np.array(image, dtype=np.uint8)
    image = resize(image, (224, 224))
    # image = preprocess_input(image.astype(np.float32))
    image = np.expand_dims(image, 0)
    output = model.predict(image)
    output = np.squeeze(output)
    new_labels = []
    top5 = output.argsort()[-5:][::-1]
    labels = LABELS[top5]
    scores = output[top5]
    for vals in labels:
        decoded_string_array = vals.decode('UTF-8')
        array_with_id = decoded_string_array.split(" ")
        array_with_id.pop()
        new_labels.append(array_with_id[0])

    scores = [float(np_float) for np_float in scores]
Example #12
'''                     ***                      '''
'''            Mahya Mahdian - 94471039          '''
'''      Mohammad Hassan Sattarian - 94471035    '''

import matplotlib.pyplot as plt
import numpy as np
from keras import layers, models, optimizers
from keras.applications.imagenet_utils import _obtain_input_shape
from keras.preprocessing import image
from keras.preprocessing.image import ImageDataGenerator
from places_utils import preprocess_input
from vgg16_places_365 import VGG16_Places365

# Model Used as Base model in Fine Tuning
base_model = VGG16_Places365(include_top=False,
                             weights='places',
                             input_shape=(108, 192, 3))

# freeze the earlier layers and leave the last ones trainable,
# so only the top of the base model is fine-tuned
for layer in base_model.layers[:12]:
    layer.trainable = False
for layer in base_model.layers[12:]:
    layer.trainable = True

# Creating our very own model based on the VGG16-Places365 model, with additional
# fully-connected layers of 256 and 6 nodes using ReLU and softmax activations
# respectively, to capture nonlinearities and map the result onto 6 classes
model = models.Sequential()
model.add(base_model)
model.add(layers.Flatten())
model.add(layers.Dense(256, activation='relu'))
Example #13
            np.sum(ground_truth == unique_y))
    return np.mean(acc)


if __name__ == '__main__':
    args = vars(arg_parser.parser.parse_args())
    source = args['source_model']

    img_width, img_height = 224, 224
    if source == 'VGG16_ImageNet':
        model = applications.VGG16(weights="imagenet",
                                   include_top=False,
                                   input_shape=(img_width, img_height, 3))
    elif source == 'VGG16_Places':
        model = VGG16_Places365(include_top=False,
                                weights='places',
                                input_shape=(img_width, img_height, 3))
    else:
        sys.stdout.write("Source model specified " + source +
                         " not recognized. Try: VGG16_ImageNet, VGG16_Places\n")
        sys.exit(1)

    # Define input and target tensors, that is where we want
    #to enter data, and which activations we wish to extract

    #target_tensors = ['block1_conv1/Relu:0','block1_conv2/Relu:0','block2_conv1/Relu:0','block2_conv2/Relu:0','block3_conv1/Relu:0','block3_conv2/Relu:0','block3_conv3/Relu:0','block4_conv1/Relu:0','block4_conv2/Relu:0','block4_conv3/Relu:0','block5_conv1/Relu:0','block5_conv2/Relu:0','block5_conv3/Relu:0','fc1/Relu:0','fc2/Relu:0']
    #target_tensors = ['fc1/Relu:0']
    '''
    target_tensors = ['block1_conv1','block1_conv2',
                      'block2_conv1','block2_conv2',
Example #14
true_y = [np.argmax(x) for x in test_y]

plot_heatmap(true_y, pred_y, './result/heatmap_datagen')
plot_res(true_y, pred_y, './result/res_datagen')

## VGG16 Places365

from vgg16_places_365 import VGG16_Places365

train_x, train_y, test_x, test_y = get_data(gray=False, size=256)
train_y = to_categorical(train_y)
test_y = to_categorical(test_y)
train_x.shape

vgg = VGG16_Places365(weights='places',
                      include_top=False,
                      input_shape=(256, 256, 3))
model_vgg = Sequential()
model_vgg.add(vgg)
model_vgg.add(Flatten())
model_vgg.add(Dense(4096, activation='relu'))
model_vgg.add(Dense(15, activation='softmax'))
vgg.trainable = False

model_vgg.compile(optimizer=Adam(lr=2e-5),
                  loss="categorical_crossentropy",
                  metrics=["accuracy"])

datagen.fit(train_x)

history_vgg = model_vgg.fit_generator(datagen.flow(train_x,
Example #15
def main():
    ap = argparse.ArgumentParser()
    ap.add_argument("-t",
                    "--training",
                    required=True,
                    help="path to input dataset of training images")
    ap.add_argument("-tt",
                    "--test",
                    required=True,
                    help="path to input dataset of training images")
    ap.add_argument("-p",
                    "--plot",
                    required=True,
                    help="path to output accuracy/loss plot")
    args = vars(ap.parse_args())
    # initialize the data and labels
    print("[INFO] loading images...")

    datagen = ImageDataGenerator(horizontal_flip=True,
                                 width_shift_range=0.1,
                                 height_shift_range=0.1,
                                 zoom_range=0.2,
                                 shear_range=0.2,
                                 rotation_range=30,
                                 fill_mode="nearest",
                                 validation_split=0.2)

    train_generator = datagen.flow_from_directory(args["training"],
                                                  (HEIGHT, WIDTH),
                                                  batch_size=BS,
                                                  subset='training')

    valid_generator = datagen.flow_from_directory(args["training"],
                                                  (HEIGHT, WIDTH),
                                                  batch_size=BS,
                                                  subset='validation')

    # initialize our VGG-like Convolutional Neural Network
    base_model = VGG16_Places365(include_top=False,
                                 weights='places',
                                 input_shape=(HEIGHT, WIDTH, 3),
                                 pooling="avg")
    x = base_model.output
    x = Dense(1024, activation="relu")(x)
    x = Dropout(0.5)(x)
    predictions = Dense(15, activation="softmax")(x)

    model = Model(inputs=base_model.input, outputs=predictions)

    # initialize the model and optimizer (you'll want to use
    # binary_crossentropy for 2-class classification)
    print("[INFO] training network...")
    opt = SGD(lr=INIT_LR, decay=INIT_LR / EPOCHS, clipnorm=5., momentum=0.9)
    model.compile(loss="categorical_crossentropy",
                  optimizer=opt,
                  metrics=["accuracy"])

    # train the network
    H = model.fit_generator(train_generator,
                            validation_data=valid_generator,
                            steps_per_epoch=train_generator.samples // BS + 1,
                            validation_steps=valid_generator.samples // BS + 1,
                            epochs=EPOCHS)

    ##########################
    ## EVALUATE THE NETWORK ##
    ##########################
    print("[INFO] evaluating network...")
    datagen = ImageDataGenerator()
    test_generator = datagen.flow_from_directory(args["test"], (HEIGHT, WIDTH),
                                                 batch_size=BS,
                                                 shuffle=False)
    Y_pred = model.predict_generator(test_generator,
                                     steps=test_generator.samples // BS + 1)
    y_pred = np.argmax(Y_pred, axis=1)
    acc = accuracy_score(test_generator.classes, y_pred)
    print("Accuracy: ", acc)

    # plot the training loss and accuracy
    N = np.arange(0, EPOCHS)
    plt.style.use("ggplot")
    plt.figure()
    plt.plot(N, H.history["loss"], label="train_loss")
    plt.plot(N, H.history["val_loss"], label="val_loss")
    plt.plot(N, H.history["acc"], label="train_acc")
    plt.plot(N, H.history["val_acc"], label="val_acc")
    plt.title("Training Loss and Accuracy (VGG16Places365)")
    plt.xlabel("Epoch #")
    plt.ylabel("Loss/Accuracy")
    plt.legend()
    plt.savefig(args["plot"])

    target_names = [
        "Bedroom", "Coast", "Forest", "Highway", "Industrial", "InsideCity",
        "Kitchen", "LivingRoom", "Mountain", "Office", "OpenCountry", "Store",
        "Street", "Suburb", "TallBuilding"
    ]
    print(
        classification_report(test_generator.classes,
                              y_pred,
                              target_names=target_names))
    plot_confusion_matrix(target_names, test_generator.classes, y_pred)
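The snippet stops at the plotting helper; presumably the script finishes with the standard entry-point guard, sketched here as an assumption:

if __name__ == "__main__":
    main()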
Example #16
from google.colab import drive  # assumed import; the original snippet omits it
drive.mount('/content/gdrive/')

cd /content/gdrive/My\ Drive/vip2

!unzip -q "drive/My Drive/vip2/NWPUvip.zip"

train_dir = 'NWPU-RESISC12/train'
test_dir  = 'NWPU-RESISC12/test' 
image_size = 224
nTrain = 6600
nTest = 1800

cd drive/My\ Drive/vip2
# load vgg16_places365
from vgg16_places_365 import VGG16_Places365
vgg16_places = VGG16_Places365(weights='places', include_top=False, input_shape=(224, 224, 3))


from keras.utils import to_categorical
from keras.preprocessing.image import ImageDataGenerator, load_img
import numpy as np

# Train :: Extract Features
datagen = ImageDataGenerator(rescale=1./255)
batch_size = 20

train_features = np.zeros(shape=(nTrain, 7, 7, 512))
train_labels = np.zeros(shape=(nTrain,12))

train_generator = datagen.flow_from_directory(
    train_dir,
Example #17
train_data = CombinatorialTripletSet(train_dataset,
                                     mean_file,
                                     img_size,
                                     crop_size,
                                     isTraining=False)

# c = tf.ConfigProto()
# c.gpu_options.visible_device_list=str(whichGPU)
# sess = tf.Session(config=c)
# saver = tf.train.import_meta_graph(pretrained_net.split('.ckpt')[0]+'.meta')
# saver.restore(sess, pretrained_net)
#
# graph = tf.get_default_graph()
# image_batch = graph.get_tensor_by_name("images:0")

model = VGG16_Places365(weights='places', include_top=False, pooling='avg')

train_ims = []
train_classes = []
for ims, cls in zip(train_data.files, train_data.classes):
    for im in ims:
        train_ims.append(im)
        train_classes.append(int(cls))

train_ims = np.array(train_ims)
train_classes = np.array(train_classes)

if not os.path.exists(os.path.join(output_dir, 'trainFeats.h5')):
    train_feats = np.zeros((train_ims.shape[0], model.output.shape[1]))
    for ix in range(0, train_ims.shape[0], batch_size):
        image_list = train_ims[ix:ix + batch_size]
Example #18
def train(pooling="avg",
          num_units=1024,
          batch_size=2,
          name="test",
          drop_prob=0.,
          bonus=False,
          freeze=False):
    model_dir = os.path.join(DIRNAME, "models/{}".format(name))
    os.makedirs(model_dir, exist_ok=True)

    datagen = ImageDataGenerator(horizontal_flip=True,
                                 width_shift_range=0.2,
                                 height_shift_range=0.2,
                                 zoom_range=0.1,
                                 validation_split=0.2)

    train_generator = datagen.flow_from_directory(TRAIN_DIR, (300, 250),
                                                  batch_size=batch_size,
                                                  subset='training')

    valid_generator = datagen.flow_from_directory(TRAIN_DIR, (300, 250),
                                                  batch_size=batch_size,
                                                  subset='validation')

    if bonus:
        base_model = VGG16_Places365(include_top=False,
                                     weights='places',
                                     input_shape=(300, 250, 3),
                                     pooling=pooling)
    else:

        base_model = Xception(include_top=False,
                              weights="imagenet",
                              input_shape=(300, 250, 3),
                              pooling=pooling)

    x = base_model.output
    x = Dense(num_units, activation="relu")(x)
    x = Dropout(drop_prob)(x)
    predictions = Dense(15, activation="softmax")(x)

    model = Model(inputs=base_model.input, outputs=predictions)

    if freeze:
        for layer in base_model.layers:
            layer.trainable = False

    optimizer = SGD(lr=0.001, momentum=0.9, clipnorm=5.)

    model.compile(loss='categorical_crossentropy',
                  optimizer=optimizer,
                  metrics=['accuracy'])

    tensorboard = TensorBoard(log_dir=model_dir,
                              batch_size=batch_size,
                              update_freq="batch")
    saver = ModelCheckpoint("{}/model.hdf5".format(model_dir),
                            verbose=1,
                            save_best_only=True,
                            monitor="val_acc",
                            mode="max")
    stopper = EarlyStopping(patience=20,
                            verbose=1,
                            monitor="val_acc",
                            mode="max")
    reduce_lr = ReduceLROnPlateau(monitor="loss",
                                  factor=0.5,
                                  patience=5,
                                  verbose=1,
                                  min_lr=0.0001)

    model.fit_generator(
        train_generator,
        steps_per_epoch=train_generator.samples // batch_size + 1,
        validation_data=valid_generator,
        validation_steps=valid_generator.samples // batch_size + 1,
        verbose=2,
        epochs=50,
        callbacks=[tensorboard, saver, stopper, reduce_lr])
    print("Modelo {} treinado!".format(name))
Example #19
import sys
sys.path.append("../")
from vgg16_places_365 import VGG16_Places365
import tensorflow as tf
from keras import backend as K

model = VGG16_Places365(weights='places')


def freeze_session(session,
                   keep_var_names=None,
                   output_names=None,
                   clear_devices=True):
    """
    Freezes the state of a session into a pruned computation graph.

    Creates a new computation graph where variable nodes are replaced by
    constants taking their current value in the session. The new graph will be
    pruned so subgraphs that are not necessary to compute the requested
    outputs are removed.
    @param session The TensorFlow session to be frozen.
    @param keep_var_names A list of variable names that should not be frozen,
                          or None to freeze all the variables in the graph.
    @param output_names Names of the relevant graph outputs.
    @param clear_devices Remove the device directives from the graph for better portability.
    @return The frozen graph definition.
    """
    graph = session.graph
    with graph.as_default():
        freeze_var_names = list(
            set(v.op.name