from tensorflow.keras.applications import VGG16, ResNet50V2, VGG19
from tensorflow.keras.layers import Input

MODELS = [
    {
        "base_model": VGG16(weights="imagenet",
                            include_top=False,
                            input_tensor=Input(shape=(224, 224, 3))),
        "name": "vgg16",
        "feature_shape": 7 * 7 * 512,
    },
    {
        "base_model": VGG19(weights="imagenet",
                            include_top=False,
                            input_tensor=Input(shape=(224, 224, 3))),
        "name": "vgg19",
        "feature_shape": 7 * 7 * 512,
    },
    {
        "base_model": ResNet50V2(weights="imagenet",
                                 include_top=False,
                                 input_tensor=Input(shape=(224, 224, 3))),
        "name": "resnet50v2",
        "feature_shape": 7 * 7 * 2048,
    },
]
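
A minimal usage sketch, not from the original source: each entry's base_model can be run over a preprocessed batch and its output flattened to the advertised feature_shape (the random images batch below is a placeholder for real preprocessed inputs):

import numpy as np

images = np.random.rand(4, 224, 224, 3).astype("float32")  # placeholder batch
for cfg in MODELS:
    feats = cfg["base_model"].predict(images)   # e.g. (4, 7, 7, 512) for VGG16
    flat = feats.reshape(feats.shape[0], -1)    # (4, feature_shape)
    assert flat.shape[1] == cfg["feature_shape"]
    print(cfg["name"], flat.shape)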
Example #2

from tensorflow.keras import layers, models
from tensorflow.keras.applications import (VGG16, VGG19, InceptionResNetV2,
                                           InceptionV3, ResNet50)
def build_model(model_name, img_size, num_classes):
    """
    Build model.

    Arguments
        model_name : (str) model name, which is used to select model.
        img_size : (int) image size for both width and height for modeling.
        num_classes : (int) number of classes for the last layer of the model.

    Returns
        model : a tensorflow model object.

    """
    image_shape = (img_size, img_size, 3)

    # Initialize model
    model = models.Sequential()

    # Load model
    if model_name == 'dummy':
        model.add(
            layers.MaxPooling2D(pool_size=(4, 4), input_shape=image_shape))

    else:
        print('[KF INFO] Loading pre-trained model ...')
        if model_name == 'VGG16':
            if img_size > 224:
                raise Exception(
                    "[KF ERROR] For %s model, the input image size cannot be larger than 224!"
                    % model_name)
            conv = VGG16(weights='imagenet',
                         include_top=False,
                         input_shape=image_shape)
        elif model_name == 'VGG19':
            if img_size > 224:
                raise Exception(
                    "[KF ERROR] For %s model, the input image size cannot be larger than 224!"
                    % model_name)
            conv = VGG19(weights='imagenet',
                         include_top=False,
                         input_shape=image_shape)
        elif model_name == 'InceptionResNetV2':
            if img_size > 299:
                raise Exception(
                    "[KF ERROR] For %s model, the input image size cannot be larger than 299!"
                    % model_name)
            conv = InceptionResNetV2(weights='imagenet',
                                     include_top=False,
                                     input_shape=image_shape)
        elif model_name == 'InceptionV3':
            if img_size > 299:
                raise Exception(
                    "[KF ERROR] For %s model, the input image size cannot be larger than 299!"
                    % model_name)
            conv = InceptionV3(weights='imagenet',
                               include_top=False,
                               input_shape=image_shape)
        elif model_name == 'ResNet50':
            if img_size > 224:
                raise Exception(
                    "[KF ERROR] For %s model, the input image size cannot be larger than 224!"
                    % model_name)
            conv = ResNet50(weights='imagenet',
                            include_top=False,
                            input_shape=image_shape)
        else:
            raise Exception("[KF ERROR] Cannot load the pre-trained model! ")

        print(
            "[KF INFO] The pretrained model %s's convolutional part is loaded ..."
            % model_name)
        model.add(conv)

    # Add top layers
    fc_size = 256
    model.add(layers.Flatten())
    model.add(layers.BatchNormalization())
    model.add(layers.Dense(fc_size, activation='relu'))
    model.add(layers.Dropout(0.5))
    model.add(layers.Dense(num_classes, activation='softmax'))

    return model
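
A quick, hypothetical call (not in the original snippet) showing how build_model is meant to be used, here for a 10-class classifier on 224x224 RGB images:

model = build_model('VGG16', 224, 10)
model.compile(optimizer='adam', loss='categorical_crossentropy',
              metrics=['accuracy'])
model.summary()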
Example #3
    def __init__(
        self,
        dataset_name,
        light,
        source_domain,
        target_domain,
        gan_type,
        epochs,
        input_size,
        multi_scale,
        batch_size,
        sample_size,
        reporting_steps,
        content_lambda,
        style_lambda,
        g_adv_lambda,
        d_adv_lambda,
        generator_lr,
        discriminator_lr,
        data_dir,
        log_dir,
        result_dir,
        checkpoint_dir,
        generator_checkpoint_prefix,
        discriminator_checkpoint_prefix,
        pretrain_checkpoint_prefix,
        pretrain_model_dir,
        model_dir,
        disable_sampling,
        ignore_vgg,
        pretrain_learning_rate,
        pretrain_epochs,
        pretrain_saving_epochs,
        pretrain_reporting_steps,
        pretrain_generator_name,
        generator_name,
        discriminator_name,
        debug,
        **kwargs,
    ):
        self.debug = debug
        self.ascii = os.name == "nt"
        self.dataset_name = dataset_name
        self.light = light
        self.source_domain = source_domain
        self.target_domain = target_domain
        self.gan_type = gan_type
        self.epochs = epochs
        self.input_size = input_size
        self.multi_scale = multi_scale
        self.batch_size = batch_size
        self.sample_size = sample_size
        self.reporting_steps = reporting_steps
        self.content_lambda = float(content_lambda)
        self.style_lambda = float(style_lambda)
        self.g_adv_lambda = g_adv_lambda
        self.d_adv_lambda = d_adv_lambda
        self.generator_lr = generator_lr
        self.discriminator_lr = discriminator_lr
        self.data_dir = data_dir
        self.log_dir = log_dir
        self.result_dir = result_dir
        self.checkpoint_dir = checkpoint_dir
        self.generator_checkpoint_prefix = generator_checkpoint_prefix
        self.discriminator_checkpoint_prefix = discriminator_checkpoint_prefix
        self.pretrain_checkpoint_prefix = pretrain_checkpoint_prefix
        self.pretrain_model_dir = pretrain_model_dir
        self.model_dir = model_dir
        self.disable_sampling = disable_sampling
        self.ignore_vgg = ignore_vgg
        self.pretrain_learning_rate = pretrain_learning_rate
        self.pretrain_epochs = pretrain_epochs
        self.pretrain_saving_epochs = pretrain_saving_epochs
        self.pretrain_reporting_steps = pretrain_reporting_steps
        self.pretrain_generator_name = pretrain_generator_name
        self.generator_name = generator_name
        self.discriminator_name = discriminator_name

        self.logger = get_logger("Trainer", debug=debug)
        self.sizes = [self.input_size - 32, self.input_size, self.input_size + 32]

        if not self.ignore_vgg:
            self.logger.info("Setting up VGG19 for computing content loss...")
            from tensorflow.keras.applications import VGG19
            from tensorflow.keras.layers import Conv2D
            input_shape = (self.input_size, self.input_size, 3)
            base_model = VGG19(weights="imagenet", include_top=False, input_shape=input_shape)
            tmp_vgg_output = base_model.get_layer("block4_conv3").output
            tmp_vgg_output = Conv2D(512, (3, 3), activation='linear', padding='same',
                                    name='block4_conv4')(tmp_vgg_output)
            self.vgg = tf.keras.Model(inputs=base_model.input, outputs=tmp_vgg_output)
            self.vgg.load_weights(os.path.expanduser(os.path.join(
                "~", ".keras", "models",
                "vgg19_weights_tf_dim_ordering_tf_kernels_notop.h5")), by_name=True)
        else:
            self.logger.info("VGG19 will not be used. "
                             "Content loss will simply imply pixel-wise difference.")
            self.vgg = None

        self.logger.info(f"Setting up objective functions and metrics using {self.gan_type}...")
        self.mae = tf.keras.losses.MeanAbsoluteError()
        self.generator_loss_object = tf.keras.losses.BinaryCrossentropy(from_logits=True)
        if self.gan_type == "gan":
            self.discriminator_loss_object = tf.keras.losses.BinaryCrossentropy(
                from_logits=True)
        elif self.gan_type == "lsgan":
            self.discriminator_loss_object = tf.keras.losses.MeanSquaredError()
        else:
            wrong_msg = f"Non-recognized 'gan_type': {self.gan_type}"
            self.logger.critical(wrong_msg)
            raise ValueError(wrong_msg)

        self.g_total_loss_metric = tf.keras.metrics.Mean("g_total_loss", dtype=tf.float32)
        self.g_adv_loss_metric = tf.keras.metrics.Mean("g_adversarial_loss", dtype=tf.float32)
        if self.content_lambda != 0.:
            self.content_loss_metric = tf.keras.metrics.Mean("content_loss", dtype=tf.float32)
        if self.style_lambda != 0.:
            self.style_loss_metric = tf.keras.metrics.Mean("style_loss", dtype=tf.float32)
        self.d_total_loss_metric = tf.keras.metrics.Mean("d_total_loss", dtype=tf.float32)
        self.d_real_loss_metric = tf.keras.metrics.Mean("d_real_loss", dtype=tf.float32)
        self.d_fake_loss_metric = tf.keras.metrics.Mean("d_fake_loss", dtype=tf.float32)
        self.d_smooth_loss_metric = tf.keras.metrics.Mean("d_smooth_loss", dtype=tf.float32)

        self.metric_and_names = [
            (self.g_total_loss_metric, "g_total_loss"),
            (self.g_adv_loss_metric, "g_adversarial_loss"),
            (self.d_total_loss_metric, "d_total_loss"),
            (self.d_real_loss_metric, "d_real_loss"),
            (self.d_fake_loss_metric, "d_fake_loss"),
            (self.d_smooth_loss_metric, "d_smooth_loss"),
        ]
        if self.content_lambda != 0.:
            self.metric_and_names.append((self.content_loss_metric, "content_loss"))
        if self.style_lambda != 0.:
            self.metric_and_names.append((self.style_loss_metric, "style_loss"))

        self.logger.info("Setting up checkpoint paths...")
        self.pretrain_checkpoint_prefix = os.path.join(
            self.checkpoint_dir, "pretrain", self.pretrain_checkpoint_prefix)
        self.generator_checkpoint_dir = os.path.join(
            self.checkpoint_dir, self.generator_checkpoint_prefix)
        self.generator_checkpoint_prefix = os.path.join(
            self.generator_checkpoint_dir, self.generator_checkpoint_prefix)
        self.discriminator_checkpoint_dir = os.path.join(
            self.checkpoint_dir, self.discriminator_checkpoint_prefix)
        self.discriminator_checkpoint_prefix = os.path.join(
            self.discriminator_checkpoint_dir, self.discriminator_checkpoint_prefix)
Example #4
from tensorflow.keras.datasets import cifar10
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.applications import VGG19
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Flatten
from tensorflow.keras.callbacks import EarlyStopping, ReduceLROnPlateau

(x_train, y_train), (x_test, y_test) = cifar10.load_data()

y_train = to_categorical(y_train)
y_test = to_categorical(y_test)

print(x_train.shape, x_test.shape)
print(y_train.shape, y_test.shape)

vgg19 = VGG19(weights='imagenet',
              include_top=False,
              input_shape=x_train.shape[1:])

vgg19.trainable = False

model = Sequential()
model.add(vgg19)
model.add(Flatten())
model.add(Dense(256))
model.add(Dense(64))
model.add(Dense(10, activation='softmax'))

model.summary()

model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])  # metrics line assumed; the original snippet is truncated here
Example #5
d_rate      = 0.5
reg_value   = 0.01
MODEL_PATH = 'pickles/complete_model.h5'
IMAGE_PATH = 'static'

custom_objects = {
    'PhraseLevelFeatures': PhraseLevelFeatures,
    'AttentionMaps': AttentionMaps,
    'ContextVector': ContextVector
    }

"""**Loading the saved model and the pretrained VGG19 model**"""

# load the model
model = tf.keras.models.load_model(MODEL_PATH, custom_objects=custom_objects)
vgg_model = VGG19(weights="imagenet", include_top=False, input_tensor=Input(shape=(224, 224, 3)))  # channels-last (H, W, C)

"""**Predict Function**

> Extracts the question and image features given as input and passes these features onto the model.

> Output is the answer predicted by the model
"""

def predict(img,que):

  img_feat = image_feature_extractor(img, vgg_model)

  questions_processed = pd.Series(que).apply(process_sentence)

  question_data = tok.texts_to_sequences(questions_processed)
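
The snippet is cut off here. A hedged sketch of how the remaining steps might look; max_len, label_encoder, and the padding call are assumptions, not part of the original:

  # hypothetical continuation, for illustration only
  from tensorflow.keras.preprocessing.sequence import pad_sequences
  question_data = pad_sequences(question_data, maxlen=max_len, padding='post')
  pred = model.predict([img_feat, question_data])             # class probabilities
  return label_encoder.inverse_transform([pred.argmax()])[0]  # answer string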
Example #6

x_test = scaler.transform(x_test)
x_val = scaler.transform(x_val)

#to_categorical
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)
y_val = to_categorical(y_val)

#reshape
x_train = x_train.reshape(-1, 32, 32, 3)
x_test = x_test.reshape(-1, 32, 32, 3)
x_val = x_val.reshape(-1, 32, 32, 3)
print(x_train.shape, x_test.shape, x_val.shape)

#2. Modeling
vgg19 = VGG19(weights='imagenet', include_top=False,
              input_shape=(32, 32, 3))
vgg19.trainable = False  # freeze: use the ImageNet weights without further training
model = Sequential()
model.add(vgg19)
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dense(64, activation='relu'))
model.add(Dense(10, activation='softmax'))  # 10 classes (same head as for MNIST)
model.summary()
print(len(vgg19.weights))  # 32 weight tensors: 16 conv layers x (kernel + bias)
print(len(vgg19.trainable_weights))  # 0

# Compile, train
from tensorflow.keras.callbacks import EarlyStopping
Example #7
                                                    y,
                                                    train_size=0.8,
                                                    shuffle=True,
                                                    random_state=42)

channels = 1  # add a channel axis so the data fits a Conv2D input
x_train = x_train.reshape(x_train.shape[0], x_train.shape[1], x_train.shape[2],
                          channels)
x_test = x_test.reshape(x_test.shape[0], x_test.shape[1], x_test.shape[2],
                        channels)
print(x_train.shape, y_train.shape)  # (3628, 128, 862, 1) (3628,)
print(x_test.shape, y_test.shape)  # (908, 128, 862, 1) (908,)

model = VGG19(
    include_top=True,
    input_shape=(128, 862, 1),
    classes=2,
    pooling=None,
    weights=None,
)

model.summary()
# model.trainable = False

model.save('C:/nmb/nmb_data/h5/5s/vgg19/vgg19_rmsprop_1.h5')

# Compile, train
op = RMSprop(lr=1e-3)
batch_size = 4

es = EarlyStopping(monitor='val_loss',
                   patience=20,
Example #8
(trainX, testX, trainY, testY) = train_test_split(imagens,
                                                  labels,
                                                  test_size=0.20,
                                                  stratify=labels,
                                                  random_state=random_state)

# DATA AUGMENTATION
train_datagen = ImageDataGenerator(rotation_range=20, zoom_range=0.2)

train_datagen.fit(trainX)
data_aug = train_datagen.flow(trainX, trainY, batch_size=batch_size)

# TRANSFER LEARNING
conv_base = VGG19(weights='imagenet',
                  include_top=False,
                  input_shape=input_shape)

conv_base.summary()

# Retrain part of VGG19 (fine-tune from block5_conv1 onward)
conv_base.trainable = True
set_trainable = False

for layer in conv_base.layers:
    if layer.name == 'block5_conv1':
        set_trainable = True
    if set_trainable:
        layer.trainable = True
    else:
        layer.trainable = False
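
A quick check, not in the original, to confirm which layers the loop above left trainable:

for layer in conv_base.layers:
    print(layer.name, layer.trainable)  # False up to block5_conv1, True after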
Example #9

import tensorflow as tf
from tensorflow.keras.applications import VGG19
def build_vgg_extractor():
    vgg = VGG19(include_top=False, weights='imagenet')
    content_layers = ['block1_conv1','block1_conv2','block2_conv1','block2_conv2','block3_conv1']
    lossModel = tf.keras.Model(inputs=vgg.input, outputs=[vgg.get_layer(x).output for x in content_layers])
    lossModel.trainable = False
    return lossModel
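
For illustration (not in the original), the extractor can back a simple perceptual loss between two preprocessed image batches img_a and img_b (both assumed):

# hypothetical usage of build_vgg_extractor as a perceptual loss
loss_model = build_vgg_extractor()
feats_a = loss_model(img_a)   # list of 5 feature tensors
feats_b = loss_model(img_b)
perceptual_loss = tf.add_n([tf.reduce_mean(tf.abs(a - b))
                            for a, b in zip(feats_a, feats_b)])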
Example #10

validation_set = train_datagen.flow_from_directory(train_path,
                                                   target_size=(224, 224),
                                                   batch_size=32,
                                                   class_mode='categorical',
                                                   shuffle=True,
                                                   subset='validation')

# In[4]:

from tensorflow.keras.applications import VGG19
from tensorflow.keras.layers import GlobalAveragePooling2D, Dropout, Flatten, Dense
from tensorflow.keras.models import Model

## Initialise with a 3-channel RGB input and ImageNet weights; include_top=False lets us attach our own classifier head

mv = VGG19(input_shape=IMAGE_SIZE + [3], weights='imagenet', include_top=False)

# In[5]:

for layer in mv.layers:
    layer.trainable = False

# In[6]:

x = Flatten()(mv.output)
prediction = Dense(3, activation='softmax')(x)

# In[7]:

model = Model(inputs=mv.input, outputs=prediction)
Example #11
from tensorflow.keras.utils import to_categorical
from sklearn.metrics import confusion_matrix
from tensorflow.keras.models import Model
from tensorflow.keras.applications import ResNet50, VGG19
from tensorflow.keras.layers import Flatten, BatchNormalization, Dense, Dropout

import tensorflow.keras.backend as K
import matplotlib.pyplot as plt
import tensorflow as tf
import numpy as np
import os, re, cv2
import itertools

#Defining the model objects for ResNet50 and VGG19

K.clear_session()
base_model = ResNet50(include_top=False, weights='imagenet', input_shape=(32,32,3))
base_model_vgg = VGG19(include_top=False, weights='imagenet', input_shape=(32,32,3))

#Making all the VGG19 layers non-trainable

for layer in base_model_vgg.layers:
    layer.trainable = False

base_model_vgg.summary()

#Adding some customized layers for the model

y1 = base_model_vgg.output
y1 = Flatten()(y1)
y1 = BatchNormalization()(y1)
y1 = Dense(128,activation='relu')(y1)
y1 = Dropout(0.3)(y1)
Example #12
#### VGG-19
"""
# VGG-19 architecture
vgg = VGG19(include_top=False,
            weights='imagenet',
            input_shape=(32, 32, 3),
            pooling='max',
            classes=10)

# setting trainable layers in VGG-19

for layer in vgg.layers[0:15]:
    layer.trainable = True

for layer in vgg.layers[15:]:
    layer.trainable = False

# Summary VGG-19
print(vgg.summary())

# Build CNN
Example #13

from tensorflow.keras.applications import VGG19
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import (Dense, Flatten, BatchNormalization,
                                     Dropout, Activation)
from tensorflow.keras.datasets import cifar10
from tensorflow.keras.utils import to_categorical

#1. Data
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)

x_train = x_train.reshape(x_train.shape[0], x_train.shape[1], x_train.shape[2],
                          3).astype('float32') / 255.
x_test = x_test.reshape(x_test.shape[0], x_test.shape[1], x_test.shape[2],
                        3).astype('float32') / 255.

#2. Model
t = VGG19(weights='imagenet',
          include_top=False,
          input_shape=(x_train.shape[1], x_train.shape[2], 3))
t.trainable = False  # freeze: use the ImageNet weights as-is, no further training
# model.trainable=True

model = Sequential()
model.add(t)
model.add(Flatten())
model.add(Dense(256))
model.add(BatchNormalization())
model.add(Dropout(0.2))
model.add(Activation('relu'))
model.add(Dense(256))
model.add(Dense(10, activation='softmax'))

model.compile(loss='categorical_crossentropy',
Example #14
    def CNN_model(self, learning_rate, epoch, batchsize, whether_Adam, Momentum_gamma, weight_decay, whether_load, cnn_type):
        """
        CNN model (a chosen ResNet/VGG backbone, or our own CNN layers as a fallback)
        :param learning_rate
        :param epoch
        :param batchsize
        :param whether_Adam: whether to use the Adam optimiser; if not, Momentum is used
        :param Momentum_gamma: a variable of Momentum
        :param weight_decay: weight decay for Momentum
        :param whether_load: whether to load the trained model from disk if it exists (otherwise overwrite it)
        :param cnn_type: which backbone to use, e.g. 'ResNet50' ... 'VGG19'
        """

        test_cnn_mfcc = self.train_mfcc
        test_cnn_label = self.train_label

        if(isfile("model/resnet_label.hdf5") and whether_load):
            self.cnn_model = load_model("model/resnet_label.hdf5")
        else:
            train_cnn_mfcc = self.test_mfcc
            train_cnn_label = self.test_label
            val_cnn_mfcc = self.validate_mfcc
            val_cnn_label = self.validate_label

            # input
            input = Input(shape=(self.test_mfcc.shape[1], self.test_mfcc.shape[2], 1))

            # Concatenate -1 dimension to be three channels, to fit the input need in ResNet50
            input_concate = Concatenate()([input,input,input])

            # CNN series network (VGG+Resnet)
            # reference: https://keras.io/api/applications/
            if(cnn_type == 'ResNet50'):
                from tensorflow.keras.applications import ResNet50
                cnn_output = ResNet50(pooling = 'avg')(input_concate)
            elif(cnn_type == 'ResNet101'):
                from tensorflow.keras.applications import ResNet101
                cnn_output = ResNet101(pooling = 'avg')(input_concate)
            elif(cnn_type == 'ResNet152'):
                from tensorflow.keras.applications import ResNet152
                cnn_output = ResNet152(pooling = 'avg')(input_concate)
            elif(cnn_type == 'ResNet50V2'):
                from tensorflow.keras.applications import ResNet50V2
                cnn_output = ResNet50V2(pooling = 'avg')(input_concate)
            elif(cnn_type == 'ResNet101V2'):
                from tensorflow.keras.applications import ResNet101V2
                cnn_output = ResNet101V2(pooling = 'avg')(input_concate)
            elif(cnn_type == 'ResNet152V2'):
                from tensorflow.keras.applications import ResNet152V2
                cnn_output = ResNet152V2(pooling = 'avg')(input_concate)
            elif(cnn_type == 'VGG16'):
                # width and height should not smaller than 32
                from tensorflow.keras.applications import VGG16
                cnn_output = VGG16(include_top = False, pooling = 'avg')(input_concate)
                cnn_output = Flatten()(cnn_output)
            elif(cnn_type == 'VGG19'):
                # width and height should not smaller than 32
                from tensorflow.keras.applications import VGG19
                cnn_output = VGG19(include_top = False, pooling = 'avg')(input_concate)
                cnn_output = Flatten()(cnn_output)
            else:
                # CNN layers we design
                print("No recognised CNN network. The CNN layers we designed are performed")
                # convolution layers
                conv_output1 = Conv2D(filters=32, strides=(1, 1), kernel_size=5, activation='relu')(input)
                # pool_output1 = MaxPool2D(pool_size=(2, 2))(conv_output1)
                conv_output2 = Conv2D(filters=8, strides=(2, 2), kernel_size=4, activation='relu')(conv_output1)

                conv_output2 = Dropout(0.2)(conv_output2)

                conv_output2_batch = BatchNormalization()(conv_output2)

                cnn_output = Flatten()(conv_output2_batch)


            # dense with sigmoid
            Dense_sigmoid = Dense(24, activation='sigmoid')(cnn_output)

            Dense_sigmoid = Dropout(0.2)(Dense_sigmoid)

            # dense output
            output = Dense(self.test_label.shape[1], activation='softmax')(Dense_sigmoid)

            # cnn model for label recognition
            self.cnn_model = Model(input, output)

            # optimizer
            if whether_Adam:
                optimizer = optimizers.Adam(lr=learning_rate, beta_1 = Momentum_gamma, decay=weight_decay)
            else:
                optimizer = optimizers.SGD(lr=learning_rate, momentum=Momentum_gamma, nesterov=True, decay=weight_decay)
            self.cnn_model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['mse', 'accuracy'])
            start = time.time()
            self.history = self.cnn_model.fit(train_cnn_mfcc, train_cnn_label, epochs=epoch, batch_size=batchsize, validation_data=[val_cnn_mfcc,val_cnn_label])
            self.training_time = time.time() - start
            self.cnn_model.save("model/resnet_label.hdf5")

        # model evaluation
        self.cnn_model.predict(test_cnn_mfcc)
        self.score = self.cnn_model.evaluate(test_cnn_mfcc, test_cnn_label)
        print("test loss: ", self.score[0], ", mse: ", self.score[1], ", accuracy", self.score[2])
Example #15
def mini_vgg(layer_names):
    vgg = VGG19(weights='imagenet', include_top=False)
    vgg.trainable = False
    outputs = [vgg.get_layer(name).output for name in layer_names]
    model = Model([vgg.input], outputs)
    return model
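
Hypothetical usage (assuming the snippet's VGG19/Model imports plus tensorflow as tf): pull two intermediate activations by layer name:

extractor = mini_vgg(['block1_conv1', 'block5_conv1'])
outs = extractor(tf.random.uniform((1, 224, 224, 3)))
print([o.shape for o in outs])  # [(1, 224, 224, 64), (1, 14, 14, 512)]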
Example #16
def build_model(encoder='efficientnetb7', center='dac', full_skip=True, attention='sc', upscore='upall'):

	MODEL_NAME = encoder
	if center is not None:
		MODEL_NAME = MODEL_NAME+'_'+center
	if attention is not None:
		MODEL_NAME = MODEL_NAME+'_'+attention
	if full_skip:
		MODEL_NAME = MODEL_NAME + '_fullskip'
	if upscore is not None:
		MODEL_NAME = MODEL_NAME + '_'+upscore


	if encoder == 'resnet50':
		encoder = ResNet50(input_tensor=Input(shape=(IMAGE_SIZE, IMAGE_SIZE, 3), name='data'), weights='imagenet', include_top=False)
		skip_names = ['data', 'conv1_relu', 'conv2_block3_out', 'conv3_block4_out', 'conv4_block6_out']
		encoder_output = encoder.get_layer('conv5_block3_out').output
		# data    320x320x3
		# conv1_relu    160x160x64
		# conv2_block3_out     80x80x256
		# conv3_block4_out    40x40x512
		# conv4_block6_out    20x20x1024
		# conv5_block3_out    10x10x2048  --> encoder output

	elif encoder == 'resnet101':
		encoder = ResNet101(input_tensor=Input(shape=(IMAGE_SIZE, IMAGE_SIZE, 3), name='data'), weights='imagenet', include_top=False)
		skip_names = ['data', 'conv1_relu', 'conv2_block3_out', 'conv3_block4_out']
		encoder_output = encoder.get_layer('conv4_block23_out').output
		#data   320x320x3
		#conv1_relu   160x160x64
		#conv2_block3_out   80x80x256
		#conv3_block4_out    40x40x512
		#conv4_block23_out   20x20x1024 --> encoder output
		#conv5_block3_out  10x10x2048

	elif encoder == 'resnet50v2':
		encoder = ResNet50V2(input_tensor=Input(shape=(IMAGE_SIZE, IMAGE_SIZE, 3), name='data'), weights='imagenet', include_top=False)
		skip_names = ['data', 'conv1_conv', 'conv2_block3_1_relu', 'conv3_block4_1_relu', 'conv4_block6_1_relu']
		encoder_output = encoder.get_layer('post_relu').output
		# data   320x320x3
		# conv1_conv   160x160x64
		# conv2_block3_1_relu   80x80x64
		# conv3_block4_1_relu   40x40x128
		# conv4_block6_1_relu   20x20x256
		# post_relu   10x10x2048  --> encoder output

	elif encoder == 'resnet101v2':
		encoder = ResNet101V2(input_tensor=Input(shape=(IMAGE_SIZE, IMAGE_SIZE, 3), name='data'), weights='imagenet', include_top=False)
		skip_names = ['data', 'conv1_conv', 'conv2_block3_1_relu', 'conv3_block4_1_relu', 'conv4_block23_1_relu']
		encoder_output = encoder.get_layer('post_relu').output
		#data   320x320x3
		#conv1_conv   160x160x64
		#conv2_block3_1_relu   80x80x64
		#conv3_block4_1_relu    40x40x128
		#conv4_block23_1_relu   20x20x256 
		#post_relu  10x10x2048 --> encoder output

	elif encoder == 'vgg19':
		encoder = VGG19(input_shape=(IMAGE_SIZE, IMAGE_SIZE, 3), weights='imagenet', include_top=False)
		skip_names = ['block1_conv2', 'block2_conv2', 'block3_conv4', 'block4_conv4', 'block5_conv4']
		encoder_output = encoder.get_layer('block5_pool').output
		# block1_conv2   320x320x64
		# block2_conv2   160x160x128
		# block3_conv4   80x80x256
		# block4_conv4   40x40x512
		# block5_conv4   20x20x512
		# block5_pool   10x10x512   --> encoder output

	elif encoder == 'efficientnetb6':
		encoder = EfficientNetB6(input_tensor=Input(shape=(IMAGE_SIZE, IMAGE_SIZE, 3), name='data'), weights='imagenet', include_top=False)
		skip_names = ['data', 'block2a_expand_activation', 'block3a_expand_activation', 'block4a_expand_activation']
		encoder_output = encoder.get_layer('block6a_expand_activation').output
		#data   320x320x3
		#block2a_expand_activation   160x160x192
		#block3a_expand_activation   80x80x240
		#block4a_expand_activation    40x40x432
		#block6a_expand_activation   20x20x1200 --> encoder output
		#top_activation   10x10x2304

	elif encoder == 'efficientnetb7':
		encoder = EfficientNetB7(input_tensor=Input(shape=(IMAGE_SIZE, IMAGE_SIZE, 3), name='data'), weights='imagenet', include_top=False)
		skip_names = ['data', 'block2a_expand_activation', 'block3a_expand_activation', 'block4a_expand_activation']
		encoder_output = encoder.get_layer('block6a_expand_activation').output
		#data   320x320x3
		#block2a_expand_activation   160x160x192
		#block3a_expand_activation   80x80x288
		#block4a_expand_activation    40x40x480
		#block6a_expand_activation   20x20x1344 --> encoder output
		#top_activation   10x10x

	elif encoder == 'mobilenetv2':
		encoder = MobileNetV2(input_tensor=Input(shape=(IMAGE_SIZE, IMAGE_SIZE, 3), name='data'), weights='imagenet', include_top=False)
		skip_names = ['data', 'block_1_expand_relu', 'block_3_expand_relu', 'block_6_expand_relu', 'block_13_expand_relu']
		encoder_output = encoder.get_layer('out_relu').output
		# data   320x320x3
		# block_1_expand_relu   160x160x96
		# block_3_expand_relu   80x80x144
		# block_6_expand_relu    40x40x192
		# block_13_expand_relu   20x20x576
		# out_relu   10x10x1248   --> encoder output

	skip_layers = [encoder.get_layer(i).output for i in skip_names]
	# Center --------------
	if center == 'atrous':
		x = atrous_block(encoder_output)
	elif center == 'dac':
		x = dense_atrous_block(encoder_output)
	elif center == 'aspp':
		x = aspp_block(encoder_output)
	elif center is None:
		x = encoder_output

	# Decoder --------------
	if attention == 'se':
		attn_block = se_block
	elif attention == 'cbam':
		attn_block = cbam_block
	elif attention == 'sc':
		attn_block = scSE_block

	filters = [i.shape[-1] for i in skip_layers]
	filters[0] = 64

	scales = [2 ** i for i in range(1, len(filters))][::-1]
	X = []
	for i in range(1, len(filters) + 1):
		X.append(x)

		down = []
		if full_skip:
			for j in range(len(scales) - (i - 1)):
				d = down_skip(skip_layers[j], scales[j + (i - 1)], filters[-1]//4)
				if attention is not None:
					d = attn_block(d) 
				down.append(d)


		direct = direct_skip(skip_layers[-i], filters[-1]//4)
		if attention is not None:
			direct = attn_block(direct)


		x = convtranspose_block(x, filters[-1]//4)
		if attention is not None:
			x = attn_block(x)

		x = Concatenate()([x] + [direct] + down)
		
		x = conv3_block(x, x.shape[-1])

	if upscore is not None:
		if upscore=='upall':
			up_scales=[2 ** i for i in range(1, len(filters)+1)][::-1]
			UP = [upscore_block(x, 32, up_scales[i]) for i, x in enumerate(X)]
			if attention is not None:
				UP = [attn_block(x) for x in UP]

			up = Concatenate()(UP)
     
		elif upscore=='upcenter':
			up = upscore_block(X[0], 64, 2 ** len(filters))
			if attention is not None:
				up = attn_block(up)

		x = Concatenate()([x, up])


	x = Conv2D(1, 1, padding='same')(x)
	x = Activation('sigmoid')(x)

	model = Model(encoder.input, x)

	metrics = [dice_coef, Recall(), Precision()]
	opt = Nadam(LR)
	model.compile(loss=bce_dice_loss, optimizer=opt, metrics=metrics)

	return model, MODEL_NAME
Example #17

def vgg_layers(layer_names):
    vgg = VGG19(include_top=False, weights="imagenet")
    vgg.trainable = False
    outputs = [vgg.get_layer(name).output for name in layer_names]
    return Model([vgg.input], outputs)
Example #18
def train_model(path,
                train_images=None,
                train_labels=None,
                test_images=None,
                test_labels=None,
                model_name=None,
                epochs=80,
                learning_rate=0.0001,
                input_shape=(224, 224, 3),
                classes=2,
                batch_size=16,
                classifier_activation='softmax',
                callbacks=None):
    '''
    Saves the model as a .h5 file.

    path = directory for saving the files
    train_images = a numpy array containing the image data for training
    train_labels = a numpy array containing the labels for training
    test_images = a numpy array containing the image data for test
    test_labels = a numpy array containing the labels for test
    model_name = a string, name of the model -> "vgg19", "resnet50_v2", "inception_resnet_v2", "densenet201", "inception_v3", "xception", "mobilenet_v2"
    epochs
    learning_rate
    '''

    base_model = None
    if model_name == 'vgg19':
        base_model = VGG19(weights=None,
                           include_top=False,
                           input_shape=input_shape)

    if model_name == 'resnet50_v2':
        base_model = ResNet50V2(weights=None,
                                include_top=False,
                                input_shape=input_shape)

    if model_name == 'inception_resnet_v2':
        base_model = InceptionResNetV2(weights=None,
                                       include_top=False,
                                       input_shape=input_shape)

    if model_name == 'densenet201':
        base_model = DenseNet201(weights=None,
                                 include_top=False,
                                 input_shape=input_shape)

    if model_name == 'inception_v3':
        base_model = InceptionV3(weights=None,
                                 include_top=False,
                                 input_shape=input_shape)

    if model_name == 'xception':
        base_model = Xception(weights=None,
                              include_top=False,
                              input_shape=input_shape)

    if model_name == 'mobilenet_v2':
        base_model = MobileNetV2(weights=None,
                                 include_top=False,
                                 input_shape=input_shape)

    if base_model is None:
        raise ValueError("Unknown model_name: %s" % model_name)

    x = base_model.output
    x = tf.keras.layers.GlobalAveragePooling2D()(x)
    output = tf.keras.layers.Dense(classes,
                                   activation=classifier_activation)(x)

    model = tf.keras.Model(inputs=base_model.input, outputs=output)

    optimizer = Adam(learning_rate=learning_rate,
                     beta_1=0.9,
                     beta_2=0.999,
                     epsilon=1e-07)

    model.compile(
        optimizer=optimizer,
        # the Dense head already applies softmax, so from_logits must be False
        loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
        metrics=['accuracy'])

    results = model.fit(train_images,
                        train_labels,
                        epochs=epochs,
                        validation_data=(test_images, test_labels),
                        batch_size=batch_size,
                        callbacks=callbacks)

    #losses = pd.DataFrame(model.history.history)
    #losses[['loss','val_loss']].plot()

    save_model = path + model_name + '.h5'
    model.save(save_model)

    return results
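
A hypothetical call, assuming NumPy arrays of 224x224 RGB images with integer class labels (which is what the sparse categorical loss above expects):

results = train_model('models/',
                      train_images=x_tr, train_labels=y_tr,
                      test_images=x_te, test_labels=y_te,
                      model_name='vgg19', epochs=10, classes=2)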
Example #19
    def __init__(self, content_layers: dict = None, style_layers: dict = None,
                 pooling: str = 'AvgPooling'):
        """
        Initializes a neural network which will output content and style of a given image.
        Weights provided via 'content_layers' and 'style_layers' will be normalized to 1.

        Args:
            content_layers: dict with {vgg_layer_name: layer weight} for content layers
            style_layers: dict with {vgg_layer_name: layer weight} for style layers
            pooling: 'AvgPooling' or 'MaxPooling'. If 'AvgPooling', VGG19 Pooling layers
                will be replaced with average pooling.
        """

        # SET DEFAULTS AND NORMALIZE WEIGHT TO 1
        if content_layers is None:
            content_layers = {'block4_conv2': 1.0}

        if style_layers is None:
            style_layers = {
                'block1_conv1': 1.0,
                'block2_conv1': 1.0,
                'block3_conv1': 1.0,
                'block4_conv1': 1.0,
                'block5_conv1': 1.0,
            }

        sum_content_weights = sum([v for k, v in content_layers.items()])
        sum_style_weights = sum([v for k, v in style_layers.items()])
        content_layers = {k: v/sum_content_weights for k, v in content_layers.items()
                          if v != 0.}
        style_layers = {k: v/sum_style_weights for k, v in style_layers.items()
                        if v != 0.}

        self.content_layers = content_layers
        self.style_layers = style_layers

        print(self.content_layers)
        print(self.style_layers)

        # DOWNLOAD VGG19 AND CREATE NEW MODEL
        base_model = VGG19(include_top=False, weights='imagenet')
        base_model.trainable = False

        if pooling == 'AvgPooling':
            base_model = self._replace_max_pooling(base_model)

        content_outputs = []
        style_outputs = []

        for layer in base_model.layers:
            if layer.name in self.content_layers:
                content_outputs.append(layer.output)
            if layer.name in self.style_layers:
                style_outputs.append(layer.output)

        outputs = {'content': content_outputs, 'style': style_outputs}
        model = tf.keras.Model(inputs=base_model.inputs, outputs=outputs)

        self.nst_model = model
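
Hypothetical usage, assuming the enclosing class is named NSTExtractor (the real class name is not shown) and image is a preprocessed float tensor of shape (1, H, W, 3):

extractor = NSTExtractor(pooling='AvgPooling')   # class name is an assumption
outputs = extractor.nst_model(image)
content_feats = outputs['content']  # one tensor per content layer
style_feats = outputs['style']      # one tensor per style layer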
Example #20
def build_model(mode, model_name=None, model_path=None):

    clear_session()

    if mode == 'train':
        # ResNet50 minimum input size is (32, 32); (128, 128) for the others
        img = Input(shape=(96, 96, 3))

        if model_name == 'DenseNet121':  #Checked and Working

            model = DenseNet121(include_top=False,
                                weights='imagenet',
                                input_tensor=img,
                                input_shape=None,
                                pooling='avg')

        elif model_name == 'MobileNet':  #checked, raised shape error, #Error Resolved, Now working

            model = MobileNet(include_top=True,
                              weights='imagenet',
                              input_tensor=img,
                              input_shape=None,
                              pooling='avg')

        elif model_name == 'Xception':  #Checked and Working

            model = Xception(include_top=False,
                             weights='imagenet',
                             input_tensor=img,
                             input_shape=None,
                             pooling='max')

        elif model_name == 'ResNet50':  # Image dimensions should be large, e.g. 224x224; GPU memory was insufficient

            model = ResNet50(include_top=False,
                             weights='imagenet',
                             input_tensor=img,
                             input_shape=None,
                             pooling='avg')

        elif model_name == 'InceptionV3':  #Checked and Working

            model = InceptionV3(include_top=False,
                                weights='imagenet',
                                input_tensor=img,
                                input_shape=None,
                                pooling='avg')

        elif model_name == 'VGG16':  #Checked and Working
            model = VGG16(include_top=False,
                          weights='imagenet',
                          input_tensor=img,
                          input_shape=None,
                          pooling='max')

        elif model_name == 'VGG19':  #to be checked

            model = VGG19(include_top=False,
                          weights='imagenet',
                          input_tensor=img,
                          input_shape=None,
                          pooling='avg')

        final_layer = model.layers[-1].output

        dense_layer_1 = Dense(128, activation='relu')(final_layer)
        output_layer = Dense(4, activation='softmax')(dense_layer_1)

        model = Model(inputs=[img], outputs=output_layer)
        model.compile(optimizer='adam',
                      loss='categorical_crossentropy',  # 4-way softmax output needs categorical, not binary, cross-entropy
                      metrics=['accuracy'])

    elif mode == 'inference':
        model = load_model(model_path)

    return model
Example #21
                                  weights='imagenet')),
                     ("ResNet50",
                      ResNet50(input_shape=IMG_SHAPE,
                               include_top=False,
                               weights='imagenet')),
                     ("ResNet50V2",
                      ResNet50V2(input_shape=IMG_SHAPE,
                                 include_top=False,
                                 weights='imagenet')),
                     ("VGG16",
                      VGG16(input_shape=IMG_SHAPE,
                            include_top=False,
                            weights='imagenet')),
                     ("VGG19",
                      VGG19(input_shape=IMG_SHAPE,
                            include_top=False,
                            weights='imagenet')),
                     ("Xception",
                      Xception(input_shape=IMG_SHAPE,
                               include_top=False,
                               weights='imagenet'))]

    for arch in architectures:

        policy = mixed_precision.Policy('mixed_float16')
        mixed_precision.set_policy(policy)
        print("")
        print("Arch: ", arch[0])

        initializer = tf.keras.initializers.he_normal()
        steps = np.ceil(image_count / BATCH_SIZE) * EPOCHS
Example #22
def generate_vgg_model_large(classes_len: int):
    """
    Function to create a VGG19 model pre-trained on ImageNet with custom FC layers, plus extra
    convolutional layers at the start of the network before the VGG19 base.
    If the "advanced" command line argument is selected, adds an extra convolutional block with
    extra filters to support larger images.
    This is a larger model that starts with two more sets of convolutional layers with fewer filters.
    :param classes_len: The number of classes (labels).
    :return: The VGG19 model.
    """

    model_base = Sequential()

    # Reconfigure single channel input into a greyscale 3 channel input
    img_input = Input(shape=(config.VGG_IMG_SIZE_LARGE['HEIGHT'],
                             config.VGG_IMG_SIZE_LARGE['WIDTH'], 1))
    img_conc = Concatenate()([img_input, img_input, img_input])
    input_model = Model(inputs=img_input, outputs=img_conc)

    # Generate extra convolutional layers for model to put at the beginning
    model_base.add(input_model)
    model_base.add(Conv2D(16, (3, 3), activation='relu', padding='same'))

    model_base.add(Conv2D(16, (3, 3), activation='relu', padding='same'))

    model_base.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model_base.add(Conv2D(32, (3, 3), activation='relu', padding='same'))

    model_base.add(Conv2D(32, (3, 3), activation='relu', padding='same'))

    model_base.add(MaxPooling2D((2, 2), strides=(2, 2)))

    # To make this fit the VGG model, the first layer of the VGG model is removed and replaced with this one
    model_base.add(Conv2D(64, (3, 3), activation='relu', padding='same'))

    # Generate a VGG19 model with pre-trained ImageNet weights, input as given above, excluded fully connected layers.
    vgg_model = VGG19(include_top=False,
                      weights='imagenet',
                      input_shape=[
                          config.VGG_IMG_SIZE['HEIGHT'],
                          config.VGG_IMG_SIZE['WIDTH'], 3
                      ])

    # Crop the VGG model to exclude the input layer and first convolutional layer
    vgg_model_cropped = Sequential()
    for layer in vgg_model.layers[2:]:  # go through until last layer
        vgg_model_cropped.add(layer)

    # Combine the models
    combined_model = Sequential()
    combined_model.add(model_base)
    combined_model.add(vgg_model_cropped)

    # Add fully connected layers
    model = Sequential()
    # Start with base model consisting of convolutional layers
    model.add(combined_model)

    # Generate additional convolutional layers
    if config.model == "advanced":
        model.add(Conv2D(1024, (3, 3), activation='relu', padding='same'))
        model.add(Conv2D(1024, (3, 3), activation='relu', padding='same'))
        model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    # Flatten layer to convert each input into a 1D array (no parameters in this layer, just simple pre-processing).
    model.add(Flatten())

    # Add fully connected hidden layers.
    model.add(Dense(units=512, activation='relu', name='Dense_Intermediate_1'))
    model.add(Dense(units=32, activation='relu', name='Dense_Intermediate_2'))

    # Possible dropout for regularisation can be added later and experimented with:
    # model.add(Dropout(0.1, name='Dropout_Regularization'))

    # Final output layer that uses softmax activation function (because the classes are exclusive).
    if classes_len == 2:
        model.add(Dense(1, activation='sigmoid', name='Output'))
    else:
        model.add(Dense(classes_len, activation='softmax', name='Output'))

    # Print model details if running in debug mode.
    if config.verbose_mode:
        model.summary()

    return model
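
The Concatenate trick above (stacking a single-channel input three times so an ImageNet backbone accepts it) also works standalone; a minimal self-contained sketch with an arbitrary 512x512 size:

from tensorflow.keras.layers import Input, Concatenate
from tensorflow.keras.models import Model

gray = Input(shape=(512, 512, 1))
rgb = Concatenate()([gray, gray, gray])  # shape (512, 512, 3)
print(Model(gray, rgb).output_shape)     # (None, 512, 512, 3)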
Example #23

def generate_vgg_model_advance_and_density(classes_len: int):
    """
    Function to create a VGG19 model pre-trained with custom FC Layers.
    If the "advanced" command line argument is selected, adds an extra convolutional layer with extra filters to support
    larger images.
    :param classes_len: The number of classes (labels).
    :return: The VGG19 model.
    """
    # Reconfigure single channel input into a greyscale 3 channel input
    img_input = Input(shape=(config.VGG_IMG_SIZE['HEIGHT'], config.VGG_IMG_SIZE['WIDTH'], 1))

    # Add convolution and pooling layers
    model = Sequential()
    model.add(img_input)
    for i in range(config.CONV_CNT):
        model.add(Conv2D(3, (3, 3),
                         activation='relu',
                         padding='same'))
        model.add(MaxPooling2D((2, 2), strides=(2, 2)))
        
#     model.add(Conv2D(3, (5, 5),
#                      activation='relu',
#                      padding='same'))
#     model.add(MaxPooling2D((2, 2), strides=(2, 2)))

#     model.add(Conv2D(3, (3, 3),
#                      activation='relu',
#                      padding='same'))
#     model.add(MaxPooling2D((2, 2), strides=(2, 2)))
        
    # Generate a VGG19 model with pre-trained ImageNet weights, input as given above, excluded fully connected layers.
    model_base = VGG19(include_top=False, weights='imagenet')
    
    # Start with base model consisting of convolutional layers
    model.add(model_base)

    # Flatten layer to convert each input into a 1D array (no parameters in this layer, just simple pre-processing).
    model.add(Flatten())
    
    # Possible dropout for regularisation can be added later and experimented with:
    if config.DROPOUT != 0:
        model.add(Dropout(config.DROPOUT, name='Dropout_Regularization_1'))

    # Add fully connected hidden layers.
    model.add(Dense(units=512, activation='relu', name='Dense_Intermediate_1'))
    model.add(Dense(units=32, activation='relu', name='Dense_Intermediate_2'))

    model_density = Sequential()
    model_density.add(Dense(int(config.model.split('-')[1]), input_shape=(int(config.model.split('-')[1]),), activation='relu'))
    
    model_concat = concatenate([model.output, model_density.output], axis=-1)
    
    # Final output layer that uses softmax activation function (because the classes are exclusive).
    if classes_len == 2:
        model_concat = Dense(1, activation='sigmoid', name='Output')(model_concat)
    else:
        model_concat = Dense(classes_len, activation='softmax', name='Output')(model_concat)
    
    model_combine = Model(inputs=[model.input, model_density.input], outputs=model_concat)

    # Print model details if running in debug mode.
    if config.verbose_mode:
        print(model_combine.summary())

    return model_combine
Example #24
import time
from timeit import timeit

import numpy as np
import tensorflow as tf
from numpy.random import randn
from tensorflow.keras.applications import VGG19
from tensorflow.keras.losses import MAE

physical_devices = tf.config.experimental.list_physical_devices('GPU')
tf.config.experimental.set_memory_growth(physical_devices[0], True)
tf.config.optimizer.set_jit(True)  # XLA enabled

m = VGG19()
m.compile(optimizer='adam', loss=MAE)


def benchmark_tensorflow(batchsize):
    ip = tf.convert_to_tensor(np.array(randn(*(batchsize, 224, 224, 3)), dtype=np.float32))

    # warm-up
    m.predict(ip)

    time.sleep(10)

    # benchmark
    print(timeit(lambda: m.predict(ip), number=10))
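
Example invocation (not in the original), timing ten predict() calls on a batch of eight random images:

benchmark_tensorflow(8)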
Example #25
def create_model(
    model_name, log_dir, args
):  # optimizer, learning rate, activation, neurons, batch size, epochs...

    input_shape = input_size(model_name, args)

    if args.head == 'max' or (args.base_trainable
                              and args.head != 't_complex'):
        pool = 'max'
    else:
        pool = None  # Keras expects None for "no pooling", not the string 'none'

    if model_name == 'VGG16':
        conv_base = VGG16(weights='imagenet',
                          include_top=False,
                          pooling=pool,
                          input_shape=input_shape)
    elif model_name == 'VGG19':
        conv_base = VGG19(weights='imagenet',
                          include_top=False,
                          pooling=pool,
                          input_shape=input_shape)
    elif model_name == 'ResNet50':
        conv_base = ResNet50(weights='imagenet',
                             include_top=False,
                             pooling=pool,
                             input_shape=input_shape)
    elif model_name == 'InceptionV3':
        conv_base = InceptionV3(weights='imagenet',
                                include_top=False,
                                pooling=pool,
                                input_shape=input_shape)
    elif model_name == 'Xception':
        conv_base = Xception(weights='imagenet',
                             include_top=False,
                             pooling=pool,
                             input_shape=input_shape)
    elif model_name == 'InceptionResNetV2':
        conv_base = InceptionResNetV2(weights='imagenet',
                                      include_top=False,
                                      pooling=pool,
                                      input_shape=input_shape)
    elif model_name == 'NASNetMobile':
        conv_base = NASNetMobile(weights='imagenet',
                                 include_top=False,
                                 pooling=pool,
                                 input_shape=input_shape)
    elif model_name == 'NASNetLarge':
        conv_base = NASNetLarge(weights='imagenet',
                                include_top=False,
                                pooling=pool,
                                input_shape=input_shape)
    elif model_name == 'DenseNet201':
        conv_base = DenseNet201(weights='imagenet',
                                include_top=False,
                                pooling=pool,
                                input_shape=input_shape)
    elif model_name == 'MobileNetV2':
        conv_base = MobileNetV2(weights='imagenet',
                                include_top=False,
                                pooling=pool,
                                input_shape=input_shape)
    else:
        conv_base = None
        print("Model name not known!")
        exit()

    conv_base.trainable = args.base_trainable

    model = models.Sequential()
    if args.base_trainable:
        if args.head == 't_complex':
            model = models.Sequential()
            model.add(conv_base)
            model.add(
                layers.Conv2D(filters=1024,
                              kernel_size=(3, 3),
                              padding='same',
                              strides=1))
            model.add(layers.Flatten())  # ??
            model.add(layers.Dense(1024, activation='sigmoid'))
            model.add(layers.Dense(256, activation='sigmoid'))
            model.add(layers.Dense(args.CLASSES_NO, activation='softmax')
                      )  # (samples, new_rows, new_cols, filters)
        else:
            model.add(conv_base)
            model.add(layers.Dense(args.CLASSES_NO, activation='softmax'))
    elif args.head == 'dense':
        # outside only?
        model.add(conv_base)
        model.add(layers.Flatten())
        model.add(layers.Dropout(0.5))
        model.add(layers.Dense(256, activation='relu'))
        model.add(layers.Dropout(0.5))
        model.add(layers.Dense(128, activation='relu'))
        model.add(layers.Dense(args.CLASSES_NO, activation='softmax'))
    elif args.head == 'max':
        model.add(conv_base)
        model.add(layers.Dense(512, activation='relu'))
        model.add(layers.Dropout(0.5))
        model.add(layers.Dense(256, activation='relu'))
        model.add(layers.Dense(args.CLASSES_NO, activation='softmax'))
    elif args.head == 'mod':
        model = models.Sequential()
        model.add(conv_base)
        model.add(
            layers.Conv2D(filters=2048, kernel_size=(3, 3), padding='valid'))
        model.add(layers.Flatten())  # ??
        model.add(layers.Dropout(0.5))
        model.add(layers.Dense(1024, activation='sigmoid'))
        model.add(layers.Dense(256, activation='relu'))
        model.add(layers.Dense(
            args.CLASSES_NO,
            activation='softmax'))  # (samples, new_rows, new_cols, filters)

    if args.lr_decay:
        lr_schedule = ExponentialDecay(args.INIT_LEARN_RATE,
                                       decay_steps=args.DECAY_STEPS,
                                       decay_rate=args.DECAY_RATE,
                                       staircase=True)
        model.compile(loss='categorical_crossentropy',
                      optimizer=SGD(lr_schedule),
                      metrics=['acc'])  # To different optimisers?
    else:
        model.compile(loss='categorical_crossentropy',
                      optimizer=Adam(lr=args.LEARNING_RATE),
                      metrics=['acc'])

    with open(os.path.join(log_dir, 'modelsummary.txt'), 'w') as f:
        with redirect_stdout(f):
            model.summary()
    print(model.summary())
    return model
Example #26

    epochs=epochs,
    validation_data=val_generator,
    workers=4
)

score = model.evaluate(val_generator,verbose=2)
print('Test loss:', score[0])
print('Test accuracy:', score[1])

"""## VGG19

"""

from tensorflow.keras.applications import VGG19

net= VGG19(include_top=False, weights='imagenet', input_tensor=Input(shape=(150,150,3))) 

for layer in net.layers[:-5]:
    layer.trainable = False

x = net.output
x = Flatten()(x)
x = Dropout(0.5)(x)
output_layer = Dense(1, activation='sigmoid', name='sigmoid')(x)
model = Model(inputs=net.input, outputs=output_layer)

# initiate RMSprop optimizer
opt = keras.optimizers.RMSprop(lr=0.0001, decay=1e-6)

# Train the model using RMSprop
model.compile(loss='binary_crossentropy',
Example #27
def run_ars(params):

    dir_path = params['dir_path']

    if not (os.path.exists(dir_path)):
        os.makedirs(dir_path)
    logdir = dir_path
    if not (os.path.exists(logdir)):
        os.makedirs(logdir)

    # Disables TensorFlow GPU use for compatibility reasons.
    # To try to use the GPU, set --enable_gpu to True on execution
    if not params['enable_gpu']:
        #os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
        config = ConfigProto(device_count={'GPU': 0})
        #config.gpu_options.allow_growth = True
        session = InteractiveSession(config=config)
    else:
        config = ConfigProto()
        config.gpu_options.allow_growth = True
        session = InteractiveSession(config=config)

    env = CarEnv()
    base_model = VGG19(weights='imagenet',
                       include_top=False,
                       input_shape=(env.img_height, env.img_width, 3))
    shape = 1
    for num in base_model.output_shape:
        if num is not None:
            shape *= num
    ob_dim = shape  #base_model.input_shape
    ac_dim = env.action_space.shape[0]

    # Set global variable for num workers
    #global worker_count
    #worker_count = params['n_workers']

    # Get initial weights if directory given. Can be csv or numpy
    if len(params['policy_file']) > 0:
        try:
            initial_policy = np.load(params['policy_file'])
            initial_weights = initial_policy['arr_0']
            print('Found .npz policy file at {}'.format(params['policy_file']))
            print('Loaded policy weights.')
            try:
                initial_mean = initial_policy['arr_1']
                initial_std = initial_policy['arr_2']
                print('Loaded policy stats.')
            except:
                initial_mean = None
                initial_std = None
        except:
            initial_weights = np.genfromtxt(params['policy_file'],
                                            delimiter=',')
            print('Found policy file at {}'.format(params['policy_file']))
            print('Loaded weights')
            initial_mean = None
            initial_std = None
    else:
        print('Initializing new policy.')
        initial_weights = None
        initial_mean = None
        initial_std = None

    # set policy parameters. Possible filters: 'MeanStdFilter' for v2, 'NoFilter' for v1.
    policy_params = {
        'type': 'linear',
        'ob_filter': params['filter'],
        'ob_dim': ob_dim,
        'ac_dim': ac_dim,
        'initial_weights': initial_weights,
        'initial_mean': initial_mean,
        'initial_std': initial_std
    }

    ARS = ARSLearner(env_name=params['env_name'],
                     policy_params=policy_params,
                     num_workers=params['n_workers'],
                     num_deltas=params['num_deltas'],
                     deltas_used=params['deltas_used'],
                     learning_rate=params['learning_rate'],
                     lr_decay=params['lr_decay'],
                     delta_std=params['delta_std'],
                     std_decay=params['std_decay'],
                     logdir=logdir,
                     rollout_length=params['rollout_length'],
                     shift=params['shift'],
                     params=params,
                     seed=params['seed'],
                     seconds_per_episode=params['seconds_per_episode'],
                     show_cam=params['show_cam'],
                     log_every=params['log_every'],
                     eval_rollouts=params['eval_rollouts'],
                     enable_gpu=params['enable_gpu'])

    ARS.train(params['n_iter'], state_filter=params['state_filter'])

    save_file = '/'.join(params['policy_file'].split('/')[:-1])
    np.savetxt(save_file + '/recent_weights.csv', ARS.w_policy, delimiter=',')

    for worker in ARS.workers:
        worker.clean_up.remote()

    return
Example #28
    def subvgg(self):
        vgg = VGG19(input_shape=(self.hr_h, self.hr_w, 3), include_top=False, weights='imagenet')
        vgg.outputs = [vgg.layers[9].output]
        img_input = Input(shape=(self.hr_h, self.hr_w, 3))
        img_features = vgg(img_input)
        return Model(img_input, img_features)
print("Robot vel input trainingset size: {}".format(robot_vel_train_input.shape))
robot_state_train_label = strawberry_states_frame_rate[0:train_images.shape[0]]
print("Robot state label trainset size: {}".format(robot_state_train_label.shape))

for i in range(0 , test_images.shape[0]):
    test_images[i, : , : , :] = test_images[i, : , : , :] / np.max(test_images[i, : , : , :])

robot_state_test_input = robot_states_frame_rate[train_images.shape[0]:train_images.shape[0]+test_images.shape[0]]
robot_pose_test_input = robot_state_test_input.iloc[:, 0:7]
robot_vel_test_input = robot_state_test_input.iloc[:, 7:14]
print("Robot pose input trainingset size: {}".format(robot_pose_test_input.shape))
print("Robot vel input trainingset size: {}".format(robot_vel_test_input.shape))
robot_state_test_label = strawberry_states_frame_rate[train_images.shape[0]:train_images.shape[0]+test_images.shape[0]]
print("Robot state label testset size: {}".format(robot_state_test_label.shape))

model = VGG19(include_top=False, weights='imagenet', input_shape=(224 , 224 , 3))
#model.summary()

for layer in model.layers[:21]:
    layer.trainable=False
for layer in model.layers[21:]:
    layer.trainable=True

y1 = model.output
y2 = GlobalAveragePooling2D()(y1)
y3 = Dense(512,activation='relu')(y2) 
y4 = Dense(512,activation='relu')(y3) 

new_model = Model(inputs=model.input,outputs=y4)
####################################################################################################################
cnn_out = new_model.output
Example #30
    def __init__(self, content_layer):
        super().__init__()
        vgg = VGG19(include_top=False, weights='imagenet')
        self.vgg = tf.keras.Model([vgg.input],
                                  [vgg.get_layer(content_layer).output])
        self.vgg.trainable = False