Code example #1
def level2():
    # Destroying the current TF graph - https://keras.io/backend/
    backend.clear_session()
    print("\n### LEVEL2 ###")
    conv_base2 = NASNetLarge(weights='imagenet', include_top=False, input_shape=(img_height, img_width, 3))
    model2 = models.Sequential()
    model2.add(conv_base2)
    model2.add(layers.GlobalAveragePooling2D())
    model2.add(layers.Dense(fcLayer1, activation='relu'))
    model2.add(layers.Dropout(dropout))
    model2.add(layers.Dense(classes, activation='softmax'))

    print("\nLoading trained weights from " + weights_path + " ...")
    model2.load_weights(weights_path)

    # unfreezing the base network up to a specific layer:
    if freezeUptoLayer == "":
        conv_base2.trainable = True
        print ("\ntrainable layers: ",int(len(model2.trainable_weights) / 2))
    else:
        print("\ntrainable layers before unfreezing the base network up to " + freezeUptoLayer + ": ",int(len(model2.trainable_weights) / 2))  # weights = weights + bias = 2 pro layer
        conv_base2.trainable = True
        set_trainable = False
        for layer in conv_base2.layers:
            if layer.name == freezeUptoLayer: set_trainable = True
            if set_trainable: layer.trainable = True
            else: layer.trainable = False
        print("trainable layers after the base network unfreezed from layer " + freezeUptoLayer + ": ", int(len(model2.trainable_weights)/2))

    print("\nLEVEL2 Model after unfreezing the base network")
    model2.summary()
    model2.compile(loss='categorical_crossentropy', optimizer=optimizers.RMSprop(lr=learning_rate/10, decay=lr_decay), metrics=['acc'])
    print ("\n### Validating ... ")

    val_loss, val_acc = model2.evaluate_generator(test_generator, steps=nbrTestImages, verbose=0)
    print('Validation results before jointly training the unfrozen layers and the trained densely connected layers:\nValidation loss:',val_loss,",",'Validation accuracy:', val_acc, "\n")

    # Jointly training both the unfrozen layers and the added, already-trained densely connected layers
    callbacks_list_L2 = [ModelCheckpoint(filepath=model_path+'l2.h5', save_weights_only=False, monitor='val_acc', verbose=1, save_best_only=True),
                      ReduceLROnPlateau(monitor='val_acc', factor=factorL2, patience=patiencel2, verbose=1),
                      TensorBoard(log_dir=TensorBoardLogDir+'\\level2')]

    print ("\n### Level2 Training ... ")
    history = model2.fit_generator(
        train_generator,
        steps_per_epoch=(nbrTrainImages * classes) // (batch * 5),
        epochs=epochsL2,
        callbacks=callbacks_list_L2,
        validation_data=test_generator,
        validation_steps=nbrTestImages,
        verbose=verbose_train)

    history_val2 = [history.history]  # saving all results of the final test
    plot(history_val2, "LEVEL2:", epochsL2)
    print("\n###LEVEL2 Training finished successfully ###")
Code example #2
def build_model_transfer_learning():
    base_model = NASNetLarge(weights='imagenet', include_top=False, pooling = None,\
         input_shape=(config.img_rows,config.img_cols,3))

    for layer in base_model.layers:
        layer.trainable = False
    x = base_model.output
    x = UpSampling2D(size=(4, 4))(x)
    x = Conv2D(128, (kernel, kernel),
               activation='relu',
               padding='same',
               name='conv8_1',
               kernel_initializer="he_normal",
               kernel_regularizer=l2_reg)(x)
    x = Conv2D(128, (kernel, kernel),
               activation='relu',
               padding='same',
               name='conv8_2',
               kernel_initializer="he_normal",
               kernel_regularizer=l2_reg)(x)
    x = Conv2D(128, (kernel, kernel),
               activation='relu',
               padding='same',
               name='conv8_3',
               kernel_initializer="he_normal",
               kernel_regularizer=l2_reg)(x)
    x = BatchNormalization()(x)
    outputs = Conv2D(num_classes, (1, 1),
                     activation='softmax',
                     padding='same',
                     name='pred')(x)

    model = Model(inputs=base_model.input, outputs=outputs, name="ColorNet")
    return model
Code example #3
File: application.py  Project: ssmehta/pyspec
    def build(self) -> Model:
        model = NASNetLarge(include_top=True,
                            weights=None,
                            input_shape=(self.width, self.height,
                                         self.channels),
                            classes=2)

        return model
Code example #4
def level1():
    # Building the model using the pretrained model
    conv_base1 = NASNetLarge(weights='imagenet', include_top=False, input_shape=(img_height, img_width, 3))
    print("\n### LEVEL1 ###\npretrained network:")
    conv_base1.summary()
    model = models.Sequential()
    model.add(conv_base1)
    model.add(layers.GlobalAveragePooling2D())
    model.add(layers.Dense(fcLayer1, activation='relu'))
    model.add(layers.Dropout(dropout))
    model.add(layers.Dense(classes, activation='softmax'))

    # freezing the base network
    print("trainable layers bevor freezing:", int(len(model.trainable_weights)/2)) # weights = weights + bias = 2 pro layer
    conv_base1.trainable = False
    print("trainable layers after freezing:", int(len(model.trainable_weights)/2))
    print("\npretrained network + densely connected classifier")
    model.summary()

    # training the added layers only
    model.compile(loss='categorical_crossentropy', optimizer=optimizers.RMSprop(lr=learning_rate, decay=lr_decay), metrics=['acc'])

    callbacks_list_L1 = [ModelCheckpoint(filepath=weights_path, save_weights_only=True, monitor='val_acc', verbose=1, save_best_only=True),
                      ReduceLROnPlateau(monitor='val_acc', factor=factorL1, patience=patiencel1, verbose=1),
                      TensorBoard(log_dir=TensorBoardLogDir+'\\level1')]

    print("\n### Level1 Training ... ")
    # training the model
    history = model.fit_generator(
        train_generator,
        steps_per_epoch=(nbrTrainImages * classes) // (batch * 5),
        epochs=epochsL1,
        callbacks=callbacks_list_L1,
        validation_data=test_generator,
        validation_steps=nbrTestImages,
        verbose=verbose_train)

    history_val1 = [history.history]  # saving all results of the final test
    plot(history_val1, "LEVEL1:", epochsL1)
    print("\n### LEVEL1 Training finished successfully ###")

    print("\nLoading trained weights from " + weights_path + " ...")
    model.load_weights(weights_path)
    model.compile(loss='categorical_crossentropy', optimizer=optimizers.RMSprop(lr=learning_rate), metrics=['acc'])
    print("\n### Saving Level1 Model to ", model_path+'l1.h5', " ... ")
    model.save(model_path+'l1.h5')
Code example #5
File: networks.py  Project: SongFGH/AAG2018
    def build(l2_penalty, dropout, activation='softmax', neurons=2):

        # Initialize and freeze the pre-trained base model
        base_model = NASNetLarge(include_top=False,
                                 weights='imagenet',
                                 pooling='avg')

        for layer in base_model.layers:
            layer.trainable = False

        # Add dropout
        image = Dropout(dropout)(base_model.output)

        # Add a dense layer with 300 neurons to match the shape of text input
        image = Dense(300,
                      activation='relu',
                      kernel_regularizer=l2(l2_penalty),
                      activity_regularizer=l2(l2_penalty))(image)

        # Text input
        caption = Input(shape=(300, ), dtype='float32', name='caption')

        # Concatenate inputs for both image and text
        network_head = concatenate([image, caption], axis=-1)
        network_head = Dropout(dropout)(network_head)

        # Add a hidden layer with 64 neurons
        network_head = Dense(64,
                             activation='relu',
                             kernel_regularizer=l2(l2_penalty),
                             activity_regularizer=l2(l2_penalty))(network_head)

        # Add dropout
        network_head = Dropout(dropout)(network_head)

        # Add the final layer
        network_head = Dense(neurons,
                             activation=activation,
                             kernel_regularizer=l2(l2_penalty),
                             activity_regularizer=l2(l2_penalty))(network_head)

        # Combine the base model and network head
        model = Model(inputs=[base_model.input, caption], outputs=network_head)

        # Return the model
        return model
Code example #6
def create_model(model_name, include_top, input_shape):
    if model_name == 'ResNet50':
        base_model = ResNet50(weights='imagenet', include_top=include_top, input_shape=input_shape)
    elif model_name == 'ResNet101':
        base_model = ResNet101(weights='imagenet', include_top=include_top, input_shape=input_shape, backend=keras.backend, layers=keras.layers, models=keras.models, utils=keras.utils)
    elif model_name == 'ResNet50V2':
        base_model = ResNet50V2(weights='imagenet', include_top=include_top, input_shape=input_shape, backend=keras.backend, layers=keras.layers, models=keras.models, utils=keras.utils)
    elif model_name == 'ResNet101V2':
        base_model = ResNet101V2(weights='imagenet', include_top=include_top, input_shape=input_shape, backend=keras.backend, layers=keras.layers, models=keras.models, utils=keras.utils)
    elif model_name == 'ResNeXt50':
        base_model = ResNeXt50(weights='imagenet', include_top=include_top, input_shape=input_shape, backend=keras.backend, layers=keras.layers, models=keras.models, utils=keras.utils)
    elif model_name == 'ResNeXt101':
        base_model = ResNeXt101(weights='imagenet', include_top=include_top, input_shape=input_shape, backend=keras.backend, layers=keras.layers, models=keras.models, utils=keras.utils)
    elif model_name == "MobileNetV2":
        base_model = MobileNetV2(weights='imagenet', include_top=include_top, input_shape=input_shape)
    elif model_name == "NASNetLarge":
        base_model = NASNetLarge(weights='imagenet', include_top=include_top, input_shape=input_shape)
    elif model_name == "NASNetMobile":
        base_model = NASNetMobile(weights='imagenet', include_top=include_top, input_shape=input_shape)
    elif model_name == "DenseNet121":
        base_model = DenseNet121(weights='imagenet', include_top=include_top, input_shape=input_shape)
    elif model_name == "DenseNet169":
        base_model = DenseNet169(weights='imagenet', include_top=include_top, input_shape=input_shape)
    elif model_name == "DenseNet201":
        base_model = DenseNet201(weights='imagenet', include_top=include_top, input_shape=input_shape)
    elif model_name == "InceptionResNetV2":
        base_model = InceptionResNetV2(weights='imagenet', include_top=include_top, input_shape=input_shape)
    elif model_name == "VGG16":
        base_model = VGG16(weights='imagenet', include_top=include_top, input_shape=input_shape)
    elif model_name == "VGG19":
        base_model = VGG19(weights='imagenet', include_top=include_top, input_shape=input_shape)
    else:
        return None, None

    x = base_model.output
    # x = Flatten()(x)
    x = GlobalAveragePooling2D()(x)
    x = Dropout(0.5)(x)
    x = Dense(1024, activation='relu')(x)
    x = Dropout(0.5)(x)
    predictions = Dense(2, activation='softmax')(x)
    model = Model(inputs=base_model.input, outputs=predictions)

    return base_model, model
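A possible usage of create_model above; the freezing step is an assumption about how the returned base_model is meant to be used and is not part of the original snippet:

base_model, model = create_model('NASNetLarge', False, (331, 331, 3))
if model is not None:
    # train only the new head first; the pretrained base stays frozen
    for layer in base_model.layers:
        layer.trainable = False
    model.compile(optimizer='adam', loss='categorical_crossentropy',
                  metrics=['accuracy'])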
Code example #7
File: networks.py  Project: SongFGH/AAG2018
    def build(neurons, l2_penalty, dropout):

        # Initialize the base model
        base_model = NASNetLarge(include_top=False,
                                 weights='imagenet',
                                 pooling='avg')

        # Freeze the model
        for layer in base_model.layers:
            layer.trainable = False

        # Build the network head
        head = NASNetL.build_head(base_model, neurons, l2_penalty, dropout)

        # Combine the base model and network head
        model = Model(inputs=base_model.input, outputs=head)

        # Return the model
        return model
Code example #8
File: ihd_final.py  Project: banutu98/IHD
 def get_base_model(network, input_shape, pooling_method):
     network = network.lower()
     input_warning_message = 'WARNING! The input shape is not the default one!!! Proceeding anyway!'
     if network == 'nas':
         if input_shape != (331, 331, 3):
             print_error(input_warning_message)
         return NASNetLarge(input_shape=input_shape,
                            include_top=False,
                            pooling=pooling_method,
                            weights=None)
     elif network == 'inception':
         if input_shape != (299, 299, 3):
             print_error(input_warning_message)
         return InceptionResNetV2(input_shape=input_shape,
                                  include_top=False,
                                  pooling=pooling_method,
                                  weights='imagenet')
     elif network == 'xception':
         if input_shape != (299, 299, 3):
             print_error(input_warning_message)
         return Xception(input_shape=input_shape,
                         include_top=False,
                         pooling=pooling_method,
                         weights='imagenet')
     elif network == 'densenet':
         if input_shape != (224, 224, 3):
             print_error(input_warning_message)
         return DenseNet201(input_shape=input_shape,
                            include_top=False,
                            pooling=pooling_method,
                            weights='imagenet')
     elif network == 'resnet':
         if input_shape != (224, 224, 3):
             print_error(input_warning_message)
         return ResNet50(input_shape=input_shape,
                         include_top=False,
                         pooling=pooling_method,
                         weights='imagenet')
     else:
         print_error(
             f'Invalid network name: {network}! Please choose from: \n ')
         return None
Code example #9
from keras import backend as K
from keras.applications import NASNetLarge, NASNetMobile
from keras.optimizers import Adam, RMSprop
from keras.layers import Input, Lambda, Dense  # 'merge' was removed in Keras 2; use Lambda instead
from keras import Model
input_shape = (150, 150, 3)

model = NASNetLarge(weights=None, include_top=False, input_shape=input_shape, pooling='max')  # include_top=False so the network yields pooled feature vectors

left_input = Input(input_shape)
right_input = Input(input_shape)

L1_distance = lambda x: K.abs(x[0] - x[1])

h1 = model(left_input)
h2 = model(right_input)

# 'merge' with a mode= function no longer exists; a Lambda layer gives the same result
concat_features = Lambda(L1_distance,
                         output_shape=lambda shapes: shapes[0])([h1, h2])

# Removed from the Dense layer below: bias_initializer=initializer.RandomNormal(mean=0.5, stddev=1e-2), kernel_initializer=initializer.RandomNormal(mean=0, stddev=2e-1)

prediction = Dense(1, activation="sigmoid")(concat_features)

siamese_net = Model(inputs=[left_input, right_input], outputs=prediction)
optimizer = RMSprop()
siamese_net.compile(optimizer=optimizer, loss="binary_crossentropy")

siamese_net.summary()
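A quick shape-level smoke test for the siamese network above (random inputs, purely illustrative):

import numpy as np

left = np.random.rand(2, 150, 150, 3).astype('float32')
right = np.random.rand(2, 150, 150, 3).astype('float32')
print(siamese_net.predict([left, right]).shape)  # expected: (2, 1)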
Code example #10
    def createCoreModel(self):
        primaryDevice = "/cpu:0"
        if self.numGPUs == 1:
            primaryDevice = "/gpu:0"

        with tf.device(primaryDevice):
            imageNet = Sequential()
            imageNetCore = None
            if self.parameters['neuralNetwork']['core'] == 'resnet':
                imageNetCore = ResNet50(
                    include_top=False,
                    pooling='avg',
                    input_shape=(self.imageWidth, self.imageHeight, 3),
                    weights=('imagenet'
                             if self.parameters['startingWeights']['weights']
                             == 'imagenet' else None))
            elif self.parameters['neuralNetwork']['core'] == 'inceptionnet':
                imageNetCore = InceptionV3(
                    include_top=False,
                    pooling='avg',
                    input_shape=(self.imageWidth, self.imageHeight, 3),
                    weights=('imagenet'
                             if self.parameters['startingWeights']['weights']
                             == 'imagenet' else None))
            elif self.parameters['neuralNetwork']['core'] == 'mobilenet':
                imageNetCore = MobileNetV2(
                    include_top=False,
                    pooling='avg',
                    input_shape=(self.imageWidth, self.imageHeight, 3),
                    weights=('imagenet'
                             if self.parameters['startingWeights']['weights']
                             == 'imagenet' else None))
            elif self.parameters['neuralNetwork']['core'] == 'nasnetmobile':
                imageNetCore = NASNetMobile(
                    include_top=False,
                    pooling='avg',
                    input_shape=(self.imageWidth, self.imageHeight, 3),
                    weights=('imagenet'
                             if self.parameters['startingWeights']['weights']
                             == 'imagenet' else None))
            elif self.parameters['neuralNetwork']['core'] == 'nasnetlarge':
                imageNetCore = NASNetLarge(
                    include_top=False,
                    pooling='avg',
                    input_shape=(self.imageWidth, self.imageHeight, 3),
                    weights=('imagenet'
                             if self.parameters['startingWeights']['weights']
                             == 'imagenet' else None))

            imageNetCore.summary()

            imageNet.add(imageNetCore)
            # NOTE: Reshape([2048]) matches the pooled feature size of
            # ResNet50/InceptionV3; other cores differ (e.g. NASNetLarge
            # with 'avg' pooling yields 4032), so this must match the core.
            imageNet.add(Reshape([2048]))
            imageNet.add(BatchNormalization())
            imageNet.add(
                Dropout(self.parameters["neuralNetwork"]["dropoutRate"]))
            imageNet.add(
                Dense(
                    int(self.parameters['neuralNetwork']['vectorSize'] *
                        self.parameters["neuralNetwork"]
                        ["denseLayerMultiplier"])))
            imageNet.add(BatchNormalization())
            # imageNet.add(Dense(int(self.parameters['neuralNetwork']['vectorSize'])))
            imageNet.add(
                Dense(int(self.parameters['neuralNetwork']['vectorSize']),
                      activation=self.parameters["neuralNetwork"]
                      ["finalActivation"]))
            # imageNet.add(Dense(int(self.parameters['neuralNetwork']['vectorSize']), activation=self.parameters["neuralNetwork"]["finalActivation"]))

            imageNet.summary()

        if self.numGPUs > 1:
            model = multi_gpu_model(imageNet, gpus=self.numGPUs)
        else:
            model = imageNet

        return model, imageNet
Code example #11
 def build_network(self, model_name, fine_tune):
     if model_name.lower() == 'vgg16':
         if fine_tune:
             base_model = VGG16(include_top=False,
                                input_shape=(None, None, 3),
                                pooling='avg')
             for layer in base_model.layers:
                 if layer.name.startswith('block5'):
                     layer.trainable = True
                 else:
                     layer.trainable = False
             x = base_model.output
             x = Dense(1024, activation='relu')(x)
             x = Dropout(0.5)(x)
             x = Dense(512, activation='relu')(x)
             predictions = Dense(1, activation='sigmoid')(x)
             model = Model(base_model.input, predictions)
             model.summary()
             return model
         else:
             base_model = VGG16(include_top=False,
                                input_shape=(None, None, 3),
                                pooling='avg')
             for layer in base_model.layers:
                 layer.trainable = True
             x = base_model.output
             x = Dense(1024, activation='relu')(x)
             x = Dropout(0.5)(x)
             x = Dense(512, activation='relu')(x)
             predictions = Dense(1, activation='sigmoid')(x)
             model = Model(base_model.input, predictions)
             model.summary()
             return model
     elif model_name.lower() == 'resnet50':
         base_model = ResNet50(include_top=False,
                               input_shape=(None, None, 3),
                               pooling='avg',
                               backend=keras.backend,
                               layers=keras.layers,
                               models=keras.models,
                               utils=keras.utils)
         x = base_model.output
         x = Dense(1024, activation='relu')(x)
         x = Dropout(0.5)(x)
         x = Dense(1024, activation='relu')(x)
         predictions = Dense(1, activation='sigmoid')(x)
         model = Model(base_model.input, predictions)
         if fine_tune:
             for layer in base_model.layers:
                 layer.trainable = False
         model.summary()
         return model
     # elif model_name.lower()=='resnet34':
     #     base_model=ResNet34(include_top=False, input_shape=self.shape, pooling='avg')
     #     x = base_model.output
     #     x = Dense(1024, activation='relu')(x)
     #     x = Dropout(0.5)(x)
     #     x = Dense(1024, activation='relu')(x)
     #     predictions = Dense(1, activation='sigmoid')(x)
     #     model = Model(base_model.input, predictions)
     #     if fine_tune:
     #         for layer in base_model.layers:
     #             layer.trainable = False
     #     model.summary()
     #     return model
     elif model_name.lower() == 'resnet101':
         base_model = ResNet101(include_top=False,
                                input_shape=(None, None, 3),
                                pooling='avg',
                                backend=keras.backend,
                                layers=keras.layers,
                                models=keras.models,
                                utils=keras.utils)
         x = base_model.output
         x = Dense(1024, activation='relu')(x)
         x = Dropout(0.5)(x)
         x = Dense(1024, activation='relu')(x)
         predictions = Dense(1, activation='sigmoid')(x)
         model = Model(base_model.input, predictions)
         if fine_tune:
             for layer in base_model.layers:
                 layer.trainable = False
         model.summary()
         return model
     elif model_name.lower() == 'resnet152':
         base_model = ResNet152(include_top=False,
                                input_shape=(None, None, 3),
                                pooling='avg',
                                backend=keras.backend,
                                layers=keras.layers,
                                models=keras.models,
                                utils=keras.utils)
         x = base_model.output
         x = Dense(1024, activation='relu')(x)
         x = Dropout(0.5)(x)
         x = Dense(1024, activation='relu')(x)
         predictions = Dense(1, activation='sigmoid')(x)
         model = Model(base_model.input, predictions)
         if fine_tune:
             for layer in base_model.layers:
                 layer.trainable = False
         model.summary()
         return model
     elif model_name.lower() == 'inceptionresnetv2':
         base_model = InceptionResNetV2(include_top=False,
                                        input_shape=(None, None, 3),
                                        pooling='avg')
         x = base_model.output
         x = Dense(1024, activation='relu')(x)
         x = Dropout(0.5)(x)
         x = Dense(1024, activation='relu')(x)
         predictions = Dense(1, activation='sigmoid')(x)
         model = Model(base_model.input, predictions)
         if fine_tune:
             for layer in base_model.layers:
                 layer.trainable = False
         model.summary()
         return model
     elif model_name.lower() == 'xception':
         base_model = Xception(include_top=False,
                               input_shape=(None, None, 3),
                               pooling='avg')
         x = base_model.output
         x = Dense(1024, activation='relu')(x)
         x = Dropout(0.5)(x)
         x = Dense(1024, activation='relu')(x)
         predictions = Dense(1, activation='sigmoid')(x)
         model = Model(base_model.input, predictions)
         if fine_tune:
             for layer in base_model.layers:
                 layer.trainable = False
         model.summary()
         return model
     elif model_name.lower() == 'densenet121':
         base_model = DenseNet121(include_top=False,
                                  input_shape=(None, None, 3),
                                  pooling='avg')
         x = base_model.output
         x = Dense(1024, activation='relu')(x)
         x = Dropout(0.5)(x)
         x = Dense(1024, activation='relu')(x)
         predictions = Dense(1, activation='sigmoid')(x)
         model = Model(base_model.input, predictions)
         if fine_tune:
             for layer in base_model.layers:
                 layer.trainable = False
         model.summary()
         return model
     elif model_name.lower() == 'densenet169':
         base_model = DenseNet169(include_top=False,
                                  input_shape=(None, None, 3),
                                  pooling='avg')
         x = base_model.output
         x = Dense(1024, activation='relu')(x)
         x = Dropout(0.5)(x)
         x = Dense(1024, activation='relu')(x)
         predictions = Dense(1, activation='sigmoid')(x)
         model = Model(base_model.input, predictions)
         if fine_tune:
             for layer in base_model.layers:
                 layer.trainable = False
         model.summary()
         return model
     elif model_name.lower() == 'densenet201':
         base_model = DenseNet201(include_top=False,
                                  input_shape=(None, None, 3),
                                  pooling='avg')
         x = base_model.output
         x = Dense(1024, activation='relu')(x)
         x = Dropout(0.5)(x)
         x = Dense(1024, activation='relu')(x)
         predictions = Dense(1, activation='sigmoid')(x)
         model = Model(base_model.input, predictions)
         if fine_tune:
             for layer in base_model.layers:
                 layer.trainable = False
         model.summary()
         return model
     elif model_name.lower() == 'nasnetlarge':
         base_model = NASNetLarge(include_top=False,
                                  input_shape=(None, None, 3),
                                  pooling='avg')
         x = base_model.output
         x = Dense(1024, activation='relu')(x)
         x = Dropout(0.5)(x)
         x = Dense(1024, activation='relu')(x)
         predictions = Dense(1, activation='sigmoid')(x)
         model = Model(base_model.input, predictions)
         if fine_tune:
             for layer in base_model.layers:
                 layer.trainable = False
         model.summary()
         return model
     elif model_name.lower() == 'vgg19':
         base_model = VGG19(include_top=False,
                            input_shape=(None, None, 3),
                            pooling='avg')
         x = base_model.output
         x = Dense(1024, activation='relu')(x)
         x = Dropout(0.5)(x)
         x = Dense(1024, activation='relu')(x)
         predictions = Dense(1, activation='sigmoid')(x)
         model = Model(base_model.input, predictions)
         if fine_tune:
             for layer in base_model.layers:
                 layer.trainable = False
         model.summary()
         return model
     else:
         base_model = NASNetMobile(include_top=False,
                                   input_shape=(None, None, 3),
                                   pooling='avg')
         x = base_model.output
         x = Dense(1024, activation='relu')(x)
         x = Dropout(0.5)(x)
         x = Dense(1024, activation='relu')(x)
         predictions = Dense(1, activation='sigmoid')(x)
         model = Model(base_model.input, predictions)
         if fine_tune:
             for layer in base_model.layers:
                 layer.trainable = False
         model.summary()
         return model
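The if/elif chain above repeats the same head construction for every backbone. A sketch of a table-driven alternative covering the simple branches (the ResNet variants that need the extra backend/layers/models/utils keyword arguments are left out; imports are assumed as in the original):

BACKBONES = {
    'xception': Xception,
    'inceptionresnetv2': InceptionResNetV2,
    'densenet121': DenseNet121,
    'densenet169': DenseNet169,
    'densenet201': DenseNet201,
    'nasnetlarge': NASNetLarge,
}

def build_network_compact(model_name, fine_tune):
    # fall back to NASNetMobile, mirroring the original else branch
    cls = BACKBONES.get(model_name.lower(), NASNetMobile)
    base_model = cls(include_top=False, input_shape=(None, None, 3),
                     pooling='avg')
    x = Dense(1024, activation='relu')(base_model.output)
    x = Dropout(0.5)(x)
    x = Dense(1024, activation='relu')(x)
    predictions = Dense(1, activation='sigmoid')(x)
    model = Model(base_model.input, predictions)
    if fine_tune:
        for layer in base_model.layers:
            layer.trainable = False
    model.summary()
    return model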
Code example #12
"""预训练代码,生成garbage_20191111.h5文件"""

import keras
from keras.layers import Dense, GlobalAveragePooling2D, Dropout, Flatten, BatchNormalization, Input
from keras.applications import MobileNet, ResNet50, VGG16, VGG19, InceptionV3, InceptionResNetV2, NASNetLarge
from keras.preprocessing import image
from keras.applications.nasnet import preprocess_input
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Model
from keras.optimizers import Adam, Nadam
from keras import regularizers
import tensorflow as tf
import numpy as np
import time

base_model = NASNetLarge(weights='imagenet', include_top=False)

CLASS_NUM = 100

x = base_model.output
x = GlobalAveragePooling2D()(x)
x = Dense(512, activation='relu')(x)
x = Dropout(0.2)(x)
preds = Dense(CLASS_NUM, activation='softmax')(x)
model = Model(inputs=base_model.input, outputs=preds)

# All layers are trainable
for layer in model.layers:
    layer.trainable = True

train_datagen = ImageDataGenerator(preprocessing_function=preprocess_input,
Code example #13
File: trainModel.py  Project: thongdk8/zalo_landmark
    def __init__(self,
                 image_size=299,
                 batch_size=64,
                 num_classes=100,
                 trainable=True,
                 load_trained=False,
                 max_trainable=False,
                 pretrained_model='pretrained.h5',
                 init_lr=0.001,
                 n_chanels=3,
                 optimizer='adam',
                 init_epoch=0,
                 max_epoch=100,
                 net_type=0):
        try:
            os.mkdir("out_model")
            os.mkdir("logs")
        except OSError:
            print("Output directories already exist!")

        self.image_size = image_size
        self.batch_size = batch_size
        self.init_lr = init_lr
        self.max_epoch = max_epoch
        self.init_epoch = init_epoch
        self.net_type = net_type

        self.model = None
        self.pre_process = None

        input_shape = (image_size, image_size, n_chanels)

        if net_type == 0:
            self.model = DenseNet121(input_shape=input_shape,
                                     include_top=False,
                                     weights='imagenet',
                                     pooling='max')
            self.pre_process = keras.applications.densenet.preprocess_input
        elif net_type == 1:
            self.model = DenseNet169(input_shape=input_shape,
                                     include_top=False,
                                     weights='imagenet',
                                     pooling='max')
            self.pre_process = keras.applications.densenet.preprocess_input
        elif net_type == 2:
            self.model = DenseNet201(input_shape=input_shape,
                                     include_top=False,
                                     weights='imagenet',
                                     pooling='max')
            self.pre_process = keras.applications.densenet.preprocess_input
        elif net_type == 3:
            self.model = ResNet50(input_shape=input_shape,
                                  include_top=False,
                                  weights='imagenet',
                                  pooling='max')
            self.pre_process = keras.applications.resnet50.preprocess_input
        elif net_type == 4:
            self.model = InceptionV3(input_shape=input_shape,
                                     include_top=False,
                                     weights='imagenet',
                                     pooling='max')
            self.pre_process = keras.applications.inception_v3.preprocess_input
        elif net_type == 5:
            self.model = InceptionResNetV2(input_shape=input_shape,
                                           include_top=False,
                                           weights='imagenet',
                                           pooling='max')
            self.pre_process = keras.applications.inception_resnet_v2.preprocess_input
        elif net_type == 6:
            self.model = NASNetLarge(input_shape=input_shape,
                                     include_top=False,
                                     weights='imagenet',
                                     pooling='max')
            self.pre_process = keras.applications.nasnet.preprocess_input
        elif net_type == 7:
            self.model = NASNetMobile(input_shape=input_shape,
                                      include_top=False,
                                      weights='imagenet',
                                      pooling='max')
            self.pre_process = keras.applications.nasnet.preprocess_input

        x = self.model.output
        # x = GlobalAveragePooling2D()(x)
        x = Dense(1024, activation='relu')(x)  # add a fully-connected layer
        self.predictions = Dense(num_classes,
                                 activation='softmax',
                                 name='out_put')(x)
        self.model = Model(inputs=self.model.input, outputs=self.predictions)

        if load_trained:
            self.model.load_weights(pretrained_model)
            print("Load pretrained model successfully!")

        if trainable == False:
            for layer in self.model.layers:
                layer.trainable = False
            print("Use model for inference is activated!")
        if trainable and not max_trainable:
            for layer in self.model.layers[:-5]:
                layer.trainable = False
            for layer in self.model.layers[-5:]:
                layer.trainable = True
            print("Train last layers is activated!")
        if max_trainable:
            for layer in self.model.layers:
                layer.trainable = True
            print("Train whole network is activated!")

        if (optimizer == 'adam'):
            opt = Adam(lr=init_lr, beta_1=0.9, beta_2=0.999, decay=1e-6)
        else:
            opt = SGD(lr=init_lr, decay=1e-6, momentum=0.9, nesterov=True)
        self.model.compile(optimizer=opt,
                           loss='categorical_crossentropy',
                           metrics=['accuracy'])

        self.earlyStopping = callbacks.EarlyStopping(monitor='val_acc',
                                                     min_delta=0.001,
                                                     patience=10,
                                                     verbose=1)
        self.tensorBoard = callbacks.TensorBoard('./logs',
                                                 batch_size=batch_size,
                                                 write_grads=True,
                                                 write_images=True)
        self.checkpoint = callbacks.ModelCheckpoint(
            './out_model/weights.' + type_models[self.net_type] +
            '.{epoch:02d}-{acc:.2f}-{val_acc:.2f}.hdf5',
            monitor='val_acc',
            verbose=1,
            save_best_only=True,
            save_weights_only=False,
            mode='auto',
            period=1)
        self.lrController = callbacks.ReduceLROnPlateau(monitor='val_acc',
                                                        factor=0.5,
                                                        patience=3,
                                                        verbose=1,
                                                        mode='auto',
                                                        min_delta=0.0001,
                                                        cooldown=0,
                                                        min_lr=0.00001)
        self.history_ = callbacks.History()
        self.callBackList = [
            self.earlyStopping, self.tensorBoard, self.checkpoint,
            self.lrController, self.history_
        ]

        # [self.train_loss, self.train_metrics] = 2*[None]
        self.history = None
        self.dataGenerator = None
Code example #14
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: yuri tolkach
"""

#Import NASNetLarge architecture with imagenet weights
from keras.applications import NASNetLarge
conv_base = NASNetLarge(weights='imagenet', include_top=False, input_shape=(350,350,3))

from keras import models, layers
model = models.Sequential()
model.add(conv_base)
model.add(layers.Flatten())
model.add(layers.Dense(256, activation = 'relu'))
model.add(layers.Dense(3, activation = 'softmax'))


#v0, convolutional layers 17-18 free
conv_base.trainable = True
set_trainable = False

for layer in conv_base.layers:
    if layer.name == 'normal_conv_1_18' or layer.name == 'normal_conv_1_17':
        set_trainable = True
    if set_trainable:
        layer.trainable = True
    else:
        layer.trainable = False

#Optimizer = Adam
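The snippet stops at the optimizer comment; a minimal continuation consistent with it (the learning rate is an assumption):

from keras import optimizers

model.compile(loss='categorical_crossentropy',
              optimizer=optimizers.Adam(lr=1e-5),
              metrics=['acc'])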
Code example #15
def load_NASNetLarge_model():

    # NASNetLarge model, 331x331 input
    settings.SITE_MODEL = NASNetLarge(weights="imagenet")
    settings.SITE_GRAPH = tf.get_default_graph()
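Storing the default graph next to the model is the usual TF1-era pattern for serving Keras models from web-framework worker threads. A hedged inference sketch (the settings module and the image path are assumptions):

import numpy as np
from keras.preprocessing import image
from keras.applications.nasnet import preprocess_input, decode_predictions

img = image.load_img('example.jpg', target_size=(331, 331))  # hypothetical file
x = preprocess_input(np.expand_dims(image.img_to_array(img), axis=0))

with settings.SITE_GRAPH.as_default():
    preds = settings.SITE_MODEL.predict(x)
print(decode_predictions(preds, top=3))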
Code example #16
# vgg16 = VGG16() # (None, 224, 224, 3)
# model = VGG19()
model = Xception()
model = ResNet101()
model = ResNet101V2()
model = ResNet152()
model = ResNet152V2()
model = ResNet50()
model = ResNet50V2()
model = InceptionV3()
model = InceptionResNetV2()
model = MobileNet()
model = MobileNetV2()
model = DenseNet121()
model = DenseNet169()
model = DenseNet201()
model = NASNetLarge()
model = NASNetMobile()

# vgg16.summary()
'''
model= Sequential()
# model.add(vgg16)
# model.add(Flatten())
model.add(Dense(256))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dense(10, activation='softmax'))

model.summary()
'''
Code example #17
# construct the image generator for data augmentation
aug = ImageDataGenerator(rotation_range=360, width_shift_range=0.1,
    height_shift_range=0.1, shear_range=0.2, zoom_range=0.2,
    horizontal_flip=True, vertical_flip=True, fill_mode="nearest")

# initialize the model
filepath = 'keras_checkpoints/'
checkpoints = ModelCheckpoint(filepath, monitor='val_loss', verbose=0, save_best_only=True, save_weights_only=False, mode='auto', period=1)

tensboard = TensorBoard(log_dir='./logs/log-{}'.format(int(time())), histogram_freq=0, batch_size=BATCH_SIZE, write_graph=True,
                     write_grads=False, write_images=False, embeddings_freq=0,
                     embeddings_layer_names=None, embeddings_metadata=None, embeddings_data=None)
#
# Create the base pre-trained model
# Can't download weights in the kernel
base_model = NASNetLarge(weights = None, include_top=False, input_shape=(IM_SIZE, IM_SIZE, 1))

# Add a new top layer
x = base_model.output
x = Flatten()(x)

predictions = Dense(num_class, activation='softmax')(x)

# This is the model we will train
model = Model(inputs=base_model.input, outputs=predictions)

# First: train only the top layers (which were randomly initialized)
if FINE_TUNE: 
	for layer in base_model.layers: layer.trainable = False

model.compile(loss='categorical_crossentropy', 
Code example #18
File: keras_vgg.py  Project: suki0331/bc2020
from keras.applications import VGG16,VGG19, Xception, ResNet101,ResNet101V2,ResNet152
from keras.applications import ResNet152V2,ResNet50,ResNet50V2,InceptionV3,InceptionResNetV2
from keras.applications import MobileNet,MobileNetV2,DenseNet121,DenseNet169,DenseNet201
from keras.applications import NASNetLarge, NASNetMobile
from keras.models import Sequential
from keras.layers import Dense, Conv2D, MaxPooling2D, BatchNormalization, Activation,Flatten
from keras.optimizers import Adam

# applications = [VGG19, Xception, ResNet101, ResNet101V2, ResNet152,ResNet152V2, ResNet50, 
#                 ResNet50V2, InceptionV3, InceptionResNetV2,MobileNet, MobileNetV2, 
#                 DenseNet121, DenseNet169, DenseNet201]

# for i in applications:
#     take_model = i()

# vgg16 = VGG16()
nasnetlarge = NASNetLarge()
nasnetmobile =  NASNetMobile() 
Code example #19
File: Nas Net.py  Project: Boltuzamaki/Easy-CV
def Nas_Net(trainable=None, net="NASNetMobile"):

    # Preprocessing the dataset into keras feedable format

    train_datagen = ImageDataGenerator(rotation_range=rotation,
                                       width_shift_range=width_shift,
                                       height_shift_range=height_shift,
                                       rescale=scale,
                                       shear_range=shear,
                                       zoom_range=zoom,
                                       horizontal_flip=horizontal,
                                       fill_mode=fill,
                                       validation_split=validation)
    test_datagen = ImageDataGenerator(rescale=scale, )

    train_generator = train_datagen.flow_from_directory(
        path,
        target_size=target,
        batch_size=batch,
        class_mode='categorical',
        subset='training',
    )
    validation_generator = train_datagen.flow_from_directory(
        path,
        target_size=target,
        batch_size=batch,
        class_mode='categorical',
        subset='validation')

    models_list = ['NASNetLarge', 'NASNetMobile']

    # Loading the NasNet Model

    if net == "NASNetLarge":
        nasnet = NASNetLarge(include_top=False,
                             weights='imagenet',
                             input_shape=input_sh,
                             pooling=pooling_model)
    if net == "NASNetMobile":
        nasnet = NASNetMobile(include_top=False,
                              weights='imagenet',
                              input_shape=input_sh,
                              pooling=pooling_model)
    if net not in models_list:
        raise ValueError('Please provide a valid model name: NASNetLarge or NASNetMobile')
    output = nasnet.layers[-1].output
    if pooling_model is None:
        output = keras.layers.Flatten()(output)
    nasnet = Model(inputs=nasnet.input, outputs=output)
    print(nasnet.summary())
    print('\n\n\n')
    # If fine-tuning was not requested
    if trainable is None:
        model = Sequential()
        model.add(nasnet)
        model.add(Dense(hidden, activation='relu', input_dim=input_sh))
        model.add(Dropout(dropout_num))
        model.add(Dense(hidden, activation='relu'))
        model.add(Dropout(dropout_num))
        if classes == 1:
            model.add(Dense(classes, activation='sigmoid', name='Output'))
        else:
            model.add(Dense(classes, activation='softmax', name='Output'))

        for layer in nasnet.layers:
            layer.trainable = False
        print("The model summary of Nasnet  -->\n\n\n"
              )  # In this the Nasnet layers are not trainable

        for i, layer in enumerate(nasnet.layers):
            print(i, layer.name, layer.trainable)
        model.compile(
            loss=loss_param,  # Change according to data
            optimizer=optimizers.RMSprop(),
            metrics=['accuracy'])
        print("The summary of final Model \n\n\n")
        print(model.summary())
        print('\n\n\n')

        fit_history = model.fit_generator(
            train_generator,
            steps_per_epoch=len(train_generator.filenames) // batch,
            epochs=epoch,
            shuffle=True,
            validation_data=validation_generator,
            validation_steps=len(validation_generator.filenames) // batch,
            class_weight=n,
            callbacks=[
                EarlyStopping(patience=patience_param,
                              restore_best_weights=True),
                ReduceLROnPlateau(patience=patience_param)
            ])
        os.chdir(output_path)
        model.save("model.h5")
        print(fit_history.history.keys())
        plt.figure(1, figsize=(15, 8))

        plt.subplot(221)
        plt.plot(fit_history.history['accuracy'])
        plt.plot(fit_history.history['val_accuracy'])
        plt.title('model accuracy')
        plt.ylabel('accuracy')
        plt.xlabel('epoch')
        plt.legend(['train', 'valid'])

        plt.subplot(222)
        plt.plot(fit_history.history['loss'])
        plt.plot(fit_history.history['val_loss'])
        plt.title('model loss')
        plt.ylabel('loss')
        plt.xlabel('epoch')
        plt.legend(['train', 'valid'])

        plt.show()

    if trainable is not None:
        # Make last block of the conv_base trainable:

        for layer in nasnet.layers[:trainable]:
            layer.trainable = False
        for layer in nasnet.layers[trainable:]:
            layer.trainable = True

        print('Last block of the conv_base is now trainable')

        for i, layer in enumerate(nasnet.layers):
            print(i, layer.name, layer.trainable)

        model = Sequential()
        model.add(nasnet)
        model.add(Dense(hidden, activation='relu', input_dim=input_sh))
        model.add(Dropout(dropout_num))
        model.add(Dense(hidden, activation='relu'))
        model.add(Dropout(dropout_num))
        model.add(Dense(hidden, activation='relu'))
        model.add(Dropout(dropout_num))
        if classes == 1:
            model.add(Dense(classes, activation='sigmoid', name='Output'))
        else:
            model.add(Dense(classes, activation='softmax', name='Output'))

        # Do not re-freeze the base here: the layers above index `trainable`
        # were deliberately unfrozen for fine-tuning.
        print("The model summary of Nasnet -->\n\n\n")
        model.compile(
            loss=loss_param,  # Change according to data
            optimizer=optimizers.RMSprop(),
            metrics=['accuracy'])
        print("The summary of final Model \n\n\n")
        print(model.summary())
        print('\n\n\n')

        fit_history = model.fit_generator(
            train_generator,
            steps_per_epoch=len(train_generator.filenames) // batch,
            epochs=epoch,
            shuffle=True,
            validation_data=validation_generator,
            validation_steps=len(validation_generator.filenames) // batch,
            class_weight=n,
            callbacks=[
                EarlyStopping(patience=patience_param,
                              restore_best_weights=True),
                ReduceLROnPlateau(patience=patience_param)
            ])
        os.chdir(output_path)
        model.save("model.h5")
        print(fit_history.history.keys())
        plt.figure(1, figsize=(15, 8))

        plt.subplot(221)
        plt.plot(fit_history.history['accuracy'])
        plt.plot(fit_history.history['val_accuracy'])
        plt.title('model accuracy')
        plt.ylabel('accuracy')
        plt.xlabel('epoch')
        plt.legend(['train', 'valid'])

        plt.subplot(222)
        plt.plot(fit_history.history['loss'])
        plt.plot(fit_history.history['val_loss'])
        plt.title('model loss')
        plt.ylabel('loss')
        plt.xlabel('epoch')
        plt.legend(['train', 'valid'])

        plt.show()
Code example #20
VGG16_top = VGG16(include_top=False, input_shape=(224, 224, 3))
VGG19_top = VGG19(include_top=False, input_shape=(224, 224, 3))
Res50_top = ResNet50(include_top=False, input_shape=(224, 224, 3))
Xception_top = Xception(include_top=False, input_shape=(299, 299, 3))
InceptionV3_top = InceptionV3(include_top=False, input_shape=(299, 299, 3))
InceptionResNetV2_top = InceptionResNetV2(include_top=False,
                                          input_shape=(299, 299, 3))

# Less commonly used pretrained models; Keras also provides pretrained weights for these

from keras.applications import MobileNet
from keras.applications import DenseNet121, DenseNet169, DenseNet201
from keras.applications import NASNetLarge, NASNetMobile

Mobile_base = MobileNet(include_top=True, input_shape=(224, 224, 3))

Dense121_base = DenseNet121(include_top=True, input_shape=(224, 224, 3))
Dense169_base = DenseNet169(include_top=True, input_shape=(224, 224, 3))
Dense201_base = DenseNet201(include_top=True, input_shape=(224, 224, 3))

NASNetLarge_base = NASNetLarge(include_top=True, input_shape=(331, 331, 3))
NASNetMobile_base = NASNetMobile(include_top=True, input_shape=(224, 224, 3))

# -------------------------------------------------------------------------
# Networks without the top (classifier) weights
Mobile_top = MobileNet(include_top=False, input_shape=(224, 224, 3))

Dense121_top = DenseNet121(include_top=False, input_shape=(224, 224, 3))
Dense169_top = DenseNet169(include_top=False, input_shape=(224, 224, 3))
Dense201_top = DenseNet201(include_top=False, input_shape=(224, 224, 3))
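NASNetLarge is the outlier here: with include_top=True it expects 331x331 input, while most of the others default to 224 or 299. A quick check against the models instantiated above:

for name, m in [('NASNetLarge', NASNetLarge_base),
                ('NASNetMobile', NASNetMobile_base),
                ('DenseNet201', Dense201_base)]:
    print(name, m.input_shape)
# NASNetLarge (None, 331, 331, 3)
# NASNetMobile (None, 224, 224, 3)
# DenseNet201 (None, 224, 224, 3)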
Code example #21
	color_mode="rgb",
	shuffle=False,
	batch_size=config.BATCH_SIZE)

# initialize the testing generator
testGen = valAug.flow_from_directory(
	testPath,
	class_mode="categorical",
	target_size=(224, 224),
	color_mode="rgb",
	shuffle=False,
	batch_size=config.BATCH_SIZE)

# load the NASNetLarge network, ensuring the head FC layer sets are
# left off
baseModel = NASNetLarge(weights="imagenet", include_top=False,
	input_tensor=Input(shape=(224, 224, 3)))

# construct the head of the model that will be placed on top of the
# the base model
headModel = baseModel.output
headModel = Flatten(name="flatten")(headModel)
headModel = Dense(512, activation="relu")(headModel)
headModel = Dropout(0.5)(headModel)
headModel = Dense(len(config.CLASSES), activation="softmax")(headModel)

# place the head FC model on top of the base model (this will become
# the actual model we will train)
model = Model(inputs=baseModel.input, outputs=headModel)

def train_freezing(model,freezing_portion=0.8):
	# loop over all layers in the base model and freeze them so they will