Example no. 1
from tensorflow.keras.applications import NASNetLarge
from tensorflow.keras.applications.nasnet import preprocess_input
from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model

def build_model(classes=2):
    # IMAGE_SIZE is assumed to be defined elsewhere in the source module
    inputs = Input(shape=(IMAGE_SIZE, IMAGE_SIZE, 3))
    x = preprocess_input(inputs)
    x = NASNetLarge(weights=None, classes=classes)(x)
    model = Model(inputs=inputs, outputs=x)
    model.compile(loss='categorical_crossentropy', metrics=['accuracy'])
    return model
Example no. 2
def NASNetLargemodel(no_classes, shape):
    """
    NASNetLarge Learning Transferable Architectures for Scalable Image Recognition,2018
    """
    base_model = NASNetLarge(include_top=False,
                             weights='imagenet',
                             input_shape=shape)
    base_model.trainable = False
    inputs = Input(shape=shape)
    x = base_model(inputs, training=False)
    x = GlobalAveragePooling2D()(x)
    #x = Dense(1024,activation='relu')(x)
    x = Dense(1056, activation='relu')(x)
    predictions = Dense(no_classes, activation='softmax',
                        name='predictions')(x)
    model = Model(inputs, outputs=predictions)
    return model
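Example no. 3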
 def __init__(self, model_name=None):
     if model_name == 'Xception':
         base_model = Xception(weights='imagenet')
         self.preprocess_input = xception.preprocess_input
     elif model_name == 'VGG19':
         base_model = VGG19(weights='imagenet')
         self.preprocess_input = vgg19.preprocess_input
     elif model_name == 'ResNet50':
         base_model = ResNet50(weights='imagenet')
         self.preprocess_input = resnet.preprocess_input
     elif model_name == 'ResNet101':
         base_model = ResNet101(weights='imagenet')
         self.preprocess_input = resnet.preprocess_input
     elif model_name == 'ResNet152':
         base_model = ResNet152(weights='imagenet')
         self.preprocess_input = resnet.preprocess_input
     elif model_name == 'ResNet50V2':
         base_model = ResNet50V2(weights='imagenet')
         self.preprocess_input = resnet_v2.preprocess_input
     elif model_name == 'ResNet101V2':
         base_model = ResNet101V2(weights='imagenet')
         self.preprocess_input = resnet_v2.preprocess_input
     elif model_name == 'ResNet152V2':
         base_model = ResNet152V2(weights='imagenet')
         self.preprocess_input = resnet_v2.preprocess_input
     elif model_name == 'InceptionV3':
         base_model = InceptionV3(weights='imagenet')
         self.preprocess_input = inception_v3.preprocess_input
     elif model_name == 'InceptionResNetV2':
         base_model = InceptionResNetV2(weights='imagenet')
         self.preprocess_input = inception_resnet_v2.preprocess_input
     elif model_name == 'DenseNet121':
         base_model = DenseNet121(weights='imagenet')
         self.preprocess_input = densenet.preprocess_input
     elif model_name == 'DenseNet169':
         base_model = DenseNet169(weights='imagenet')
         self.preprocess_input = densenet.preprocess_input
     elif model_name == 'DenseNet201':
         base_model = DenseNet201(weights='imagenet')
         self.preprocess_input = densenet.preprocess_input
     elif model_name == 'NASNetLarge':
         base_model = NASNetLarge(weights='imagenet')
         self.preprocess_input = nasnet.preprocess_input
     elif model_name == 'NASNetMobile':
         base_model = NASNetMobile(weights='imagenet')
         self.preprocess_input = nasnet.preprocess_input
     elif model_name == 'MobileNet':
         base_model = MobileNet(weights='imagenet')
         self.preprocess_input = mobilenet.preprocess_input
     elif model_name == 'MobileNetV2':
         base_model = MobileNetV2(weights='imagenet')
         self.preprocess_input = mobilenet_v2.preprocess_input
     else:
         base_model = VGG16(weights='imagenet')
         self.preprocess_input = vgg16.preprocess_input
     self.model = Model(inputs=base_model.input,
                        outputs=base_model.layers[-2].output)
Example no. 4
def nasnet(shape, class_num):
    base_model = NASNetLarge(
        include_top=False, weights='imagenet',
        pooling='avg')  #, input_tensor=Input(shape=shape))
    nw = base_model.output

    nw = Dense(512, activation='relu')(nw)
    nw = Dropout(.4)(nw)
    nw = Dense(512, activation='relu')(nw)

    if class_num <= 2:
        output = Dense(class_num, activation='sigmoid', name='output')(nw)
    else:
        output = Dense(class_num, activation='softmax', name='output')(nw)

    base_model.trainable = False
    '''#to train only part of the model (note: 'block7a_expand_conv' is an EfficientNet
    #layer name, not a NASNet one, so the lookup below would need a valid NASNet layer)
    layer_names = [l.name for l in base_model.layers]
    idx = layer_names.index('block7a_expand_conv')
    for layer in base_model.layers[:idx]:
        layer.trainable = False
    '''
    return Model(inputs=base_model.input, outputs=output)
Example no. 5
def checkfeature(image_name):
    # Load images from directory
    image_directory = "food"
    image_list = os.listdir(image_directory)
    feature_list = []
    image_name_list = []

    # Load NASNetLarge (headless, with global average pooling)
    model_notop = NASNetLarge(weights='imagenet',
                              include_top=False,
                              pooling='avg')
    # Load image and preprocess
    img = image.load_img(os.path.join(image_directory, image_name),
                         target_size=(224, 224))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    # x = preprocess_input(x)
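    # note: with preprocess_input skipped, these features will not match a
    # preprocessed pipeline such as the one in Example no. 9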

    # Compute features
    features = model_notop.predict(x)

    # Todo:change these vals
    diff = x_train[2, :2048] - features
    print(diff.max())
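Example no. 6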
def nasnet_classification(input_size, nb_classes, final_activation="softmax"):
    # NOTE: If using imagenet, input size needs to be [331, 331].
    base_model = NASNetLarge(
        input_shape=(input_size[0], input_size[1], 3),
        include_top=False,
        weights="imagenet",
        input_tensor=None,
        pooling="avg",
    )

    x = base_model.output
    predictions = Dense(nb_classes, activation=final_activation)(x)

    model = Model(inputs=base_model.input, outputs=[
        predictions,
    ])

    return model
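Example no. 7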
# OneHotEncoding
from tensorflow.keras.utils import to_categorical
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)
print(y_train.shape, y_test.shape) 

# reshape for the CNN (a no-op here: the arrays already have four dimensions)
x_train = x_train.reshape(x_train.shape[0], x_train.shape[1], x_train.shape[2], x_train.shape[3])
x_test = x_test.reshape(x_test.shape[0], x_test.shape[1], x_test.shape[2], x_test.shape[3])
print("reshape x:", x_train.shape, x_test.shape)

# 2. Model
model1 = NASNetLarge(
    weights='imagenet', 
    include_top=False, 
    input_shape=(32,32,3)
)
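# note: depending on the Keras version, NASNetLarge with weights='imagenet' may
# require its default 331x331 input and reject a (32, 32, 3) shape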

model1.trainable = False

model = Sequential()
model.add(model1)
model.add(Flatten())
model.add(Dense(256))
model.add(BatchNormalization())
# model.add(Dropout(0.2))
model.add(Activation('relu'))
model.add(Dense(256))
model.add(Dense(10, activation='softmax'))
Example no. 8
train = ImageDataGenerator(  # opening reconstructed (snippet began mid-call); name "train" inferred from the commented calls below
                         width_shift_range=0.2,
                         height_shift_range=0.2,
                         shear_range=0.2,
                         zoom_range=0.2,
                         horizontal_flip=True,
                         fill_mode='nearest')

# test_dataset = train.flow_from_directory('C:/Users/param/Face-Mask-Detection/dataset/test',
# target_size=(224, 224),
# batch_size=32,
# class_mode='binary')
# train_dataset.class_indices

# local_weights_file = 'C:/Users/param/Face-Mask-Detection/face_detector/inceptionv3-model-10ep.h5'
pre_trained_model = NASNetLarge(weights="imagenet",
                                include_top=False,
                                input_tensor=Input(shape=(224, 224, 3)))
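# note: depending on the Keras version, NASNetLarge with imagenet weights may
# insist on a 331x331 input rather than 224x224 (cf. the note in Example no. 6)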

# pre_trained_model.load_weights(local_weights_file)
#

#
pre_trained_model.summary()

# last_layer = pre_trained_model.get_layer('mixed7')
# print('last layer output shape: ', last_layer.output_shape)
last_output = pre_trained_model.output
# construct the head of the model that will be placed on top of the
# the base model
# headModel = pre_trained_model.output
# headModel = AveragePooling2D(pool_size=(5, 5))(last_output)
Example no. 9
def extract_features_from_images():
    # Check if images have already been extracted
    try:
        feature_list = pickle.load(
            open(
                "NasNetLarge_features_balanced/image_features_NasNetLarge_avg_pooling.p",
                "rb"))
        image_name_list = pickle.load(
            open(
                "NasNetLarge_features_balanced/image_list_NasNetLarge_avg_pooling.p",
                "rb"))
        print(
            "[extract_features_from_images] Successfully loaded preexisting NasNetLarge features."
        )
        return feature_list, image_name_list
    except FileNotFoundError:
        # Extract features from images
        print(
            "[extract_features_from_images] Extracting NasNetLarge features..."
        )
        # Load images from directory
        image_directory = "food"
        image_list = os.listdir(image_directory)
        feature_list = []
        image_name_list = []

        # Load NASNetLarge (headless, with global average pooling)
        model_notop = NASNetLarge(weights='imagenet',
                                  include_top=False,
                                  pooling='avg')

        # Extract features from images
        start = time.time()
        counter = 0
        for image_name in image_list:
            try:
                # Load image and preprocess
                img = image.load_img(os.path.join(image_directory, image_name),
                                     target_size=(331, 331))
                x = image.img_to_array(img)
                x = np.expand_dims(x, axis=0)
                x = tf.keras.applications.nasnet.preprocess_input(
                    x, data_format=None)

                # Compute features
                features = model_notop.predict(x)
                feature_list.append(features)
                image_name_list.append(image_name)

                if counter % 500 == 0:
                    print(
                        f"[extract_features_from_images] {counter} images extracted"
                    )
                    pickle.dump(
                        feature_list,
                        open(
                            "NasNetLarge_features_balanced/image_features_NasNetLarge_avg_pooling.p",
                            "wb"))
                    pickle.dump(
                        image_name_list,
                        open(
                            "NasNetLarge_features_balanced/image_list_NasNetLarge_avg_pooling.p",
                            "wb"))
                counter += 1

            # handles the odd operating-system helper files in the folder TODO make this safer
            except Exception:  # a bare except would also swallow KeyboardInterrupt etc.
                print(
                    f"[extract_features_from_images] ERROR: with image name {image_name}"
                )

        print(
            f"[extract_features_from_images] Processing images took: {'%.2f' % (time.time() - start)} seconds"
        )

        # File I/O
        pickle.dump(
            feature_list,
            open(
                "NasNetLarge_features_balanced/image_features_NasNetLarge_avg_pooling.p",
                "wb"))
        pickle.dump(
            image_name_list,
            open(
                "NasNetLarge_features_balanced/image_list_NasNetLarge_avg_pooling.p",
                "wb"))
        feature_list = pickle.load(
            open(
                "NasNetLarge_features_balanced/image_features_NasNetLarge_avg_pooling.p",
                "rb"))
        image_name_list = pickle.load(
            open(
                "NasNetLarge_features_balanced/image_list_NasNetLarge_avg_pooling.p",
                "rb"))

        return feature_list, image_name_list
Example no. 10
row = 0
rows = sheet1.row(row)
# header = ["Image","MobileNetV2 Label","MobileNetV2 Percentage","","InceptionV3 Label","InceptionV3 Percentage","","VGG16 Label","VGG16 Percentage"]
header = [
    "Image", "ResNet50 Label", "ResNet50 Percentage", "", "NASNetLarge Label",
    "NASNetLarge Percentage", "", "VGG16 Label", "VGG16 Percentage"
]

for index, value in enumerate(header):
    rows.write(index, value)

# Loading application models
#mobileNet_model = MobileNetV2(weights='imagenet')
ResNet50_model = ResNet50(weights='imagenet')
#inception_model = InceptionV3(weights='imagenet')
NASnet_model = NASNetLarge(weights='imagenet')
vgg16_model = VGG16(weights='imagenet')

# Listing files from folder
test_images = [f for f in listdir('Imagens/') if isfile(join('Imagens/', f))]

# Loop to test images
for filename in test_images:

    print('\n\n\n\n-----------------------' + filename +
          '--------------------------')

    # loading the image
    path = 'Imagens/' + filename

    # img_original_inc = load_img(path, target_size=(299, 299))
Example no. 11
 def backbone(x_in):
     if backbone_type == 'ResNet50':
         return ResNet50(input_shape=x_in.shape[1:],
                         include_top=False,
                         weights=weights)(x_in)
     elif backbone_type == 'ResNet50V2':
         return ResNet50V2(input_shape=x_in.shape[1:],
                           include_top=False,
                           weights=weights)(x_in)
     elif backbone_type == 'ResNet101V2':
         return ResNet101V2(input_shape=x_in.shape[1:],
                            include_top=False,
                            weights=weights)(x_in)
     elif backbone_type == 'InceptionResNetV2':
         return InceptionResNetV2(input_shape=x_in.shape[1:],
                                  include_top=False,
                                  weights=weights)(x_in)
     elif backbone_type == 'InceptionV3':
         return InceptionV3(input_shape=x_in.shape[1:],
                            include_top=False,
                            weights=weights)(x_in)
     elif backbone_type == 'MobileNet':
         return MobileNet(input_shape=x_in.shape[1:],
                          include_top=False,
                          weights=weights)(x_in)
     elif backbone_type == 'MobileNetV2':
         return MobileNetV2(input_shape=x_in.shape[1:],
                            include_top=False,
                            weights=weights)(x_in)
     elif backbone_type == 'NASNetLarge':
         model = NASNetLarge(input_shape=x_in.shape[1:],
                             include_top=False,
                             weights=None)
         model.load_weights(WEIGHTS_DIR + "nasnet_large_no_top.h5")
         return model(x_in)
     elif backbone_type == 'NASNetMobile':
         model = NASNetMobile(input_shape=x_in.shape[1:],
                              include_top=False,
                              weights=None)
         model.load_weights(WEIGHTS_DIR + "nasnet_mobile_no_top.h5")
         return model(x_in)
     elif backbone_type == 'Xception':
         return Xception(input_shape=x_in.shape[1:],
                         include_top=False,
                         weights=weights)(x_in)
     elif backbone_type == 'MobileNetV3Small':
         model = MobileNetV3Small(input_shape=x_in.shape[1:],
                                  include_top=False,
                                  weights=None)
         model.load_weights(WEIGHTS_DIR + "mobilenet_v3_small_notop.ckpt")
         return model(x_in)
     elif backbone_type == 'MobileNetV3Large':
         model = MobileNetV3Large(input_shape=x_in.shape[1:],
                                  include_top=False,
                                  weights=None)
         model.load_weights(WEIGHTS_DIR + "mobilenet_v3_large_notop.ckpt")
         return model(x_in)
     elif backbone_type == 'EfficientNetLite0':
         model = EfficientNetLite0(input_shape=x_in.shape[1:],
                                   include_top=False,
                                   weights=None)
         model.load_weights(WEIGHTS_DIR + "efficientnet_lite0_notop.ckpt")
         return model(x_in)
     elif backbone_type == 'EfficientNetLite1':
         model = EfficientNetLite1(input_shape=x_in.shape[1:],
                                   include_top=False,
                                   weights=None)
         model.load_weights(WEIGHTS_DIR + "efficientnet_lite1_notop.ckpt")
         return model(x_in)
     elif backbone_type == 'EfficientNetLite2':
         model = EfficientNetLite2(input_shape=x_in.shape[1:],
                                   include_top=False,
                                   weights=None)
         model.load_weights(WEIGHTS_DIR + "efficientnet_lite2_notop.ckpt")
         return model(x_in)
     elif backbone_type == 'EfficientNetLite3':
         model = EfficientNetLite3(input_shape=x_in.shape[1:],
                                   include_top=False,
                                   weights=None)
         model.load_weights(WEIGHTS_DIR + "efficientnet_lite3_notop.ckpt")
         return model(x_in)
     elif backbone_type == 'EfficientNetLite4':
         model = EfficientNetLite4(input_shape=x_in.shape[1:],
                                   include_top=False,
                                   weights=None)
         model.load_weights(WEIGHTS_DIR + "efficientnet_lite4_notop.ckpt")
         return model(x_in)
     elif backbone_type == 'EfficientNetLite5':
         model = EfficientNetLite5(input_shape=x_in.shape[1:],
                                   include_top=False,
                                   weights=None)
         model.load_weights(WEIGHTS_DIR + "efficientnet_lite5_notop.ckpt")
         return model(x_in)
     elif backbone_type == 'EfficientNetLite6':
         model = EfficientNetLite6(input_shape=x_in.shape[1:],
                                   include_top=False,
                                   weights=None)
         model.load_weights(WEIGHTS_DIR + "efficientnet_lite6_notop.ckpt")
         return model(x_in)
     elif backbone_type == 'EfficientNetB0':
         model = EfficientNetB0(input_shape=x_in.shape[1:],
                                include_top=False,
                                weights=None)
         model.load_weights(WEIGHTS_DIR + "efficientnetb0_notop.ckpt")
         return model(x_in)
     elif backbone_type == 'EfficientNetB1':
         model = EfficientNetB1(input_shape=x_in.shape[1:],
                                include_top=False,
                                weights=None)
         model.load_weights(WEIGHTS_DIR + "efficientnetb1_notop.ckpt")
         return model(x_in)
     elif backbone_type == 'EfficientNetB2':
         model = EfficientNetB2(input_shape=x_in.shape[1:],
                                include_top=False,
                                weights=None)
         model.load_weights(WEIGHTS_DIR + "efficientnetb2_notop.ckpt")
         return model(x_in)
     elif backbone_type == 'EfficientNetB3':
         model = EfficientNetB3(input_shape=x_in.shape[1:],
                                include_top=False,
                                weights=None)
         model.load_weights(WEIGHTS_DIR + "efficientnetb3_notop.ckpt")
         return model(x_in)
     elif backbone_type == 'EfficientNetB4':
         model = EfficientNetB4(input_shape=x_in.shape[1:],
                                include_top=False,
                                weights=None)
         model.load_weights(WEIGHTS_DIR + "efficientnetb4_notop.ckpt")
         return model(x_in)
     elif backbone_type == 'EfficientNetB5':
         model = EfficientNetB5(input_shape=x_in.shape[1:],
                                include_top=False,
                                weights=None)
         model.load_weights(WEIGHTS_DIR + "efficientnetb5_notop.ckpt")
         return model(x_in)
     elif backbone_type == 'EfficientNetB6':
         model = EfficientNetB6(input_shape=x_in.shape[1:],
                                include_top=False,
                                weights=None)
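         # note: this is the only branch that guards its weight load behind use_pretrain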
         if use_pretrain:
             model.load_weights(WEIGHTS_DIR + "efficientnetb6_notop.ckpt")
         return model(x_in)
     elif backbone_type == 'EfficientNetB7':
         model = EfficientNetB7(input_shape=x_in.shape[1:],
                                include_top=False,
                                weights=None)
         model.load_weights(WEIGHTS_DIR + "efficientnetb7_notop.ckpt")
         return model(x_in)
     elif backbone_type == 'MnasNetA1':
         return MnasNetModel(input_shape=x_in.shape[1:],
                             include_top=False,
                             weights=None,
                             name="MnasNetA1")(x_in)
     elif backbone_type == 'MnasNetB1':
         return MnasNetModel(input_shape=x_in.shape[1:],
                             include_top=False,
                             weights=None,
                             name="MnasNetB1")(x_in)
     elif backbone_type == 'MnasNetSmall':
         return MnasNetModel(input_shape=x_in.shape[1:],
                             include_top=False,
                             weights=None,
                             name="MnasNetSmall")(x_in)
     else:
         raise TypeError('backbone_type error!')
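Example no. 12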
history = model.fit(
    train_generator,
    epochs=epochs,
    validation_data=val_generator,
    workers=4
)

score = model.evaluate(val_generator, verbose=2)
print('Validation loss:', score[0])
print('Validation accuracy:', score[1])

"""## NASNetLarge"""

from tensorflow.keras.applications import NASNetLarge

net= NASNetLarge(include_top=False, weights='imagenet', input_tensor=Input(shape=(150,150,3))) 

for layer in net.layers[:]:
    layer.trainable = True

x = net.output
x = Flatten()(x)
x = Dropout(0.5)(x)
output_layer = Dense(1, activation='sigmoid', name='sigmoid')(x)
model = Model(inputs=net.input, outputs=output_layer)

# initiate RMSprop optimizer ('lr' and 'decay' are deprecated aliases in newer tf.keras)
opt = keras.optimizers.RMSprop(learning_rate=0.0001, decay=1e-6)

# Let's train the model using RMSprop
model.compile(loss='binary_crossentropy',
              optimizer=opt,
              metrics=['accuracy'])  # closing arguments assumed; the snippet was cut off mid-call
Example no. 13
def model():
    #mirrored_strategy = tf.distribute.MirroredStrategy() ###used when using multi-GPU setup. Currently commented since last used on single GPU setup
    tf.config.experimental.set_memory_growth(
        tf.config.list_physical_devices('GPU')[0],
        True)  #initialises single GPU for use in training
    total_train = 0
    total_val = 0
    for i in os.listdir('train2'):
        total_train += len(
            os.listdir(f'train2/{i}'))  #count number of training images
    for i in os.listdir('validation2'):
        total_val += len(
            os.listdir(f'validation2/{i}'))  #count number of validation images
    print("Total training images:" + str(total_train))
    print("Total validation images:" + str(total_val))
    train_dir = 'train2'  #directory of training images
    validation_dir = 'validation2'  #directory of validation images
    batch_size = 8
    epochs = 25
    IMG_HEIGHT = 331
    IMG_WIDTH = 331
    train_image_generator = ImageDataGenerator(  #applies image augmentation as well as normalisation of RGB values
        rescale=1. / 255,  #the augmentation used is explained further in the report, under the Machine Learning Development section
        rotation_range=15,
        width_shift_range=.15,
        height_shift_range=.15,
        horizontal_flip=True,
        zoom_range=0.15)

    validation_image_generator = ImageDataGenerator(
        rescale=1. / 255  #no augmentation for validation images, since they serve as an indicator of real-world accuracy
    )

    train_data_gen = train_image_generator.flow_from_directory(
        batch_size=batch_size,
        directory=train_dir,
        shuffle=True,
        target_size=(IMG_HEIGHT, IMG_WIDTH),
        class_mode='sparse',
        color_mode='rgb')
    val_data_gen = validation_image_generator.flow_from_directory(
        batch_size=batch_size,
        directory=validation_dir,
        target_size=(IMG_HEIGHT, IMG_WIDTH),
        class_mode='sparse',
        color_mode='rgb')
    counter = Counter(train_data_gen.classes)
    max_val = float(max(counter.values()))
    class_weights = {
        class_id: max_val / num_images
        for class_id, num_images in counter.items()
    }  #Calculates class weights. Usage is explained further in report
    #with mirrored_strategy.scope(): ###used when using multi-GPU setup. Currently commented since last used on single GPU setup
    model = NASNetLarge(
        weights='imagenet'
    )  #use NASNetLarge model with imagenet weights for transfer learning
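    # note: with the default top this loads the full 1000-class ImageNet classifier;
    # the sparse labels below are trained against those 1000 output units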

    def top3_acc(
        y_true, y_pred
    ):  #defines a new metric function for the model: returns top-3 accuracy
        return tf.keras.metrics.sparse_top_k_categorical_accuracy(y_true,
                                                                  y_pred,
                                                                  k=3)

    #model.trainable = True
    #set_trainable = False
    #for layer in model.layers: ###freeze layers before activation_166; layers after activation_166 are unfrozen to facilitate faster training and better transfer-learning accuracy
    #if layer.name == 'activation_166': ###currently commented out because test accuracy utilising this technique was ~0.5% worse
    #set_trainable = True
    #if set_trainable:
    #layer.trainable = True
    #else:
    #layer.trainable = False
    #print("layer {} is {}".format(layer.name, '+++trainable' if layer.trainable else '---frozen'))
    model.compile(optimizer=SGD(0.0005, 0.88, False),
                  loss='sparse_categorical_crossentropy',
                  metrics=['sparse_categorical_accuracy',
                           top3_acc])  #compiles model
    model.summary()
    checkpoint_path1 = "checkpoint/NASNetLarge-typesgd8-30-10.h5"  #location of model to save to, includes filename. Change filename portion to rename

    checkpoint_dir1 = os.path.dirname(checkpoint_path1)

    #    model = load_model(checkpoint_path1)
    #    model = load_weights(checkpoint_path1)

    def scheduler(
        epoch
    ):  #learning rate scheduler, decreases learning rate after a certain number of epochs
        if epoch < 7:
            return 0.00001
        else:
            return 0.00001 * tf.math.exp(0.30 * (7 - epoch))

    lr_schedule = tf.keras.callbacks.LearningRateScheduler(scheduler)

    cp_callback = tf.keras.callbacks.ModelCheckpoint(  #save the model with the best validation loss after every epoch;
        filepath=checkpoint_path1,  #if a new epoch gives a worse validation loss, the old model is retained
        save_weights_only=False,
        monitor='val_loss',
        save_best_only=True,
        verbose=1)
    early_callback = tf.keras.callbacks.EarlyStopping(  #model will prematurely stop training if validation accuracy does not increase in 5 epochs
        monitor='val_sparse_categorical_accuracy',
        min_delta=0.000001,
        patience=5,
        verbose=0,
        mode='auto',
        baseline=None,
        restore_best_weights=True)
    #    reduce_lr = tf.keras.callbacks.ReduceLROnPlateau(
    #            monitor='val_loss', factor=0.1, patience=3, verbose=1, mode='auto',
    #           min_delta=0.03, cooldown=2, min_lr=0.000001
    #        )

    history = model.fit_generator(  #train model (fit_generator is deprecated in newer tf.keras; model.fit accepts generators directly)
        train_data_gen,
        steps_per_epoch=total_train // batch_size,
        epochs=epochs,
        validation_data=val_data_gen,
        validation_steps=total_val // batch_size,
        callbacks=[lr_schedule, cp_callback, early_callback],
        verbose=1,
        class_weight=class_weights)
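Example no. 14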
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.1)
print(x_train.shape)
print(x_test.shape)
print(y_train.shape)
print(y_test.shape)

from tensorflow.keras.applications import NASNetLarge
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Flatten, GlobalAveragePooling2D
from tensorflow.keras.layers import BatchNormalization  # Normalization is needed when wiring one model's intermediate outputs into another
from tensorflow.keras.models import Model
# original
base_model = NASNetLarge(
    input_shape=(331, 331, 3),
    include_top=False,
    weights="imagenet",
    input_tensor=None,
    pooling=None,
    #classes="2",
)

for layer in base_model.layers:
    layer.trainable = False

x = base_model.output
x = GlobalAveragePooling2D()(x)
x = Dense(128, activation="relu")(x)
predictions = Dense(2, activation="softmax")(x)
model = Model(base_model.input, predictions)
model.summary()

from tensorflow.keras.losses import SparseCategoricalCrossentropy
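Example no. 15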
elif use_the_model == 3:  # 'is' on an int literal is unreliable; '==' is the correct comparison
    base_model = ResNet50(weights='imagenet', include_top=False)
    model_name = 'ResNet50'
    epoch_num = 30
    
elif use_the_model == 4:
    base_model = InceptionResNetV2(weights='imagenet', include_top=False)
    model_name = 'InceptionResNetV2'
    epoch_num = 50
    
elif use_the_model == 5:
    base_model = NASNetMobile(input_shape=(224,224,3), weights='imagenet', include_top=False)
    model_name = 'NASNetMobile'
    epoch_num = 50
elif use_the_model == 6:
    base_model = NASNetLarge(input_shape=(331,331,3), weights='imagenet', include_top=False)
    model_name = 'NASNetLarge'
    epoch_num = 50
    
elif use_the_model == 7:
    base_model = MobileNetV2(weights='imagenet', include_top=False)
    model_name = 'MobileNetV2'
    epoch_num = 70
    
elif use_the_model == 8:
    base_model = DenseNet121(weights='imagenet', include_top=False)
    model_name = 'DenseNet121'
    epoch_num = 50
    
elif use_the_model == 9:
    base_model = PureFoodNet.getModel(input_shape=train_generator.image_shape)
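Example no. 16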
print("ResNet101V2",len(vgg16.trainable_weights)/2) 
print('----------------------------------------------------------------------------')
vgg16 = ResNet152()
# vgg16.summary()
print("ResNet152",len(vgg16.trainable_weights)/2) 
print('----------------------------------------------------------------------------')
vgg16 = ResNet50()
# vgg16.summary()
print("ResNet50",len(vgg16.trainable_weights)/2) 
print('----------------------------------------------------------------------------')
vgg16 = ResNet50V2()
# vgg16.summary()
print("ResNet50V2",len(vgg16.trainable_weights)/2) 

print('----------------------------------------------------------------------------')
vgg16 = NASNetLarge()
# vgg16.summary()
print("NASNetLarge",len(vgg16.trainable_weights)/2) 

print('----------------------------------------------------------------------------')
vgg16 = NASNetMobile()
# vgg16.summary()
print("NASNetMobile",len(vgg16.trainable_weights)/2) 

print('----------------------------------------------------------------------------')
vgg16 = DenseNet121()
# vgg16.summary()
print("DenseNet121",len(vgg16.trainable_weights)/2) 

print('----------------------------------------------------------------------------')
vgg16 = DenseNet169()
Example no. 17
def create_model(
    model_name, log_dir, args
):  # optimizer, learning rate, activation, neurons, batch size, epochs...

    input_shape = input_size(model_name, args)

    if args.head == 'max' or (args.base_trainable
                              and args.head != 't_complex'):
        pool = 'max'
    else:
        pool = 'none'
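        # note: the string 'none' (rather than None) simply falls through to "no pooling" in keras applications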

    if model_name == 'VGG16':
        conv_base = VGG16(weights='imagenet',
                          include_top=False,
                          pooling=pool,
                          input_shape=input_shape)
    elif model_name == 'VGG19':
        conv_base = VGG19(weights='imagenet',
                          include_top=False,
                          pooling=pool,
                          input_shape=input_shape)
    elif model_name == 'ResNet50':
        conv_base = ResNet50(weights='imagenet',
                             include_top=False,
                             pooling=pool,
                             input_shape=input_shape)
    elif model_name == 'InceptionV3':
        conv_base = InceptionV3(weights='imagenet',
                                include_top=False,
                                pooling=pool,
                                input_shape=input_shape)
    elif model_name == 'Xception':
        conv_base = Xception(weights='imagenet',
                             include_top=False,
                             pooling=pool,
                             input_shape=input_shape)
    elif model_name == 'InceptionResNetV2':
        conv_base = InceptionResNetV2(weights='imagenet',
                                      include_top=False,
                                      pooling=pool,
                                      input_shape=input_shape)
    elif model_name == 'NASNetMobile':
        conv_base = NASNetMobile(weights='imagenet',
                                 include_top=False,
                                 pooling=pool,
                                 input_shape=input_shape)
    elif model_name == 'NASNetLarge':
        conv_base = NASNetLarge(weights='imagenet',
                                include_top=False,
                                pooling=pool,
                                input_shape=input_shape)
    elif model_name == 'DenseNet201':
        conv_base = DenseNet201(weights='imagenet',
                                include_top=False,
                                pooling=pool,
                                input_shape=input_shape)
    elif model_name == 'MobileNetV2':
        conv_base = MobileNetV2(weights='imagenet',
                                include_top=False,
                                pooling=pool,
                                input_shape=input_shape)
    else:
        conv_base = None
        print("Model name not known!")
        exit()

    conv_base.trainable = args.base_trainable

    model = models.Sequential()
    if args.base_trainable:
        if args.head == 't_complex':
            model = models.Sequential()
            model.add(conv_base)
            model.add(
                layers.Conv2D(filters=1024,
                              kernel_size=(3, 3),
                              padding='same',
                              strides=1))
            model.add(layers.Flatten())  # ??
            model.add(layers.Dense(1024, activation='sigmoid'))
            model.add(layers.Dense(256, activation='sigmoid'))
            model.add(layers.Dense(args.CLASSES_NO, activation='softmax')
                      )  # (samples, new_rows, new_cols, filters)
        else:
            model.add(conv_base)
            model.add(layers.Dense(args.CLASSES_NO, activation='softmax'))
    elif args.head == 'dense':
        # outside only?
        model.add(conv_base)
        model.add(layers.Flatten())
        model.add(layers.Dropout(0.5))
        model.add(layers.Dense(256, activation='relu'))
        model.add(layers.Dropout(0.5))
        model.add(layers.Dense(128, activation='relu'))
        model.add(layers.Dense(args.CLASSES_NO, activation='softmax'))
    elif args.head == 'max':
        model.add(conv_base)
        model.add(layers.Dense(512, activation='relu'))
        model.add(layers.Dropout(0.5))
        model.add(layers.Dense(256, activation='relu'))
        model.add(layers.Dense(args.CLASSES_NO, activation='softmax'))
    elif args.head == 'mod':
        model = models.Sequential()
        model.add(conv_base)
        model.add(
            layers.Conv2D(filters=2048, kernel_size=(3, 3), padding='valid'))
        model.add(layers.Flatten())  # ??
        model.add(layers.Dropout(0.5))
        model.add(layers.Dense(1024, activation='sigmoid'))
        model.add(layers.Dense(256, activation='relu'))
        model.add(layers.Dense(
            args.CLASSES_NO,
            activation='softmax'))  # (samples, new_rows, new_cols, filters)

    if args.lr_decay:
        lr_schedule = ExponentialDecay(args.INIT_LEARN_RATE,
                                       decay_steps=args.DECAY_STEPS,
                                       decay_rate=args.DECAY_RATE,
                                       staircase=True)
        model.compile(loss='categorical_crossentropy',
                      optimizer=SGD(lr_schedule),
                      metrics=['acc'])  # TODO: try different optimisers?
    else:
        model.compile(loss='categorical_crossentropy',
                      optimizer=Adam(learning_rate=args.LEARNING_RATE),
                      metrics=['acc'])

    with open(os.path.join(log_dir, 'modelsummary.txt'), 'w') as f:
        with redirect_stdout(f):
            model.summary()
    model.summary()  # summary() already prints; wrapping it in print() would only add "None"
    return model
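Example no. 18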
def get_model(args, loss_function='binary_crossentropy', initial_lr=0.0001, weights="imagenet"):
  """
  Select a model for classification.
  Parameters:
    args (argparse.Namespace) = parsed arguments; carries information such as the input size.
    loss_function (str) = the loss function to use.
    initial_lr (float) = initial learning rate for the Adam optimizer.
  Returns the compiled model.
  """

  if args.model=='Xception':

    if args.optm=='Adam':
        optm = Adam(lr=0.00005,clipnorm=1.) 
    if args.optm=='SGD':
        optm = SGD(lr=0.0001, momentum=0.9, decay=0, nesterov=True)  
            
    pre_trained_model = xception.Xception(weights="/scratch/parceirosbr/bigoilict/share/Polen/clasificacion/weigths/xception_weights_tf_dim_ordering_tf_kernels_notop.h5",include_top=False, input_shape=(args.size, args.size, 3))
    x=pre_trained_model.output
    x=GlobalAveragePooling2D()(x)
    #x=Dropout(rate=0.5)(x)    
    x=Dense(2048, activation='relu')(x)
    #x=Dropout(rate=0.5)(x)
    x=Dense(1024, activation='relu')(x)
    #x=Dropout(rate=0.5)(x)
    x=Dense(512,activation='relu')(x) 
    x=Dropout(rate=0.2)(x)
    preds=Dense(args.classes,activation='softmax')(x) 
    model=Model(inputs=pre_trained_model.input, outputs=preds)

    for layer in pre_trained_model.layers:
        layer.trainable=False


    for layer in model.layers:
        if hasattr(layer, 'moving_mean') and hasattr(layer, 'moving_variance'):
            layer.trainable = True
            K.eval(K.update(layer.moving_mean, K.zeros_like(layer.moving_mean)))
            K.eval(K.update(layer.moving_variance, K.zeros_like(layer.moving_variance)))
        else:
            layer.trainable = False
            
    for layer in model.layers[129:]:
        layer.trainable=True
    #for layer in model.layers[:129]:
    #    layer.trainable=False            
    #for layer in model.layers[129:]:
    #    layer.trainable=True

    #for layer in model.layers:
    #    if hasattr(layer, 'moving_mean') and hasattr(layer, 'moving_variance'):
    #        layer.trainable = True
    #        K.eval(K.update(layer.moving_mean, K.zeros_like(layer.moving_mean)))
    #        K.eval(K.update(layer.moving_variance, K.zeros_like(layer.moving_variance)))
        #else:
        #    layer.trainable = False
            
    #for layer in model.layers[129:]:
    #    layer.trainable = True

    model.compile(loss=loss_function, optimizer=optm,metrics=['accuracy',f1,'AUC','MeanSquaredError'])
    model.summary()    
    return model

  if args.model=='Xception_1':

    if args.optm=='Adam':
        optm = Adam(lr=0.0001,clipnorm=1.) 
    if args.optm=='SGD':
        optm = SGD(lr=0.0001, momentum=0.9, decay=0, nesterov=True)  
            
    pre_trained_model = xception.Xception(weights="/scratch/parceirosbr/bigoilict/share/Polen/clasificacion/weigths/xception_weights_tf_dim_ordering_tf_kernels_notop.h5",include_top=False, input_shape=(args.size, args.size, 3))
    x=pre_trained_model.output
    x=GlobalAveragePooling2D()(x)
    #x=Dropout(rate=0.5)(x)    
    x=Dense(2048, activation='relu')(x)
    #x=Dropout(rate=0.5)(x)
    x=Dense(1024, activation='relu')(x)
    #x=Dropout(rate=0.5)(x)
    x=Dense(512,activation='relu')(x) 
    x=Dropout(rate=0.2)(x)
    preds=Dense(args.classes,activation='softmax')(x) 
    model=Model(inputs=pre_trained_model.input, outputs=preds)

    for layer in pre_trained_model.layers:
        layer.trainable=True


    for layer in model.layers:
       if hasattr(layer, 'moving_mean') and hasattr(layer, 'moving_variance'):
           layer.trainable = True
           K.eval(K.update(layer.moving_mean, K.zeros_like(layer.moving_mean)))
           K.eval(K.update(layer.moving_variance, K.zeros_like(layer.moving_variance)))
    #   else:
    #       layer.trainable = False

    #for layer in model.layers[:129]:
    #    layer.trainable=False            
    #for layer in model.layers[129:]:
    #    layer.trainable=True

    #for layer in model.layers:
    #    if hasattr(layer, 'moving_mean') and hasattr(layer, 'moving_variance'):
    #        layer.trainable = True
    #        K.eval(K.update(layer.moving_mean, K.zeros_like(layer.moving_mean)))
    #        K.eval(K.update(layer.moving_variance, K.zeros_like(layer.moving_variance)))
        #else:
        #    layer.trainable = False
            
    #for layer in model.layers[129:]:
    #    layer.trainable = True

    model.compile(loss=loss_function, optimizer=optm,metrics=['accuracy',f1,'AUC','MeanSquaredError'])
    model.summary()    
    return model


  if args.model=='InceptionV3':

    if args.optm=='Adam':
        optm = Adam(lr=0.0001,clipnorm=1.) 
    if args.optm=='SGD':
        optm = SGD(lr=0.0001, momentum=0.9, decay=0, nesterov=True,clipnorm=1.)  
            
    pre_trained_model = InceptionV3(input_shape=(args.size, args.size, 3), include_top=False, weights="/scratch/parceirosbr/bigoilict/share/Polen/clasificacion/weigths/inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5")

    #last_layer = pre_trained_model.get_layer('mixed10')
    last_output = pre_trained_model.output#last_layer.output
    #x = GlobalMaxPooling2D()(last_output)
 
    x = GlobalAveragePooling2D()(last_output)
    # Add a fully connected layer with 512 hidden units and ReLU activation
    #x=Dropout(rate=0.5)(x)   
    x = Dense(2048, activation='relu')(x)
    #x=Dropout(rate=0.2)(x)
    #x=Dropout(rate=0.5)(x)
    x=Dense(1024, activation='relu')(x)
    #x=Dropout(rate=0.5)(x)
    x=Dense(512,activation='relu')(x) 
    #x=Dropout(rate=0.2)(x)
    #x = Dense(512, activation='relu')(x)    
    x=Dropout(rate=0.2)(x)
    # Add a final sigmoid layer for classification
    x = Dense(args.classes, activation='softmax')(x)
    # Configure and compile the model

    model = Model(pre_trained_model.input, x)

    #for layer in pre_trained_model.layers:
    #    layer.trainable = True
    #for layer in model.layers:
    #    layer.trainable = True


    for layer in pre_trained_model.layers:
        layer.trainable=True

    #for layer in model.layers:
    #   if hasattr(layer, 'mixed10') and hasattr(layer, 'mixed10'):
    #       layer.trainable = True
    #       K.eval(K.update(layer.moving_mean, K.zeros_like(layer.moving_mean)))
    #       K.eval(K.update(layer.moving_variance, K.zeros_like(layer.moving_variance)))
        #else:
        #   layer.trainable = False
    for layer in model.layers[280:]:
        layer.trainable = True
   # for layer in pre_trained_model.layers:
    #    layer.trainable = False
    model.compile(loss=loss_function, optimizer=optm,metrics=['accuracy',f1,'AUC','MeanSquaredError'])
    model.summary()
    return model

  if args.model=='InceptionV3_1':

    if args.optm=='Adam':
        optm = Adam(lr=0.00005,clipnorm=1.) 
    if args.optm=='SGD':
        optm = SGD(lr=0.0001, momentum=0.9, decay=0, nesterov=True,clipnorm=1.)  
            
    pre_trained_model = InceptionV3(input_shape=(args.size, args.size, 3), include_top=False, weights="/scratch/parceirosbr/bigoilict/share/Polen/clasificacion/weigths/inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5")

    #last_layer = pre_trained_model.get_layer('mixed10')
    last_output = pre_trained_model.output#last_layer.output
    #x = GlobalMaxPooling2D()(last_output)
 
    x = GlobalAveragePooling2D()(last_output)
    # Add a fully connected layer with 512 hidden units and ReLU activation
    x=Dropout(rate=0.5)(x)   
    #x = Dense(2048, activation='relu')(x)
    x=Dense(1024, activation='relu')(x)
    #x=Dropout(rate=0.2)(x)
    x=Dropout(rate=0.5)(x)    
    x=Dense(1024, activation='relu')(x)
    x=Dropout(rate=0.5)(x)
    x=Dense(512,activation='relu')(x) 
    #x=Dropout(rate=0.2)(x)
    #x = Dense(512, activation='relu')(x)    
    x=Dropout(rate=0.5)(x)
    # Add a final sigmoid layer for classification
    x = Dense(args.classes, activation='softmax')(x)
    # Configure and compile the model

    model = Model(pre_trained_model.input, x)

    #for layer in pre_trained_model.layers:
    #    layer.trainable = True
    #for layer in model.layers:
    #    layer.trainable = True


    for layer in pre_trained_model.layers:
        layer.trainable=False

    #for layer in model.layers:
    #   if hasattr(layer, 'mixed10') and hasattr(layer, 'mixed10'):
    #       layer.trainable = True
    #       K.eval(K.update(layer.moving_mean, K.zeros_like(layer.moving_mean)))
    #       K.eval(K.update(layer.moving_variance, K.zeros_like(layer.moving_variance)))
        #else:
        #   layer.trainable = False
    #for layer in model.layers[280:]:
    #    layer.trainable = True
   # for layer in pre_trained_model.layers:
    #    layer.trainable = False



    model.compile(loss=loss_function, optimizer=optm,metrics=['accuracy',f1,'AUC','MeanSquaredError'])
    model.summary()
    return model

  if args.model=='Resnet50':

    if args.optm=='Adam':
        optm = Adam(lr=0.00005,clipnorm=1.) 
    if args.optm=='SGD':
        optm = SGD(lr=0.0001, momentum=0.9, decay=0, nesterov=True,clipnorm=1.)  
            
    pre_trained_model = resnet50.ResNet50(weights="/scratch/parceirosbr/bigoilict/share/Polen/radar_temp/weigths/resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5",include_top=False, input_shape=(args.size, args.size, 3))
    x=pre_trained_model.output
    x=GlobalAveragePooling2D()(x)
    x=Dropout(rate=0.3)(x)
    x=Dense(1024, activation='relu')(x)
    x=Dropout(rate=0.5)(x)
    x=Dense(512,activation='relu')(x) 
    preds=Dense(2,activation='softmax')(x) 

    model=Model(inputs=pre_trained_model.input, outputs=preds)

    # first: train only the top layers (which were randomly initialized)
    # i.e. freeze all convolutional InceptionV3 layers
    #for layer in model.layers:
    #    if hasattr(layer, 'moving_mean') and hasattr(layer, 'moving_variance'):
    #        layer.trainable = True
    #        K.eval(K.update(layer.moving_mean, K.zeros_like(layer.moving_mean)))
    #        K.eval(K.update(layer.moving_variance, K.zeros_like(layer.moving_variance)))
    #    else:
    #        layer.trainable = False
    for layer in pre_trained_model.layers:
        layer.trainable=True      
    for layer in model.layers[165:]:
        layer.trainable=True           
    #for layer in model.layers[165:]:
    #    layer.trainable=True

    # compile the model (should be done *after* setting layers to non-trainable)
    model.compile(loss=loss_function, optimizer=optm,metrics=['accuracy',f1,'AUC','MeanSquaredError'])
    model.summary()
    return model

  if args.model=='vgg16':

    if args.optm=='Adam':
        optm = Adam(lr=0.0001,clipnorm=1.) 
    if args.optm=='SGD':
        optm = SGD(lr=0.0001, momentum=0.9, decay=0, nesterov=True,clipnorm=1.)  
            
    pre_trained_model = vgg16.VGG16(weights="/scratch/parceirosbr/bigoilict/share/Polen/radar_temp/weigths/vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5",include_top=False, input_shape=(args.size, args.size, 3))
    x=pre_trained_model.output
    x=GlobalAveragePooling2D()(x)
    #x=Dense(1024,activation='relu')(x) #we add dense layers so that the model can learn more complex functions and classify for better results.
    x=Dropout(rate=0.5)(x)
    x=Dense(1024,activation='relu')(x) #dense layer 2
    x=Dropout(rate=0.5)(x)
    x=Dense(512,activation='relu')(x) #dense layer 3

    preds=Dense(2,activation='softmax')(x) #final layer with softmax activation

    model=Model(inputs=pre_trained_model.input, outputs=preds)

    for layer in pre_trained_model.layers:
        layer.trainable = True

    # compile the model (should be done *after* setting layers to non-trainable)
    model.compile(loss=loss_function, optimizer=optm,metrics=['accuracy',f1,'AUC','MeanSquaredError'])
    model.summary()
    return model

  if args.model=='MobileNetV2':

    if args.optm=='Adam':
        optm = Adam(lr=0.0001,clipnorm=1.) 
    if args.optm=='SGD':
        optm = SGD(lr=0.0001, momentum=0.9, decay=0, nesterov=True,clipnorm=1.)  
            
    pre_trained_model = MobileNetV2(weights='/scratch/parceirosbr/bigoilict/share/Polen/clasificacion/weigths/mobilenet_v2_weights_tf_dim_ordering_tf_kernels_1.0_224_no_top.h5',include_top=False, input_shape=(args.size, args.size, 3))
    x=pre_trained_model.output
    x=GlobalAveragePooling2D()(x)
    x=Dense(2048,activation='relu')(x)
    x=Dense(1024,activation='relu')(x) #we add dense layers so that the model can learn more complex functions and classify for better results.
    #preds=Dense(2,activation='softmax')(x) #final layer with softmax activation
    x=Dense(512,activation='relu')(x) #we add dense layers so that the model can learn more complex functions and classify for better results.
    preds=Dense(2,activation='softmax')(x) #final layer with softmax activation
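    # note: the Dropout on the next line comes after 'preds' is taken and never feeds the model output (dead code)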
    x=Dropout(rate=0.2)(x)
    model=Model(inputs=pre_trained_model.input, outputs=preds)
    for layer in pre_trained_model.layers:
        layer.trainable = True

    # compile the model (should be done *after* setting layers to non-trainable)
    model.compile(loss=loss_function, optimizer=optm,metrics=['accuracy',f1,'AUC','MeanSquaredError'])
    model.summary()
    return model

  if args.model=='MobileNetV2_1':

    if args.optm=='Adam':
        optm = Adam(lr=0.0002,clipnorm=1.) 
    if args.optm=='SGD':
        optm = SGD(lr=0.0001, momentum=0.9, decay=0, nesterov=True,clipnorm=1.)  
            
    pre_trained_model = MobileNetV2(weights='/scratch/parceirosbr/bigoilict/share/Polen/clasificacion/weigths/mobilenet_v2_weights_tf_dim_ordering_tf_kernels_1.0_224_no_top.h5',include_top=False, input_shape=(args.size, args.size, 3))
    x=pre_trained_model.output
    x=GlobalAveragePooling2D()(x)
    x=Dense(1024,activation='relu')(x)
    x=Dense(1024,activation='relu')(x) #we add dense layers so that the model can learn more complex functions and classify for better results.
    x=Dense(512,activation='relu')(x) #we add dense layers so that the model can learn more complex functions and classify for better results.
    preds=Dense(2,activation='softmax')(x) #final layer with softmax activation
    model=Model(inputs=pre_trained_model.input, outputs=preds)

    for layer in model.layers[:20]:
        layer.trainable=True
    for layer in model.layers[20:]:
        layer.trainable=True

    # compile the model (should be done *after* setting layers to non-trainable)
    model.compile(loss=loss_function, optimizer=optm,metrics=['accuracy',f1,'AUC','MeanSquaredError'])
    model.summary()
    return model

  if args.model=='inception_resnet_v2':

    if args.optm=='Adam':
        optm = Adam(lr=0.0001,clipnorm=1.) 
    if args.optm=='SGD':
        optm = SGD(lr=0.0001, momentum=0.9, decay=0, nesterov=True,clipnorm=1.)  
            
    pre_trained_model = InceptionResNetV2(weights='/scratch/parceirosbr/bigoilict/share/Polen/radar_temp/weigths/inception_resnet_v2_weights_tf_dim_ordering_tf_kernels_notop.h5',include_top=False, input_shape=(args.size, args.size, 3))

    for layer in pre_trained_model.layers:
        if hasattr(layer, 'moving_mean') and hasattr(layer, 'moving_variance'):
            layer.trainable = True
            K.eval(K.update(layer.moving_mean, K.zeros_like(layer.moving_mean)))
            K.eval(K.update(layer.moving_variance, K.zeros_like(layer.moving_variance)))
        else:
            layer.trainable = False
    for layer in pre_trained_model.layers:
        layer.trainable = True
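    # note: the loop above re-enables training for every layer, overriding the BN-freezing loop before it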
        
    last_layer = pre_trained_model.get_layer('conv_7b_ac')
    last_output = last_layer.output
    x = GlobalMaxPooling2D()(last_output)
    # Add a fully connected layer with 512 hidden units and ReLU activation
    #x=Dense(1024, activation='relu')(x)
    #x=Dropout(rate=0.5)(x)
    x = Dense(1024, activation='relu')(x)
    #x=Dropout(rate=0.5)(x)
    x = Dense(512, activation='relu')(x)
    # Add a dropout rate of 0.7
    x = Dropout(0.2)(x)
    # Add a final sigmoid layer for classification
    x = Dense(2, activation='softmax')(x)

    # Configure and compile the model
    model = Model(pre_trained_model.input, x)

    model.compile(loss=loss_function, optimizer=optm,metrics=['accuracy',f1,'AUC','MeanSquaredError'])
    model.summary()
    return model

  if args.model=='NASNetLarge':

    if args.optm=='Adam':
        optm = Adam(lr=0.0001,clipnorm=1.) 
    if args.optm=='SGD':
        optm = SGD(lr=0.0001, momentum=0.9, decay=0, nesterov=True,clipnorm=1.)  
            
    pre_trained_model = NASNetLarge(weights='/scratch/parceirosbr/bigoilict/share/Polen/radar_temp/weigths/nasnet_large_no_top.h5',include_top=False, input_shape=(args.size, args.size, 3))


    x=pre_trained_model.output
    x=GlobalAveragePooling2D()(x)
    x=Dropout(rate=0.5)(x)    
    x=Dense(2048, activation='relu')(x)
    x=Dropout(rate=0.5)(x)
    x=Dense(1024, activation='relu')(x)
    x=Dropout(rate=0.5)(x)
    x=Dense(512,activation='relu')(x) 
    #x=Dropout(rate=0.5)(x)
    preds=Dense(2,activation='softmax')(x) 
    model=Model(inputs=pre_trained_model.input, outputs=preds)

    for layer in pre_trained_model.layers:
        layer.trainable=False

    model.compile(loss=loss_function, optimizer=optm,metrics=['accuracy',f1,'AUC','MeanSquaredError'])
    model.summary()
    return model
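Example no. 19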
labels['target'] = 1
labels['rank'] = labels.groupby('breed').rank()
labels_pivot = labels.pivot('id', 'breed', 'target').reset_index().fillna(0)
"""
"""
def read_img(img_id, train_or_test, size):
    img =cv2.imread(join(data_dir, train_or_test, img_id + '.jpg'))
    img = cv2.resize(img,size)
    return img
"""

#predictions

from sklearn.utils import shuffle
from tensorflow.keras.applications import NASNetLarge
model = NASNetLarge(weights='imagenet')

# Lower loop can be used if you want to test the data on your Algorithm.
"""
true= []
def Run_model(num_of_img):
    i = 0
    for  img_id, breed,_,_ in shuffle(labels).head(num_of_img).itertuples(index=False):
        i += 1
        img = read_img(img_id, 'train_set/train',(331,331))    
        x = preprocess_input(np.expand_dims(img.copy(), axis=0))
        x = x / 255
        preds = model.predict(x)
        _, imagenet_class_name, prob = decode_predictions(preds, top=1)[0][0]  # top must be >= 1 for decode_predictions to return an entry
        cv2.putText(img,imagenet_class_name,(10,10),fontFace=cv2.FONT_HERSHEY_SIMPLEX,fontScale=1,color=(255, 0, 0),thickness=1,bottomLeftOrigin = False)
        cv2.putText(img, breed,(10,10),fontFace=cv2.FONT_HERSHEY_SIMPLEX,fontScale=1,color=(255,0,0),thickness=1,bottomLeftOrigin = False)
Example no. 20
def loadModel(mode, modelWeights, organ, modelType):
    """
    Load model and compile it 
    Input training or inference mode, model weights and type of model 
    Return model
    """
    # Load model input configuration
    modelInputConfig = loadModelInputConf(organ)
    # Get values
    useChannels = modelInputConfig.useChannels
    useClasses = modelInputConfig.useClasses
    useResolution = modelInputConfig.useResolution

    # Define model
    if modelType == 'ResNet101':
        model = ResNet101(include_top=True,
                          weights=modelWeights,
                          input_shape=(useResolution[0], useResolution[1],
                                       useChannels),
                          classes=useClasses)
    elif modelType == 'SEResNet101':
        mySEResNet = AllSEResNets.SEResNet101
        model = mySEResNet(include_top=True,
                           weights=modelWeights,
                           input_shape=(useResolution[0], useResolution[1],
                                        useChannels),
                           classes=useClasses)
    elif modelType == 'SEResNet154':
        mySEResNet = AllSEResNets.SEResNet154
        model = mySEResNet(include_top=True,
                           weights=modelWeights,
                           input_shape=(useResolution[0], useResolution[1],
                                        useChannels),
                           classes=useClasses)
    # elif modelType == 'SEInceptionResNetV2':
    #         mySEInceptionResNet = AllSEInceptionResNets.SEInceptionResNetV2
    #         model = mySEInceptionResNet(include_top=True, weights=modelWeights, input_shape=(
    #             useResolution[0], useResolution[1], useChannels), classes=useClasses)
    elif modelType == 'EfficientNetB4':
        model = EfficientNetB4(include_top=True,
                               weights=modelWeights,
                               input_shape=(useResolution[0], useResolution[1],
                                            useChannels),
                               classes=useClasses,
                               classifier_activation="softmax")
    elif modelType == 'Xception':
        model = Xception(include_top=True,
                         weights=modelWeights,
                         input_shape=(useResolution[0], useResolution[1],
                                      useChannels),
                         classes=useClasses)
    elif modelType == 'ResNet101V2':
        model = ResNet101V2(include_top=True,
                            weights=modelWeights,
                            input_shape=(useResolution[0], useResolution[1],
                                         useChannels),
                            classes=useClasses,
                            classifier_activation="softmax")
    elif modelType == 'ResNet152V2':
        model = ResNet152V2(include_top=True,
                            weights=modelWeights,
                            input_shape=(useResolution[0], useResolution[1],
                                         useChannels),
                            classes=useClasses,
                            classifier_activation="softmax")
    elif modelType == 'InceptionResNetV2':
        model = InceptionResNetV2(include_top=True,
                                  weights=modelWeights,
                                  input_shape=(useResolution[0],
                                               useResolution[1], useChannels),
                                  classes=useClasses,
                                  classifier_activation="softmax")
    elif modelType == 'ResNet50V2':
        model = ResNet50V2(include_top=True,
                           weights=modelWeights,
                           input_shape=(useResolution[0], useResolution[1],
                                        useChannels),
                           classes=useClasses,
                           classifier_activation="softmax")
    elif modelType == 'NASNetLarge':
        model = NASNetLarge(include_top=True,
                            weights=modelWeights,
                            input_shape=(useResolution[0], useResolution[1],
                                         useChannels),
                            classes=useClasses)

    else:
        raise ValueError('The selected model could not be found')

    if mode == 'training':
        print('Loaded model ' + modelType + ' for training, no weights loaded')
        # Add regularization if needed
        # model = addRegularization(model, tf.keras.regularizers.l2(0.0000))
    if mode == 'inference':
        print('Loaded model ' + modelType + ' for inference, weights loaded.')
        # Do not add regularization

    model.compile(
        optimizer='adam',
        loss='categorical_crossentropy',
        # metrics=['accuracy']
        metrics=[
            'accuracy',
            tf.keras.metrics.Precision(),
            tf.keras.metrics.Recall(),
            tf.keras.metrics.AUC()
        ],
        weighted_metrics=[
            'accuracy',
            tf.keras.metrics.Precision(),
            tf.keras.metrics.Recall(),
            tf.keras.metrics.AUC()
        ])

    return model