def callModel(model_picked='vgg16'):
    '''function returns the model picked based on input
    Input choices:
        'vgg16'     - VGG16
        'vgg19'     - VGG19
        'res50'     - ResNet50
        'xception'  - Xception
        'inception' - InceptionV3
        'monet'     - MobileNetV2
    '''
    if model_picked == 'vgg16':
        model = applications.VGG16(include_top=False, weights='imagenet')
    elif model_picked == 'vgg19':
        model = applications.VGG19(include_top=False, weights='imagenet')
    elif model_picked == 'res50':
        model = applications.ResNet50(include_top=False, weights='imagenet')
    elif model_picked == 'xception':
        model = applications.Xception(include_top=False, weights='imagenet')
    elif model_picked == 'inception':
        model = applications.InceptionV3(include_top=False, weights='imagenet')
    elif model_picked == 'monet':
        model = applications.MobileNetV2(include_top=False,
                                         weights='imagenet',
                                         input_shape=(224, 224, 3))
    return model
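A minimal usage sketch (the NumPy batch below is a hypothetical, unpreprocessed stand-in used only to illustrate the output shape; `applications` must already be imported in the module, as the function above assumes):

import numpy as np

img_batch = np.random.rand(2, 224, 224, 3).astype('float32')  # random stand-in for real, preprocessed images

base = callModel('vgg16')           # convolutional base only, since include_top=False
features = base.predict(img_batch)  # for VGG16 this yields shape (2, 7, 7, 512)
print(features.shape)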
Example #2
def callModel(model_picked='vgg16'):
    '''function returns the model picked based on input
    Input choices:
        'vgg16'     - VGG16
        'vgg19'     - VGG19
        'res50'     - ResNet50
        'xception'  - Xception
        'inception' - InceptionV3
        'monet'     - MobileNetV2
    '''
    # These models consist of a stack of convolutional layers followed by dense (fully connected) layers.
    # include_top=False returns only the convolutional base and drops the dense classifier layers.
    # ImageNet is the large image dataset these models were trained on; weights='imagenet' loads those pretrained weights.
    if model_picked == 'vgg16':
        model = applications.VGG16(include_top=False, weights='imagenet')
    elif model_picked == 'vgg19':
        model = applications.VGG19(include_top=False, weights='imagenet')
    elif model_picked == 'res50':
        model = applications.ResNet50(include_top=False, weights='imagenet')
    elif model_picked == 'xception':
        model = applications.Xception(include_top=False, weights='imagenet')
    elif model_picked == 'inception':
        model = applications.InceptionV3(include_top=False, weights='imagenet')
    elif model_picked == 'monet':
        model = applications.MobileNetV2(include_top=False, weights='imagenet',
                                         input_shape=(224, 224, 3))
    return model
Example #3
def do_test_keras_MobileNetV2(data_dir):
    model = keras_application.MobileNetV2(weights="imagenet")
    preprocess_func = keras_application.mobilenet.preprocess_input
    test_generator = load_data_keras(data_dir, preprocess_func)
    # the number of test samples is assumed to be encoded after the last "_" in the directory name
    steps = int(data_dir.split("_")[-1]) / BSIZE
    preds = model.predict_generator(test_generator, verbose=1, steps=steps)
    return preds
Example #4
File: features.py  Project: mme/vergeml
def get_imagenet_architecture(architecture, variant, size, alpha, output_layer, include_top=False, weights='imagenet'):
    from keras import applications, Model

    if include_top:
        assert output_layer == 'last'

    if size == 'auto':
        size = get_image_size(architecture, variant, size)

    shape = (size, size, 3)

    if architecture == 'densenet':
        if variant == 'auto':
            variant = 'densenet-121'
        if variant == 'densenet-121':
            model = applications.DenseNet121(weights=weights, include_top=include_top, input_shape=shape)
        elif variant == 'densenet-169':
            model = applications.DenseNet169(weights=weights, include_top=include_top, input_shape=shape)
        elif variant == 'densenet-201':
            model = applications.DenseNet201(weights=weights, include_top=include_top, input_shape=shape)
    elif architecture == 'inception-resnet-v2':
        model = applications.InceptionResNetV2(weights=weights, include_top=include_top, input_shape=shape)
    elif architecture == 'mobilenet':
        model = applications.MobileNet(weights=weights, include_top=include_top, input_shape=shape, alpha=alpha)
    elif architecture == 'mobilenet-v2':
        model = applications.MobileNetV2(weights=weights, include_top=include_top, input_shape=shape, alpha=alpha)
    elif architecture == 'nasnet':
        if variant == 'auto':
            variant = 'large'
        if variant == 'large':
            model = applications.NASNetLarge(weights=weights, include_top=include_top, input_shape=shape)
        else:
            model = applications.NASNetMobile(weights=weights, include_top=include_top, input_shape=shape)
    elif architecture == 'resnet-50':
        model = applications.ResNet50(weights=weights, include_top=include_top, input_shape=shape)
    elif architecture == 'vgg-16':
        model = applications.VGG16(weights=weights, include_top=include_top, input_shape=shape)
    elif architecture == 'vgg-19':
        model = applications.VGG19(weights=weights, include_top=include_top, input_shape=shape)
    elif architecture == 'xception':
        model = applications.Xception(weights=weights, include_top=include_top, input_shape=shape)
    elif architecture == 'inception-v3':
        model = applications.InceptionV3(weights=weights, include_top=include_top, input_shape=shape)

    if output_layer != 'last':
        try:
            if isinstance(output_layer, int):
                layer = model.layers[output_layer]
            else:
                layer = model.get_layer(output_layer)
        except Exception:
            raise VergeMLError('layer not found: {}'.format(output_layer))
        model = Model(inputs=model.input, outputs=layer.output)

    return model
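A hypothetical call for illustration (the argument values are assumptions, not taken from the vergeml project; 'out_relu' is the name of the final activation layer in Keras' MobileNetV2):

base = get_imagenet_architecture('mobilenet-v2', 'auto', 224, 1.0, 'out_relu')
print(base.output_shape)  # expected (None, 7, 7, 1280) for a 224x224 input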
Example #5
def model_app(arch, input_tensor):
    """Loads the appropriate convolutional neural network (CNN) model
      Args:
        arch: String key for model to be loaded.
        input_tensor: Keras tensor to use as image input for the model.
      Returns:
        model: The specified Keras Model instance with ImageNet weights loaded and without the top classification layer.
      """
    # function that loads the appropriate model
    if arch == 'Xception':
        model = applications.Xception(weights='imagenet', include_top=False, input_tensor=input_tensor)
        print('Xception loaded')
    elif arch == 'VGG16':
        model = applications.VGG16(weights='imagenet', include_top=False, input_tensor=input_tensor)
        print('VGG16 loaded')
    elif arch == 'VGG19':
        model = applications.VGG19(weights='imagenet', include_top=False, input_tensor=input_tensor)
        print('VGG19 loaded')
    elif arch == 'ResNet50':
        model = applications.ResNet50(weights='imagenet', include_top=False, input_tensor=input_tensor)
        print('ResNet50 loaded')
    elif arch == 'InceptionV3':
        model = applications.InceptionV3(weights='imagenet', include_top=False, input_tensor=input_tensor)
        print('InceptionV3 loaded')
    elif arch == 'InceptionResNetV2':
        model = applications.InceptionResNetV2(weights='imagenet', include_top=False, input_tensor=input_tensor)
        print('InceptionResNetV2 loaded')
    elif arch == 'MobileNet':
        model = applications.MobileNet(input_shape=(224, 224, 3), weights='imagenet', include_top=False,
                                       input_tensor=input_tensor)
        print('MobileNet loaded')
    elif arch == 'DenseNet121':
        model = applications.DenseNet121(weights='imagenet', include_top=False, input_tensor=input_tensor)
        print('DenseNet121 loaded')
    elif arch == 'NASNetLarge':
        model = applications.NASNetLarge(weights='imagenet', include_top=False, input_tensor=input_tensor)
        print('NASNetLarge loaded')
    elif arch == 'MobileNetV2':
        model = applications.MobileNetV2(input_shape=(224, 224, 3), weights='imagenet', include_top=False,
                                         input_tensor=input_tensor)
        print('MobileNetV2 loaded')
    else:
        print('Invalid model selected')
        model = False

    return model
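A short usage sketch (the Input import and the 224x224 size are assumptions for illustration):

from keras.layers import Input

input_tensor = Input(shape=(224, 224, 3))
base = model_app('MobileNetV2', input_tensor)  # prints 'MobileNetV2 loaded'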
Example #6
def main():

    print("Beginning transfer learning program...")

    if any("--verbose" in arg for arg in sys.argv):
        verbose = True
    elif any("-v" in arg for arg in sys.argv):
        verbose = True
    else:
        verbose = False

    # Useful persisting variables
    image_size = 160

    # Retrieve the Kaggle Competition cats and dogs dataset.
    data_url = "https://storage.googleapis.com/mledu-datasets/cats_and_dogs_filtered.zip"
    zip_file_path = retrieve_data(data_url, verbose)
    base_dir, _ = os.path.splitext(zip_file_path)

    # Create training and test sets
    path_list = split_data(base_dir, verbose)

    # Preprocess the raw images using ImageDataGenerator
    generators = create_generators(path_list, image_size, verbose)

    # Create the pretrained base model.
    # Using MobileNet V2 pretrained on ImageNet.
    # Important caveat for transfer learning: include_top=False
    base_model = applications.MobileNetV2(input_shape=(image_size, image_size,
                                                       3),
                                          include_top=False,
                                          weights='imagenet')

    headless_model = extract_features(base_model, verbose)

    # Train the top layers of the model
    train(headless_model, generators, verbose)
    headless_model.save('untuned_cat_dog.model')

    # Finetune the model
    finetune(headless_model, generators, verbose)
    headless_model.save('finetuned_cat_dog.model')
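extract_features and train are project helpers not shown here; a minimal sketch of the classifier-head step such a helper typically performs (the build_head name, layer choices, and sizes are assumptions, not the project's actual code):

from keras import layers, models

def build_head(base_model):
    # freeze the pretrained convolutional base so only the new head is trained
    base_model.trainable = False
    # pool the feature maps and attach a small binary classifier (cats vs. dogs)
    return models.Sequential([
        base_model,
        layers.GlobalAveragePooling2D(),
        layers.Dense(1, activation='sigmoid'),
    ])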
Example #7
    def create_model(self):
        # Transfer learning with MobileNetV2 as the base network
        # base_model = applications.MobileNetV2(include_top=False, input_shape=(self.sizew, self.sizeh, 3), weights=None)
        base_model = applications.MobileNetV2(include_top=False, weights=None,
                                              input_shape=(self.sizeh, self.sizew, 3))  # (height, width, channels)

        x = base_model.output
        # average each feature map down to a single value, which also flattens the output
        x = GlobalAveragePooling2D()(x)

        # fully connected layers
        x = Dense(64, activation='relu')(x)
        x = Dense(32, activation='relu')(x)
        # x = Dropout(0.1)(x)
        x = Dense(self.n_class, activation='softmax')(x)

        model = Model(inputs=base_model.input, outputs=x)
        model.compile(loss='categorical_crossentropy', optimizer='adam',
                      metrics=['accuracy'])
        # model.summary()
        return model
def train(train_path,
          val_path,
          test_path,
          batch_size=32,
          epochs=50,
          network='InceptionResNetV2',
          data_augmentation=True,
          mode='finetune',
          optimizer='Adadelta',
          fc=1,
          classes=5,
          gpu=1):
    '''
    Inputs:
        train_path: data path for the training set (data should be organized as train/DR, train/Normal, ...)
        val_path: data path for the validation set
        test_path: data path for the test set
        batch_size: number of samples per step
        epochs: number of passes over the whole training set
        network: {
            'InceptionResNetV2': fine-tune mode will train last 2 inception blocks
            'DenseNet201': fine-tune mode will train last Dense block
            'InceptionV3': fine-tune mode will train last 2 inception blocks
            'Xception'
            'NASNet'
            'MobileNetV2'
            'ResNet50': According to https://arxiv.org/pdf/1805.08974.pdf, it is most suitable for transfer learning?
        }
        data_augmentation: whether to do data augmentation or not
        mode: {
            'retrain': randomly initialize all layers and retrain the whole model
            'finetune': train specified layers
            'transfer': train only the fc layer(s)
        }
        optimizer: {
            'Adadelta'
            'RMSprop'
        }
        fc: {
            1: only one fc layer at last
            2: include two fc layers at last
        }
        classes: category counts
    '''
    if mode == 'retrain':
        include_top = False
        weights = None
        pooling = 'avg'
    else:
        include_top = False
        weights = 'imagenet'
        pooling = 'avg'

    if network == 'DenseNet201':
        from keras.applications.densenet import preprocess_input
        img_width, img_height = 224, 224
        base_model = applications.DenseNet201(include_top=include_top,
                                              weights=weights,
                                              pooling=pooling)
        # train last Dense Block
        if mode == 'finetune':
            trainable = False
            for layer in base_model.layers:
                if layer.name == 'conv5_block1_0_bn':
                    trainable = True
                layer.trainable = trainable

    if network == 'Xception':
        from keras.applications.xception import preprocess_input
        img_width, img_height = 299, 299
        base_model = applications.Xception(include_top=include_top,
                                           weights=weights,
                                           pooling=pooling)

    if network == 'InceptionV3':
        from keras.applications.inception_v3 import preprocess_input
        img_width, img_height = 299, 299
        base_model = applications.InceptionV3(include_top=include_top,
                                              weights=weights,
                                              pooling=pooling)
        # train top 2 inception blocks
        if mode == 'finetune':
            for layer in base_model.layers[:249]:
                layer.trainable = False
            for layer in base_model.layers[249:]:
                #print(layer.name)
                layer.trainable = True

    if network == 'InceptionResNetV2':
        from keras.applications.inception_resnet_v2 import preprocess_input
        img_width, img_height = 299, 299
        base_model = applications.InceptionResNetV2(include_top=include_top,
                                                    weights=weights,
                                                    pooling=pooling)
        # train top 1 inception blocks
        if mode == 'finetune':
            trainable = True
            for layer in base_model.layers:
                #print(layer.name)
                if layer.name == 'conv2d_9':
                    trainable = False
                if layer.name == 'conv2d_201':
                    trainable = True
                layer.trainable = trainable

    if network == 'NASNet':
        from keras.applications.nasnet import preprocess_input
        img_width, img_height = 331, 331
        base_model = applications.NASNetLarge(include_top=include_top,
                                              weights=weights,
                                              pooling=pooling)

    if network == 'MobileNetV2':
        from keras.applications.mobilenetv2 import preprocess_input
        img_width, img_height = 224, 224
        base_model = applications.MobileNetV2(include_top=include_top,
                                              weights=weights,
                                              pooling=pooling)

    if network == 'ResNet50':
        from keras.applications.resnet50 import preprocess_input
        img_width, img_height = 224, 224
        base_model = applications.ResNet50(include_top=include_top,
                                           weights=weights,
                                           pooling=pooling)

    bottleneck = base_model.output
    if fc == 2:
        bottleneck = Dense(
            512,
            activation='relu',
            kernel_regularizer=keras.regularizers.l2(l=0.001))(bottleneck)
    predictions = Dense(
        classes,
        kernel_regularizer=keras.regularizers.l2(l=0.001),
        activation='softmax',
        bias_regularizer=keras.regularizers.l2(l=0.001))(bottleneck)
    model = Model(inputs=base_model.input, outputs=predictions)

    if mode == 'transfer':
        # train only the top layers (which were randomly initialized)
        # freeze all convolutional layers
        for layer in base_model.layers:
            layer.trainable = False

    if mode == 'retrain':
        # train a complete model
        for layer in base_model.layers:
            layer.trainable = True

    if optimizer == 'Adadelta':
        opt = optimizers.Adadelta()
    if optimizer == 'Adam':
        opt = optimizers.Adam()
    if optimizer == 'RMSprop':
        opt = optimizers.RMSprop(lr=0.005, rho=0.9, epsilon=1.0, decay=0.94)

    if gpu > 1:
        batch_size *= gpu
        model = multi_gpu_model(model, gpus=gpu)

    model.compile(optimizer=opt,
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    if data_augmentation:
        # Initialize the train and test generators with data Augumentation
        train_datagen = ImageDataGenerator(
            preprocessing_function=preprocess_input,
            horizontal_flip=True,
            fill_mode="nearest",
            zoom_range=0.3,
            width_shift_range=0.3,
            height_shift_range=0.3,
            rotation_range=30)
        val_datagen = ImageDataGenerator(
            preprocessing_function=preprocess_input)

    else:
        train_datagen = ImageDataGenerator(
            preprocessing_function=preprocess_input)
        val_datagen = ImageDataGenerator(
            preprocessing_function=preprocess_input)

    train_generator = train_datagen.flow_from_directory(
        train_path,
        target_size=(img_height, img_width),
        batch_size=batch_size,
        class_mode="categorical")

    validation_generator = val_datagen.flow_from_directory(
        val_path,
        target_size=(img_height, img_width),
        class_mode="categorical")

    test_generator = val_datagen.flow_from_directory(test_path,
                                                     target_size=(img_height,
                                                                  img_width),
                                                     class_mode="categorical")

    checkpoint = ModelCheckpoint("{}_{}_{}.h5".format(network, mode,
                                                      optimizer),
                                 monitor='val_acc',
                                 verbose=1,
                                 save_best_only=True,
                                 save_weights_only=False,
                                 mode='auto',
                                 period=1)
    early = EarlyStopping(monitor='val_acc',
                          min_delta=0,
                          patience=10,
                          verbose=1,
                          mode='auto')

    model.fit_generator(train_generator,
                        epochs=epochs,
                        validation_data=validation_generator,
                        callbacks=[checkpoint, early])

    score = model.evaluate_generator(test_generator)

    print(score)
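A hypothetical invocation with placeholder paths (all argument values below are illustrative, not from the original project):

train('data/train', 'data/val', 'data/test',  # placeholder paths; layout follows the docstring (e.g. train/DR, train/Normal)
      batch_size=32,
      epochs=50,
      network='MobileNetV2',
      mode='finetune',
      optimizer='Adadelta',
      fc=1,
      classes=5,
      gpu=1)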
Example #9
from keras import applications
from keras.layers import Dropout, Flatten, Dense, GlobalAveragePooling2D
from keras import backend as k
from keras.callbacks import ModelCheckpoint, LearningRateScheduler, TensorBoard, EarlyStopping
from keras.models import Model, load_model
import os
import pickle
from keras.models import model_from_json
import matplotlib.pyplot as plt

image_width, image_height = 256, 256

nb_train_samples = 11000
nb_validation_sample = 2000
batch_size = 8

model = applications.MobileNetV2(weights="imagenet", include_top=False, input_shape=(image_height, image_width, 3))

# use the output of an early layer (index 7) of MobileNetV2 as the feature extractor
x = model.layers[7].output
x = Flatten()(x)
x = Dense(1024, activation="relu")(x)
x = Dropout(0.5)(x)
x = Dense(384, activation="relu")(x)
x = Dropout(0.5)(x)
x = Dense(96, activation="relu")(x)
x = Dropout(0.5)(x)
predictions = Dense(30, activation="softmax")(x)

model_final = Model(inputs=model.input, outputs=predictions)

#model_final = load_model("weights_Mobile_Net.h5")
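The snippet above only builds the architecture; a minimal sketch of compiling it (the optimizer and loss below are assumptions, chosen to match the 30-way softmax output):

model_final.compile(optimizer='adam',
                    loss='categorical_crossentropy',
                    metrics=['accuracy'])
model_final.summary()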
Example #10
    'categorical',  # categorical rather than binary classification; binary also works, but one-hot categorical gave higher accuracy in testing
    shuffle=True,  # shuffle the data randomly
    subset='training')

val_generator = datagen.flow_from_directory(train_data_dir,
                                            target_size=(img_width,
                                                         img_height),
                                            batch_size=batch_size,
                                            class_mode='categorical',
                                            shuffle=True,
                                            subset='validation')

# Model-building section
base_model = applications.MobileNetV2(
    weights='imagenet',
    input_shape=(img_width, img_height, 3),
    include_top=False
)  # transfer learning: use MobileNetV2 as the base network, with weights pretrained on ImageNet
base_model.trainable = False  # trainable=False and include_top=False correspond to the freeze step of transfer learning

x = base_model.output
x = GlobalAveragePooling2D()(x)
x = Flatten()(x)
x = Dense(1024, activation='relu')(x)
x = Dense(512, activation='relu')(x)
x = Dense(2, activation='softmax')(x)

custom_model = Model(inputs=base_model.input, outputs=x)
custom_model.summary()

# Optimizer setup
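A minimal sketch of the step that comment introduces (the Adam settings and epoch count are assumptions; train_generator is assumed to be the variable assigned by the truncated flow_from_directory call at the top of this example):

from keras.optimizers import Adam

custom_model.compile(optimizer=Adam(lr=1e-3),
                     loss='categorical_crossentropy',
                     metrics=['accuracy'])
custom_model.fit_generator(train_generator,
                           validation_data=val_generator,
                           epochs=10)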
Example #11
def get_model(num, print_model=False, reg=False, reg_type=None, num_classes=2):
    """
    reg_type is only accessed if reg is True
    reg_type is a dictionary with keys
        dropout: [<rate>, ...]
        l2: [<rate>, ...]
    """
    model = Sequential()

    if num == 1:
        if reg:
            model.add(
                Conv2D(
                    16,
                    kernel_size=(3, 3),
                    activation='relu',
                    input_shape=input_shape,
                    activity_regularizer=regularizers.l2(reg_type['l2'][0]),
                ))
            model.add(Dropout(reg_type['dropout'][0]))
            model.add(
                Conv2D(
                    4,
                    (5, 5),
                    activation='relu',
                    activity_regularizer=regularizers.l2(reg_type['l2'][1]),
                ))
            model.add(Dropout(reg_type['dropout'][1]))
        else:
            model.add(
                Conv2D(16,
                       kernel_size=(3, 3),
                       activation='relu',
                       input_shape=input_shape))
            model.add(Conv2D(4, (5, 5), activation='relu'), )

        model.add(MaxPooling2D(pool_size=(7, 7)))
        model.add(Flatten())
        model.add(Dense(num_classes, activation='softmax'))

    elif num == 2:
        if reg:
            model.add(
                Conv2D(
                    4,
                    kernel_size=(3, 3),
                    activation='relu',
                    input_shape=input_shape,
                    activity_regularizer=regularizers.l2(reg_type['l2'][0]),
                ))
            model.add(MaxPooling2D(pool_size=(5, 5)))
            model.add(Dropout(reg_type['dropout'][0]))

            model.add(
                Conv2D(
                    16,
                    kernel_size=(3, 3),
                    activation='relu',
                    input_shape=input_shape,
                    activity_regularizer=regularizers.l2(reg_type['l2'][1]),
                ))
            model.add(MaxPooling2D(pool_size=(5, 5)))
            model.add(Dropout(reg_type['dropout'][1]))

            model.add(
                Conv2D(
                    1,
                    kernel_size=(3, 3),
                    activation='relu',
                    input_shape=input_shape,
                    activity_regularizer=regularizers.l2(reg_type['l2'][2]),
                ))
            model.add(Dropout(reg_type['dropout'][2]))

        else:
            model.add(
                Conv2D(4,
                       kernel_size=(3, 3),
                       activation='relu',
                       input_shape=input_shape))
            model.add(MaxPooling2D(pool_size=(5, 5)))

            model.add(
                Conv2D(16,
                       kernel_size=(3, 3),
                       activation='relu',
                       input_shape=input_shape))
            model.add(MaxPooling2D(pool_size=(5, 5)))
            model.add(
                Conv2D(1,
                       kernel_size=(3, 3),
                       activation='relu',
                       input_shape=input_shape))
        model.add(Flatten())
        model.add(Dense(num_classes, activation='softmax'))

    elif num == 3:
        if reg:
            model.add(
                Conv2D(
                    4,
                    kernel_size=(3, 3),
                    activation='relu',
                    input_shape=input_shape,
                    activity_regularizer=regularizers.l2(reg_type['l2'][0]),
                ))
            model.add(MaxPooling2D(pool_size=(3, 3)))
            model.add(Dropout(reg_type['dropout'][0]))

            model.add(
                Conv2D(
                    16,
                    kernel_size=(3, 3),
                    strides=(1, 1),
                    activation='relu',
                    input_shape=input_shape,
                    activity_regularizer=regularizers.l2(reg_type['l2'][1]),
                ))
            model.add(MaxPooling2D(pool_size=(3, 3)))
            model.add(Dropout(reg_type['dropout'][1]))

            model.add(
                Conv2D(
                    1,
                    kernel_size=(3, 3),
                    strides=(1, 1),
                    activation='relu',
                    input_shape=input_shape,
                    activity_regularizer=regularizers.l2(reg_type['l2'][2]),
                ))
            model.add(Dropout(reg_type['dropout'][2]))
        else:
            model.add(
                Conv2D(4,
                       kernel_size=(3, 3),
                       activation='relu',
                       input_shape=input_shape))
            model.add(MaxPooling2D(pool_size=(3, 3)))

            model.add(
                Conv2D(16,
                       kernel_size=(3, 3),
                       strides=(1, 1),
                       activation='relu',
                       input_shape=input_shape))
            model.add(MaxPooling2D(pool_size=(3, 3)))
            model.add(
                Conv2D(1,
                       kernel_size=(3, 3),
                       strides=(1, 1),
                       activation='relu',
                       input_shape=input_shape))
        model.add(Flatten())
        model.add(Dense(num_classes, activation='softmax'))

    elif num == 4:  # VGG Model
        base_model = applications.VGG16(weights='imagenet',
                                        include_top=False,
                                        input_tensor=input_tensor)

        layer_dict = dict([(layer.name, layer) for layer in base_model.layers])
        num = 6
        x = layer_dict['block2_pool'].output
        x = Conv2D(filters=4, kernel_size=(1, 1), activation='relu')(x)
        x = MaxPooling2D(pool_size=(5, 5))(x)
        x = Flatten()(x)
        x = Dense(32, activation='relu')(x)
        if reg:
            num += 1
            x = Dropout(reg_type['dropout'][0])(x)
        x = Dense(num_classes, activation='softmax')(x)
        model = Model(base_model.input, x)
        for layer in model.layers[:-num]:
            layer.trainable = False

    elif num == 5:  # Resnet 50 v2
        base_model = applications.ResNet50V2(weights='imagenet',
                                             include_top=False,
                                             input_tensor=input_tensor)

        layer_dict = dict([(layer.name, layer) for layer in base_model.layers])
        num = 5
        x = layer_dict['post_relu'].output
        x = Conv2D(filters=8, kernel_size=(1, 1), activation='relu')(x)
        x = MaxPooling2D(pool_size=(5, 5))(x)
        if reg:
            num += 1
            x = Dropout(reg_type['dropout'][0])(x)
        x = Flatten()(x)
        x = Dense(num_classes, activation='softmax')(x)
        model = Model(base_model.input, x)
        for layer in model.layers[:-num]:
            layer.trainable = False

    elif num == 6:  # MobileNet v2
        base_model = applications.MobileNetV2(weights='imagenet',
                                              include_top=False,
                                              input_tensor=input_tensor)

        layer_dict = dict([(layer.name, layer) for layer in base_model.layers])
        num = 5
        x = layer_dict['out_relu'].output
        x = Conv2D(filters=16, kernel_size=(1, 1), activation='relu')(x)
        x = MaxPooling2D(pool_size=(5, 5))(x)
        if reg:
            num += 1
            x = Dropout(reg_type['dropout'][0])(x)
        x = Flatten()(x)
        x = Dense(num_classes, activation='softmax')(x)
        model = Model(base_model.input, x)
        for layer in model.layers[:-num]:
            layer.trainable = False

    model.compile(
        loss=keras.losses.categorical_crossentropy,
        optimizer=keras.optimizers.Adam(),
        metrics=['accuracy'],
    )
    if print_model:
        model.summary()
    return model
    if show_confusion_matrix:
        print("Confusion Matrix:")
        plot_test_confusion_matrix(cls_pred=cls_pred,
                                   labels_test=labels_test,
                                   classes=classes)

    return float(acc)


#%%

img_height = 128
img_width = 128

base_model = applications.MobileNetV2(weights=None,
                                      include_top=False,
                                      input_shape=(img_height, img_width, 1))

x = base_model.output

x = GlobalAveragePooling2D()(x)
x = Dropout(0.7)(x)

predictions = Dense(3, activation="softmax")(x)

model_final = Model(inputs=base_model.input, outputs=predictions)

model_final.summary()

#%%