Example #1
def __init__(self):
    """Load the DenseNet201 model."""
    self.base_model = DenseNet201(weights='imagenet', include_top=False)
    pooled = GlobalAveragePooling2D()(self.base_model.output)
    self.DenseNet201_model = Model(self.base_model.input, pooled)
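A minimal usage sketch of the wrapper above, assuming it is a class named FeatureExtractor and that DenseNet201, GlobalAveragePooling2D, and Model are imported from Keras:

import numpy as np
from keras.applications.densenet import preprocess_input

extractor = FeatureExtractor()  # hypothetical class owning the __init__ above
batch = preprocess_input(np.random.rand(2, 224, 224, 3) * 255.0)
features = extractor.DenseNet201_model.predict(batch)
print(features.shape)  # (2, 1920): DenseNet201's pooled feature width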
Example #2
def func5(shape):
    from keras.applications import DenseNet201
    BS = 16
    conv_base = DenseNet201(weights='imagenet',
                            include_top=False,
                            input_shape=(shape[0], shape[1], 3))
    return BS, conv_base
Example #3
def get_model(summary=False,
              img_width=150,
              fc_layers=[4096, 4096],
              fc_dropout_layers=[0.5, 0.5]):
    # Get back the convolutional part of a DenseNet201 network trained on ImageNet
    base_model = DenseNet201(input_tensor=Input(shape=(img_width, img_width,
                                                       3)),
                             include_top=False)
    x = GlobalAveragePooling2D(name='avg_pool')(base_model.output)
    x = Dense(10,
              activation='softmax',
              kernel_regularizer=regularizers.l2(0.01))(x)
    my_model = Model(inputs=base_model.input, outputs=x)
    layers_to_freeze = 481
    for i in range(layers_to_freeze):
        my_model.layers[i].trainable = False
    if summary:
        print("---------------------------------------------------------")
        for i, layer in enumerate(my_model.layers):
            print(i, layer.name)
        print("---------------------------------------------------------")
        print("---------------------------------------------------------")
        print("---------------------------------------------------------")
        my_model.summary()
    return my_model, layers_to_freeze, 2
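Since layers_to_freeze=481 is hard-coded, a quick sanity check (a sketch, assuming standalone Keras) can confirm how many parameters the freeze leaves trainable:

import numpy as np
from keras import backend as K

model, n_frozen, _ = get_model(summary=False)
n_trainable = int(np.sum([K.count_params(w) for w in model.trainable_weights]))
print(n_trainable, "of", model.count_params(), "parameters remain trainable")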
Example #4
def UDenseNet201(input_shape=(None, None, 3),
                 classes=1,
                 decoder_filters=16,
                 decoder_block_type='upsampling',
                 encoder_weights=None,
                 input_tensor=None,
                 activation='sigmoid',
                 **kwargs):

    backbone = DenseNet201(input_shape=input_shape,
                           input_tensor=input_tensor,
                           weights=encoder_weights,
                           include_top=False)

    skip_connections = list(reversed([4, 51, 139, 479]))
    model = build_unet(backbone,
                       classes,
                       decoder_filters,
                       skip_connections,
                       block_type=decoder_block_type,
                       activation=activation,
                       **kwargs)
    model.name = 'u-densenet201'  # works in standalone Keras; in tf.keras, Model.name is a read-only property

    return model
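The hard-coded indices [4, 51, 139, 479] select DenseNet201 layers for the decoder's skip connections; a short sketch to inspect which layers they actually are (layer indices can shift between Keras versions, so treat this as a check, not a guarantee):

backbone = DenseNet201(input_shape=(None, None, 3), include_top=False, weights=None)
for idx in [4, 51, 139, 479]:
    print(idx, backbone.layers[idx].name)
# These typically land on the relu activations preceding each pooling/transition
# stage, which is where U-Net-style decoders tap encoder features.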
Example #5
def extract_features(sample_count, dataset, batch_size, input_shape,
                     n_of_cls: int):
    conv_base = DenseNet201(weights='imagenet',
                            include_top=False,
                            input_shape=input_shape)

    conv_base.summary()

    files_paths = [
        join(dataset.directory, file_name) for file_name in dataset.filenames
    ]

    # shape of the conv base's final feature map, e.g. (7, 7, 1920) for 224x224 inputs
    conv_base_output_shape = conv_base.layers[-1].output.shape[1:]
    features_shape = (sample_count, *conv_base_output_shape)

    features = np.zeros(shape=features_shape)
    labels = to_categorical(np.zeros(shape=sample_count), num_classes=n_of_cls)
    i = 0
    for inputs_batch, labels_batch in dataset:
        features_batch = conv_base.predict(inputs_batch)
        features[i * batch_size:(i + 1) * batch_size] = features_batch
        labels[i * batch_size:(i + 1) * batch_size] = labels_batch
        i += 1
        print(i * batch_size, "====>", sample_count)
        if i * batch_size >= sample_count:
            break

    return features, labels, files_paths
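A hypothetical way to drive extract_features with a Keras directory iterator (paths and sizes are assumptions):

from keras.preprocessing.image import ImageDataGenerator

datagen = ImageDataGenerator(rescale=1. / 255)
dataset = datagen.flow_from_directory('data/train',
                                      target_size=(224, 224),
                                      batch_size=32,
                                      class_mode='categorical')
features, labels, paths = extract_features(sample_count=dataset.samples,
                                           dataset=dataset,
                                           batch_size=32,
                                           input_shape=(224, 224, 3),
                                           n_of_cls=dataset.num_classes)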
Example #6
def load_densenet201(width, height, classes_num):
    with tf.device('/cpu:0'):
        model = DenseNet201(weights=None,
                            input_shape=(width, height, 3),
                            classes=classes_num)

    return model
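Building the model under /cpu:0 is the classic setup for Keras 2.x multi-GPU data parallelism; a sketch under that assumption (multi_gpu_model was removed from later TensorFlow releases):

from keras.utils import multi_gpu_model

base = load_densenet201(224, 224, classes_num=10)
parallel_model = multi_gpu_model(base, gpus=2)
parallel_model.compile(optimizer='adam',
                       loss='categorical_crossentropy',
                       metrics=['accuracy'])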
Example #7
    def build(self) -> Model:
        model = DenseNet201(include_top=True,
                            weights=None,
                            input_shape=(self.width, self.height,
                                         self.channels),
                            classes=2)

        return model
Example #8
def create_model(args, branch):
    optz = Adam(lr=1e-5)

    if args['dataset'] != Dataset.flood_heights:
        if args['model'] == arguments.Model.dense_net or args[
                'model'] == arguments.Model.attention_guided:
            base_model = DenseNet201(include_top=False, weights='imagenet')
        else:
            base_model = EfficientNetB3(include_top=False, weights='imagenet')

        x = base_model.output
        x = GlobalAveragePooling2D(name="global_average_pooling2d_layer")(x)

        if args['is_binary']:
            print("Binary model.")
            predictions = Dense(1, activation=activations.sigmoid)(x)
            model = Model(inputs=base_model.input, outputs=predictions)
            model.compile(optimizer=optz,
                          loss=losses.binary_crossentropy,
                          metrics=[metrics.binary_accuracy])

        else:
            print("3 number of classes.")
            predictions = Dense(3, activation=activations.softmax)(x)
            model = Model(inputs=base_model.input, outputs=predictions)
            model.compile(optimizer=optz,
                          metrics=[metrics.categorical_accuracy],
                          loss=losses.categorical_crossentropy)

    else:
        if args['model'] == arguments.Model.dense_net or args[
                'model'] == arguments.Model.attention_guided:
            base_model = load_model(
                "/home/jpereira/Tests/weights/" +
                "flood_severity_3_classes_attention_guided_{}_branch_cv/".
                format(branch) + "weights_fold_1_from_10.hdf5")
        else:
            base_model = load_model(
                "/home/jpereira/Tests/weights/" +
                "flood_severity_3_classes_cv/weights_fold_1_from_10.hdf5")

        print("Regression model.")
        output_less_1_m = Dense(1,
                                activation=custom_activation_less_1m,
                                name="less_1m")(base_model.layers[-2].output)
        output_more_1_m = Dense(1,
                                activation=custom_activation_more_1m,
                                name="more_1m")(base_model.layers[-2].output)
        predictions = OutputLayer()(
            [base_model.layers[-1].output, output_less_1_m, output_more_1_m])

        model = Model(inputs=base_model.input,
                      outputs=[base_model.layers[-1].output, predictions])
        model.compile(
            optimizer=optz,
            loss=[losses.categorical_crossentropy, losses.mean_squared_error])

    return model
Example #9
def get_model(ckpt_path, classes):
    densenet = DenseNet201(weights='imagenet',
                           include_top=False,
                           input_shape=(100, 100, 3),
                           classes=classes)  # note: `classes` is ignored when include_top=False
    model = build_model(densenet, classes, lr=1e-4)
    model.summary()
    # Load trained weights
    model.load_weights(ckpt_path)
    return model
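build_model is defined elsewhere in that project; a minimal sketch consistent with the call signature above (the head layers are assumptions):

from keras.models import Sequential
from keras.layers import GlobalAveragePooling2D, Dropout, Dense
from keras.optimizers import Adam

def build_model(backbone, classes, lr=1e-4):
    # Hypothetical head; the project's real build_model may differ.
    model = Sequential([
        backbone,
        GlobalAveragePooling2D(),
        Dropout(0.5),
        Dense(classes, activation='softmax')
    ])
    model.compile(optimizer=Adam(lr=lr),
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    return model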
Example #10
def get_densenet():
    base_densenet_model = DenseNet201(input_shape=(128, 128, 3),
                                      include_top=False,
                                      weights=None)
    pooling_layer = GlobalAveragePooling2D()(base_densenet_model.output)
    dropout_layer1 = Dropout(0.5)(pooling_layer)
    dense_layer1 = Dense(512)(dropout_layer1)
    dropout_layer2 = Dropout(0.5)(dense_layer1)
    dense_layer2 = Dense(15, activation='sigmoid')(dropout_layer2)
    model = Model(inputs=base_densenet_model.inputs, outputs=dense_layer2)
    return model
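The 15-unit sigmoid head implies independent per-class (multi-label) outputs, so the matching loss is binary cross-entropy; a hedged compile sketch (optimizer choice is an assumption):

model = get_densenet()
model.compile(optimizer='adam',
              loss='binary_crossentropy',
              metrics=['binary_accuracy'])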
Example #11
 def __create_model(cls):
     pre_trained_model = DenseNet201(input_shape=(cls.image_size,
                                                  cls.image_size, 3),
                                     include_top=False,
                                     weights="imagenet")
     model = Sequential([
         pre_trained_model,
         MaxPool2D((2, 2), strides=2),
         Flatten(),
         Dense(cls.labels, activation='softmax')
     ])
     model.compile(optimizer="adam",
                   loss='categorical_crossentropy',
                   metrics=['accuracy'])
     return model
Example #12

def pretrained_model(self):
    # base_model = ResNet50(include_top=False, input_shape=self.input_shape, weights='imagenet')
    base_model = DenseNet201(include_top=False, input_shape=self.input_shape, weights='imagenet')
    # base_model = Xception(include_top=False, input_shape=self.input_shape, weights='imagenet')
    # base_model = InceptionResNetV2(include_top=False, input_shape=self.input_shape, weights='imagenet')

    x = base_model.output
    x = Flatten()(x)
    x = Dense(512, activation='relu')(x)
    x = Dropout(0.5)(x)
    x = Dense(512, activation='relu')(x)
    x = Dropout(0.5)(x)
    predictions = Dense(self.num_classes, activation='softmax')(x)

    model = Model(base_model.inputs, predictions)
    return model
Example #13
    def classifier(self) -> Model:
        """ Returns the model of this configuration """
        base_model = DenseNet201(include_top=False,
                                 weights='imagenet',
                                 input_shape=self.data_shape,
                                 pooling=None)
        x = base_model.output
        x = Convolution2D(self.number_of_classes,
                          kernel_size=(1, 1),
                          padding='same')(x)
        x = GlobalAveragePooling2D()(x)
        x = Activation('softmax', name='output_class')(x)
        model = Model(inputs=base_model.inputs, outputs=x)
        model.compile(self.get_optimizer(),
                      loss="categorical_crossentropy",
                      metrics=["accuracy"])

        return model
Example #14
def train_chesspiece_model():
    """Trains the chesspiece model based on DenseNet201."""
    base_model = DenseNet201(input_shape=(224, 224, 3), include_top=False,
                             weights='imagenet')

    # First train only the top layers
    for layer in base_model.layers:
        layer.trainable = False

    model = build_model(base_model)

    train_generator, validation_generator = data_generators(
        preprocess_input, (224, 224), 64)

    callbacks = model_callbacks(5, "./models/DenseNet201_pre.h5", 0.1, 10)

    history = train_model(model, 20, train_generator, validation_generator,
                          callbacks, use_weights=False, workers=5)

    plot_model_history(history, "./models/DenseNet201_pre_acc.png",
                       "./models/DenseNet201_pre_loss.png")
    evaluate_model(model, validation_generator)

    # Also train conv5
    for layer in model.layers[:481]:
        layer.trainable = False
    for layer in model.layers[481:]:
        layer.trainable = True

    model.compile(optimizer='Adam', loss='categorical_crossentropy',
                  metrics=['accuracy'])

    callbacks = model_callbacks(20, "./models/DenseNet201.h5", 0.2, 8)

    history = train_model(model, 100, train_generator, validation_generator,
                          callbacks, use_weights=False, workers=5)

    plot_model_history(history, "./models/DenseNet201_acc.png",
                       "./models/DenseNet201_loss.png")
    evaluate_model(model, validation_generator)

    model.save("./models/DenseNet201_last.h5")
Example #15
File: ihd_final.py  Project: banutu98/IHD
 def get_base_model(network, input_shape, pooling_method):
     network = network.lower()
     input_warning_message = 'WARNING! The input shape is not the default one!!! Proceeding anyway!'
     if network == 'nas':
         if input_shape != (331, 331, 3):
             print_error(input_warning_message)
         return NASNetLarge(input_shape=input_shape,
                            include_top=False,
                            pooling=pooling_method,
                            weights=None)
     elif network == 'inception':
         if input_shape != (299, 299, 3):
             print_error(input_warning_message)
         return InceptionResNetV2(input_shape=input_shape,
                                  include_top=False,
                                  pooling=pooling_method,
                                  weights='imagenet')
     elif network == 'xception':
         if input_shape != (299, 299, 3):
             print_error(input_warning_message)
         return Xception(input_shape=input_shape,
                         include_top=False,
                         pooling=pooling_method,
                         weights='imagenet')
     elif network == 'densenet':
         if input_shape != (224, 224, 3):
             print_error(input_warning_message)
         return DenseNet201(input_shape=input_shape,
                            include_top=False,
                            pooling=pooling_method,
                            weights='imagenet')
     elif network == 'resnet':
         if input_shape != (224, 224, 3):
             print_error(input_warning_message)
         return ResNet50(input_shape=input_shape,
                         include_top=False,
                         pooling=pooling_method,
                         weights='imagenet')
     else:
         print_error(f'Invalid network name: {network}! '
                     'Please choose from: nas, inception, xception, densenet, resnet.')
         return None
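A hypothetical call to get_base_model (the input shape matches the DenseNet default it checks for):

base = get_base_model('densenet', input_shape=(224, 224, 3), pooling_method='avg')
if base is not None:
    print(base.output_shape)  # (None, 1920) with global average pooling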
Example #16
def param_check():
    model1 = VGG16(include_top=False)
    model2 = VGG19(include_top=False)
    model3_a = ResNet50(include_top=False)
    model3_b = ResNet101(include_top=False, weights=None)
    model3_c = ResNet152(include_top=False, weights=None)
    model4 = InceptionV3(include_top=False)
    model5 = InceptionResNetV2(include_top=False)
    model6 = Xception(include_top=False)
    model7 = MobileNet(include_top=False, input_shape=(224, 224, 3))
    model8 = DenseNet201(include_top=False)
    print(model1.count_params())
    print(model2.count_params())
    print(model3_a.count_params())
    print(model3_b.count_params())
    print(model3_c.count_params())
    print(model4.count_params())
    print(model5.count_params())
    print(model6.count_params())
    print(model7.count_params())
    print(model8.count_params())
Example #17

def build_model(input_shape, output_num, feature_extractor='vgg16'):
    if feature_extractor == 'vgg16':
        arch = VGG16(include_top=False,
                     weights='imagenet',
                     input_shape=input_shape)
    elif feature_extractor == 'vgg19':
        arch = VGG19(include_top=False,
                     weights='imagenet',
                     input_shape=input_shape)
    elif feature_extractor == 'dense121':
        arch = DenseNet121(include_top=False,
                           weights='imagenet',
                           input_shape=input_shape)
    elif feature_extractor == 'dense169':
        arch = DenseNet169(include_top=False,
                           weights='imagenet',
                           input_shape=input_shape)
    elif feature_extractor == 'dense201':
        arch = DenseNet201(include_top=False,
                           weights='imagenet',
                           input_shape=input_shape)
    else:
        raise ValueError(f"Unsupported feature_extractor: {feature_extractor}")
    return create_attention_branch_net(arch, output_num)
Example #18
from keras.optimizers import Adam
from keras.applications import DenseNet201
from keras.metrics import Precision, Recall
def build_model(backbone):
    model = Sequential()
    model.add(backbone)
    model.add(MaxPooling2D())
    model.add(Dropout(0.5))
    model.add(BatchNormalization())
    model.add(Flatten())
    model.add(Dense(2, activation='softmax'))
    return model

densenet = DenseNet201(
    weights='imagenet',
    include_top=False,
    input_shape=(224,224,3)
)

model = build_model(densenet)
model.compile(
        loss='categorical_crossentropy',
        optimizer='adam',
        metrics=['accuracy', Precision(), Recall()]  # use the names imported above; `keras` itself is never imported here
    )
model.summary()

# TODO: re-run
model.fit_generator(
        train_generator,
        steps_per_epoch=500 // batch_size,
        epochs=10)  # the call was truncated in the source; the epochs value is an assumption
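train_generator and batch_size are defined elsewhere in that notebook; a minimal sketch of a compatible definition (the directory layout is an assumption):

from keras.preprocessing.image import ImageDataGenerator

batch_size = 16
train_datagen = ImageDataGenerator(rescale=1. / 255, horizontal_flip=True)
train_generator = train_datagen.flow_from_directory('data/train',
                                                    target_size=(224, 224),
                                                    batch_size=batch_size,
                                                    class_mode='categorical')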
Example #19
 def build_network(self, model_name, fine_tune):
     if model_name.lower() == 'vgg16':
         if fine_tune:
             base_model = VGG16(include_top=False,
                                input_shape=(None, None, 3),
                                pooling='avg')
             for layer in base_model.layers:
                 if layer.name.startswith('block5'):
                     layer.trainable = True
                 else:
                     layer.trainable = False
             x = base_model.output
             x = Dense(1024, activation='relu')(x)
             x = Dropout(0.5)(x)
             x = Dense(512, activation='relu')(x)
             predictions = Dense(1, activation='sigmoid')(x)
             model = Model(base_model.input, predictions)
             model.summary()
             return model
         else:
             base_model = VGG16(include_top=False,
                                input_shape=(None, None, 3),
                                pooling='avg')
             for layer in base_model.layers:
                 layer.trainable = True
             x = base_model.output
             x = Dense(1024, activation='relu')(x)
             x = Dropout(0.5)(x)
             x = Dense(512, activation='relu')(x)
             predictions = Dense(1, activation='sigmoid')(x)
             model = Model(base_model.input, predictions)
             model.summary()
             return model
     elif model_name.lower() == 'resnet50':
         base_model = ResNet50(include_top=False,
                               input_shape=(None, None, 3),
                               pooling='avg',
                               backend=keras.backend,
                               layers=keras.layers,
                               models=keras.models,
                               utils=keras.utils)
         x = base_model.output
         x = Dense(1024, activation='relu')(x)
         x = Dropout(0.5)(x)
         x = Dense(1024, activation='relu')(x)
         predictions = Dense(1, activation='sigmoid')(x)
         model = Model(base_model.input, predictions)
         if fine_tune:
             for layer in base_model.layers:
                 layer.trainable = False
         model.summary()
         return model
     # elif model_name.lower()=='resnet34':
     #     base_model=ResNet34(include_top=False, input_shape=self.shape, pooling='avg')
     #     x = base_model.output
     #     x = Dense(1024, activation='relu')(x)
     #     x = Dropout(0.5)(x)
     #     x = Dense(1024, activation='relu')(x)
     #     predictions = Dense(1, activation='sigmoid')(x)
     #     model = Model(base_model.input, predictions)
     #     if fine_tune:
     #         for layer in base_model.layers:
     #             layer.trainable = False
     #     model.summary()
     #     return model
     elif model_name.lower() == 'resnet101':
         base_model = ResNet101(include_top=False,
                                input_shape=(None, None, 3),
                                pooling='avg',
                                backend=keras.backend,
                                layers=keras.layers,
                                models=keras.models,
                                utils=keras.utils)
         x = base_model.output
         x = Dense(1024, activation='relu')(x)
         x = Dropout(0.5)(x)
         x = Dense(1024, activation='relu')(x)
         predictions = Dense(1, activation='sigmoid')(x)
         model = Model(base_model.input, predictions)
         if fine_tune:
             for layer in base_model.layers:
                 layer.trainable = False
         model.summary()
         return model
     elif model_name.lower() == 'resnet152':
         base_model = ResNet152(include_top=False,
                                input_shape=(None, None, 3),
                                pooling='avg',
                                backend=keras.backend,
                                layers=keras.layers,
                                models=keras.models,
                                utils=keras.utils)
         x = base_model.output
         x = Dense(1024, activation='relu')(x)
         x = Dropout(0.5)(x)
         x = Dense(1024, activation='relu')(x)
         predictions = Dense(1, activation='sigmoid')(x)
         model = Model(base_model.input, predictions)
         if fine_tune:
             for layer in base_model.layers:
                 layer.trainable = False
         model.summary()
         return model
     elif model_name.lower() == 'inceptionresnetv2':
         base_model = InceptionResNetV2(include_top=False,
                                        input_shape=(None, None, 3),
                                        pooling='avg')
         x = base_model.output
         x = Dense(1024, activation='relu')(x)
         x = Dropout(0.5)(x)
         x = Dense(1024, activation='relu')(x)
         predictions = Dense(1, activation='sigmoid')(x)
         model = Model(base_model.input, predictions)
         if fine_tune:
             for layer in base_model.layers:
                 layer.trainable = False
         model.summary()
         return model
     elif model_name.lower() == 'xception':
         base_model = Xception(include_top=False,
                               input_shape=(None, None, 3),
                               pooling='avg')
         x = base_model.output
         x = Dense(1024, activation='relu')(x)
         x = Dropout(0.5)(x)
         x = Dense(1024, activation='relu')(x)
         predictions = Dense(1, activation='sigmoid')(x)
         model = Model(base_model.input, predictions)
         if fine_tune:
             for layer in base_model.layers:
                 layer.trainable = False
         model.summary()
         return model
     elif model_name.lower() == 'densenet121':
         base_model = DenseNet121(include_top=False,
                                  input_shape=(None, None, 3),
                                  pooling='avg')
         x = base_model.output
         x = Dense(1024, activation='relu')(x)
         x = Dropout(0.5)(x)
         x = Dense(1024, activation='relu')(x)
         predictions = Dense(1, activation='sigmoid')(x)
         model = Model(base_model.input, predictions)
         if fine_tune:
             for layer in base_model.layers:
                 layer.trainable = False
         model.summary()
         return model
     elif model_name.lower() == 'densenet169':
         base_model = DenseNet169(include_top=False,
                                  input_shape=(None, None, 3),
                                  pooling='avg')
         x = base_model.output
         x = Dense(1024, activation='relu')(x)
         x = Dropout(0.5)(x)
         x = Dense(1024, activation='relu')(x)
         predictions = Dense(1, activation='sigmoid')(x)
         model = Model(base_model.input, predictions)
         if fine_tune:
             for layer in base_model.layers:
                 layer.trainable = False
         model.summary()
         return model
     elif model_name.lower() == 'densenet201':
         base_model = DenseNet201(include_top=False,
                                  input_shape=(None, None, 3),
                                  pooling='avg')
         x = base_model.output
         x = Dense(1024, activation='relu')(x)
         x = Dropout(0.5)(x)
         x = Dense(1024, activation='relu')(x)
         predictions = Dense(1, activation='sigmoid')(x)
         model = Model(base_model.input, predictions)
         if fine_tune:
             for layer in base_model.layers:
                 layer.trainable = False
         model.summary()
         return model
     elif model_name.lower() == 'nasnetlarge':
         base_model = NASNetLarge(include_top=False,
                                  input_shape=(None, None, 3),
                                  pooling='avg')
         x = base_model.output
         x = Dense(1024, activation='relu')(x)
         x = Dropout(0.5)(x)
         x = Dense(1024, activation='relu')(x)
         predictions = Dense(1, activation='sigmoid')(x)
         model = Model(base_model.input, predictions)
         if fine_tune:
             for layer in base_model.layers:
                 layer.trainable = False
         model.summary()
         return model
     elif model_name.lower() == 'vgg19':
         base_model = VGG19(include_top=False,
                            input_shape=(None, None, 3),
                            pooling='avg')
         x = base_model.output
         x = Dense(1024, activation='relu')(x)
         x = Dropout(0.5)(x)
         x = Dense(1024, activation='relu')(x)
         predictions = Dense(1, activation='sigmoid')(x)
         model = Model(base_model.input, predictions)
         if fine_tune:
             for layer in base_model.layers:
                 layer.trainable = False
         model.summary()
         return model
     else:
         base_model = NASNetMobile(include_top=False,
                                   input_shape=(None, None, 3),
                                   pooling='avg')
         x = base_model.output
         x = Dense(1024, activation='relu')(x)
         x = Dropout(0.5)(x)
         x = Dense(1024, activation='relu')(x)
         predictions = Dense(1, activation='sigmoid')(x)
         model = Model(base_model.input, predictions)
         if fine_tune:
             for layer in base_model.layers:
                 layer.trainable = False
         model.summary()
         return model
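A hypothetical usage of build_network (the owning class is an assumption; every head above ends in a single sigmoid unit, i.e. binary classification):

trainer = BinaryClassifierBuilder()  # hypothetical class owning build_network
model = trainer.build_network('densenet201', fine_tune=True)
model.compile(optimizer='adam',
              loss='binary_crossentropy',
              metrics=['accuracy'])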
Example #20

def build_model(backbone, lr=1e-4):  # def line reconstructed from the call below; it was missing in the source
    classifier = Sequential()
    classifier.add(backbone)
    classifier.add(layers.GlobalAveragePooling2D())
    classifier.add(layers.Dropout(0.5))
    classifier.add(layers.BatchNormalization())
    classifier.add(layers.Dense(3, activation='softmax'))

    classifier.compile(loss='sparse_categorical_crossentropy',
                       optimizer=Adam(lr=lr),
                       metrics=['accuracy'])

    return classifier


densenet = DenseNet201(weights='imagenet',
                       include_top=False,
                       input_shape=(100, 100, 3),
                       classes=3)  # note: `classes` is ignored when include_top=False

model = build_model(densenet, lr=1e-4)
model.summary()

learn_control = ReduceLROnPlateau(monitor='val_loss',
                                  patience=5,
                                  verbose=1,
                                  factor=0.2,
                                  min_lr=1e-7)

# Training the model
ckpt_path = './checkpoints/Irfan'
ckpt_name = "Master_Model.hdf5"
os.makedirs(ckpt_path, exist_ok=True)
checkpoint = ModelCheckpoint(os.path.join(ckpt_path, ckpt_name),
                             save_best_only=True)  # reconstructed; the original definition was truncated
# The head of this fit call was also truncated in the source; a minimal reconstruction
# (train_generator and epochs are defined elsewhere):
model.fit_generator(
    train_generator,
    epochs=epochs,
    class_weight={
        0: 1.,
        1: 5.,  # weigh the covid class 5x more than the others
        2: 1.,
        3: 1.
    },
    callbacks=[checkpoint]
)

"""# V8: DenseNet201 Finetune"""

from keras.applications import DenseNet121,DenseNet169,DenseNet201
from keras.layers import Conv2D, MaxPooling2D, Dense, Dropout, Input, Flatten,GlobalMaxPooling2D,BatchNormalization,InputLayer
from keras.optimizers import Adam
from keras import Sequential, Model

densenet = DenseNet201(include_top=False, input_shape=(IMAGE_SHAPE, IMAGE_SHAPE, 3))

x = densenet.layers[-1].output
x = Flatten()(x)
x = Dense(512, activation='relu')(x)
x = Dropout(0.5)(x)
x = Dense(256, activation='relu')(x)
x = Dropout(0.5)(x)
x = Dense(4, activation='softmax')(x)

model = Model(inputs=densenet.inputs, outputs=x)
model.compile(optimizer=Adam(lr=3e-5),
              loss='categorical_crossentropy',
              metrics=['accuracy'],
              weighted_metrics=['accuracy'])

from keras.callbacks import ModelCheckpoint
checkpoint = ModelCheckpoint("model_checkpoints/v9/densenet201_finetune_weights_{epoch:02d}-{val_accuracy:.2f}.hdf5",save_best_only=True)
Example #22

VGG16_top = VGG16(include_top=False, input_shape=(224, 224, 3))
VGG19_top = VGG19(include_top=False, input_shape=(224, 224, 3))
Res50_top = ResNet50(include_top=False, input_shape=(224, 224, 3))
Xception_top = Xception(include_top=False, input_shape=(299, 299, 3))
InceptionV3_top = InceptionV3(include_top=False, input_shape=(299, 299, 3))
InceptionResNetV2_top = InceptionResNetV2(include_top=False,
                                          input_shape=(299, 299, 3))

# Less commonly used models; Keras also ships pretrained weights for these

from keras.applications import MobileNet
from keras.applications import DenseNet121, DenseNet169, DenseNet201
from keras.applications import NASNetLarge, NASNetMobile

Mobile_base = MobileNet(include_top=True, input_shape=(224, 224, 3))

Dense121_base = DenseNet121(include_top=True, input_shape=(224, 224, 3))
Dense169_base = DenseNet169(include_top=True, input_shape=(224, 224, 3))
Dense201_base = DenseNet201(include_top=True, input_shape=(224, 224, 3))

NASNetLarge_base = NASNetLarge(include_top=True, input_shape=(331, 331, 3))
NASNetMobile_base = NASNetMobile(include_top=True, input_shape=(224, 224, 3))

# -------------------------------------------------------------------------
# Networks without the top (classification) layers
Mobile_top = MobileNet(include_top=False, input_shape=(224, 224, 3))

Dense121_top = DenseNet121(include_top=False, input_shape=(224, 224, 3))
Dense169_top = DenseNet169(include_top=False, input_shape=(224, 224, 3))
Dense201_top = DenseNet201(include_top=False, input_shape=(224, 224, 3))
Example #23
    def __init__(self,
                 image_size=299,
                 batch_size=64,
                 num_classes=100,
                 trainable=True,
                 load_trained=False,
                 max_trainable=False,
                 pretrained_model='pretrained.h5',
                 init_lr=0.001,
                 n_channels=3,
                 optimizer='adam',
                 init_epoch=0,
                 max_epoch=100,
                 net_type=0):
        try:
            os.mkdir("out_model")
            os.mkdir("logs")
            print("Created output directories!")
        except OSError:
            pass  # directories already exist

        self.image_size = image_size
        self.batch_size = batch_size
        self.init_lr = init_lr
        self.max_epoch = max_epoch
        self.init_epoch = init_epoch
        self.net_type = net_type

        self.model = None
        self.pre_process = None

        input_shape = (image_size, image_size, n_channels)

        if net_type == 0:
            self.model = DenseNet121(input_shape=input_shape,
                                     include_top=False,
                                     weights='imagenet',
                                     pooling='max')
            self.pre_process = keras.applications.densenet.preprocess_input
        elif net_type == 1:
            self.model = DenseNet169(input_shape=input_shape,
                                     include_top=False,
                                     weights='imagenet',
                                     pooling='max')
            self.pre_process = keras.applications.densenet.preprocess_input
        elif net_type == 2:
            self.model = DenseNet201(input_shape=input_shape,
                                     include_top=False,
                                     weights='imagenet',
                                     pooling='max')
            self.pre_process = keras.applications.densenet.preprocess_input
        elif net_type == 3:
            self.model = ResNet50(input_shape=input_shape,
                                  include_top=False,
                                  weights='imagenet',
                                  pooling='max')
            self.pre_process = keras.applications.resnet50.preprocess_input
        elif net_type == 4:
            self.model = InceptionV3(input_shape=input_shape,
                                     include_top=False,
                                     weights='imagenet',
                                     pooling='max')
            self.pre_process = keras.applications.inception_v3.preprocess_input
        elif net_type == 5:
            self.model = InceptionResNetV2(input_shape=input_shape,
                                           include_top=False,
                                           weights='imagenet',
                                           pooling='max')
            self.pre_process = keras.applications.inception_resnet_v2.preprocess_input
        elif net_type == 6:
            self.model = NASNetLarge(input_shape=input_shape,
                                     include_top=False,
                                     weights='imagenet',
                                     pooling='max')
            self.pre_process = keras.applications.nasnet.preprocess_input
        elif net_type == 7:
            self.model = NASNetMobile(input_shape=input_shape,
                                      include_top=False,
                                      weights='imagenet',
                                      pooling='max')
            self.pre_process = keras.applications.nasnet.preprocess_input

        x = self.model.output
        # x = GlobalAveragePooling2D()(x)
        x = Dense(1024, activation='relu')(x)  # add a fully-connected layer
        self.predictions = Dense(num_classes,
                                 activation='softmax',
                                 name='out_put')(x)
        self.model = Model(inputs=self.model.input, outputs=self.predictions)

        if load_trained:
            self.model.load_weights(pretrained_model)
            print("Load pretrained model successfully!")

        if trainable == False:
            for layer in self.model.layers:
                layer.trainable = False
            print("Use model for inference is activated!")
        if trainable and not max_trainable:
            for layer in self.model.layers[:-5]:
                layer.trainable = False
            for layer in self.model.layers[-5:]:
                layer.trainable = True
            print("Train last layers is activated!")
        if max_trainable:
            for layer in self.model.layers:
                layer.trainable = True
            print("Train whole network is activated!")

        if (optimizer == 'adam'):
            opt = Adam(lr=init_lr, beta_1=0.9, beta_2=0.999, decay=1e-6)
        else:
            opt = SGD(lr=init_lr, decay=1e-6, momentum=0.9, nesterov=True)
        self.model.compile(optimizer=opt,
                           loss='categorical_crossentropy',
                           metrics=['accuracy'])

        self.earlyStopping = callbacks.EarlyStopping(monitor='val_acc',
                                                     min_delta=0.001,
                                                     patience=10,
                                                     verbose=1)
        self.tensorBoard = callbacks.TensorBoard('./logs',
                                                 batch_size=batch_size,
                                                 write_grads=True,
                                                 write_images=True)
        self.checkpoint = callbacks.ModelCheckpoint(
            './out_model/weights.' + type_models[self.net_type] +
            '.{epoch:02d}-{acc:.2f}-{val_acc:.2f}.hdf5',
            monitor='val_acc',
            verbose=1,
            save_best_only=True,
            save_weights_only=False,
            mode='auto',
            period=1)
        self.lrController = callbacks.ReduceLROnPlateau(monitor='val_acc',
                                                        factor=0.5,
                                                        patience=3,
                                                        verbose=1,
                                                        mode='auto',
                                                        min_delta=0.0001,
                                                        cooldown=0,
                                                        min_lr=0.00001)
        self.history_ = callbacks.History()
        self.callBackList = [
            self.earlyStopping, self.tensorBoard, self.checkpoint,
            self.lrController, self.history_
        ]

        # [self.train_loss, self.train_metrics] = 2*[None]
        self.history = None
        self.dataGenerator = None
Example #24

print('****************')
classes_name = [0 for i in range(NUM_CLASSES)]
for cls, idx in train_batches.class_indices.items():
    print('Class #{} = {}'.format(idx, cls))
    classes_name[idx] = cls
print(classes_name)
print('****************')

# build our classifier model based on pre-trained model:
# 1. we don't include the top (fully connected) layers of pretrained models
# 2. we add a DropOut layer followed by a Dense (fully connected)
#    layer which generates softmax class score for each class
# 3. we compile the final model using an Adam optimizer, with a
#    low learning rate (since we are 'fine-tuning')
base_model = DenseNet201(include_top=False,
                         weights='imagenet',
                         input_tensor=None,
                         input_shape=(IMAGE_SIZE[0], IMAGE_SIZE[1], 3))
for layer in base_model.layers:
    layer.trainable = True

# add a global spatial average pooling layer
x = base_model.output
x = GlobalAveragePooling2D(name='avg_pool')(x)
x = Dropout(0.5)(x)

# and a logistic layer -- let's say we have 2 classes
predictions = Dense(NUM_CLASSES, activation='softmax')(x)

# this is the model we will train
model = Model(inputs=base_model.input, outputs=predictions)
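The snippet stops before compilation; a hedged continuation consistent with the comments above (the epoch count and exact learning rate are assumptions):

from keras.optimizers import Adam

model.compile(optimizer=Adam(lr=1e-4),  # low learning rate for fine-tuning
              loss='categorical_crossentropy',
              metrics=['accuracy'])
model.fit_generator(train_batches, epochs=10)  # train_batches built earlier in the notebook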
Example #25

def run():
    # data_link_dict = get_skfold_data(path="../data/imgs/*.jpg")
    start_time = time.time()

    IMAGE_DIM = (224, 224, 3)

    EPOCHS = 5
    MINITRAINS = 30
    DO = 0.50  # drop out
    SPRINT = True   # is this run a full train or a sprint

    # for Adam inital LR of 0.0001 is a good starting point
    # for SGD initial LR of 0.001 is a good starting point
    LR = 0.025
    DECAY = 0.5e-6
    L2_REG = 0.05
    # OPTIMIZER = Adam(lr=LR, decay=DECAY)
    OPTIMIZER = SGD(lr=LR, momentum=0.9, nesterov=True)

    NB_IV3_LAYERS_TO_FREEZE = 172
    MODEL_ID = 'v4_0a'

    plot_file = "model_{:}.png".format(MODEL_ID)
    weights_file = "weights/model_{:}_weights.h5".format(MODEL_ID)
    history_file = "histories/history_{:}.json".format(MODEL_ID)

    # # user parameters for LoaderBot v1.0
    # # Parameters for Generators
    # params = {'dim': (299, 299),
    #           'batch_size': 256,
    #           'n_classes': 128,
    #           'n_channels': 3,
    #           'shuffle': False}

    # These parameters are for LoaderBot v2.0
    # Parameters for Generators
    params = {'batch_size': 16,
              'dim': (224, 224),
              'augment': True,
              'augment_odds': 1.0,
              'shuffle': True}
    #
    # Parameters for Generators
    test_params = {'batch_size': 16,
                   'dim': (224, 224),
                   'augment': False,
                   'shuffle': False}

    # Datasets
    data = get_data("../data/metadata_splits.json")

    print(data["X_train"][:5])

    # Generators
    training_generator = LoaderBot(data["X_train"], data["y_train"], **params)
    validation_generator = LoaderBot(data["X_test"], data["y_test"], **test_params)

    # setup model
    base_model = DenseNet201(include_top=False, input_shape=IMAGE_DIM)
    # base_model = InceptionV3(weights='imagenet', include_top=False)  # include_top=False excludes final FC layer
    model = regular_brian_layers(base_model, 128, DO, l1_reg=0.01)

    # print(model.summary())
    # model.load_weights("weights/model_v2_7g_weights.h5")

    # mini-train 1, like normal
    # transfer learning
    setup_to_transfer_learn(model, base_model, OPTIMIZER)

    history = {}   # preinitialize history log

    for mt in range(MINITRAINS):
        temp = mt + 1

        if temp == 1:
            # Run model
            new_history = model.fit_generator(generator=training_generator,
                                              validation_data=validation_generator,
                                              epochs=EPOCHS,
                                              use_multiprocessing=False)

            history["acc"] = new_history.history["acc"]
            history["val_acc"] = new_history.history["val_acc"]
            history["loss"] = new_history.history["loss"]
            history["val_loss"] = new_history.history["val_loss"]

        else:

            # params["augment_odds"] += 0.05
            # if params["augment_odds"] > 1.0:
            #     params["augment_odds"] = 1.0
            # training_generator = LoaderBot(data["X_train"], data["y_train"], **params)

            temp_lr = LR / (10.0**(mt / 5 - (mt // 7.5)))
            # temp_lr = LR / 1.5**temp
            # mini-train 2
            OPTIMIZER = Adam(lr=temp_lr, decay=0.0)
            # try to fine tune some of the InceptionV3 layers also

            thaw_count = int(2.5 * temp)
            if thaw_count > 10:
                thaw_count = 10

            thaw_count = NB_IV3_LAYERS_TO_FREEZE - thaw_count

            setup_to_finetune(model, thaw_count, OPTIMIZER, L2_REG)

            model = activate_regularization(model)

            print("\n\n        Starting epoch {:}\n\n".format(EPOCHS * mt + 1))
            print("Learning rate for mini-train: {:2.8f}   augmentation odds:{:2.2f}\n\n".format(temp_lr, params["augment_odds"]))

            # Run model
            new_history = model.fit_generator(generator=training_generator,
                                              validation_data=validation_generator,
                                              epochs=EPOCHS,
                                              use_multiprocessing=False)

            # save the weights in case we want to predict on them later
            model.save(weights_file)

            history["acc"] += new_history.history["acc"]
            history["val_acc"] += new_history.history["val_acc"]
            history["loss"] += new_history.history["loss"]
            history["val_loss"] += new_history.history["val_loss"]

        # Any zero-padding in the history is cleaned up by clean_history() below.

        plot_hist(history, plot_file, epochs=len(history["acc"]), sprint=SPRINT)

    # try to save the history so models can be more easily compared and Also
    # to better log results if going back is needed
    with open(history_file, "w") as outfile:
        history = clean_history(history)  # clean up zero padding, if it exists
        json.dump(history, outfile)

    print("\n\n\n\nCompleted in {:6.2f} hrs".format(((time.time() - start_time)) / 3600))  # convert to hours
Example #26
def convolutional(instruction=None,
                  read_mode=None,
                  preprocess=True,
                  data_path=None,
                  verbose=0,
                  new_folders=True,
                  image_column=None,
                  training_ratio=0.8,
                  fine_tune=False,
                  augmentation=True,
                  custom_arch=None,
                  pretrained=None,
                  epochs=10,
                  height=None,
                  width=None,
                  save_as_tfjs=None,
                  save_as_tflite=None,
                  generate_plots=True):
    '''
    Body of the convolutional function called by the neural network query
    when the data consists of images.
    :param instruction (and the remaining keyword arguments): control preprocessing,
        tuning, plot generation, and parameterization of the trained CNN.
    :return: a dictionary holding all the information for the finished model.
    '''

    # data_path = get_folder_dir()

    logger("Generating datasets for classes")

    LR = 0.001
    plots = {}
    if pretrained:
        if not height:
            height = 224
        if not width:
            width = 224
        if height != 224 or width != 224:
            raise ValueError(
                "For pretrained models, both 'height' and 'width' must be 224."
            )

    if preprocess:
        if custom_arch:
            raise ValueError(
                "If 'custom_arch' is not None, 'preprocess' must be set to false."
            )

        read_mode_info = set_distinguisher(data_path, read_mode)
        read_mode = read_mode_info["read_mode"]

        training_path = "/proc_training_set"
        testing_path = "/proc_testing_set"

        if read_mode == "setwise":
            processInfo = setwise_preprocessing(data_path, new_folders, height,
                                                width)
            if not new_folders:
                training_path = "/training_set"
                testing_path = "/testing_set"

        # if image dataset in form of csv
        elif read_mode == "csvwise":
            if training_ratio <= 0 or training_ratio >= 1:
                raise ValueError("Training ratio must be between 0 and 1.")
            processInfo = csv_preprocessing(read_mode_info["csv_path"],
                                            data_path, instruction,
                                            image_column, training_ratio,
                                            height, width)

        # if image dataset in form of one folder containing class folders
        elif read_mode == "classwise":
            if training_ratio <= 0 or training_ratio >= 1:
                raise ValueError("Training ratio must be between 0 and 1.")
            processInfo = classwise_preprocessing(data_path, training_ratio,
                                                  height, width)

    else:
        training_path = "/training_set"
        testing_path = "/testing_set"
        processInfo = already_processed(data_path)

    num_channels = 3
    color_mode = 'rgb'
    if processInfo["gray_scale"]:
        num_channels = 1
        color_mode = 'grayscale'

    input_shape = (processInfo["height"], processInfo["width"], num_channels)
    input_single = (processInfo["height"], processInfo["width"])
    num_classes = processInfo["num_categories"]
    loss_func = ""
    output_layer_activation = ""

    if num_classes > 2:
        loss_func = "categorical_crossentropy"
        output_layer_activation = "softmax"
    elif num_classes == 2:
        num_classes = 1
        loss_func = "binary_crossentropy"
        output_layer_activation = "sigmoid"

    logger("Creating convolutional neural network dynamically")

    # Convolutional Neural Network

    # Build model based on custom_arch configuration if given
    if custom_arch:
        with open(custom_arch, "r") as f:
            custom_arch_dict = json.load(f)
            custom_arch_json_string = json.dumps(custom_arch_dict)
            model = model_from_json(custom_arch_json_string)

    # Build an existing state-of-the-art model
    elif pretrained:

        arch_lower = pretrained.get('arch').lower()

        # If user specifies value of pretrained['weights'] as 'imagenet', weights pretrained on ImageNet will be used
        if 'weights' in pretrained and pretrained.get('weights') == 'imagenet':
            # Load ImageNet pretrained weights
            if arch_lower == "vggnet16":
                base_model = VGG16(include_top=False,
                                   weights='imagenet',
                                   input_shape=input_shape)
                x = Flatten()(base_model.output)
                x = Dense(4096)(x)
                x = Dropout(0.5)(x)
                x = Dense(4096)(x)
                x = Dropout(0.5)(x)
                pred = Dense(num_classes,
                             activation=output_layer_activation)(x)
                model = Model(base_model.input, pred)
            elif arch_lower == "vggnet19":
                base_model = VGG19(include_top=False,
                                   weights='imagenet',
                                   input_shape=input_shape)
                x = Flatten()(base_model.output)
                x = Dense(4096)(x)
                x = Dropout(0.5)(x)
                x = Dense(4096)(x)
                x = Dropout(0.5)(x)
                pred = Dense(num_classes,
                             activation=output_layer_activation)(x)
                model = Model(base_model.input, pred)
            elif arch_lower == "resnet50":
                base_model = ResNet50(include_top=False,
                                      weights='imagenet',
                                      input_shape=input_shape)
                x = GlobalAveragePooling2D()(base_model.output)
                x = Dropout(0.5)(x)
                pred = Dense(num_classes,
                             activation=output_layer_activation)(x)
                model = Model(base_model.input, pred)
            elif arch_lower == "resnet101":
                base_model = ResNet101(include_top=False,
                                       weights='imagenet',
                                       input_shape=input_shape)
                x = GlobalAveragePooling2D()(base_model.output)
                x = Dropout(0.5)(x)
                pred = Dense(num_classes,
                             activation=output_layer_activation)(x)
                model = Model(base_model.input, pred)
            elif arch_lower == "resnet152":
                base_model = ResNet152(include_top=False,
                                       weights='imagenet',
                                       input_shape=input_shape)
                x = GlobalAveragePooling2D()(base_model.output)
                x = Dropout(0.5)(x)
                pred = Dense(num_classes,
                             activation=output_layer_activation)(x)
                model = Model(base_model.input, pred)
            elif arch_lower == "mobilenet":
                base_model = MobileNet(include_top=False,
                                       weights='imagenet',
                                       input_shape=input_shape)
                x = fine_tuned_model(base_model)
                pred = Dense(num_classes,
                             activation=output_layer_activation)(x)
                model = Model(base_model.input, pred)
            elif arch_lower == "mobilenetv2":
                base_model = MobileNetV2(include_top=False,
                                         weights='imagenet',
                                         input_shape=input_shape)
                x = fine_tuned_model(base_model)
                pred = Dense(num_classes,
                             activation=output_layer_activation)(x)
                model = Model(base_model.input, pred)
            elif arch_lower == "densenet121":
                base_model = DenseNet121(include_top=False,
                                         weights='imagenet',
                                         input_shape=input_shape)
                x = fine_tuned_model(base_model)
                pred = Dense(num_classes,
                             activation=output_layer_activation)(x)
                model = Model(base_model.input, pred)
            elif arch_lower == "densenet169":
                base_model = DenseNet169(include_top=False,
                                         weights='imagenet',
                                         input_shape=input_shape)
                x = fine_tuned_model(base_model)
                pred = Dense(num_classes,
                             activation=output_layer_activation)(x)
                model = Model(base_model.input, pred)
            elif arch_lower == "densenet201":
                base_model = DenseNet201(include_top=False,
                                         weights='imagenet',
                                         input_shape=input_shape)
                x = fine_tuned_model(base_model)
                pred = Dense(num_classes,
                             activation=output_layer_activation)(x)
                model = Model(base_model.input, pred)
            else:
                raise ModuleNotFoundError("arch \'" + pretrained.get('arch') +
                                          "\' not supported.")

        else:
            # Randomly initialized weights
            if arch_lower == "vggnet16":
                model = VGG16(include_top=True,
                              weights=None,
                              classes=num_classes,
                              classifier_activation=output_layer_activation)
            elif arch_lower == "vggnet19":
                model = VGG19(include_top=True,
                              weights=None,
                              classes=num_classes,
                              classifier_activation=output_layer_activation)
            elif arch_lower == "resnet50":
                model = ResNet50(include_top=True,
                                 weights=None,
                                 classes=num_classes)
            elif arch_lower == "resnet101":
                model = ResNet101(include_top=True,
                                  weights=None,
                                  classes=num_classes)
            elif arch_lower == "resnet152":
                model = ResNet152(include_top=True,
                                  weights=None,
                                  classes=num_classes)
            elif arch_lower == "mobilenet":
                model = MobileNet(include_top=True,
                                  weights=None,
                                  classes=num_classes)
            elif arch_lower == "mobilenetv2":
                model = MobileNetV2(include_top=True,
                                    weights=None,
                                    classes=num_classes)
            elif arch_lower == "densenet121":
                model = DenseNet121(include_top=True,
                                    weights=None,
                                    classes=num_classes)
            elif arch_lower == "densenet169":
                model = DenseNet169(include_top=True,
                                    weights=None,
                                    classes=num_classes)
            elif arch_lower == "densenet201":
                model = DenseNet201(include_top=True,
                                    weights=None,
                                    classes=num_classes)
            else:
                raise ModuleNotFoundError("arch \'" + pretrained.get('arch') +
                                          "\' not supported.")

    else:
        model = Sequential()
        # model.add(
        #     Conv2D(
        #         64,
        #         kernel_size=3,
        #         activation="relu",
        #         input_shape=input_shape))
        # model.add(MaxPooling2D(pool_size=(2, 2)))
        # model.add(Conv2D(64, kernel_size=3, activation="relu"))
        # model.add(MaxPooling2D(pool_size=(2, 2)))
        # model.add(Flatten())
        # model.add(Dense(num_classes, activation="softmax"))
        # model.compile(
        #     optimizer="adam",
        #     loss=loss_func,
        #     metrics=['accuracy'])
        model.add(
            Conv2D(filters=64,
                   kernel_size=5,
                   activation="relu",
                   input_shape=input_shape))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Conv2D(filters=64, kernel_size=3, activation="relu"))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(0.25))
        model.add(Conv2D(filters=64, kernel_size=3, activation="relu"))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Flatten())
        model.add(Dense(units=256, activation="relu"))
        model.add(Dropout(0.25))
        model.add(Dense(units=num_classes, activation="softmax"))

    if pretrained and 'weights' in pretrained and pretrained.get(
            'weights') == 'imagenet':
        for layer in base_model.layers:
            layer.trainable = False

    opt = Adam(learning_rate=LR)

    model.compile(optimizer=opt, loss=loss_func, metrics=['accuracy'])

    logger("Located image data")

    if augmentation:
        train_data = ImageDataGenerator(rescale=1. / 255,
                                        shear_range=0.2,
                                        zoom_range=0.2,
                                        horizontal_flip=True)
        test_data = ImageDataGenerator(rescale=1. / 255)

        logger('Dataset augmented through zoom, shear, flip, and rescale')
    else:
        train_data = ImageDataGenerator()
        test_data = ImageDataGenerator()

    logger("->", "Optimal image size identified: {}".format(input_shape))
    X_train = train_data.flow_from_directory(
        data_path + training_path,
        target_size=input_single,
        color_mode=color_mode,
        batch_size=(16 if processInfo["train_size"] >= 16 else 1),
        class_mode=loss_func[:loss_func.find("_")])  # e.g. 'categorical_crossentropy' -> 'categorical'
    X_test = test_data.flow_from_directory(
        data_path + testing_path,
        target_size=input_single,
        color_mode=color_mode,
        batch_size=(16 if processInfo["test_size"] >= 16 else 1),
        class_mode=loss_func[:loss_func.find("_")])

    if epochs <= 0:
        raise ValueError("Number of epochs must be greater than 0.")

    print("\n")
    logger('Training image model')

    # model.summary()

    history = model.fit_generator(
        X_train,
        steps_per_epoch=X_train.n // X_train.batch_size,
        validation_data=X_test,
        validation_steps=X_test.n // X_test.batch_size,
        epochs=epochs,
        verbose=verbose)

    plots = None

    if fine_tune:

        logger(
            '->', 'Training accuracy: {}'.format(
                history.history['accuracy'][-1]))
        logger(
            '->', 'Validation accuracy: {}'.format(
                history.history['val_accuracy'][-1]))

        for layer in base_model.layers:
            layer.trainable = True

        opt = Adam(learning_rate=LR / 10)

        model.compile(optimizer=opt, loss=loss_func, metrics=['accuracy'])

        print("\n\n")
        logger('Training fine tuned model')

        fine_tuning_epoch = epochs + 10
        history_fine = model.fit_generator(
            X_train,
            steps_per_epoch=X_train.n // X_train.batch_size,
            validation_data=X_test,
            validation_steps=X_test.n // X_test.batch_size,
            epochs=fine_tuning_epoch,
            initial_epoch=history.epoch[-1],
            verbose=verbose)
        # frozen-model accuracy and loss history
        acc = history.history['accuracy']
        val_acc = history.history['val_accuracy']

        loss = history.history['loss']
        val_loss = history.history['val_loss']

        # fine-tuned model accuracy and loss history
        acc += history_fine.history['accuracy']
        val_acc += history_fine.history['val_accuracy']

        loss += history_fine.history['loss']
        val_loss += history_fine.history['val_loss']

        if generate_plots:
            plots = generate_fine_tuned_classification_plots(
                acc, val_acc, loss, val_loss, epochs)

    models = []
    losses = []
    accuracies = []
    model_data = []

    model_data.append(model)
    models.append(history)

    losses.append(history.history["val_loss"][-1])
    accuracies.append(history.history['val_accuracy'][-1])


    if generate_plots and not fine_tune:
        plots = generate_classification_plots(models[-1])

    print("\n")
    logger(
        '->', 'Final training accuracy: {}'.format(
            history.history['accuracy'][-1]))
    logger(
        '->', 'Final validation accuracy: {}'.format(
            history.history['val_accuracy'][-1]))
    # store values in the model dictionary

    number_of_examples = len(X_test.filenames)
    number_of_generator_calls = math.ceil(number_of_examples /
                                          (1.0 * X_test.batch_size))

    test_labels = []

    for i in range(0, int(number_of_generator_calls)):
        test_labels.extend(np.array(X_test[i][1]))

    predIdx = model.predict(X_test)

    if output_layer_activation == "sigmoid":
        real = [int(x) for x in test_labels]
        ans = []
        for i in range(len(predIdx)):
            ans.append(int(round(predIdx[i][0])))

    elif output_layer_activation == "softmax":
        real = []
        for ans in test_labels:
            real.append(ans.argmax())
        ans = []
        for r in predIdx:
            ans.append(r.argmax())

    else:
        raise ValueError("Unsupported output layer activation: {}".format(
            output_layer_activation))

    logger("Stored model under 'convolutional_NN' key")

    if save_as_tfjs:
        tfjs.converters.save_keras_model(model, "tfjsmodel")
        logger("Saved tfjs model under 'tfjsmodel' directory")

    if save_as_tflite:
        converter = tf.lite.TFLiteConverter.from_keras_model(model)
        tflite_model = converter.convert()
        with open("model.tflite", "wb") as f:
            f.write(tflite_model)
        logger("Saved tflite model as 'model.tflite'")

    clearLog()

    K.clear_session()

    return {
        'id': generate_id(),
        'data_type': read_mode,
        'data_path': data_path,
        'data': {
            'train': X_train,
            'test': X_test
        },
        'shape': input_shape,
        'res': {
            'real': real,
            'ans': ans
        },
        'model': model,
        'plots': plots,
        'losses': {
            'training_loss': history.history['loss'],
            'val_loss': history.history['val_loss']
        },
        'accuracy': {
            'training_accuracy': history.history['accuracy'],
            'validation_accuracy': history.history['val_accuracy']
        },
        'num_classes': (2 if num_classes == 1 else num_classes),
        'data_sizes': {
            'train_size': processInfo['train_size'],
            'test_size': processInfo['test_size']
        }
    }
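
A hedged usage sketch (editorial, not from the original snippet): the dictionary returned above can be consumed as follows. The wrapper name `image_classification` and the save path are assumptions; the key names come from the return statement.

result = image_classification()  # hypothetical wrapper around the routine above
print(result['accuracy']['validation_accuracy'][-1])  # final validation accuracy
result['model'].save('convolutional_nn.h5')  # persist the trained Keras model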
Example #27
0
def generate_model_base(preset, width, height, channel, class_num, weights_init):
    '''
    Build the model.

    # Arguments
        preset: name of the preset model
        width: input image width
        height: input image height
        channel: number of input image channels
        class_num: number of classification classes
        weights_init: initial weights (None or 'imagenet')

    # Returns
        A keras.models.Model object
    '''
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
    # os.environ["CUDA_VISIBLE_DEVICES"] = "-1"

    from keras.layers import Dense, BatchNormalization, Dropout, Input, Conv2D
    # from keras.layers import GlobalAveragePooling2D
    from keras.models import Model

    input_tensor = Input(shape=(width, height, channel))
    conv_base = None
    # output_layer = None
    prediction_layer = None

    if preset.upper() == "bench".upper():
        conv_base = create_bench_model(input_tensor)
        prediction_layer = conv_base
    elif preset.upper() == "VGG16".upper():
        from keras.applications import VGG16
        conv_base = None
        if channel == 3:
            conv_base = VGG16(weights=weights_init,
                              include_top=True,
                              input_shape=(width, height, channel)
                              )
        else:
            conv_base = VGG16(weights=weights_init,
                              include_top=True,
                              input_shape=(width, height, 3)
                              )

            conv_base.layers.pop(0)
            conv_base.layers.pop(0)
            input_layer = Input(shape=(width, height, channel), name='multi_input')
            block1_conv1_new = Conv2D(64, (3, 3),
                                      activation='relu',
                                      padding='same',
                                      kernel_initializer='glorot_uniform',
                                      name='block1_conv1_new')

            conv_base = insert_intermediate_layer_in_keras(conv_base, [0, 0], [input_layer, block1_conv1_new])

        conv_base.layers.pop()
        output_layer = conv_base.layers[-1]
        x = output_layer.get_output_at(-1)  # x = output_layer.output
        x = BatchNormalization(name='fc_bachnorm')(x)
        x = Dropout(0.2, name='fc_dropout')(x)
        prediction_layer = Dense(class_num, activation='softmax',
                                 kernel_initializer='glorot_uniform', name='prediction')(x)

    elif preset.upper() == "VGG19".upper():
        from keras.applications import VGG19
        conv_base = None
        if channel == 3:
            conv_base = VGG19(weights=weights_init,
                              include_top=True,
                              input_shape=(width, height, channel)
                              )
        else:
            conv_base = VGG19(weights=weights_init,
                              include_top=True,
                              input_shape=(width, height, 3)
                              )

            conv_base.layers.pop(0)
            conv_base.layers.pop(0)
            input_layer = Input(shape=(width, height, channel), name='multi_input')
            block1_conv1_new = Conv2D(64, (3, 3),
                                      activation='relu',
                                      padding='same',
                                      kernel_initializer='glorot_uniform',
                                      name='block1_conv1_new')

            conv_base = insert_intermediate_layer_in_keras(conv_base, [0, 0], [input_layer, block1_conv1_new])

        conv_base.layers.pop()
        output_layer = conv_base.layers[-1]
        x = output_layer.get_output_at(-1)  # x = output_layer.output
        x = BatchNormalization(name='fc_bachnorm')(x)
        x = Dropout(0.2, name='fc_dropout')(x)
        prediction_layer = Dense(class_num, activation='softmax',
                                 kernel_initializer='glorot_uniform', name='prediction')(x)

    elif preset.upper() == "VGG16BN".upper():
        from model import VGG16BN
        conv_base = None
        if channel == 3:
            conv_base = VGG16BN(weights=weights_init,
                                include_top=False,
                                pooling='avg',
                                kernel_initializer='glorot_uniform',
                                input_shape=(width, height, channel)
                                )
        else:
            conv_base = VGG16BN(weights=weights_init,
                                include_top=False,
                                pooling='avg',
                                kernel_initializer='glorot_uniform',
                                input_shape=(width, height, 3)
                                )

            conv_base.layers.pop(0)
            conv_base.layers.pop(0)
            input_layer = Input(shape=(width, height, channel), name='multi_input')
            block1_conv1_new = Conv2D(64, (3, 3),
                                      activation='relu',
                                      padding='same',
                                      kernel_initializer='glorot_uniform',
                                      name='block1_conv1_new')

            conv_base = insert_intermediate_layer_in_keras(conv_base, [0, 0], [input_layer, block1_conv1_new])

        # conv_base.layers.pop()
        output_layer = conv_base.layers[-1]
        x = output_layer.get_output_at(-1)  # x = output_layer.output
        x = BatchNormalization(name='fc_bachnorm')(x)
        x = Dropout(0.2, name='fc_dropout')(x)
        prediction_layer = Dense(class_num, activation='softmax',
                                 kernel_initializer='glorot_uniform', name='prediction')(x)

    elif preset.upper() == "VGG19BN".upper():
        from model import VGG19BN
        conv_base = None
        if channel == 3:
            conv_base = VGG19BN(weights=weights_init,
                                include_top=False,
                                pooling='avg',
                                kernel_initializer='glorot_uniform',
                                input_shape=(width, height, channel)
                                )
        else:
            conv_base = VGG19BN(weights=weights_init,
                                include_top=False,
                                pooling='avg',
                                kernel_initializer='glorot_uniform',
                                input_shape=(width, height, 3)
                                )

            conv_base.layers.pop(0)
            conv_base.layers.pop(0)
            input_layer = Input(shape=(width, height, channel), name='multi_input')
            block1_conv1_new = Conv2D(64, (3, 3),
                                      activation='relu',
                                      padding='same',
                                      kernel_initializer='glorot_uniform',
                                      name='block1_conv1_new')

            conv_base = insert_intermediate_layer_in_keras(conv_base, [0, 0], [input_layer, block1_conv1_new])

        # conv_base.layers.pop()
        output_layer = conv_base.layers[-1]
        x = output_layer.get_output_at(-1)  # x = output_layer.output
        x = BatchNormalization(name='fc_bachnorm')(x)
        x = Dropout(0.2, name='fc_dropout')(x)
        prediction_layer = Dense(class_num, activation='softmax',
                                 kernel_initializer='glorot_uniform', name='prediction')(x)

    elif preset.upper() == "ResNet20".upper():
        # from keras.applications import ResNet50
        from model.resnet import ResNet20
        conv_base = ResNet20(weights=weights_init,
                             include_top=True,
                             input_shape=(width, height, channel),
                             input_tensor=input_tensor
                             )
        conv_base.layers.pop()
        output_layer = conv_base.layers[-1]
        x = output_layer.output
        x = BatchNormalization(name='fc_bachnorm')(x)
        # x = Dropout(0.5, name='fc_dropout')(x)
        # x = GlobalAveragePooling2D(name='avg_pool')(x)
        prediction_layer = Dense(class_num, activation='softmax',
                                 kernel_initializer='glorot_uniform', name='prediction')(x)

    elif preset.upper() == "ResNet50".upper():
        # from keras.applications import ResNet50
        from model.resnet import ResNet50
        conv_base = ResNet50(weights=weights_init,
                             include_top=True,
                             input_shape=(width, height, channel),
                             input_tensor=input_tensor
                             )
        conv_base.layers.pop()
        output_layer = conv_base.layers[-1]
        x = output_layer.output
        x = BatchNormalization(name='fc_bachnorm')(x)
        # x = Dropout(0.5, name='fc_dropout')(x)
        # x = GlobalAveragePooling2D(name='avg_pool')(x)
        prediction_layer = Dense(class_num, activation='softmax',
                                 kernel_initializer='glorot_uniform', name='prediction')(x)

    elif preset.upper() == "ResNet101".upper():
        from model.resnet import ResNet101
        conv_base = ResNet101(weights=weights_init,
                              include_top=True,
                              input_shape=(width, height, channel),
                              input_tensor=input_tensor
                              )
        conv_base.layers.pop()
        output_layer = conv_base.layers[-1]
        x = output_layer.output
        x = BatchNormalization(name='fc_bachnorm')(x)
        x = Dropout(0.5, name='fc_dropout')(x)
        # x = GlobalAveragePooling2D(name='avg_pool')(x)
        prediction_layer = Dense(class_num, activation='softmax',
                                 kernel_initializer='glorot_uniform', name='prediction')(x)

    elif preset.upper() == "ResNet152".upper():
        from model.resnet import ResNet152
        conv_base = ResNet152(weights=weights_init,
                              include_top=True,
                              input_shape=(width, height, channel),
                              input_tensor=input_tensor
                              )
        conv_base.layers.pop()
        output_layer = conv_base.layers[-1]
        x = output_layer.output
        x = BatchNormalization(name='fc_bachnorm')(x)
        x = Dropout(0.5, name='fc_dropout')(x)
        # x = GlobalAveragePooling2D(name='avg_pool')(x)
        prediction_layer = Dense(class_num, activation='softmax',
                                 kernel_initializer='glorot_uniform', name='prediction')(x)

    elif preset.upper() == "ResNet50V2".upper():
        from model.resnet_v2 import ResNet50V2
        conv_base = ResNet50V2(weights=weights_init,
                               include_top=True,
                               input_shape=(width, height, channel),
                               input_tensor=input_tensor
                               )
        conv_base.layers.pop()
        output_layer = conv_base.layers[-1]
        x = output_layer.output
        x = BatchNormalization(name='fc_bachnorm')(x)
        x = Dropout(0.5, name='fc_dropout')(x)
        # x = GlobalAveragePooling2D(name='avg_pool')(x)
        prediction_layer = Dense(class_num, activation='softmax',
                                 kernel_initializer='glorot_uniform', name='prediction')(x)

    elif preset.upper() == "ResNet101V2".upper():
        from model.resnet_v2 import ResNet101V2
        conv_base = ResNet101V2(weights=weights_init,
                                include_top=True,
                                input_shape=(width, height, channel),
                                input_tensor=input_tensor
                                )
        conv_base.layers.pop()
        output_layer = conv_base.layers[-1]
        x = output_layer.output
        x = BatchNormalization(name='fc_bachnorm')(x)
        x = Dropout(0.5, name='fc_dropout')(x)
        # x = GlobalAveragePooling2D(name='avg_pool')(x)
        prediction_layer = Dense(class_num, activation='softmax',
                                 kernel_initializer='glorot_uniform', name='prediction')(x)

    elif preset.upper() == "ResNet152V2".upper():
        from model.resnet_v2 import ResNet152V2
        conv_base = ResNet152V2(weights=weights_init,
                                include_top=True,
                                input_shape=(width, height, channel),
                                input_tensor=input_tensor
                                )
        conv_base.layers.pop()
        output_layer = conv_base.layers[-1]
        x = output_layer.output
        x = BatchNormalization(name='fc_bachnorm')(x)
        x = Dropout(0.5, name='fc_dropout')(x)
        # x = GlobalAveragePooling2D(name='avg_pool')(x)
        prediction_layer = Dense(class_num, activation='softmax',
                                 kernel_initializer='glorot_uniform', name='prediction')(x)

    elif preset.upper() == "ResNeXt50".upper():
        from model.resnext import ResNeXt50
        conv_base = ResNeXt50(weights=weights_init,
                              include_top=True,
                              input_shape=(width, height, channel),
                              input_tensor=input_tensor
                              )
        conv_base.layers.pop()
        output_layer = conv_base.layers[-1]
        x = output_layer.output
        x = BatchNormalization(name='fc_bachnorm')(x)
        x = Dropout(0.5, name='fc_dropout')(x)
        # x = GlobalAveragePooling2D(name='avg_pool')(x)
        prediction_layer = Dense(class_num, activation='softmax',
                                 kernel_initializer='glorot_uniform', name='prediction')(x)

    elif preset.upper() == "ResNeXt101".upper():
        from model.resnext import ResNeXt101
        conv_base = ResNeXt101(weights=weights_init,
                               include_top=True,
                               input_shape=(width, height, channel),
                               input_tensor=input_tensor
                               )
        conv_base.layers.pop()
        output_layer = conv_base.layers[-1]
        x = output_layer.output
        x = BatchNormalization(name='fc_bachnorm')(x)
        x = Dropout(0.5, name='fc_dropout')(x)
        # x = GlobalAveragePooling2D(name='avg_pool')(x)
        prediction_layer = Dense(class_num, activation='softmax',
                                 kernel_initializer='glorot_uniform', name='prediction')(x)

    elif preset.upper() == "InceptionV3".upper():
        from keras.applications import InceptionV3
        conv_base = InceptionV3(weights=weights_init,
                                include_top=True,
                                input_tensor=input_tensor
                                )
        conv_base.layers.pop()
        output_layer = conv_base.layers[-1]
        x = output_layer.output
        x = BatchNormalization(name='fc_bachnorm')(x)
        # x = GlobalAveragePooling2D(name='avg_pool')(x)
        prediction_layer = Dense(class_num, activation='softmax',
                                 kernel_initializer='glorot_uniform', name='prediction')(x)

    elif preset.upper() == "InceptionResNetV2".upper():
        from keras.applications import InceptionResNetV2
        conv_base = InceptionResNetV2(weights=weights_init,
                                      include_top=True,
                                      input_tensor=input_tensor
                                      )
        conv_base.layers.pop()
        output_layer = conv_base.layers[-1]
        x = output_layer.output
        x = BatchNormalization(name='fc_bachnorm')(x)
        # x = GlobalAveragePooling2D(name='avg_pool')(x)
        prediction_layer = Dense(class_num, activation='softmax',
                                 kernel_initializer='glorot_uniform', name='prediction')(x)

    elif preset.upper() == "DenseNet121".upper():
        from keras.applications import DenseNet121
        conv_base = DenseNet121(weights=weights_init,
                                include_top=True,
                                input_tensor=input_tensor
                                )
        conv_base.layers.pop()
        output_layer = conv_base.layers[-1]
        x = output_layer.output
        x = BatchNormalization(name='fc_bachnorm')(x)
        # x = GlobalAveragePooling2D(name='avg_pool')(x)
        prediction_layer = Dense(class_num, activation='softmax',
                                 kernel_initializer='glorot_uniform', name='prediction')(x)

    elif preset.upper() == "DenseNet169".upper():
        from keras.applications import DenseNet169
        conv_base = DenseNet169(weights=weights_init,
                                include_top=True,
                                input_tensor=input_tensor
                                )
        conv_base.layers.pop()
        output_layer = conv_base.layers[-1]
        x = output_layer.output
        x = BatchNormalization(name='fc_bachnorm')(x)
        # x = GlobalAveragePooling2D(name='avg_pool')(x)
        prediction_layer = Dense(class_num, activation='softmax',
                                 kernel_initializer='glorot_uniform', name='prediction')(x)

    elif preset.upper() == "DenseNet201".upper():
        from keras.applications import DenseNet201
        conv_base = DenseNet201(weights=weights_init,
                                include_top=True,
                                input_tensor=input_tensor
                                )
        conv_base.layers.pop()
        output_layer = conv_base.layers[-1]
        x = output_layer.output
        x = BatchNormalization(name='fc_bachnorm')(x)
        # x = GlobalAveragePooling2D(name='avg_pool')(x)
        prediction_layer = Dense(class_num, activation='softmax',
                                 kernel_initializer='glorot_uniform', name='prediction')(x)

    elif preset.upper() == "Xception".upper():
        from keras.applications import Xception
        conv_base = Xception(weights=weights_init,
                             include_top=True,
                             input_tensor=input_tensor
                             )
        conv_base.layers.pop()
        output_layer = conv_base.layers[-1]
        x = output_layer.output
        x = BatchNormalization(name='fc_bachnorm')(x)
        # x = GlobalAveragePooling2D(name='avg_pool')(x)
        prediction_layer = Dense(class_num, activation='softmax',
                                 kernel_initializer='glorot_uniform', name='prediction')(x)

    elif preset.upper() == "SEDenseNetImageNet121".upper():
        from model import SEDenseNetImageNet121
        conv_base = SEDenseNetImageNet121(weights=weights_init,
                                          include_top=True,
                                          input_tensor=input_tensor
                                          )
        conv_base.layers.pop()
        output_layer = conv_base.layers[-1]
        x = output_layer.output
        x = BatchNormalization(name='fc_bachnorm')(x)
        # x = GlobalAveragePooling2D(name='avg_pool')(x)
        prediction_layer = Dense(class_num, activation='softmax',
                                 kernel_initializer='glorot_uniform', name='prediction')(x)

    elif preset.upper() == "SEDenseNetImageNet169".upper():
        from model import SEDenseNetImageNet169
        conv_base = SEDenseNetImageNet169(weights=weights_init,
                                          include_top=True,
                                          input_tensor=input_tensor
                                          )
        conv_base.layers.pop()
        output_layer = conv_base.layers[-1]
        x = output_layer.output
        x = BatchNormalization(name='fc_bachnorm')(x)
        # x = GlobalAveragePooling2D(name='avg_pool')(x)
        prediction_layer = Dense(class_num, activation='softmax',
                                 kernel_initializer='glorot_uniform', name='prediction')(x)

    elif preset.upper() == "SEDenseNetImageNet201".upper():
        from model import SEDenseNetImageNet201
        conv_base = SEDenseNetImageNet201(weights=weights_init,
                                          include_top=True,
                                          input_tensor=input_tensor
                                          )
        conv_base.layers.pop()
        output_layer = conv_base.layers[-1]
        x = output_layer.output
        x = BatchNormalization(name='fc_bachnorm')(x)
        # x = GlobalAveragePooling2D(name='avg_pool')(x)
        prediction_layer = Dense(class_num, activation='softmax',
                                 kernel_initializer='glorot_uniform', name='prediction')(x)

    elif preset.upper() == "SEDenseNetImageNet264".upper():
        from model import SEDenseNetImageNet264
        conv_base = SEDenseNetImageNet264(weights=weights_init,
                                          include_top=True,
                                          input_tensor=input_tensor
                                          )
        conv_base.layers.pop()
        output_layer = conv_base.layers[-1]
        x = output_layer.output
        x = BatchNormalization(name='fc_bachnorm')(x)
        # x = GlobalAveragePooling2D(name='avg_pool')(x)
        prediction_layer = Dense(class_num, activation='softmax',
                                 kernel_initializer='glorot_uniform', name='prediction')(x)

    elif preset.upper() == "SEDenseNetImageNet161".upper():
        from model import SEDenseNetImageNet161
        conv_base = SEDenseNetImageNet161(weights=weights_init,
                                          include_top=True,
                                          input_tensor=input_tensor
                                          )
        conv_base.layers.pop()
        output_layer = conv_base.layers[-1]
        x = output_layer.output
        x = BatchNormalization(name='fc_bachnorm')(x)
        # x = GlobalAveragePooling2D(name='avg_pool')(x)
        prediction_layer = Dense(class_num, activation='softmax',
                                 kernel_initializer='glorot_uniform', name='prediction')(x)

    elif preset.upper() == "SEInceptionResNetV2".upper():
        from model import SEInceptionResNetV2
        conv_base = SEInceptionResNetV2(weights=weights_init,
                                        include_top=True,
                                        input_tensor=input_tensor
                                        )
        conv_base.layers.pop()
        output_layer = conv_base.layers[-1]
        x = output_layer.output
        x = BatchNormalization(name='fc_bachnorm')(x)
        # x = GlobalAveragePooling2D(name='avg_pool')(x)
        prediction_layer = Dense(class_num, activation='softmax',
                                 kernel_initializer='glorot_uniform', name='prediction')(x)

    elif preset.upper() == "SEInceptionV3".upper():
        from model import SEInceptionV3
        conv_base = SEInceptionV3(weights=weights_init,
                                  include_top=True,
                                  input_tensor=input_tensor
                                  )
        conv_base.layers.pop()
        output_layer = conv_base.layers[-1]
        x = output_layer.output
        x = BatchNormalization(name='fc_bachnorm')(x)
        # x = GlobalAveragePooling2D(name='avg_pool')(x)
        prediction_layer = Dense(class_num, activation='softmax',
                                 kernel_initializer='glorot_uniform', name='prediction')(x)

    elif preset.upper() == "SEMobileNet".upper():
        from model import SEMobileNet
        conv_base = SEMobileNet(weights=weights_init,
                                include_top=True,
                                input_tensor=input_tensor
                                )
        conv_base.layers.pop()
        output_layer = conv_base.layers[-1]
        x = output_layer.output
        x = BatchNormalization(name='fc_bachnorm')(x)
        # x = GlobalAveragePooling2D(name='avg_pool')(x)
        prediction_layer = Dense(class_num, activation='softmax',
                                 kernel_initializer='glorot_uniform', name='prediction')(x)

    elif preset.upper() == "SEResNet6".upper():
        from model import SEResNet6
        conv_base = SEResNet6(weights=weights_init,
                              include_top=True,
                              input_tensor=input_tensor
                              )
        conv_base.layers.pop()
        output_layer = conv_base.layers[-1]
        x = output_layer.output
        x = BatchNormalization(name='fc_bachnorm')(x)
        # x = GlobalAveragePooling2D(name='avg_pool')(x)
        prediction_layer = Dense(class_num, activation='softmax',
                                 kernel_initializer='glorot_uniform', name='prediction')(x)

    elif preset.upper() == "SEResNet8".upper():
        from model import SEResNet8
        conv_base = SEResNet8(weights=weights_init,
                              include_top=True,
                              input_tensor=input_tensor
                              )
        conv_base.layers.pop()
        output_layer = conv_base.layers[-1]
        x = output_layer.output
        x = BatchNormalization(name='fc_bachnorm')(x)
        # x = GlobalAveragePooling2D(name='avg_pool')(x)
        prediction_layer = Dense(class_num, activation='softmax',
                                 kernel_initializer='glorot_uniform', name='prediction')(x)

    elif preset.upper() == "SEResNet10".upper():
        from model import SEResNet10
        conv_base = SEResNet10(weights=weights_init,
                               include_top=True,
                               input_tensor=input_tensor
                               )
        conv_base.layers.pop()
        output_layer = conv_base.layers[-1]
        x = output_layer.output
        x = BatchNormalization(name='fc_bachnorm')(x)
        # x = GlobalAveragePooling2D(name='avg_pool')(x)
        prediction_layer = Dense(class_num, activation='softmax',
                                 kernel_initializer='glorot_uniform', name='prediction')(x)

    elif preset.upper() == "SEResNet18".upper():
        from model import SEResNet18
        conv_base = SEResNet18(weights=weights_init,
                               include_top=True,
                               input_tensor=input_tensor
                               )
        conv_base.layers.pop()
        output_layer = conv_base.layers[-1]
        x = output_layer.output
        x = BatchNormalization(name='fc_bachnorm')(x)
        # x = GlobalAveragePooling2D(name='avg_pool')(x)
        prediction_layer = Dense(class_num, activation='softmax',
                                 kernel_initializer='glorot_uniform', name='prediction')(x)

    elif preset.upper() == "SEResNet34".upper():
        from model import SEResNet34
        conv_base = SEResNet34(weights=weights_init,
                               include_top=True,
                               input_tensor=input_tensor
                               )
        conv_base.layers.pop()
        output_layer = conv_base.layers[-1]
        x = output_layer.output
        x = BatchNormalization(name='fc_bachnorm')(x)
        # x = GlobalAveragePooling2D(name='avg_pool')(x)
        prediction_layer = Dense(class_num, activation='softmax',
                                 kernel_initializer='glorot_uniform', name='prediction')(x)

    elif preset.upper() == "SEResNet50".upper():
        from model import SEResNet50
        conv_base = SEResNet50(weights=weights_init,
                               include_top=True,
                               input_tensor=input_tensor
                               )
        conv_base.layers.pop()
        output_layer = conv_base.layers[-1]
        x = output_layer.output
        x = BatchNormalization(name='fc_bachnorm')(x)
        # x = GlobalAveragePooling2D(name='avg_pool')(x)
        prediction_layer = Dense(class_num, activation='softmax',
                                 kernel_initializer='glorot_uniform', name='prediction')(x)

    elif preset.upper() == "SEResNet101".upper():
        from model import SEResNet101
        conv_base = SEResNet101(weights=weights_init,
                                include_top=True,
                                input_tensor=input_tensor
                                )
        conv_base.layers.pop()
        output_layer = conv_base.layers[-1]
        x = output_layer.output
        x = BatchNormalization(name='fc_bachnorm')(x)
        # x = GlobalAveragePooling2D(name='avg_pool')(x)
        prediction_layer = Dense(class_num, activation='softmax',
                                 kernel_initializer='glorot_uniform', name='prediction')(x)

    elif preset.upper() == "SEResNet154".upper():
        from model import SEResNet154
        conv_base = SEResNet154(weights=weights_init,
                                include_top=True,
                                input_tensor=input_tensor
                                )
        conv_base.layers.pop()
        output_layer = conv_base.layers[-1]
        x = output_layer.output
        x = BatchNormalization(name='fc_bachnorm')(x)
        # x = GlobalAveragePooling2D(name='avg_pool')(x)
        prediction_layer = Dense(class_num, activation='softmax',
                                 kernel_initializer='glorot_uniform', name='prediction')(x)

    elif preset.upper() == "SEResNext".upper():
        from model import SEResNext
        conv_base = SEResNext(weights=weights_init,
                              include_top=True,
                              input_tensor=input_tensor
                              )
        conv_base.layers.pop()
        output_layer = conv_base.layers[-1]
        x = output_layer.output
        x = BatchNormalization(name='fc_bachnorm')(x)
        # x = GlobalAveragePooling2D(name='avg_pool')(x)
        prediction_layer = Dense(class_num, activation='softmax',
                                 kernel_initializer='glorot_uniform', name='prediction')(x)

    else:
        raise ValueError('unknown model name: {}'.format(preset))

    model = Model(inputs=conv_base.input,
                  outputs=prediction_layer, name='classification_model')
    return model
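
An editorial sketch (not from the original code): the long elif chain above repeats one pattern per backbone, so for the stock keras.applications models it can be table-driven. The helper name `build_from_preset` is an assumption, and the custom model.* variants are omitted.

from keras.applications import (DenseNet121, DenseNet169, DenseNet201,
                                InceptionResNetV2, InceptionV3, Xception)
from keras.layers import BatchNormalization, Dense, Input
from keras.models import Model

PRESETS = {
    'DENSENET121': DenseNet121,
    'DENSENET169': DenseNet169,
    'DENSENET201': DenseNet201,
    'INCEPTIONV3': InceptionV3,
    'INCEPTIONRESNETV2': InceptionResNetV2,
    'XCEPTION': Xception,
}

def build_from_preset(preset, width, height, channel, class_num, weights_init):
    """Mirrors the common branch body: load a backbone, drop its classifier,
    and attach the BatchNorm + softmax prediction head."""
    try:
        backbone = PRESETS[preset.upper()]
    except KeyError:
        raise ValueError('unknown model name: {}'.format(preset))
    input_tensor = Input(shape=(width, height, channel))
    conv_base = backbone(weights=weights_init, include_top=True,
                         input_tensor=input_tensor)
    x = conv_base.layers[-2].output  # layer just below the original classifier
    x = BatchNormalization(name='fc_bachnorm')(x)
    prediction = Dense(class_num, activation='softmax',
                       kernel_initializer='glorot_uniform', name='prediction')(x)
    return Model(inputs=conv_base.input, outputs=prediction,
                 name='classification_model')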
Example #28
0
def Dense_Net(trainable=None, net="DenseNet121"):

    # Preprocess the dataset into a Keras-feedable format
    
    train_datagen = ImageDataGenerator(
            rotation_range = rotation,
            width_shift_range = width_shift,
            height_shift_range= height_shift,
            rescale= scale,
            shear_range= shear,
            zoom_range= zoom,
            horizontal_flip= horizontal,
            fill_mode=fill,
            validation_split=validation
        )
    test_datagen = ImageDataGenerator(
            rescale= scale,
        )
   
    train_generator = train_datagen.flow_from_directory(
        path,
        target_size=target,
        batch_size=batch,
        class_mode='categorical',
        subset='training',
    )
    validation_generator = train_datagen.flow_from_directory(
        path,
        target_size=target,
        batch_size=batch,
        class_mode='categorical',
        subset='validation'
    )
    
    models_list = ['DenseNet121', 'DenseNet169', 'DenseNet201']

    # Loading the DenseNet model

    if net not in models_list:
        raise ValueError('Please provide a valid model name: one of {}'.format(models_list))
    if net == "DenseNet121":
        densenet = DenseNet121(include_top=False, weights='imagenet', input_shape=input_sh, pooling=pooling_model)
    elif net == "DenseNet169":
        densenet = DenseNet169(include_top=False, weights='imagenet', input_shape=input_sh, pooling=pooling_model)
    elif net == "DenseNet201":
        densenet = DenseNet201(include_top=False, weights='imagenet', input_shape=input_sh, pooling=pooling_model)
    output = densenet.layers[-1].output
    if pooling_model is None:
        output = keras.layers.Flatten()(output)
    densenet = Model(inputs=densenet.input, outputs=output)
    densenet.summary()
    print('\n\n\n')
    # If you chose not to fine-tune
    if trainable is None:
        model = Sequential()
        model.add(densenet)
        model.add(Dense(hidden, activation='relu'))
        model.add(Dropout(dropout_num))
        model.add(Dense(hidden, activation='relu'))
        model.add(Dropout(dropout_num ))
        if classes == 1:
            model.add(Dense(classes, activation='sigmoid', name='Output'))
        else:
            model.add(Dense(classes, activation='softmax', name='Output'))
            
        for layer in densenet.layers:
            layer.trainable = False
        print("The model summary of Densenet  -->\n\n\n")        # In this the Densenet layers are not trainable 
        
        for i, layer in enumerate(densenet.layers):
            print(i, layer.name, layer.trainable)
        model.compile(loss=loss_param,                # Change according to data
                      optimizer=optimizers.RMSprop(),
                      metrics=['accuracy'])
        print("The summary of final Model \n\n\n")
        model.summary()
        print('\n\n\n')
       
       

        fit_history = model.fit_generator(
            train_generator,
            steps_per_epoch=len(train_generator.filenames) // batch,
            epochs=epoch,
            shuffle=True,
            validation_data=validation_generator,
            validation_steps=len(validation_generator.filenames) // batch,
            class_weight=n,
            callbacks=[
                EarlyStopping(patience=patience_param, restore_best_weights=True),
                ReduceLROnPlateau(patience=patience_param)
            ])
        os.chdir(output_path)    
        model.save("model.h5")
        print(fit_history.history.keys())
        plt.figure(1, figsize = (15,8)) 

        plt.subplot(221)  
        plt.plot(fit_history.history['accuracy'])  
        plt.plot(fit_history.history['val_accuracy'])  
        plt.title('model accuracy')  
        plt.ylabel('accuracy')  
        plt.xlabel('epoch')  
        plt.legend(['train', 'valid']) 

        plt.subplot(222)  
        plt.plot(fit_history.history['loss'])  
        plt.plot(fit_history.history['val_loss'])  
        plt.title('model loss')  
        plt.ylabel('loss')  
        plt.xlabel('epoch')  
        plt.legend(['train', 'valid']) 

        plt.show()
        
              
    if trainable is not None:
        # Make last block of the conv_base trainable:

        for layer in densenet.layers[:trainable]:
            layer.trainable = False
        for layer in densenet.layers[trainable:]:
            layer.trainable = True

        print('Last block of the conv_base is now trainable')
        
        for i, layer in enumerate(densenet.layers):
            print(i, layer.name, layer.trainable)
            
        model = Sequential()
        model.add(densenet)
        model.add(Dense(hidden, activation='relu'))
        model.add(Dropout(dropout_num))
        model.add(Dense(hidden, activation='relu'))
        model.add(Dropout(dropout_num ))
        model.add(Dense(hidden, activation='relu'))
        model.add(Dropout(dropout_num ))
        if classes == 1:
            model.add(Dense(classes, activation='sigmoid', name='Output'))
        else:
            model.add(Dense(classes, activation='softmax', name='Output'))
            
        print("The model summary of DenseNet -->\n\n\n")        # here the last DenseNet block stays trainable
        model.compile(loss=loss_param,                # Change according to data
                      optimizer=optimizers.RMSprop(),
                      metrics=['accuracy'])
        print("The summary of final Model \n\n\n")
        model.summary()
        print('\n\n\n')
       
       

        fit_history = model.fit_generator(
            train_generator,
            steps_per_epoch=len(train_generator.filenames) // batch,
            epochs=epoch,
            shuffle=True,
            validation_data=validation_generator,
            validation_steps=len(validation_generator.filenames) // batch,
            class_weight=n,
            callbacks=[
                EarlyStopping(patience=patience_param, restore_best_weights=True),
                ReduceLROnPlateau(patience=patience_param)
            ])
        os.chdir(output_path)    
        model.save("model.h5")
        print(fit_history.history.keys())
        plt.figure(1, figsize = (15,8)) 

        plt.subplot(221)  
        plt.plot(fit_history.history['accuracy'])  
        plt.plot(fit_history.history['val_accuracy'])  
        plt.title('model accuracy')  
        plt.ylabel('accuracy')  
        plt.xlabel('epoch')  
        plt.legend(['train', 'valid']) 

        plt.subplot(222)  
        plt.plot(fit_history.history['loss'])  
        plt.plot(fit_history.history['val_loss'])  
        plt.title('model loss')  
        plt.ylabel('loss')  
        plt.xlabel('epoch')  
        plt.legend(['train', 'valid']) 

        plt.show()
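
A hedged note (editorial): Dense_Net above reads its configuration from module-level globals that are not shown in this snippet. A minimal, assumed set of values that would make it runnable:

rotation, width_shift, height_shift = 20, 0.1, 0.1         # augmentation ranges
scale, shear, zoom = 1. / 255, 0.2, 0.2
horizontal, fill, validation = True, 'nearest', 0.2
path, target, batch = 'data/train', (224, 224), 16         # data settings
input_sh, pooling_model = (224, 224, 3), 'avg'
hidden, dropout_num, classes = 512, 0.3, 3                  # head settings
loss_param, epoch, patience_param = 'categorical_crossentropy', 10, 5
n, output_path = None, '.'                                  # class_weight, save dir

Dense_Net(trainable=None, net='DenseNet121')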
Example #29
0
import os

from keras.applications import DenseNet201 as DenseNet
from keras.applications.densenet import preprocess_input
from keras.models import load_model
from keras.preprocessing.image import ImageDataGenerator

DATA_PATH = "da"
TRAIN_DATA_PATH = os.path.join(DATA_PATH, "train")
VALIDATION_DATA_PATH = os.path.join(DATA_PATH, "eval")
TEST_DATA_PATH = os.path.join(DATA_PATH, "test")

MODEL_PATH = os.path.join(DATA_PATH, "model2.h5")

model = DenseNet(include_top=True, weights=None, classes=3)
image_generator = ImageDataGenerator(preprocessing_function=preprocess_input)

model.compile(
    optimizer="adam",
    loss="categorical_crossentropy",
    metrics=["accuracy", "categorical_accuracy"],
)
model.fit_generator(
    image_generator.flow_from_directory(TRAIN_DATA_PATH,
                                        target_size=(224, 224)),
    steps_per_epoch=2,
    epochs=5,
    validation_data=image_generator.flow_from_directory(VALIDATION_DATA_PATH,
                                                        target_size=(224,
                                                                     224)),
    validation_steps=1)

validation_generator = test_datagen.flow_from_directory(
    validation_data_dir,
    shuffle=False,
    target_size=(trgt_sz, trgt_sz),
    batch_size=batch_size,
    class_mode='categorical')

#######################################################################
## Model ##############################################################
#######################################################################

base_model = DenseNet201(weights='imagenet', include_top=False)
x = base_model.output
x = GlobalAveragePooling2D()(x)
x = Dense(1024, activation='relu')(x)
predictions = Dense(16, activation='softmax')(x)  # 16 output classes

model = Model(inputs=base_model.input, outputs=predictions)

########################################################################

## Freeze the base model ################

for layer in base_model.layers:
    layer.trainable = False  # freeze all base-model layers
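
A hedged continuation (assumed, not in the original fragment): with the base frozen, one would typically compile the model and train only the new head. The directory name, image size, and epoch count below are placeholders.

from keras.preprocessing.image import ImageDataGenerator

model.compile(optimizer='adam', loss='categorical_crossentropy',
              metrics=['accuracy'])
train_gen = ImageDataGenerator(rescale=1. / 255).flow_from_directory(
    'data/train', target_size=(224, 224), class_mode='categorical')
model.fit_generator(train_gen, steps_per_epoch=len(train_gen), epochs=5)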