Example 1
def load_model():
    # Get default graph:
    global graph
    graph = tf.get_default_graph()
    # Load the pre-trained Keras model (trained on ImageNet):
    global model
    model = NASNetMobile(weights="imagenet")
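
A minimal prediction sketch against the globals set above (assuming the tf, numpy and NASNetMobile imports the snippet relies on); under TF1, the stored graph is what makes predict() usable from another thread, e.g. a web request handler:

import numpy as np

load_model()
x = np.random.rand(1, 224, 224, 3).astype("float32")  # stand-in for a real preprocessed image
with graph.as_default():
    preds = model.predict(x)  # (1, 1000) ImageNet class probabilities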
Example 2
def build_finetune_model(input_shape, num_classes, show_summary):
    #base_model = DenseNet121(weights='imagenet', include_top=False, input_shape=input_shape)
    base_model = NASNetMobile(weights='imagenet', include_top=False, input_shape=input_shape)
    #base_model = InceptionResNetV2(include_top=False, weights='imagenet', input_shape=input_shape)

    dropout = 0.5
    fc_layers = 512

    # unfreeze all the layers
    # count_freeze = 0
    for layer in base_model.layers:
        layer.trainable = True
        # count_freeze += 1
        # if count_freeze < 30:
        #     layer.trainable = False
        # else:
        #     layer.trainable = True

    x = base_model.output
    x = Flatten()(x)
    x = Dense(fc_layers, activation='relu')(x)
    x = Dropout(dropout)(x)
    x = Dense(fc_layers, activation='relu')(x)
    # x = Dropout(dropout)(x)
    # x = Dense(fc_layers, activation='relu')(x)

    # New soft max layer
    predictions = Dense(num_classes, activation='softmax', name='output')(x)
    finetune_model = Model(inputs=base_model.input, outputs=predictions)

    finetune_model.compile(loss='categorical_crossentropy', optimizer=Adam(learning_rate=0.001), metrics=['accuracy'])
    if show_summary:
        finetune_model.summary()

    return finetune_model
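
A usage sketch for the builder above; the class count and the commented training call are illustrative:

finetune_model = build_finetune_model(input_shape=(224, 224, 3), num_classes=10, show_summary=True)
# finetune_model.fit(x_train, y_train, epochs=5, batch_size=32)  # hypothetical training data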
Example 3
def main():
    """Converts a keras model into ONNX format."""
    # model = alexnet((224, 224, 3))
    model = build_model(
        NASNetMobile(input_shape=(224, 224, 3),
                     include_top=False,
                     weights='imagenet'))
    model.load_weights(KERAS_MODEL_PATH)

    # If image dimensions were not specified explicitly when creating
    # the model
    #
    # model = load_model(KERAS_MODEL_PATH)
    # model._layers[0].batch_input_shape = (batch_size, image_size, image_size,
    #                                       channels)
    #
    # In order for the input_shape to be saved correctly we have to
    # clone the model into a new one
    #
    # model = clone_model(model)
    #
    # When cloning we lose the weights, so load them again
    #
    # model.load_weights(KERAS_MODEL_PATH)

    onnx_model = keras2onnx.convert_keras(model, model.name)

    # target_opset=target_opset,
    # debug_mode=True

    keras2onnx.save_model(onnx_model, ONNX_MODEL_PATH)
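
To sanity-check the exported file, one could run it through onnxruntime with a dummy input (an assumption; not part of the original script):

import numpy as np
import onnxruntime as ort

sess = ort.InferenceSession(ONNX_MODEL_PATH)
dummy = np.random.rand(1, 224, 224, 3).astype(np.float32)
print(sess.run(None, {sess.get_inputs()[0].name: dummy})[0].shape)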
Example 4
    def build(self) -> Model:
        model = NASNetMobile(include_top=True,
                             weights=None,
                             input_shape=(self.width, self.height,
                                          self.channels),
                             classes=2)

        return model
Example 5
def start_training():
    base_model = NASNetMobile(input_shape=(img_shape, img_shape, 3),
                              include_top=False,
                              weights='imagenet')

    model = create_model(base_model, trainable=250)

    save_model(model, model_name, base_path)
Example 6
    def buildNasNetMobileBase(self):

        print("building `NasNetMobile` base model...")

        # default INPUT_SIZE = 224
        base_model = NASNetMobile(input_shape=self.INPUT_SHAPE,
                                  weights='imagenet',
                                  include_top=False)

        return base_model
Example 7
def train_chesspiece_model():
    """Trains the chesspiece model based on NASNetMobile."""
    base_model = NASNetMobile(input_shape=(224, 224, 3),
                              include_top=False,
                              weights='imagenet')

    # First train only the top layers
    for layer in base_model.layers:
        layer.trainable = False

    model = build_model(base_model)

    train_generator, validation_generator = data_generators(
        preprocess_input, (224, 224), 64)

    callbacks = model_callbacks(5, "./models/NASNetMobile_pre.h5", 0.1, 10)

    history = train_model(model,
                          20,
                          train_generator,
                          validation_generator,
                          callbacks,
                          use_weights=False,
                          workers=5)

    plot_model_history(history, "./models/NASNetMobile_pre_acc.png",
                       "./models/NASNetMobile_pre_loss.png")
    evaluate_model(model, validation_generator)

    # Then also fine-tune the top of the network (layers 635 onward)
    for layer in model.layers[:635]:
        layer.trainable = False
    for layer in model.layers[635:]:
        layer.trainable = True

    model.compile(optimizer='Adam',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    callbacks = model_callbacks(20, "./models/NASNetMobile.h5", 0.2, 8)

    history = train_model(model,
                          100,
                          train_generator,
                          validation_generator,
                          callbacks,
                          use_weights=False,
                          workers=5)

    plot_model_history(history, "./models/NASNetMobile_acc.png",
                       "./models/NASNetMobile_loss.png")
    evaluate_model(model, validation_generator)

    model.save("./models/NASNetMobile_last.h5")
Example 8
def model():
    model = NASNetMobile(include_top=False, input_shape=(128, 128, 3))
    x = model.output

    x = Flatten()(x)
    x = Dense(1024, activation='relu')(x)
    x = Dropout(0.5)(x)
    x = Dense(1024, activation='relu')(x)
    x = Dropout(0.5)(x)
    position = Dense(4, activation='sigmoid', name='positional_output')(x)
    model = Model(inputs=model.input, outputs=position)
    return model
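
Since 'positional_output' is four sigmoid units (e.g. normalized box coordinates), a plausible compile step for this model, as a sketch:

m = model()
m.compile(optimizer='adam', loss='mean_squared_error', metrics=['mae'])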
Example 9
	def nas_mobile_net(self, percent2retrain):
		'Returns a NASNetMobile-based network'
		nas_mobile_model = NASNetMobile(input_shape=self.input_dim,
                                        weights='imagenet',
                                        include_top=False)
		# freeze base layers
		if percent2retrain < 1:
			for layer in nas_mobile_model.layers[:-int(len(nas_mobile_model.layers) * percent2retrain)]:
				layer.trainable = False

		# add classification top layer
		model = Sequential()
		model.add(nas_mobile_model)
		model.add(Flatten())
		model.add(Dense(512, activation='relu'))
		model.add(Dropout(0.5))
		model.add(Dense(self.n_classes, activation='sigmoid'))
		return model
Example 10
    def __init__(self, input_size, weights):
        input_image = Input(shape=(input_size[0], input_size[1], 3))

        if weights == 'imagenet':
            nasnetmobile = NASNetMobile(input_tensor=input_image, include_top=False, weights='imagenet', pooling=None)
            print('Successfully loaded imagenet backend weights')
        else:
            nasnetmobile = NASNetMobile(input_tensor=input_image, include_top=False, weights=None, pooling=None)
            if weights:
                nasnetmobile.load_weights(weights)
                print('Loaded backend weights: ' + weights)
        self.feature_extractor = nasnetmobile
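
A usage sketch, assuming the enclosing class (not shown) is named NASNetMobileFeature:

import numpy as np

extractor = NASNetMobileFeature(input_size=(224, 224), weights='imagenet')  # NASNetMobileFeature is a hypothetical name
features = extractor.feature_extractor.predict(np.zeros((1, 224, 224, 3), dtype='float32'))
print(features.shape)  # (1, 7, 7, 1056) for NASNetMobile with pooling=None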
Example 11
def create_model(model_name, include_top, input_shape):
    if model_name == 'ResNet50':
        base_model = ResNet50(weights='imagenet', include_top=include_top, input_shape=input_shape)
    elif model_name == 'ResNet101':
        base_model = ResNet101(weights='imagenet', include_top=include_top, input_shape=input_shape, backend=keras.backend, layers=keras.layers, models=keras.models, utils=keras.utils)
    elif model_name == 'ResNet50V2':
        base_model = ResNet50V2(weights='imagenet', include_top=include_top, input_shape=input_shape, backend=keras.backend, layers=keras.layers, models=keras.models, utils=keras.utils)
    elif model_name == 'ResNet101V2':
        base_model = ResNet101V2(weights='imagenet', include_top=include_top, input_shape=input_shape, backend=keras.backend, layers=keras.layers, models=keras.models, utils=keras.utils)
    elif model_name == 'ResNeXt50':
        base_model = ResNeXt50(weights='imagenet', include_top=include_top, input_shape=input_shape, backend=keras.backend, layers=keras.layers, models=keras.models, utils=keras.utils)
    elif model_name == 'ResNeXt101':
        base_model = ResNeXt101(weights='imagenet', include_top=include_top, input_shape=input_shape, backend=keras.backend, layers=keras.layers, models=keras.models, utils=keras.utils)
    elif model_name == "MobileNetV2":
        base_model = MobileNetV2(weights='imagenet', include_top=include_top, input_shape=input_shape)
    elif model_name == "NASNetLarge":
        base_model = NASNetLarge(weights='imagenet', include_top=include_top, input_shape=input_shape)
    elif model_name == "NASNetMobile":
        base_model = NASNetMobile(weights='imagenet', include_top=include_top, input_shape=input_shape)
    elif model_name == "DenseNet121":
        base_model = DenseNet121(weights='imagenet', include_top=include_top, input_shape=input_shape)
    elif model_name == "DenseNet169":
        base_model = DenseNet169(weights='imagenet', include_top=include_top, input_shape=input_shape)
    elif model_name == "DenseNet201":
        base_model = DenseNet201(weights='imagenet', include_top=include_top, input_shape=input_shape)
    elif model_name == "InceptionResNetV2":
        base_model = InceptionResNetV2(weights='imagenet', include_top=include_top, input_shape=input_shape)
    elif model_name == "VGG16":
        base_model = VGG16(weights='imagenet', include_top=include_top, input_shape=input_shape)
    elif model_name == "VGG19":
        base_model = VGG19(weights='imagenet', include_top=include_top, input_shape=input_shape)
    else:
        return None, None

    x = base_model.output
    # x = Flatten()(x)
    x = GlobalAveragePooling2D()(x)
    x = Dropout(0.5)(x)
    x = Dense(1024, activation='relu')(x)
    x = Dropout(0.5)(x)
    predictions = Dense(2, activation='softmax')(x)
    model = Model(inputs=base_model.input, outputs=predictions)

    return base_model, model
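
A usage sketch for the factory above:

base_model, model = create_model('NASNetMobile', include_top=False, input_shape=(224, 224, 3))
if model is not None:
    model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])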
Example 12
def get_model():
    inputs = Input((config.image_size, config.image_size, config.channels))
    base_model = NASNetMobile(include_top=False,
                              input_shape=(config.image_size,
                                           config.image_size,
                                           config.channels))  #, weights=None
    x = base_model(inputs)
    out1 = GlobalMaxPooling2D()(x)
    out2 = GlobalAveragePooling2D()(x)
    out3 = Flatten()(x)
    out = Concatenate(axis=-1)([out1, out2, out3])
    out = Dropout(0.5)(out)
    out = Dense(config.num_classes, activation="softmax",
                name="classifier")(out)
    model = Model(inputs, out)
    model.compile(optimizer=Adam(0.0001),
                  loss=categorical_crossentropy,
                  metrics=['acc'])

    return model
Example 13
def nasnet_mobile(input_shape, num_classes):
    """ Prepares a CNN model with the NasNet Mobile architecture
            source: https://keras.io/applications
    
    Args:
        input_shape:    The dimensions of the input images (w x h x c)
        num_classes:    The number of classes
    Returns:
        The NasNet Mobile Keras model
    
    """
    base_model = NASNetMobile(include_top=False, input_shape=input_shape)

    for layer in base_model.layers:
        layer.trainable = False

    x = base_model.output
    x = Flatten()(x)
    predictions = Dense(num_classes, activation='softmax', kernel_regularizer=l2(0.1))(x)

    model = Model(inputs=base_model.input, outputs=predictions)

    return model
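
A usage sketch for the builder above; the class count is illustrative:

model = nasnet_mobile((224, 224, 3), num_classes=5)
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])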
Example 14
                                      color_mode="rgb",
                                      shuffle=False,
                                      batch_size=batch_size)

# 2. The input images will be normalized against the per-channel RGB mean of the ImageNet images.
# define the ImageNet per-channel RGB mean used to normalize the images of the Food-11 dataset
mean = np.array([123.68, 116.779, 103.939], dtype="float32")
trainAug.mean = mean
testAug.mean = mean

# TRAINING THE FC HEAD

# 3. Load a model pretrained on ImageNet, without its final FC layer.
print("[INFO] Loading NASNetMobile...")
baseModel = NASNetMobile(weights="imagenet",
                         include_top=False,
                         input_tensor=Input(shape=(224, 224, 3)))
print("[INFO] Chargement fini...")

# Redefining the FC head
# 5. Define a new FC layer matching the structure of the original one, but this
# time randomly initialized. The number of output neurons equals the number of
# classes in the target dataset; the learning rate should be a fairly small
# value such as 0.001.
headModel = baseModel.output
headModel = GlobalAveragePooling2D()(headModel)
headModel = Dense(len(CLASSES), activation="softmax")(headModel)

# 6. Rebuild the new model
# stack the new FC head on top of the base
model = Model(inputs=baseModel.input, outputs=headModel)
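
Step 7 of the tutorial would then compile and train the new head with the small learning rate mentioned above; a sketch, where the optimizer choice is an assumption:

from keras.optimizers import SGD

model.compile(loss='categorical_crossentropy',
              optimizer=SGD(lr=0.001),  # optimizer is an assumption; the 0.001 rate comes from the tutorial text
              metrics=['accuracy'])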
Example 15
from keras.applications import MobileNet, MobileNetV2, DenseNet121, DenseNet169, DenseNet201
from keras.datasets import cifar10
import matplotlib.pyplot as plt
import numpy as np
# %matplotlib inline

(x_train, y_train), (x_test, y_test) = cifar10.load_data()
print(x_train.shape)  # (50000, 32, 32, 3)
print(x_test.shape)  # (10000, 32, 32, 3)
print(y_train.shape)  # (50000, 1)
print(y_test.shape)  # (10000, 1)

ishape = (32, 32, 3)

nasnet_base = NASNetMobile(include_top=False,
                           weights='imagenet',
                           input_shape=ishape)  # (None, 32, 32, 3)
# nasnet_base.summary()

act = 'relu'
model = Sequential()

model.add(nasnet_base)
model.add(Flatten())
model.add(Dense(256))
model.add(BatchNormalization())
model.add(Activation(act))
model.add(Dropout(0.2))
model.add(Dense(10, activation='softmax'))

for layer in model.layers[:19]:
    layer.trainable = False  # loop body missing in the original; freezing these layers is the assumed intent
Example 16
def get_model(weight_path):

    model = NASNetMobile(weights="imagenet",
                         include_top=False,
                         input_shape=(224, 224, 3))
    #Adding custom Layers
    x = model.output
    x = BatchNormalization()(x)
    x = GlobalMaxPooling2D()(x)

    x1 = Dense(1024,
               activation="elu",
               kernel_regularizer=l2(0.0001),
               bias_regularizer=l2(0.0001))(x)
    x1 = BatchNormalization()(x1)
    x1 = Dropout(0.5)(x1)
    x1 = Dense(256,
               activation="elu",
               kernel_regularizer=l2(0.0001),
               bias_regularizer=l2(0.0001))(x1)
    x1 = BatchNormalization()(x1)
    #x1 = Dropout(0.5)(x1)

    x2 = Dense(1024,
               activation="elu",
               kernel_regularizer=l2(0.0001),
               bias_regularizer=l2(0.0001))(x)
    x2 = BatchNormalization()(x2)
    x2 = Dropout(0.5)(x2)
    x2 = Dense(256,
               activation="elu",
               kernel_regularizer=l2(0.0001),
               bias_regularizer=l2(0.0001))(x2)
    x2 = BatchNormalization()(x2)
    #x2 = Dropout(0.5)(x2)

    out_smile = Dense(1,
                      activation="sigmoid",
                      kernel_regularizer=l2(0.0001),
                      bias_regularizer=l2(0.0001),
                      name='out_smile')(x1)
    out_mouth = Dense(1,
                      activation="sigmoid",
                      kernel_regularizer=l2(0.0001),
                      bias_regularizer=l2(0.0001),
                      name='out_mouth')(x2)

    from keras.models import Model
    # creating the final model
    model_final = Model(inputs=model.input, outputs=[out_smile, out_mouth])

    from keras.optimizers import Adam

    # compile the model
    model_final.compile(loss={
        'out_smile': "binary_crossentropy",
        'out_mouth': "binary_crossentropy"
    },
                        optimizer=Adam(lr=0.001, decay=0.0001),
                        metrics=[f1],
                        loss_weights=[0.5, 0.5])

    model_final.load_weights(weight_path)

    return model_final
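
The compile step above references an f1 metric that is not defined in this snippet; a minimal Keras-backend definition it might correspond to (an assumption, not the original author's code):

from keras import backend as K

def f1(y_true, y_pred):
    # batch-wise F1 on thresholded sigmoid outputs (assumed definition)
    y_pred = K.round(y_pred)
    tp = K.sum(y_true * y_pred)
    precision = tp / (K.sum(y_pred) + K.epsilon())
    recall = tp / (K.sum(y_true) + K.epsilon())
    return 2 * precision * recall / (precision + recall + K.epsilon())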
Example 17
def load_NASNetMobile_model():

    # # NASNetMobile Model 224x224
    settings.SITE_MODEL = NASNetMobile(weights="imagenet")
    settings.SITE_GRAPH = tf.get_default_graph()
Example 18
# vgg16 = VGG16() # (None, 224, 224, 3)
# model = VGG19()
model = Xception()
model = ResNet101()
model = ResNet101V2()
model = ResNet152()
model = ResNet152V2()
model = ResNet50()
model = ResNet50V2()
model = InceptionV3()
model = InceptionResNetV2()
model = MobileNet()
model = MobileNetV2()
model = DenseNet121()
model = DenseNet169()
model = DenseNet201()
model = NASNetLarge()
model = NASNetMobile()

# vgg16.summary()
'''
model= Sequential()
# model.add(vgg16)
# model.add(Flatten())
model.add(Dense(256))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dense(10, activation='softmax'))

model.summary()
'''
Example 19
    def __init__(self,
                 image_size=299,
                 batch_size=64,
                 num_classes=100,
                 trainable=True,
                 load_trained=False,
                 max_trainable=False,
                 pretrained_model='pretrained.h5',
                 init_lr=0.001,
                 n_chanels=3,
                 optimizer='adam',
                 init_epoch=0,
                 max_epoch=100,
                 net_type=0):
        try:
            os.mkdir("out_model")
            os.mkdir("logs")
        except OSError:
            print("Output directories already exist!")

        self.image_size = image_size
        self.batch_size = batch_size
        self.init_lr = init_lr
        self.max_epoch = max_epoch
        self.init_epoch = init_epoch
        self.net_type = net_type

        self.model = None
        self.pre_process = None

        input_shape = (image_size, image_size, n_chanels)

        if net_type == 0:
            self.model = DenseNet121(input_shape=input_shape,
                                     include_top=False,
                                     weights='imagenet',
                                     pooling='max')
            self.pre_process = keras.applications.densenet.preprocess_input
        elif net_type == 1:
            self.model = DenseNet169(input_shape=input_shape,
                                     include_top=False,
                                     weights='imagenet',
                                     pooling='max')
            self.pre_process = keras.applications.densenet.preprocess_input
        elif net_type == 2:
            self.model = DenseNet201(input_shape=input_shape,
                                     include_top=False,
                                     weights='imagenet',
                                     pooling='max')
            self.pre_process = keras.applications.densenet.preprocess_input
        elif net_type == 3:
            self.model = ResNet50(input_shape=input_shape,
                                  include_top=False,
                                  weights='imagenet',
                                  pooling='max')
            self.pre_process = keras.applications.resnet50.preprocess_input
        elif net_type == 4:
            self.model = InceptionV3(input_shape=input_shape,
                                     include_top=False,
                                     weights='imagenet',
                                     pooling='max')
            self.pre_process = keras.applications.inception_v3.preprocess_input
        elif net_type == 5:
            self.model = InceptionResNetV2(input_shape=input_shape,
                                           include_top=False,
                                           weights='imagenet',
                                           pooling='max')
            self.pre_process = keras.applications.inception_resnet_v2.preprocess_input
        elif net_type == 6:
            self.model = NASNetLarge(input_shape=input_shape,
                                     include_top=False,
                                     weights='imagenet',
                                     pooling='max')
            self.pre_process = keras.applications.nasnet.preprocess_input
        elif net_type == 7:
            self.model = NASNetMobile(input_shape=input_shape,
                                      include_top=False,
                                      weights='imagenet',
                                      pooling='max')
            self.pre_process = keras.applications.nasnet.preprocess_input

        x = self.model.output
        # x = GlobalAveragePooling2D()(x)
        x = Dense(1024, activation='relu')(x)  # add a fully-connected layer
        self.predictions = Dense(num_classes,
                                 activation='softmax',
                                 name='out_put')(x)
        self.model = Model(inputs=self.model.input, outputs=self.predictions)

        if load_trained:
            self.model.load_weights(pretrained_model)
            print("Load pretrained model successfully!")

        if not trainable:
            for layer in self.model.layers:
                layer.trainable = False
            print("Use model for inference is activated!")
        if trainable and not max_trainable:
            for layer in self.model.layers[:-5]:
                layer.trainable = False
            for layer in self.model.layers[-5:]:
                layer.trainable = True
            print("Train last layers is activated!")
        if max_trainable:
            for layer in self.model.layers:
                layer.trainable = True
            print("Train whole network is activated!")

        if (optimizer == 'adam'):
            opt = Adam(lr=init_lr, beta_1=0.9, beta_2=0.999, decay=1e-6)
        else:
            opt = SGD(lr=init_lr, decay=1e-6, momentum=0.9, nesterov=True)
        self.model.compile(optimizer=opt,
                           loss='categorical_crossentropy',
                           metrics=['accuracy'])

        self.earlyStopping = callbacks.EarlyStopping(monitor='val_acc',
                                                     min_delta=0.001,
                                                     patience=10,
                                                     verbose=1)
        self.tensorBoard = callbacks.TensorBoard('./logs',
                                                 batch_size=batch_size,
                                                 write_grads=True,
                                                 write_images=True)
        self.checkpoint = callbacks.ModelCheckpoint(
            './out_model/weights.' + type_models[self.net_type] +
            '.{epoch:02d}-{acc:.2f}-{val_acc:.2f}.hdf5',
            monitor='val_acc',
            verbose=1,
            save_best_only=True,
            save_weights_only=False,
            mode='auto',
            period=1)
        self.lrController = callbacks.ReduceLROnPlateau(monitor='val_acc',
                                                        factor=0.5,
                                                        patience=3,
                                                        verbose=1,
                                                        mode='auto',
                                                        min_delta=0.0001,
                                                        cooldown=0,
                                                        min_lr=0.00001)
        self.history_ = callbacks.History()
        self.callBackList = [
            self.earlyStopping, self.tensorBoard, self.checkpoint,
            self.lrController, self.history_
        ]

        # [self.train_loss, self.train_metrics] = 2*[None]
        self.history = None
        self.dataGenerator = None
Example 20
from keras.callbacks import EarlyStopping, ModelCheckpoint, TensorBoard
from keras.optimizers import Adam

# 1. data
(x_train, y_train),(x_test, y_test) = cifar10.load_data()

print(f"x_train.shape : {x_train.shape}")
print(f"x_test.shape : {x_test.shape}")
print(f"y_train.shape : {y_train.shape}")
print(f"y_test.shape : {y_test.shape}")

x_train = x_train.reshape(50000,32,32,3).astype('float32')/255.0
x_test = x_test.reshape(10000,32,32,3).astype('float32')/255.0

# 2. model
nasnet = NASNetMobile(weights='imagenet', include_top=False, input_shape=(32, 32, 3))


model = Sequential()
model.add(nasnet)

model.add(Flatten())

model.add(Dense(256))
model.add(BatchNormalization())
# model.add(Dropout(0.1))
model.add(Activation('relu'))

model.add(Dense(10, activation='softmax'))

model.summary()
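
A compile-and-fit sketch to round out the example; the hyperparameters are illustrative:

model.compile(optimizer=Adam(lr=1e-4),
              loss='sparse_categorical_crossentropy',  # labels are integer class ids
              metrics=['accuracy'])
model.fit(x_train, y_train, epochs=3, batch_size=64,
          validation_data=(x_test, y_test))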
Example 21
    def __init__(self,
                 image_size=299,
                 batch_size=64,
                 num_classes=100,
                 trainable=True,
                 load_trained=False,
                 is_mobile=False,
                 max_trainable=False,
                 pretrained_model='pretrained.h5',
                 init_lr=0.001,
                 n_chanels=3,
                 optimizer='adam',
                 init_epoch=0,
                 max_epoch=100):
        try:
            os.mkdir("out_model")
            os.mkdir("logs")
        except OSError:
            print("Output directories already exist!")
        self.batch_size = batch_size
        self.init_lr = init_lr
        self.max_epoch = max_epoch
        self.init_epoch = init_epoch
        self.model = None

        input_shape = (image_size, image_size, n_chanels)
        if not is_mobile:
            self.model = DenseNet121(input_shape=input_shape,
                                     include_top=False,
                                     weights='imagenet',
                                     pooling='max')
        else:
            self.model = NASNetMobile(input_shape=input_shape,
                                      include_top=False,
                                      weights='imagenet',
                                      pooling='max')

        x = self.model.output
        # x = GlobalAveragePooling2D()(x)
        # add a fully-connected layer
        x = Dense(1024, activation='relu')(x)
        self.predictions = Dense(num_classes,
                                 activation='softmax',
                                 name='out_put')(x)
        # model = Model(inputs=base_model.input, outputs=base_model.get_layer('avg_pool').output)
        self.model = Model(inputs=self.model.input, outputs=self.predictions)

        if load_trained:
            self.model.load_weights(pretrained_model)
            print("Load pretrained model successfully!")

        if not trainable:
            for layer in self.model.layers:
                layer.trainable = False
            print("Use model for inference is activated!")
        if trainable:
            for layer in self.model.layers[:-5]:
                layer.trainable = False
            for layer in self.model.layers[-5:]:
                layer.trainable = True
            print("Train last layers is activated!")

        if max_trainable:
            for layer in self.model.layers:
                layer.trainable = True
            print("Train whole network is activated!")
        if (optimizer == 'adam'):
            opt = Adam(lr=init_lr, beta_1=0.9, beta_2=0.999, decay=1e-6)
        else:
            opt = SGD(lr=init_lr,
                      decay=1e-6,
                      momentum=0.9,
                      nesterov=True)
        # sgd = SGD(lr=init_lr, decay=1e-6, momentum=0.9, nesterov=True)
        # self.model.compile(optimizer=Adam(lr=0.0005), loss='categorical_crossentropy', metrics=['accuracy'])
        self.model.compile(optimizer=opt,
                           loss='categorical_crossentropy',
                           metrics=['accuracy'])

        self.earlyStopping = keras.callbacks.EarlyStopping(monitor='val_acc',
                                                           min_delta=0.001,
                                                           patience=10,
                                                           verbose=1)
        self.tensorBoard = keras.callbacks.TensorBoard('./logs',
                                                       batch_size=batch_size,
                                                       write_grads=True,
                                                       write_images=True)
        self.checkpoint = keras.callbacks.ModelCheckpoint(
            './out_model/weights.{epoch:02d}-{acc:.2f}-{val_acc:.2f}.hdf5',
            monitor='val_acc',
            verbose=1,
            save_best_only=True,
            save_weights_only=False,
            mode='auto',
            period=1)
        self.callBackList = [
            self.earlyStopping, self.tensorBoard, self.checkpoint
        ]
        #self.callBackList = [self.earlyStopping, self.checkpoint]

        [self.train_loss, self.train_metrics] = 2 * [None]
        self.history = None
        self.dataGenerator = None
Example 22
 def build_network(self, model_name, fine_tune):
     if model_name.lower() == 'vgg16':
         if fine_tune:
             base_model = VGG16(include_top=False,
                                input_shape=(None, None, 3),
                                pooling='avg')
             for layer in base_model.layers:
                 if layer.name.startswith('block5'):
                     layer.trainable = True
                 else:
                     layer.trainable = False
             x = base_model.output
             x = Dense(1024, activation='relu')(x)
             x = Dropout(0.5)(x)
             x = Dense(512, activation='relu')(x)
             predictions = Dense(1, activation='sigmoid')(x)
             model = Model(base_model.input, predictions)
             model.summary()
             return model
         else:
             base_model = VGG16(include_top=False,
                                input_shape=(None, None, 3),
                                pooling='avg')
             for layer in base_model.layers:
                 layer.trainable = True
             x = base_model.output
             x = Dense(1024, activation='relu')(x)
             x = Dropout(0.5)(x)
             x = Dense(512, activation='relu')(x)
             predictions = Dense(1, activation='sigmoid')(x)
             model = Model(base_model.input, predictions)
             model.summary()
             return model
     elif model_name.lower() == 'resnet50':
         base_model = ResNet50(include_top=False,
                               input_shape=(None, None, 3),
                               pooling='avg',
                               backend=keras.backend,
                               layers=keras.layers,
                               models=keras.models,
                               utils=keras.utils)
         x = base_model.output
         x = Dense(1024, activation='relu')(x)
         x = Dropout(0.5)(x)
         x = Dense(1024, activation='relu')(x)
         predictions = Dense(1, activation='sigmoid')(x)
         model = Model(base_model.input, predictions)
         if fine_tune:
             for layer in base_model.layers:
                 layer.trainable = False
         model.summary()
         return model
     # elif model_name.lower()=='resnet34':
     #     base_model=ResNet34(include_top=False, input_shape=self.shape, pooling='avg')
     #     x = base_model.output
     #     x = Dense(1024, activation='relu')(x)
     #     x = Dropout(0.5)(x)
     #     x = Dense(1024, activation='relu')(x)
     #     predictions = Dense(1, activation='sigmoid')(x)
     #     model = Model(base_model.input, predictions)
     #     if fine_tune:
     #         for layer in base_model.layers:
     #             layer.trainable = False
     #     model.summary()
     #     return model
     elif model_name.lower() == 'resnet101':
         base_model = ResNet101(include_top=False,
                                input_shape=(None, None, 3),
                                pooling='avg',
                                backend=keras.backend,
                                layers=keras.layers,
                                models=keras.models,
                                utils=keras.utils)
         x = base_model.output
         x = Dense(1024, activation='relu')(x)
         x = Dropout(0.5)(x)
         x = Dense(1024, activation='relu')(x)
         predictions = Dense(1, activation='sigmoid')(x)
         model = Model(base_model.input, predictions)
         if fine_tune:
             for layer in base_model.layers:
                 layer.trainable = False
         model.summary()
         return model
     elif model_name.lower() == 'resnet152':
         base_model = ResNet152(include_top=False,
                                input_shape=(None, None, 3),
                                pooling='avg',
                                backend=keras.backend,
                                layers=keras.layers,
                                models=keras.models,
                                utils=keras.utils)
         x = base_model.output
         x = Dense(1024, activation='relu')(x)
         x = Dropout(0.5)(x)
         x = Dense(1024, activation='relu')(x)
         predictions = Dense(1, activation='sigmoid')(x)
         model = Model(base_model.input, predictions)
         if fine_tune:
             for layer in base_model.layers:
                 layer.trainable = False
         model.summary()
         return model
     elif model_name.lower() == 'inceptionresnetv2':
         base_model = InceptionResNetV2(include_top=False,
                                        input_shape=(None, None, 3),
                                        pooling='avg')
         x = base_model.output
         x = Dense(1024, activation='relu')(x)
         x = Dropout(0.5)(x)
         x = Dense(1024, activation='relu')(x)
         predictions = Dense(1, activation='sigmoid')(x)
         model = Model(base_model.input, predictions)
         if fine_tune:
             for layer in base_model.layers:
                 layer.trainable = False
         model.summary()
         return model
     elif model_name.lower() == 'xception':
         base_model = Xception(include_top=False,
                               input_shape=(None, None, 3),
                               pooling='avg')
         x = base_model.output
         x = Dense(1024, activation='relu')(x)
         x = Dropout(0.5)(x)
         x = Dense(1024, activation='relu')(x)
         predictions = Dense(1, activation='sigmoid')(x)
         model = Model(base_model.input, predictions)
         if fine_tune:
             for layer in base_model.layers:
                 layer.trainable = False
         model.summary()
         return model
     elif model_name.lower() == 'densenet121':
         base_model = DenseNet121(include_top=False,
                                  input_shape=(None, None, 3),
                                  pooling='avg')
         x = base_model.output
         x = Dense(1024, activation='relu')(x)
         x = Dropout(0.5)(x)
         x = Dense(1024, activation='relu')(x)
         predictions = Dense(1, activation='sigmoid')(x)
         model = Model(base_model.input, predictions)
         if fine_tune:
             for layer in base_model.layers:
                 layer.trainable = False
         model.summary()
         return model
     elif model_name.lower() == 'densenet169':
         base_model = DenseNet169(include_top=False,
                                  input_shape=(None, None, 3),
                                  pooling='avg')
         x = base_model.output
         x = Dense(1024, activation='relu')(x)
         x = Dropout(0.5)(x)
         x = Dense(1024, activation='relu')(x)
         predictions = Dense(1, activation='sigmoid')(x)
         model = Model(base_model.input, predictions)
         if fine_tune:
             for layer in base_model.layers:
                 layer.trainable = False
         model.summary()
         return model
     elif model_name.lower() == 'densenet201':
         base_model = DenseNet201(include_top=False,
                                  input_shape=(None, None, 3),
                                  pooling='avg')
         x = base_model.output
         x = Dense(1024, activation='relu')(x)
         x = Dropout(0.5)(x)
         x = Dense(1024, activation='relu')(x)
         predictions = Dense(1, activation='sigmoid')(x)
         model = Model(base_model.input, predictions)
         if fine_tune:
             for layer in base_model.layers:
                 layer.trainable = False
         model.summary()
         return model
     elif model_name.lower() == 'nasnetlarge':
         base_model = NASNetLarge(include_top=False,
                                  input_shape=(None, None, 3),
                                  pooling='avg')
         x = base_model.output
         x = Dense(1024, activation='relu')(x)
         x = Dropout(0.5)(x)
         x = Dense(1024, activation='relu')(x)
         predictions = Dense(1, activation='sigmoid')(x)
         model = Model(base_model.input, predictions)
         if fine_tune:
             for layer in base_model.layers:
                 layer.trainable = False
         model.summary()
         return model
     elif model_name.lower() == 'vgg19':
         base_model = VGG19(include_top=False,
                            input_shape=(None, None, 3),
                            pooling='avg')
         x = base_model.output
         x = Dense(1024, activation='relu')(x)
         x = Dropout(0.5)(x)
         x = Dense(1024, activation='relu')(x)
         predictions = Dense(1, activation='sigmoid')(x)
         model = Model(base_model.input, predictions)
         if fine_tune:
             for layer in base_model.layers:
                 layer.trainable = False
         model.summary()
         return model
     else:
         base_model = NASNetMobile(include_top=False,
                                   input_shape=(None, None, 3),
                                   pooling='avg')
         x = base_model.output
         x = Dense(1024, activation='relu')(x)
         x = Dropout(0.5)(x)
         x = Dense(1024, activation='relu')(x)
         predictions = Dense(1, activation='sigmoid')(x)
         model = Model(base_model.input, predictions)
         if fine_tune:
             for layer in base_model.layers:
                 layer.trainable = False
         model.summary()
         return model
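
A usage sketch, assuming the enclosing class (not shown) is instantiated as trainer; any unrecognized name falls through to the NASNetMobile branch:

model = trainer.build_network('nasnetmobile', fine_tune=True)  # trainer is a hypothetical instance
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])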
Example 23
def Nas_Net(trainable=None, net="NASNetMobile"):

    # Preprocess the dataset into a Keras-feedable format

    train_datagen = ImageDataGenerator(rotation_range=rotation,
                                       width_shift_range=width_shift,
                                       height_shift_range=height_shift,
                                       rescale=scale,
                                       shear_range=shear,
                                       zoom_range=zoom,
                                       horizontal_flip=horizontal,
                                       fill_mode=fill,
                                       validation_split=validation)
    test_datagen = ImageDataGenerator(rescale=scale, )

    train_generator = train_datagen.flow_from_directory(
        path,
        target_size=target,
        batch_size=batch,
        class_mode='categorical',
        subset='training',
    )
    validation_generator = train_datagen.flow_from_directory(
        path,
        target_size=target,
        batch_size=batch,
        class_mode='categorical',
        subset='validation')

    models_list = ['NASNetLarge', 'NASNetMobile']

    # Loading the NasNet Model

    if net == "NASNetLarge":
        nasnet = NASNetLarge(include_top=False,
                             weights='imagenet',
                             input_shape=input_sh,
                             pooling=pooling_model)
    if net == "NASNetMobile":
        nasnet = NASNetMobile(include_top=False,
                              weights='imagenet',
                              input_shape=input_sh,
                              pooling=pooling_model)
    if net not in models_list:
        raise ValueError('Please provide a valid model name: NASNetLarge or NASNetMobile')
    output = nasnet.layers[-1].output
    if pooling_model is None:
        output = keras.layers.Flatten()(output)
    nasnet = Model(nasnet.input, outputs=output)
    print(nasnet.summary())
    print('\n\n\n')
    # If you chose not for fine tuning
    if trainable is None:
        model = Sequential()
        model.add(nasnet)
        model.add(Dense(hidden, activation='relu', input_dim=input_sh))
        model.add(Dropout(dropout_num))
        model.add(Dense(hidden, activation='relu'))
        model.add(Dropout(dropout_num))
        if classes == 1:
            model.add(Dense(classes, activation='sigmoid', name='Output'))
        else:
            model.add(Dense(classes, activation='softmax', name='Output'))

        for layer in nasnet.layers:
            layer.trainable = False
        print("The model summary of Nasnet  -->\n\n\n"
              )  # In this the Nasnet layers are not trainable

        for i, layer in enumerate(nasnet.layers):
            print(i, layer.name, layer.trainable)
        model.compile(
            loss=loss_param,  # Change according to data
            optimizer=optimizers.RMSprop(),
            metrics=['accuracy'])
        print("The summary of final Model \n\n\n")
        print(model.summary())
        print('\n\n\n')

        fit_history = model.fit_generator(
            train_generator,
            steps_per_epoch=len(train_generator.filenames) // batch,
            epochs=epoch,
            shuffle=True,
            validation_data=validation_generator,
            validation_steps=len(validation_generator.filenames) // batch,
            class_weight=n,
            callbacks=[
                EarlyStopping(patience=patience_param,
                              restore_best_weights=True),
                ReduceLROnPlateau(patience=patience_param)
            ])
        os.chdir(output_path)
        model.save("model.h5")
        print(fit_history.history.keys())
        plt.figure(1, figsize=(15, 8))

        plt.subplot(221)
        plt.plot(fit_history.history['accuracy'])
        plt.plot(fit_history.history['val_accuracy'])
        plt.title('model accuracy')
        plt.ylabel('accuracy')
        plt.xlabel('epoch')
        plt.legend(['train', 'valid'])

        plt.subplot(222)
        plt.plot(fit_history.history['loss'])
        plt.plot(fit_history.history['val_loss'])
        plt.title('model loss')
        plt.ylabel('loss')
        plt.xlabel('epoch')
        plt.legend(['train', 'valid'])

        plt.show()

    if trainable is not None:
        # Make last block of the conv_base trainable:

        for layer in nasnet.layers[:trainable]:
            layer.trainable = False
        for layer in nasnet.layers[trainable:]:
            layer.trainable = True

        print('Last block of the conv_base is now trainable')

        for i, layer in enumerate(nasnet.layers):
            print(i, layer.name, layer.trainable)

        model = Sequential()
        model.add(nasnet)
        model.add(Dense(hidden, activation='relu', input_dim=input_sh))
        model.add(Dropout(dropout_num))
        model.add(Dense(hidden, activation='relu'))
        model.add(Dropout(dropout_num))
        model.add(Dense(hidden, activation='relu'))
        model.add(Dropout(dropout_num))
        if classes == 1:
            model.add(Dense(classes, activation='sigmoid', name='Output'))
        else:
            model.add(Dense(classes, activation='softmax', name='Output'))

        print("The model summary of NASNet -->\n\n\n"
              )  # the last block of the NASNet layers is now trainable
        model.compile(
            loss=loss_param,  # Change according to data
            optimizer=optimizers.RMSprop(),
            metrics=['accuracy'])
        print("The summary of final Model \n\n\n")
        print(model.summary())
        print('\n\n\n')

        fit_history = model.fit_generator(
            train_generator,
            steps_per_epoch=len(train_generator.filenames) // batch,
            epochs=epoch,
            shuffle=True,
            validation_data=validation_generator,
            validation_steps=len(validation_generator.filenames) // batch,
            class_weight=n,
            callbacks=[
                EarlyStopping(patience=patience_param,
                              restore_best_weights=True),
                ReduceLROnPlateau(patience=patience_param)
            ])
        os.chdir(output_path)
        model.save("model.h5")
        print(fit_history.history.keys())
        plt.figure(1, figsize=(15, 8))

        plt.subplot(221)
        plt.plot(fit_history.history['accuracy'])
        plt.plot(fit_history.history['val_accuracy'])
        plt.title('model accuracy')
        plt.ylabel('accuracy')
        plt.xlabel('epoch')
        plt.legend(['train', 'valid'])

        plt.subplot(222)
        plt.plot(fit_history.history['loss'])
        plt.plot(fit_history.history['val_loss'])
        plt.title('model loss')
        plt.ylabel('loss')
        plt.xlabel('epoch')
        plt.legend(['train', 'valid'])

        plt.show()
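
A usage sketch; the function depends on module-level globals (path, target, batch, input_sh, pooling_model, classes, ...) defined elsewhere in the script:

Nas_Net(trainable=None, net="NASNetMobile")   # feature extraction with a frozen base
# Nas_Net(trainable=-50, net="NASNetMobile")  # fine-tune the last 50 layers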
Example 24
def build_model(name='vgg16', filepath=None, training=False, continuing=True):
    model = None
    base = None
    shape = config.IMAGE_DIMENSIONS

    checkpoint = ModelCheckpoint(filepath, monitor='val_acc', verbose=1, save_best_only=True, mode='max')

    if os.path.exists(filepath) and training and continuing:
        model = load_model(filepath)
        return model, checkpoint, shape
    
    name = name.lower()
    if name == 'vgg16':
        base = VGG16()
    elif name == 'vgg19':
        base = VGG19()
    elif name == 'xception':
        base = Xception()
        shape = config.IMAGE_DIMENSIONS_299
    elif name == 'inceptionv3':
        base = InceptionV3()
        shape = config.IMAGE_DIMENSIONS_299
    elif name == 'resnet50':
        base = ResNet50()
    elif name == 'mobilenetv2':
        base = MobileNetV2()
    elif name == 'densenet121':
        base = DenseNet121()
    elif name == 'densenet169':
        base = DenseNet169()
    elif name == 'densenet201':
        base = DenseNet201()
    elif name == 'inceptionresnetv2':
        base = InceptionResNetV2()
        shape = config.IMAGE_DIMENSIONS_299
    elif name == 'nasnetmobile':
        base = NASNetMobile()
    elif name == 'control':
        input = Input(shape=config.IMAGE_SHAPE)
        base = Conv2D(input_shape=config.IMAGE_SHAPE, filters=16, kernel_size=3, activation='relu')(input)
        base = MaxPooling2D()(base)
        base = Flatten()(base)
        base = Model(inputs=input, outputs=base)

    if name != 'control':
        for layer in base.layers:
            layer.trainable = False
    
    x = Dense(1024, activation='relu')(base.output)
    x = BatchNormalization()(x)
    x = Dropout(0.7)(x)
    x = Dense(512, activation='relu')(x)
    x = BatchNormalization()(x)
    x = Dropout(0.5)(x)
    x = Dense(2, activation='softmax')(x)
    
    model = Model(inputs=base.input, outputs=x)

    if os.path.exists(filepath):
        model.load_weights(filepath) 
    
    return model, checkpoint, shape
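
A usage sketch for the builder above; the file path is illustrative:

model, checkpoint, shape = build_model(name='nasnetmobile',
                                       filepath='./models/nasnetmobile_best.h5',
                                       training=True, continuing=False)
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
# pass the returned checkpoint callback to model.fit when training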
Example 25
    def createCoreModel(self):
        primaryDevice = "/cpu:0"
        if self.numGPUs == 1:
            primaryDevice = "/gpu:0"

        with tf.device(primaryDevice):
            imageNet = Sequential()
            imageNetCore = None
            if self.parameters['neuralNetwork']['core'] == 'resnet':
                imageNetCore = ResNet50(
                    include_top=False,
                    pooling='avg',
                    input_shape=(self.imageWidth, self.imageHeight, 3),
                    weights=('imagenet'
                             if self.parameters['startingWeights']['weights']
                             == 'imagenet' else None))
            elif self.parameters['neuralNetwork']['core'] == 'inceptionnet':
                imageNetCore = InceptionV3(
                    include_top=False,
                    pooling='avg',
                    input_shape=(self.imageWidth, self.imageHeight, 3),
                    weights=('imagenet'
                             if self.parameters['startingWeights']['weights']
                             == 'imagenet' else None))
            elif self.parameters['neuralNetwork']['core'] == 'mobilenet':
                imageNetCore = MobileNetV2(
                    include_top=False,
                    pooling='avg',
                    input_shape=(self.imageWidth, self.imageHeight, 3),
                    weights=('imagenet'
                             if self.parameters['startingWeights']['weights']
                             == 'imagenet' else None))
            elif self.parameters['neuralNetwork']['core'] == 'nasnetmobile':
                imageNetCore = NASNetMobile(
                    include_top=False,
                    pooling='avg',
                    input_shape=(self.imageWidth, self.imageHeight, 3),
                    weights=('imagenet'
                             if self.parameters['startingWeights']['weights']
                             == 'imagenet' else None))
            elif self.parameters['neuralNetwork']['core'] == 'nasnetlarge':
                imageNetCore = NASNetLarge(
                    include_top=False,
                    pooling='avg',
                    input_shape=(self.imageWidth, self.imageHeight, 3),
                    weights=('imagenet'
                             if self.parameters['startingWeights']['weights']
                             == 'imagenet' else None))

            imageNetCore.summary()

            imageNet.add(imageNetCore)
            imageNet.add(Reshape([2048]))  # assumes a 2048-dim pooled output (true for ResNet50/InceptionV3; other cores differ)
            imageNet.add(BatchNormalization())
            imageNet.add(
                Dropout(self.parameters["neuralNetwork"]["dropoutRate"]))
            imageNet.add(
                Dense(
                    int(self.parameters['neuralNetwork']['vectorSize'] *
                        self.parameters["neuralNetwork"]
                        ["denseLayerMultiplier"])))
            imageNet.add(BatchNormalization())
            # imageNet.add(Dense(int(self.parameters['neuralNetwork']['vectorSize'])))
            imageNet.add(
                Dense(int(self.parameters['neuralNetwork']['vectorSize']),
                      activation=self.parameters["neuralNetwork"]
                      ["finalActivation"]))
            # imageNet.add(Dense(int(self.parameters['neuralNetwork']['vectorSize']), activation=self.parameters["neuralNetwork"]["finalActivation"]))

            imageNet.summary()

        if self.numGPUs > 1:
            model = multi_gpu_model(imageNet, gpus=self.numGPUs)
        else:
            model = imageNet

        return model, imageNet
Example 26
VGG16_top = VGG16(include_top=False, input_shape=(224, 224, 3))
VGG19_top = VGG19(include_top=False, input_shape=(224, 224, 3))
Res50_top = ResNet50(include_top=False, input_shape=(224, 224, 3))
Xception_top = Xception(include_top=False, input_shape=(299, 299, 3))
InceptionV3_top = InceptionV3(include_top=False, input_shape=(299, 299, 3))
InceptionResNetV2_top = InceptionResNetV2(include_top=False,
                                          input_shape=(299, 299, 3))

# Less commonly used pretrained models; Keras provides pretrained weights for these as well

from keras.applications import MobileNet
from keras.applications import DenseNet121, DenseNet169, DenseNet201
from keras.applications import NASNetLarge, NASNetMobile

Mobile_base = MobileNet(include_top=True, input_shape=(224, 224, 3))

Dense121_base = DenseNet121(include_top=True, input_shape=(224, 224, 3))
Dense169_base = DenseNet169(include_top=True, input_shape=(224, 224, 3))
Dense201_base = DenseNet201(include_top=True, input_shape=(224, 224, 3))

NASNetLarge_base = NASNetLarge(include_top=True, input_shape=(331, 331, 3))
NASNetMobile_base = NASNetMobile(include_top=True, input_shape=(224, 224, 3))

# -------------------------------------------------------------------------
# Networks without the top (classification) layers
Mobile_top = MobileNet(include_top=False, input_shape=(224, 224, 3))

Dense121_top = DenseNet121(include_top=False, input_shape=(224, 224, 3))
Dense169_top = DenseNet169(include_top=False, input_shape=(224, 224, 3))
Dense201_top = DenseNet201(include_top=False, input_shape=(224, 224, 3))
Example 27
environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
print("tf : {}".format(tf.__version__))
print("keras : {}".format(keras.__version__))
print("numpy : {}".format(np.__version__))
print("sklearn : {}".format(skl.__version__))

# Load Model
IMG_HEIGHT = 224
IMG_WIDTH = IMG_HEIGHT
CHANNELS = 3
DIMS = (IMG_HEIGHT, IMG_WIDTH, CHANNELS)

MODEL_TO_EVAL = './models/NASNetMobile.hdf5'

base_model = NASNetMobile(input_shape=DIMS, weights='imagenet', include_top=False)
x = base_model.output
x = GlobalAveragePooling2D(name='avg_pool')(x)  # comment this line out for ResNet
x = Dense(1, activation='sigmoid', name='predictions')(x)

model = Model(inputs=base_model.input, outputs=x)
model.load_weights(MODEL_TO_EVAL)
model.compile(optimizer=Adam(lr=1e-3), loss=binary_crossentropy, metrics=['binary_accuracy'])
model._make_predict_function()

app = Flask(__name__)

@app.route("/score/")
def score_image():
    filename = request.args['file']
Example 28
from keras.applications.mobilenet import preprocess_input
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Model
from keras.optimizers import Adam
from keras.models import load_model
from keras.utils import multi_gpu_model
import tensorflow as tf
from tensorflow.python.client import device_lib

print(device_lib.list_local_devices())

#base_model=MobileNet(weights='imagenet',include_top=False) #imports the mobilenet model and discards the last 1000 neuron layer.
#base_model=MobileNetV2(weights='imagenet',include_top=False)
base_model = NASNetMobile(
    weights='imagenet', include_top=False
)  # loads the NASNetMobile model and discards the final 1000-unit classification layer

x = base_model.output
x = GlobalAveragePooling2D()(x)
x = Dense(1024, activation='relu')(x)  # add dense layers so the model can learn more complex functions for better classification
x = Dense(1024, activation='relu')(x)  #dense layer 2
x = Dense(512, activation='relu')(x)  #dense layer 3
preds = Dense(3, activation='softmax')(x)  #final layer with softmax activation


model = Model(inputs=base_model.input, outputs=preds)
#Training models with weights merge on GPU (recommended for NV-link)
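
The closing comment presumably leads into keras.utils.multi_gpu_model, imported above; a minimal sketch under that assumption:

parallel_model = multi_gpu_model(model, gpus=2)  # GPU count is illustrative
parallel_model.compile(optimizer=Adam(lr=1e-3),
                       loss='categorical_crossentropy',
                       metrics=['accuracy'])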
Example 29
                for k in j.split('.')[0].split('_')[1].split(';'):
                    temp[int(k)] = 1
                img = np.array(cv2.resize(cv2.imread('imgtest/{}'.format(j)),
                                          (224, 224)),
                               dtype=np.float32) / 255.0
                x.append(img)
                y.append(temp)
            yield (np.array(x), np.array(y))


inputs = Input(shape=(224, 224, 3))

# Build the NASNetMobile model and add a fully connected layer for classification
nasnet = NASNetMobile(include_top=False,
                      weights='NASNet-mobile-no-top.h5',
                      input_tensor=inputs,
                      pooling='max',
                      input_shape=(224, 224, 3))
net = Dense(10, activation='sigmoid')(nasnet.layers[-1].output)

model = Model(inputs=inputs, outputs=net)

# Print the model structure and parameter counts
model.summary()

# Compile the model with the Adam optimizer and cross-entropy loss
model.compile(optimizer=Adam(), loss=binary_crossentropy, metrics=['accuracy'])

# Train the model
model.fit_generator(get_data_train(train, batch_size=64),
                    validation_data=get_data_test(test, batch_size=64),
Example 30
from keras.applications import VGG16,VGG19, Xception, ResNet101,ResNet101V2,ResNet152
from keras.applications import ResNet152V2,ResNet50,ResNet50V2,InceptionV3,InceptionResNetV2
from keras.applications import MobileNet,MobileNetV2,DenseNet121,DenseNet169,DenseNet201
from keras.applications import NASNetLarge, NASNetMobile
from keras.models import Sequential
from keras.layers import Dense, Conv2D, MaxPooling2D, BatchNormalization, Activation,Flatten
from keras.optimizers import Adam

# applications = [VGG19, Xception, ResNet101, ResNet101V2, ResNet152,ResNet152V2, ResNet50, 
#                 ResNet50V2, InceptionV3, InceptionResNetV2,MobileNet, MobileNetV2, 
#                 DenseNet121, DenseNet169, DenseNet201]

# for i in applications:
#     take_model = i()

# vgg16 = VGG16()
nasnetlarge = NASNetLarge()
nasnetmobile = NASNetMobile()