def build_model(classes=2):
    inputs = Input(shape=(IMAGE_SIZE, IMAGE_SIZE, 3))
    x = preprocess_input(inputs)
    x = DenseNet201(weights=None, classes=classes)(x)
    model = Model(inputs=inputs, outputs=x)
    model.compile(loss='categorical_crossentropy', metrics=['accuracy'])
    return model
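# Usage sketch (illustrative, not part of the original snippet): assumes
# IMAGE_SIZE is defined and that x_train / y_train are placeholder arrays of
# RGB images and one-hot labels matching `classes`.
model = build_model(classes=2)
model.fit(x_train, y_train, epochs=5, batch_size=32, validation_split=0.1)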
def create_model(model_name):
    if model_name == 'efn_b4':
        model = efn.EfficientNetB4(weights=None, classes=4)
    elif model_name == 'efn_b4_p':
        model = tf.keras.models.Sequential()
        model.add(efn.EfficientNetB4(input_shape=(380, 380, 3),
                                     weights='imagenet', include_top=False))
    elif model_name == 'efn_b5_p':
        model = tf.keras.models.Sequential()
        model.add(efn.EfficientNetB5(input_shape=(456, 456, 3),
                                     weights='imagenet', include_top=False))
    elif model_name == 'resnet18':
        model = ResNet([2, 2, 2, 2], input_shape=(224, 224, 3))
    elif model_name == 'densenet121_p':
        model = tf.keras.models.Sequential()
        model.add(DenseNet121(input_shape=(224, 224, 3),
                              weights='imagenet', include_top=False))
    elif model_name == 'densenet201_p':
        model = tf.keras.models.Sequential()
        model.add(DenseNet201(input_shape=(224, 224, 3),
                              weights='imagenet', include_top=False))
    if model_name.split('_')[-1] == 'p':
        model.add(GlobalAveragePooling2D())
        model.add(Dense(128, activation='relu'))
        model.add(Dense(64, activation='relu'))
        model.add(Dense(4, activation='softmax'))
    model.summary()
    return model
def transferlearning(modelname, learning_rate, image_shape, training_path, epoch):
    train_datagen = ImageDataGenerator(rescale=1./255,
                                       shear_range=0.2,
                                       zoom_range=0.2,
                                       horizontal_flip=True,
                                       validation_split=0.2)
    train_generator = train_datagen.flow_from_directory(
        training_path,
        target_size=(image_shape, image_shape),
        batch_size=128,
        class_mode='categorical',
        subset='training',
        shuffle=True)
    validation_generator = train_datagen.flow_from_directory(
        training_path,
        target_size=(image_shape, image_shape),
        batch_size=128,
        class_mode='categorical',
        subset='validation')
    num_classes = train_generator.num_classes
    if modelname == 'VGG19':
        base_model = VGG19(input_shape=(image_shape, image_shape, 3),
                           include_top=False, weights='imagenet')
    elif modelname == 'MobileNetV2':
        base_model = MobileNetV2(input_shape=(image_shape, image_shape, 3),
                                 include_top=False, weights='imagenet')
    elif modelname == 'DenseNet201':
        base_model = DenseNet201(input_shape=(image_shape, image_shape, 3),
                                 include_top=False, weights='imagenet')
    elif modelname == 'InceptionV3':
        base_model = InceptionV3(input_shape=(image_shape, image_shape, 3),
                                 include_top=False, weights='imagenet')
    elif modelname == 'ResNet50':
        base_model = ResNet50(input_shape=(image_shape, image_shape, 3),
                              include_top=False, weights='imagenet')
    elif modelname == 'Xception':
        base_model = Xception(input_shape=(image_shape, image_shape, 3),
                              include_top=False, weights='imagenet')
    global_layer = keras.layers.GlobalAveragePooling2D()(base_model.output)
    prediction_layer = keras.layers.Dense(num_classes,
                                          activation='softmax')(global_layer)
    model = keras.models.Model(inputs=base_model.input, outputs=prediction_layer)
    model.summary()
    model.compile(optimizer=Adam(lr=learning_rate),
                  loss='categorical_crossentropy',
                  metrics=["acc"])
    history = model.fit_generator(train_generator,
                                  epochs=epoch,
                                  validation_data=validation_generator,
                                  validation_steps=1000)
    return history
def __init__(self, model_name=None):
    if model_name == 'Xception':
        base_model = Xception(weights='imagenet')
        self.preprocess_input = xception.preprocess_input
    elif model_name == 'VGG19':
        base_model = VGG19(weights='imagenet')
        self.preprocess_input = vgg19.preprocess_input
    elif model_name == 'ResNet50':
        base_model = ResNet50(weights='imagenet')
        self.preprocess_input = resnet.preprocess_input
    elif model_name == 'ResNet101':
        base_model = ResNet101(weights='imagenet')
        self.preprocess_input = resnet.preprocess_input
    elif model_name == 'ResNet152':
        base_model = ResNet152(weights='imagenet')
        self.preprocess_input = resnet.preprocess_input
    elif model_name == 'ResNet50V2':
        base_model = ResNet50V2(weights='imagenet')
        self.preprocess_input = resnet_v2.preprocess_input
    elif model_name == 'ResNet101V2':
        base_model = ResNet101V2(weights='imagenet')
        self.preprocess_input = resnet_v2.preprocess_input
    elif model_name == 'ResNet152V2':
        base_model = ResNet152V2(weights='imagenet')
        self.preprocess_input = resnet_v2.preprocess_input
    elif model_name == 'InceptionV3':
        base_model = InceptionV3(weights='imagenet')
        self.preprocess_input = inception_v3.preprocess_input
    elif model_name == 'InceptionResNetV2':
        base_model = InceptionResNetV2(weights='imagenet')
        self.preprocess_input = inception_resnet_v2.preprocess_input
    elif model_name == 'DenseNet121':
        base_model = DenseNet121(weights='imagenet')
        self.preprocess_input = densenet.preprocess_input
    elif model_name == 'DenseNet169':
        base_model = DenseNet169(weights='imagenet')
        self.preprocess_input = densenet.preprocess_input
    elif model_name == 'DenseNet201':
        base_model = DenseNet201(weights='imagenet')
        self.preprocess_input = densenet.preprocess_input
    elif model_name == 'NASNetLarge':
        base_model = NASNetLarge(weights='imagenet')
        self.preprocess_input = nasnet.preprocess_input
    elif model_name == 'NASNetMobile':
        base_model = NASNetMobile(weights='imagenet')
        self.preprocess_input = nasnet.preprocess_input
    elif model_name == 'MobileNet':
        base_model = MobileNet(weights='imagenet')
        self.preprocess_input = mobilenet.preprocess_input
    elif model_name == 'MobileNetV2':
        base_model = MobileNetV2(weights='imagenet')
        self.preprocess_input = mobilenet_v2.preprocess_input
    else:
        base_model = VGG16(weights='imagenet')
        self.preprocess_input = vgg16.preprocess_input
    self.model = Model(inputs=base_model.input,
                       outputs=base_model.layers[-2].output)
def create_model(model_name, input_shape=(IMG_SIZE, IMG_SIZE, 3)):
    if model_name == 'efn_b4':
        model = efn.EfficientNetB4(weights=None, classes=4)
    elif model_name == 'efn_b4_p':
        model = tf.keras.models.Sequential()
        model.add(efn.EfficientNetB4(input_shape=input_shape,
                                     weights='imagenet', include_top=False))
    elif model_name == 'efn_b5_p':
        model = tf.keras.models.Sequential()
        model.add(efn.EfficientNetB5(input_shape=input_shape,
                                     weights='imagenet', include_top=False))
    elif model_name == 'efn_b6_p':
        model = tf.keras.models.Sequential()
        model.add(efn.EfficientNetB6(input_shape=input_shape,
                                     weights='imagenet', include_top=False))
    elif model_name == 'efn_b7_p':
        model = tf.keras.models.Sequential()
        model.add(efn.EfficientNetB7(input_shape=input_shape,
                                     weights='imagenet', include_top=False))
    elif model_name == 'densenet121_p':
        model = tf.keras.models.Sequential()
        model.add(DenseNet121(input_shape=input_shape,
                              weights='imagenet', include_top=False))
    elif model_name == 'densenet201_p':
        model = tf.keras.models.Sequential()
        model.add(DenseNet201(input_shape=input_shape,
                              weights='imagenet', include_top=False))
    elif model_name == 'inceptionResV2_p':
        model = tf.keras.models.Sequential()
        model.add(InceptionResNetV2(input_shape=input_shape,
                                    weights='imagenet', include_top=False))
    if model_name.split('_')[-1] == 'p':
        model.add(GlobalAveragePooling2D())
        # model.add(Dense(128, activation='relu'))
        # model.add(Dense(64, activation='relu'))
        model.add(Dense(4, activation='softmax'))
    model.summary()
    return model
def pretrainded_model(type: str, trainable=False):
    with strategy.scope():
        if type == 'VGG16':
            pretrained_model = VGG16(weights='imagenet', include_top=False,
                                     input_shape=[*IMAGE_SIZE, 3])
        elif type == 'VGG19':
            pretrained_model = VGG19(weights='imagenet', include_top=False,
                                     input_shape=[*IMAGE_SIZE, 3])
        elif type == 'DenseNet121':
            pretrained_model = DenseNet121(weights='imagenet', include_top=False,
                                           input_shape=[*IMAGE_SIZE, 3])
        elif type == 'DenseNet169':
            pretrained_model = DenseNet169(weights='imagenet', include_top=False,
                                           input_shape=[*IMAGE_SIZE, 3])
        elif type == 'DenseNet201':
            pretrained_model = DenseNet201(weights='imagenet', include_top=False,
                                           input_shape=[*IMAGE_SIZE, 3])
        pretrained_model.trainable = trainable
        model = Sequential([
            # To a base pretrained on ImageNet to extract features from images...
            pretrained_model,
            # ... attach a new head to act as a classifier.
            Flatten(),
            Dense(256, activation='relu'),
            BatchNormalization(),
            Dropout(0.2),
            Dense(256, activation='relu'),
            BatchNormalization(),
            Dropout(0.2),
            Dense(256, activation='relu'),
            BatchNormalization(),
            Dropout(0.2),
            Dense(256, activation='relu'),
            BatchNormalization(),
            Dropout(0.2),
            Dense(256, activation='relu'),
            BatchNormalization(),
            Dropout(0.2),
            tf.keras.layers.Dense(len(CLASSES), activation='softmax',
                                  use_bias=False)
        ])
    return model
def build_model():
    model = Sequential([
        DenseNet201(weights='imagenet', include_top=False,
                    input_shape=(224, 224, 3)),
        layers.GlobalAveragePooling2D(),
        layers.Dropout(0.5),
        layers.BatchNormalization(),
        layers.Dense(classes, activation='softmax'),
    ])
    model.compile(loss='binary_crossentropy',
                  optimizer=Adam(learning_rate=1e-4),
                  metrics=['accuracy'])
    return model
def download_model():
    model = Sequential()
    conv_base = DenseNet201(input_shape=(224, 224, 3), include_top=False,
                            pooling='max', weights='imagenet')
    model.add(conv_base)
    model.add(BatchNormalization())
    model.add(Dense(2048, activation='relu', kernel_regularizer=l1_l2(0.01)))
    model.add(BatchNormalization())
    model.add(Dense(8, activation='softmax'))
    train_layers = [layer for layer in conv_base.layers[::-1][:5]]
    for layer in conv_base.layers:
        if layer in train_layers:
            layer.trainable = True
    model.save("model/model.h5")
def train(self):
    model = Sequential()
    model.add(DenseNet201(weights="imagenet", include_top=False,
                          input_shape=self.input_shape))
    model.add(Flatten())
    model.add(Dense(1024, activation="relu"))
    model.add(Dense(1, activation="sigmoid"))
    plot_model(model)
    model.summary()
    model.compile(optimizer=Adam(learning_rate=1e-3),
                  loss="binary_crossentropy",
                  metrics=['accuracy'])
    history = model.fit(self.train_data,
                        epochs=100,
                        verbose=1,
                        validation_data=self.valid_data)
    return model, history
def densenet(shape, class_num):
    base_model = DenseNet201(include_top=False, weights='imagenet',
                             pooling='avg')  # , input_tensor=Input(shape=shape))
    nw = base_model.output
    nw = Dense(512, activation='relu')(nw)
    nw = Dropout(.4)(nw)
    nw = Dense(512, activation='relu')(nw)
    if class_num <= 2:
        output = Dense(class_num, activation='sigmoid', name='output')(nw)
    else:
        output = Dense(class_num, activation='softmax', name='output')(nw)
    base_model.trainable = False
    '''# to train only part of the model
    layer_names = [l.name for l in base_model.layers]
    idx = layer_names.index('block7a_expand_conv')
    for layer in base_model.layers[:idx]:
        layer.trainable = False
    '''
    return Model(inputs=base_model.input, outputs=output)
def get_model(model_name):
    if model_name == 'MobileNet':
        base_model = MobileNet(weights='imagenet', include_top=False,
                               input_shape=(img_size, img_size, 3))
    elif model_name == 'VGG16':
        base_model = vgg16.VGG16(weights='imagenet', include_top=False,
                                 input_shape=(img_size, img_size, 3))
    elif model_name == 'DenseNet':
        base_model = DenseNet121(weights='imagenet', include_top=False,
                                 input_shape=(img_size, img_size, 3))
    elif model_name == 'DenseNet201':
        base_model = DenseNet201(weights='imagenet', include_top=False,
                                 input_shape=(img_size, img_size, 3))
    elif model_name == 'Inception':
        base_model = InceptionV3(weights='imagenet', include_top=False,
                                 input_shape=(img_size, img_size, 3))
    elif model_name == 'ResNet':
        base_model = ResNet50(weights='imagenet', include_top=False,
                              input_shape=(img_size, img_size, 3))
    x = base_model.output
    x = GlobalAveragePooling2D()(x)
    x = Dense(512, activation='relu')(x)
    x = Dropout(0.3)(x)
    x = Dense(256, activation='relu')(x)  # dense layer 2
    preds = Dense(4, activation='softmax')(x)
    model = Model(inputs=base_model.input, outputs=preds, name=model_name)
    model.compile(optimizer='Adam', loss='categorical_crossentropy',
                  metrics=['accuracy'])
    return model
def create_model(model_name, log_dir, args):
    # optimizer, learning rate, activation, neurons, batch size, epochs...
    input_shape = input_size(model_name, args)
    if args.head == 'max' or (args.base_trainable and args.head != 't_complex'):
        pool = 'max'
    else:
        pool = 'none'
    if model_name == 'VGG16':
        conv_base = VGG16(weights='imagenet', include_top=False,
                          pooling=pool, input_shape=input_shape)
    elif model_name == 'VGG19':
        conv_base = VGG19(weights='imagenet', include_top=False,
                          pooling=pool, input_shape=input_shape)
    elif model_name == 'ResNet50':
        conv_base = ResNet50(weights='imagenet', include_top=False,
                             pooling=pool, input_shape=input_shape)
    elif model_name == 'InceptionV3':
        conv_base = InceptionV3(weights='imagenet', include_top=False,
                                pooling=pool, input_shape=input_shape)
    elif model_name == 'Xception':
        conv_base = Xception(weights='imagenet', include_top=False,
                             pooling=pool, input_shape=input_shape)
    elif model_name == 'InceptionResNetV2':
        conv_base = InceptionResNetV2(weights='imagenet', include_top=False,
                                      pooling=pool, input_shape=input_shape)
    elif model_name == 'NASNetMobile':
        conv_base = NASNetMobile(weights='imagenet', include_top=False,
                                 pooling=pool, input_shape=input_shape)
    elif model_name == 'NASNetLarge':
        conv_base = NASNetLarge(weights='imagenet', include_top=False,
                                pooling=pool, input_shape=input_shape)
    elif model_name == 'DenseNet201':
        conv_base = DenseNet201(weights='imagenet', include_top=False,
                                pooling=pool, input_shape=input_shape)
    elif model_name == 'MobileNetV2':
        conv_base = MobileNetV2(weights='imagenet', include_top=False,
                                pooling=pool, input_shape=input_shape)
    else:
        conv_base = None
        print("Model name not known!")
        exit()
    conv_base.trainable = args.base_trainable
    model = models.Sequential()
    if args.base_trainable:
        if args.head == 't_complex':
            model = models.Sequential()
            model.add(conv_base)
            model.add(layers.Conv2D(filters=1024, kernel_size=(3, 3),
                                    padding='same', strides=1))
            model.add(layers.Flatten())  # ??
            model.add(layers.Dense(1024, activation='sigmoid'))
            model.add(layers.Dense(256, activation='sigmoid'))
            model.add(layers.Dense(args.CLASSES_NO, activation='softmax'))
            # (samples, new_rows, new_cols, filters)
        else:
            model.add(conv_base)
            model.add(layers.Dense(args.CLASSES_NO, activation='softmax'))
    elif args.head == 'dense':  # outside only?
        model.add(conv_base)
        model.add(layers.Flatten())
        model.add(layers.Dropout(0.5))
        model.add(layers.Dense(256, activation='relu'))
        model.add(layers.Dropout(0.5))
        model.add(layers.Dense(128, activation='relu'))
        model.add(layers.Dense(args.CLASSES_NO, activation='softmax'))
    elif args.head == 'max':
        model.add(conv_base)
        model.add(layers.Dense(512, activation='relu'))
        model.add(layers.Dropout(0.5))
        model.add(layers.Dense(256, activation='relu'))
        model.add(layers.Dense(args.CLASSES_NO, activation='softmax'))
    elif args.head == 'mod':
        model = models.Sequential()
        model.add(conv_base)
        model.add(layers.Conv2D(filters=2048, kernel_size=(3, 3), padding='valid'))
        model.add(layers.Flatten())  # ??
        model.add(layers.Dropout(0.5))
        model.add(layers.Dense(1024, activation='sigmoid'))
        model.add(layers.Dense(256, activation='relu'))
        model.add(layers.Dense(args.CLASSES_NO, activation='softmax'))
        # (samples, new_rows, new_cols, filters)
    if args.lr_decay:
        lr_schedule = ExponentialDecay(args.INIT_LEARN_RATE,
                                       decay_steps=args.DECAY_STEPS,
                                       decay_rate=args.DECAY_RATE,
                                       staircase=True)
        model.compile(loss='categorical_crossentropy',
                      optimizer=SGD(lr_schedule),
                      metrics=['acc'])  # To different optimisers?
    else:
        model.compile(loss='categorical_crossentropy',
                      optimizer=Adam(lr=args.LEARNING_RATE),
                      metrics=['acc'])
    with open(os.path.join(log_dir, 'modelsummary.txt'), 'w') as f:
        with redirect_stdout(f):
            model.summary()
    print(model.summary())
    return model
def load_pretrained_img_embedder(self):
    self.img_embedder = DenseNet201(weights=None)
    self.img_embedder.load_weights(
        "Models/densenet201_weights_tf_dim_ordering_tf_kernels.h5")
    self.img_embedder = Model(inputs=self.img_embedder.inputs,
                              outputs=self.img_embedder.layers[-2].output)
vgg16 = NASNetMobile()
# vgg16.summary()
print("NASNetMobile", len(vgg16.trainable_weights) / 2)
print('----------------------------------------------------------------------------')
vgg16 = DenseNet121()
# vgg16.summary()
print("DenseNet121", len(vgg16.trainable_weights) / 2)
print('----------------------------------------------------------------------------')
vgg16 = DenseNet169()
# vgg16.summary()
print("DenseNet169", len(vgg16.trainable_weights) / 2)
print('----------------------------------------------------------------------------')
vgg16 = DenseNet201()
# vgg16.summary()
print("DenseNet201", len(vgg16.trainable_weights) / 2)
print('----------------------------------------------------------------------------')
vgg16 = MobileNetV2()
# vgg16.summary()
print("MobileNetV2", len(vgg16.trainable_weights) / 2)
print('----------------------------------------------------------------------------')
vgg16 = MobileNet()
# vgg16.summary()
print("MobileNet", len(vgg16.trainable_weights) / 2)
    headModel = baseModel.output
    headModel = Dense(51, activation='softmax')(headModel)
elif network == "MobileNetV2":
    print(network)
    train, test, lb2, labelsTest = preprocessing(network)
    baseModel = MobileNetV2(weights=pretraining, include_top=False,
                            input_tensor=Input(shape=(224, 224, 3)),
                            pooling="avg")
    headModel = baseModel.output
    headModel = Dense(51, activation='softmax', use_bias=True)(headModel)
elif network == "DenseNet201":
    print(network)
    train, test, lb2, labelsTest = preprocessing_EfficcientNet()
    baseModel = DenseNet201(weights=pretraining, include_top=False,
                            input_tensor=Input(shape=(224, 224, 3)),
                            pooling="avg")
    headModel = baseModel.output
    headModel = Dense(51, activation='softmax')(headModel)
elif network == "NASNetMobile":
    print(network)
    train, test, lb2, labelsTest = preprocessing(network)
    baseModel = NASNetMobile(weights=pretraining, include_top=False,
                             input_tensor=Input(shape=(224, 224, 3)),
                             pooling="avg")
    headModel = baseModel.output
    headModel = Dense(51, activation='softmax')(headModel)
elif network == "EfficientNetB0":
    print(network)
    train, test, lb2, labelsTest = preprocessing_EfficcientNet()
print('Training data:')
train_data_gen = train_image_generator.flow_from_directory(
    batch_size=batch_size,
    directory=train_dir,
    shuffle=True,
    target_size=(IMG_WIDTH, IMG_HEIGHT),
    class_mode='binary')

print('Testing data:')
test_data_gen = test_image_generator.flow_from_directory(
    batch_size=batch_size,
    directory=test_dir,
    target_size=(IMG_WIDTH, IMG_HEIGHT),
    class_mode='binary')

base_model = DenseNet201(input_shape=(IMG_WIDTH, IMG_HEIGHT, 3),
                         include_top=False, weights='imagenet')
base_model.trainable = False

model = Sequential([
    base_model,
    GlobalAveragePooling2D(),
    Dense(1, activation='sigmoid')
])
model.compile(optimizer='adam',
              loss='binary_crossentropy',
              metrics=[BinaryAccuracy(), Precision(), Recall()])
model.summary()
val_dataset = (tf.data.Dataset
               .from_tensor_slices((val_paths, val_labels))
               .map(decode_image, num_parallel_calls=AUTO)
               .batch(batch_size))

test_dataset = (tf.data.Dataset
                .from_tensor_slices(test_paths)
                .map(decode_image, num_parallel_calls=AUTO)
                .batch(batch_size))

model = tf.keras.Sequential([
    # Input: 299x299 RGB images (3 channels)
    DenseNet201(weights='imagenet', include_top=False, input_shape=(299, 299, 3)),
    GlobalAveragePooling2D(),
    Dense(1024, activation='relu'),
    BatchNormalization(),
    Dropout(0.5),
    Dense(512, activation='relu'),
    BatchNormalization(),
    Dropout(0.5),
    Dense(100, activation='softmax')
])

# create SGD optimizer
optim = tf.keras.optimizers.SGD()
def build_fc_densenet(n_classes, h, w, n_layers=201, use_bottleneck=False,
                      bottleneck_blocks=32):
    """
    Build a Fully Convolutional DenseNet model.

    Parameters:
        n_classes: Number of classes to predict.
        h: Height of input images.
        w: Width of input images.
        n_layers: Number of DenseNet layers; valid values are 121, 169 and 201.
            DenseNet201 is used by default or if the value is not in the valid set.
        use_bottleneck: Whether or not to use a bottleneck block as mentioned in the paper.
        bottleneck_blocks: Number of blocks to use if use_bottleneck is True.

    Returns:
        A tf.keras Model instance.
    """
    if n_layers == 121:
        blocks = [6, 12, 24, 16]
        base_model = DenseNet121(input_shape=[h, w, 3], include_top=False)
    elif n_layers == 169:
        blocks = [6, 12, 32, 32]
        base_model = DenseNet169(input_shape=[h, w, 3], include_top=False)
    else:
        blocks = [6, 12, 48, 32]
        base_model = DenseNet201(input_shape=[h, w, 3], include_top=False)

    skips_n = 3
    grown_factor = 32

    # Encoder
    skip_names = [str.format('conv{0}_block{1}_concat', i + 2, blocks[i])
                  for i in range(skips_n + 1)]
    upsample_factors = [4, 2, 2, 2]
    skip_layers = [base_model.get_layer(name).output for name in skip_names]
    base = Model(inputs=base_model.inputs, outputs=skip_layers)

    inputs = Input(shape=[h, w, 3])
    skips = base(inputs)
    x = skips[-1]

    # Bottleneck
    if use_bottleneck:
        x = dense_block(x, bottleneck_blocks, name='bottleneck')

    # Upsample path
    for i in range(1, 4):
        print('upsampling', x, skips[-i - 1])
        skip = skips[-i - 1]
        x = transition_up(skip, x)
        x = dense_block(x, blocks[-i], name='upsample' + str(i))

    # 4x upsampling
    x = Conv2DTranspose(64, 3, 4, padding='same',
                        kernel_initializer='he_uniform')(x)
    x = score(x, n_classes)

    # Final model
    model = Model(inputs=inputs, outputs=x)
    return model
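# Illustrative usage (not from the original source): builds the fully
# convolutional DenseNet201 variant for segmentation. The class count and the
# 224x224 input resolution below are arbitrary example values, and the helpers
# dense_block / transition_up / score are assumed to be defined in this module.
seg_model = build_fc_densenet(n_classes=21, h=224, w=224, n_layers=201)
seg_model.summary()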
from tensorflow.keras.datasets import cifar10
from tensorflow.keras.utils import to_categorical

# 1. Data
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)
x_train = x_train.reshape(x_train.shape[0], x_train.shape[1],
                          x_train.shape[2], 3).astype('float32') / 255.
x_test = x_test.reshape(x_test.shape[0], x_test.shape[1],
                        x_test.shape[2], 3).astype('float32') / 255.

# 2. Model
t = DenseNet201(weights='imagenet', include_top=False,
                input_shape=(x_train.shape[1], x_train.shape[2], 3))
t.trainable = False  # do not train the base; use the ImageNet weights as-is
# model.trainable = True

model = Sequential()
model.add(t)
model.add(Flatten())
model.add(Dense(256))
model.add(BatchNormalization())
model.add(Dropout(0.2))
model.add(Activation('relu'))
model.add(Dense(256))
model.add(Dense(10, activation='softmax'))

model.compile(loss='categorical_crossentropy',
mirrored_strategy = tf.distribute.MirroredStrategy(devices=["/gpu:0", "/gpu:1"])
with mirrored_strategy.scope():
    architectures = [
        ("DenseNet121", DenseNet121(input_shape=IMG_SHAPE,
                                    include_top=False, weights='imagenet')),
        ("DenseNet169", DenseNet169(input_shape=IMG_SHAPE,
                                    include_top=False, weights='imagenet')),
        ("DenseNet201", DenseNet201(input_shape=IMG_SHAPE,
                                    include_top=False, weights='imagenet')),
        ("InceptionResNetV2", InceptionResNetV2(input_shape=IMG_SHAPE,
                                                include_top=False,
                                                weights='imagenet')),
        ("MobileNet", MobileNet(input_shape=IMG_SHAPE,
                                include_top=False, weights='imagenet')),
        ("MobileNetV2", MobileNetV2(input_shape=IMG_SHAPE,
                                    include_top=False, weights='imagenet')),
        ("ResNet101", ResNet101(input_shape=IMG_SHAPE,
def train_model(path, train_images=None, train_labels=None, test_images=None,
                test_labels=None, model_name=None, epochs=80, learning_rate=0.0001,
                input_shape=(224, 224, 3), classes=2, batch_size=16,
                classifier_activation='softmax', callbacks=None):
    '''
    Trains a model and saves it as a .h5 file.

    path = directory for saving the files
    train_images = a numpy array containing the image data for training
    train_labels = a numpy array containing the labels for training
    test_images = a numpy array containing the image data for testing
    test_labels = a numpy array containing the labels for testing
    model_name = a string, name of the model -> "vgg19", "resnet50_v2",
        "inception_resnet_v2", "densenet201", "inception_v3", "xception",
        "mobilenet_v2"
    epochs
    learning_rate
    '''
    base_model = None
    if model_name == 'vgg19':
        base_model = VGG19(weights=None, include_top=False, input_shape=input_shape)
    if model_name == 'resnet50_v2':
        base_model = ResNet50V2(weights=None, include_top=False, input_shape=input_shape)
    if model_name == 'inception_resnet_v2':
        base_model = InceptionResNetV2(weights=None, include_top=False, input_shape=input_shape)
    if model_name == 'densenet201':
        base_model = DenseNet201(weights=None, include_top=False, input_shape=input_shape)
    if model_name == 'inception_v3':
        base_model = InceptionV3(weights=None, include_top=False, input_shape=input_shape)
    if model_name == 'xception':
        base_model = Xception(weights=None, include_top=False, input_shape=input_shape)
    if model_name == 'mobilenet_v2':
        base_model = MobileNetV2(weights=None, include_top=False, input_shape=input_shape)

    x = base_model.output
    x = tf.keras.layers.GlobalAveragePooling2D()(x)
    output = tf.keras.layers.Dense(classes, activation=classifier_activation)(x)
    model = tf.keras.Model(inputs=base_model.input, outputs=output)

    optimizer = Adam(learning_rate=learning_rate, beta_1=0.9, beta_2=0.999,
                     epsilon=1e-07)
    # The Dense head already applies softmax, so the loss receives probabilities,
    # not logits (from_logits=False).
    model.compile(optimizer=optimizer,
                  loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
                  metrics=['accuracy'])
    results = model.fit(train_images, train_labels,
                        epochs=epochs,
                        validation_data=(test_images, test_labels),
                        batch_size=batch_size,
                        callbacks=callbacks)
    # losses = pd.DataFrame(model.history.history)
    # losses[['loss','val_loss']].plot()
    save_model = path + model_name + '.h5'
    model.save(save_model)
    return results
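# Illustrative call (not from the original source): trains a DenseNet201 from
# scratch on integer-encoded labels. The output directory and the
# x_train / y_train / x_val / y_val arrays are placeholders.
results = train_model(path='output/', train_images=x_train, train_labels=y_train,
                      test_images=x_val, test_labels=y_val,
                      model_name='densenet201', epochs=10, classes=2)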
def construct_model(pretrainedNN):
    model = Sequential()
    if pretrainedNN == 'VGG16':
        model.add(VGG16(weights=None, include_top=False, input_shape=(32, 32, 3)))
    elif pretrainedNN == 'VGG19':
        model.add(VGG19(weights=None, include_top=False, input_shape=(32, 32, 3)))
    elif pretrainedNN == 'ResNet101':
        model.add(ResNet101(weights=None, include_top=False, input_shape=(32, 32, 3)))
    elif pretrainedNN == 'ResNet152':
        model.add(ResNet152(weights=None, include_top=False, input_shape=(32, 32, 3)))
    elif pretrainedNN == 'ResNet50V2':
        model.add(ResNet50V2(weights=None, include_top=False, input_shape=(32, 32, 3)))
    elif pretrainedNN == 'ResNet101V2':
        model.add(ResNet101V2(weights=None, include_top=False, input_shape=(32, 32, 3)))
    elif pretrainedNN == 'ResNet152V2':
        model.add(ResNet152V2(weights=None, include_top=False, input_shape=(32, 32, 3)))
    elif pretrainedNN == 'MobileNet':
        model.add(MobileNet(weights=None, include_top=False, input_shape=(32, 32, 3)))
    elif pretrainedNN == 'MobileNetV2':
        model.add(MobileNetV2(weights=None, include_top=False, input_shape=(32, 32, 3)))
    elif pretrainedNN == 'DenseNet121':
        model.add(DenseNet121(weights=None, include_top=False, input_shape=(32, 32, 3)))
    elif pretrainedNN == 'DenseNet169':
        model.add(DenseNet169(weights=None, include_top=False, input_shape=(32, 32, 3)))
    elif pretrainedNN == 'DenseNet201':
        model.add(DenseNet201(weights=None, include_top=False, input_shape=(32, 32, 3)))
    else:
        model.add(ResNet50(weights=None, include_top=False, input_shape=(32, 32, 3)))
    model.add(Flatten())
    model.add(Dense(77, activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='sgd',
                  metrics=['accuracy'])
    return model
    model.add(layers.GlobalAveragePooling2D())
    model.add(layers.Dropout(0.5))
    model.add(layers.BatchNormalization())
    model.add(layers.Dense(2, activation='softmax'))
    model.compile(loss='binary_crossentropy',
                  optimizer=Adam(lr=lr),
                  metrics=['accuracy'])
    return model

K.clear_session()
gc.collect()

resnet = DenseNet201(weights='imagenet')
model = build_model(resnet, lr=1e-4)
model.summary()

# Learning Rate Reducer
learn_control = ReduceLROnPlateau(monitor='val_acc', patience=5,
                                  verbose=1, factor=0.2, min_lr=1e-7)

# Checkpoint
filepath = "weights.best.hdf5"
checkpoint = ModelCheckpoint(filepath, monitor='val_acc', verbose=1,
def DenseNet_greyscale(blocks, input_shape, pooling, trainable):
    if blocks == 121:
        blocks = [6, 12, 24, 16]
    elif blocks == 169:
        blocks = [6, 12, 32, 32]
    elif blocks == 201:
        blocks = [6, 12, 48, 32]

    img_input = layers.Input(shape=input_shape)
    bn_axis = 3
    x = layers.ZeroPadding2D(padding=((3, 3), (3, 3)))(img_input)
    x = layers.Conv2D(64, 7, strides=2, use_bias=False, name='conv1/conv')(x)
    x = layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-5,
                                  name='conv1/bn')(x)
    x = layers.Activation('relu', name='conv1/relu')(x)
    x = layers.ZeroPadding2D(padding=((1, 1), (1, 1)))(x)
    x = layers.MaxPooling2D(3, strides=2, name='pool1')(x)
    x = dense_block(x, blocks[0], name='conv2')
    x = transition_block(x, 0.5, name='pool2')
    x = dense_block(x, blocks[1], name='conv3')
    x = transition_block(x, 0.5, name='pool3')
    x = dense_block(x, blocks[2], name='conv4')
    x = transition_block(x, 0.5, name='pool4')
    x = dense_block(x, blocks[3], name='conv5')
    x = layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-5, name='bn')(x)
    x = layers.Activation('relu', name='relu')(x)
    if pooling == 'avg':
        x = layers.GlobalAveragePooling2D(name='avg_pool')(x)
    elif pooling == 'max':
        x = layers.GlobalMaxPooling2D(name='max_pool')(x)

    # Create model.
    if blocks == [6, 12, 24, 16]:
        model = models.Model(img_input, x, name='densenet121')
    elif blocks == [6, 12, 32, 32]:
        model = models.Model(img_input, x, name='densenet169')
    elif blocks == [6, 12, 48, 32]:
        model = models.Model(img_input, x, name='densenet201')

    # Load ImageNet weights, collapsing the first conv's RGB filters to a
    # single greyscale channel by summing over the input-channel axis.
    if blocks == [6, 12, 24, 16]:
        pretrained_model = DenseNet121(include_top=False, pooling=pooling)
    elif blocks == [6, 12, 32, 32]:
        pretrained_model = DenseNet169(include_top=False, pooling=pooling)
    elif blocks == [6, 12, 48, 32]:
        pretrained_model = DenseNet201(include_top=False, pooling=pooling)
    w = pretrained_model.layers[2].get_weights()[0].sum(2, keepdims=True)
    model.layers[2].set_weights([w])
    model.layers[2].trainable = trainable
    model.trainable = trainable
    for l1, l2 in zip(model.layers[3:], pretrained_model.layers[3:]):
        l1.set_weights(l2.get_weights())
        l1.trainable = trainable
    return model

# test = DenseNet_greyscale(121, (224, 224, 1), 'max', False)