def model_DenseNet169_multich(size, upd_ch):
    from keras.models import Model, load_model
    from keras.layers import Dense, Input
    from keras.applications import DenseNet169

    ref_ch = 3
    required_layer_name = 'conv1/conv'
    weights_cache_path = CACHE_PATH + 'model_DenseNet169_{}ch_imagenet_{}.h5'.format(upd_ch, size)
    if not os.path.isfile(weights_cache_path):
        model_ref = DenseNet169(include_top=False, weights='imagenet',
                                input_shape=(size, size, ref_ch), pooling='avg')
        model_upd = DenseNet169(include_top=False, weights=None,
                                input_shape=(size, size, upd_ch), pooling='avg')
        for i, layer in enumerate(model_ref.layers):
            print('Update weights layer [{}]: {}'.format(i, layer.name))
            if layer.name == required_layer_name:
                print('Recalc weights!')
                config = layer.get_config()
                use_bias = config['use_bias']
                if use_bias:
                    w, b = layer.get_weights()
                else:
                    w = layer.get_weights()[0]
                print('Use bias?: {}'.format(use_bias))
                print('Shape ref: {}'.format(w.shape))
                shape_upd = (w.shape[0], w.shape[1], upd_ch, w.shape[3])
                print('Shape upd: {}'.format(shape_upd))
                # Tile the ImageNet RGB filters across the new channel axis and
                # rescale so the expected activation magnitude is preserved.
                w_new = np.zeros(shape_upd, dtype=np.float32)
                for j in range(upd_ch):
                    w_new[:, :, j, :] = ref_ch * w[:, :, j % ref_ch, :] / upd_ch
                if use_bias:
                    model_upd.layers[i].set_weights((w_new, b))
                else:
                    model_upd.layers[i].set_weights((w_new,))
                continue
            else:
                model_upd.layers[i].set_weights(layer.get_weights())
        model_upd.save(weights_cache_path)
    else:
        model_upd = load_model(weights_cache_path)

    x = model_upd.layers[-1].output
    x = Dense(NUM_CLASSES, activation='softmax', name='prediction')(x)
    model = Model(inputs=model_upd.inputs, outputs=x)
    # print(model.summary())
    return model
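# A minimal usage sketch for the builder above. CACHE_PATH and NUM_CLASSES are
# module-level globals in the original; the values here are assumptions.
import os
import numpy as np

CACHE_PATH = './cache/'   # assumed cache location
NUM_CLASSES = 10          # assumed class count

os.makedirs(CACHE_PATH, exist_ok=True)
# 224x224 DenseNet169 accepting 6-channel input; the ImageNet RGB filters are
# tiled across the extra channels by the weight-recalculation loop above.
model = model_DenseNet169_multich(size=224, upd_ch=6)
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])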
def densenet_fpn(input_shape, channels=1, activation="sigmoid"):
    densenet = DenseNet169(input_shape=input_shape, include_top=False)
    conv1 = densenet.get_layer("conv1/relu").output
    conv2 = densenet.get_layer("pool2_relu").output
    conv3 = densenet.get_layer("pool3_relu").output
    conv4 = densenet.get_layer("pool4_relu").output
    conv5 = densenet.get_layer("bn").output
    conv5 = Activation("relu", name="conv5_relu")(conv5)
    P1, P2, P3, P4, P5 = create_pyramid_features(conv1, conv2, conv3, conv4, conv5)
    x = concatenate([
        prediction_fpn_block(P5, "P5", (8, 8)),
        prediction_fpn_block(P4, "P4", (4, 4)),
        prediction_fpn_block(P3, "P3", (2, 2)),
        prediction_fpn_block(P2, "P2"),
    ])
    x = conv_bn_relu(x, 256, 3, (1, 1), name="aggregation")
    x = decoder_block_no_bn(x, 128, conv1, 'up4')
    x = UpSampling2D()(x)
    x = conv_relu(x, 64, 3, (1, 1), name="up5_conv1")
    x = conv_relu(x, 64, 3, (1, 1), name="up5_conv2")
    if activation == 'softmax':
        name = 'mask_softmax'
        x = Conv2D(channels, (1, 1), activation=activation, name=name)(x)
    else:
        x = Conv2D(channels, (1, 1), activation=activation, name="mask")(x)
    model = Model(densenet.input, x)
    return model
def UDenseNet169(input_shape=(None, None, 3),
                 classes=1,
                 decoder_filters=16,
                 decoder_block_type='upsampling',
                 encoder_weights=None,
                 input_tensor=None,
                 activation='sigmoid',
                 **kwargs):
    backbone = DenseNet169(input_shape=input_shape,
                           input_tensor=input_tensor,
                           weights=encoder_weights,
                           include_top=False)
    skip_connections = list(reversed([4, 51, 139, 367]))
    model = build_unet(backbone, classes, decoder_filters,
                       skip_connections, block_type=decoder_block_type,
                       activation=activation, **kwargs)
    model.name = 'u-densenet169'
    return model
def load_densenet169(width, height, classes_num):
    # Build the template model on the CPU so it can later be replicated across GPUs.
    with tf.device('/cpu:0'):
        model = DenseNet169(weights=None,
                            input_shape=(width, height, 3),
                            classes=classes_num)
    return model
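# Building on /cpu:0 is the usual prelude to multi-GPU replication with
# keras.utils.multi_gpu_model in Keras 2.x. A hedged sketch (the GPU count and
# compile settings are assumptions, not part of the original):
from keras.utils import multi_gpu_model

model = load_densenet169(224, 224, classes_num=10)
parallel_model = multi_gpu_model(model, gpus=2)  # replicate across 2 GPUs
parallel_model.compile(optimizer='adam',
                       loss='categorical_crossentropy',
                       metrics=['accuracy'])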
def get_model(summary=False, img_width=150,
              fc_layers=[4096, 4096], fc_dropout_layers=[0.5, 0.5]):
    # Get back the convolutional part of a DenseNet169 trained on ImageNet
    base_model = DenseNet169(input_tensor=Input(shape=(img_width, img_width, 3)),
                             include_top=False)
    x = GlobalAveragePooling2D(name='avg_pool')(base_model.output)
    x = Dense(10, activation='softmax',
              kernel_regularizer=regularizers.l2(0.01))(x)
    my_model = Model(inputs=base_model.input, outputs=x)

    layers_to_freeze = 369
    for i in range(layers_to_freeze):
        my_model.layers[i].trainable = False

    if summary:
        print("---------------------------------------------------------")
        for i, layer in enumerate(my_model.layers):
            print(i, layer.name)
        print("---------------------------------------------------------")
        my_model.summary()
    return my_model, layers_to_freeze, 2
def build(self) -> Model:
    model = DenseNet169(include_top=True,
                        weights=None,
                        input_shape=(self.width, self.height, self.channels),
                        classes=2)
    return model
def generate_model(stage):
    K.clear_session()
    # With include_top=False, the DenseNet's classification layers are dropped
    base_model = DenseNet169(include_top=False,
                             input_shape=(320, 320, 3),
                             weights='imagenet')
    x = base_model.output
    # The top is replaced by global pooling and a single sigmoid unit
    x = GlobalAveragePooling2D()(x)
    x = Dense(1, activation='sigmoid', name='predictions')(x)
    # Model using the functional API
    model = Model(inputs=base_model.inputs, outputs=x)
    sgd = optimizers.SGD(lr=1e-4)
    for layer in base_model.layers:
        layer.trainable = False
    model.compile(optimizer=sgd,
                  metrics=['binary_accuracy'],
                  loss='binary_crossentropy')
    # If in stage 2 or more, load the weights from the input model
    if stage >= 2:
        if args.model_path:
            model.load_weights(args.model_path, by_name=True)
            print("Loaded from: ", args.model_path)
        # Set layers to be trainable.
        if stage == 2:
            set_trainable = False
            for layer in base_model.layers:
                # if "block12" in layer.name:  # what block do we want to start unfreezing
                set_trainable = True
                if set_trainable:
                    layer.trainable = True
                else:
                    layer.trainable = False
            # Recompile with the loaded weights.
            model.compile(optimizer=sgd,
                          metrics=['binary_accuracy'],
                          loss='binary_crossentropy')
    if args.print_summary:
        model.summary()
    return model
def generate_model(stage):
    K.clear_session()
    base_model = DenseNet169(include_top=False,
                             input_shape=(320, 320, 3),
                             weights='imagenet')
    x = base_model.output
    x = GlobalAveragePooling2D()(x)
    x = Dense(1, activation='sigmoid', name='predictions')(x)
    model = Model(inputs=base_model.inputs, outputs=x)
    adam = optimizers.Adam(lr=1e-4)
    sgd = optimizers.SGD(lr=1e-4)
    for layer in base_model.layers:
        layer.trainable = False
    model.compile(optimizer=sgd,
                  metrics=['binary_accuracy'],
                  loss='binary_crossentropy')
    if stage >= 2:
        if args.model_path:
            model.load_weights(args.model_path, by_name=True)
            print("Loaded from: ", args.model_path)
        if stage == 2:
            set_trainable = False
            for layer in base_model.layers:
                # if "block12" in layer.name:  # what block do we want to start unfreezing
                set_trainable = True
                if set_trainable:
                    layer.trainable = True
                else:
                    layer.trainable = False
            model.compile(optimizer=sgd,
                          metrics=['binary_accuracy'],
                          loss='binary_crossentropy')
    if args.print_summary:
        model.summary()
    return model
def pretrained_model(self):
    # base_model = ResNet50(include_top=False, input_shape=self.input_shape, weights='imagenet')
    base_model = DenseNet169(include_top=False,
                             input_shape=self.input_shape,
                             weights='imagenet')
    # base_model = Xception(include_top=False, input_shape=self.input_shape, weights='imagenet')
    # base_model = InceptionResNetV2(include_top=False, input_shape=self.input_shape, weights='imagenet')
    x = base_model.output
    x = Flatten()(x)
    x = Dense(512, activation='relu')(x)
    x = Dropout(0.5)(x)
    x = Dense(512, activation='relu')(x)
    x = Dropout(0.5)(x)
    predictions = Dense(self.num_classes, activation='softmax')(x)
    model = Model(base_model.inputs, predictions)
    return model
def build_model(input_shape, output_num, feature_extractor='vgg16'):
    if feature_extractor == 'vgg16':
        arch = VGG16(include_top=False, weights='imagenet', input_shape=input_shape)
    elif feature_extractor == 'vgg19':
        arch = VGG19(include_top=False, weights='imagenet', input_shape=input_shape)
    elif feature_extractor == 'dense121':
        arch = DenseNet121(include_top=False, weights='imagenet', input_shape=input_shape)
    elif feature_extractor == 'dense169':
        arch = DenseNet169(include_top=False, weights='imagenet', input_shape=input_shape)
    elif feature_extractor == 'dense201':
        arch = DenseNet201(include_top=False, weights='imagenet', input_shape=input_shape)
    return create_attention_branch_net(arch, output_num)
def load_DenseNet169_model():
    # DenseNet169 model, 224x224 input
    settings.SITE_MODEL = DenseNet169(weights="imagenet")
    settings.SITE_GRAPH = tf.get_default_graph()
from keras.preprocessing import image
from keras.applications.mobilenet import preprocess_input, decode_predictions

if themodel == 'DenseNet121':
    from keras.applications import DenseNet121
    if IMG_SIZE != 224:
        model = DenseNet121(weights='imagenet', include_top=False,
                            input_shape=(224, 224, 3))
        new_model = change_model(model, new_input_shape=(None, IMG_SIZE, IMG_SIZE, 3))
    else:
        new_model = DenseNet121(weights='imagenet', include_top=False,
                                input_shape=(224, 224, 3))
elif themodel == 'DenseNet169':
    from keras.applications import DenseNet169
    if IMG_SIZE != 224:
        model = DenseNet169(weights='imagenet', include_top=False,
                            input_shape=(224, 224, 3))
        new_model = change_model(model, new_input_shape=(None, IMG_SIZE, IMG_SIZE, 3))
    else:
        new_model = DenseNet169(weights='imagenet', include_top=False,
                                input_shape=(224, 224, 3))
elif themodel == 'InceptionV3':
    from keras.applications import InceptionV3
    if IMG_SIZE != 224:
        model = InceptionV3(weights='imagenet', include_top=False,
                            input_shape=(224, 224, 3))
        new_model = change_model(model, new_input_shape=(None, IMG_SIZE, IMG_SIZE, 3))
    else:
        new_model = InceptionV3(weights='imagenet', include_top=False,
                                input_shape=(224, 224, 3))
elif themodel == 'InceptionResNetV2':
    from keras.applications import InceptionResNetV2
    if IMG_SIZE != 224:
        model = InceptionResNetV2(weights='imagenet', include_top=False,
                                  input_shape=(224, 224, 3))
        new_model = change_model(model, new_input_shape=(None, IMG_SIZE, IMG_SIZE, 3))
    # (tail of precision_m; reconstructed definitions follow this snippet)
    return precision


def f1_m(y_true, y_pred):
    precision = precision_m(y_true, y_pred)
    recall = recall_m(y_true, y_pred)
    return 2 * ((precision * recall) / (precision + recall + K.epsilon()))


# Teacher model
filepath = "teacher.h5"
checkpoint = ModelCheckpoint(filepath, monitor='val_loss', verbose=1,
                             save_best_only=True, mode='min')
input_shape = (224, 224, 3)  # Input shape of each image
ssl._create_default_https_context = ssl._create_unverified_context

# Import the DenseNet169 model and discard the 1000-class ImageNet top
base_model = DenseNet169(weights="imagenet", include_top=False,
                         input_shape=(224, 224, 3))
x = AveragePooling2D(pool_size=(3, 3), name='avg_pool')(base_model.output)
x = Flatten()(x)
x = Dense(128, activation='relu', name='fc2')(x)
x = BatchNormalization()(x)
x = Dropout(0.5, name='dropout_fc2')(x)
preds = Dense(14, activation="softmax", name="preds")(x)
teacher = Model(inputs=base_model.input, outputs=preds)
teacher.compile(loss='categorical_crossentropy',
                optimizer='adam',
                metrics=['acc', f1_m, precision_m, recall_m])
print(teacher.summary())
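# The snippet above opens with the tail of precision_m and compiles with
# precision_m/recall_m metrics that are not shown. A sketch of the usual
# Keras-backend definitions these metrics assume (reconstructed, not the
# original code):
from keras import backend as K

def recall_m(y_true, y_pred):
    # recall = TP / (TP + FN), computed on rounded predictions
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
    return true_positives / (possible_positives + K.epsilon())

def precision_m(y_true, y_pred):
    # precision = TP / (TP + FP), computed on rounded predictions
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
    return true_positives / (predicted_positives + K.epsilon())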
# vgg16 = VGG16()  # (None, 224, 224, 3)
# model = VGG19()
model = Xception()
model = ResNet101()
model = ResNet101V2()
model = ResNet152()
model = ResNet152V2()
model = ResNet50()
model = ResNet50V2()
model = InceptionV3()
model = InceptionResNetV2()
model = MobileNet()
model = MobileNetV2()
model = DenseNet121()
model = DenseNet169()
model = DenseNet201()
model = NASNetLarge()
model = NASNetMobile()
# vgg16.summary()
'''
model = Sequential()
# model.add(vgg16)
# model.add(Flatten())
model.add(Dense(256))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dense(10, activation='softmax'))
model.summary()
    target_size=(config.IMAGE_SIZE, config.IMAGE_SIZE),
    batch_size=12,
    class_mode='categorical',
    shuffle=True)

callbacks = [EarlyStopping(monitor='val_loss', patience=10, verbose=1),
             ModelCheckpoint(WEIGHTS_PATH, monitor='val_loss',
                             save_best_only=True, verbose=1),
             ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=5,
                               verbose=1, mode='auto', epsilon=0.0001,
                               cooldown=0, min_lr=0)]
             # TensorBoard(log_dir='./log/xception_v3', write_images=True)]

input_tensor = Input(shape=(config.IMAGE_SIZE, config.IMAGE_SIZE, 3))
# Note: no `pooling` argument here, since an explicit GlobalAveragePooling2D
# layer is added below (combining both would fail on a 2-D tensor).
base_model = DenseNet169(input_shape=(config.IMAGE_SIZE, config.IMAGE_SIZE, 3),
                         input_tensor=input_tensor,
                         include_top=False,
                         weights='imagenet')

# Only train Dense layers
# for layer in base_model.layers:
#     layer.trainable = False

x = base_model.output
x = GlobalAveragePooling2D()(x)
x = Dense(256, activation='relu')(x)
x = Dropout(0.5)(x)
predictions = Dense(12, activation='softmax')(x)

# model = Model(inputs=base_model.input, outputs=predictions)
model = Model(inputs=input_tensor, outputs=predictions)
for layer in model.layers:
def convolutional(instruction=None,
                  read_mode=None,
                  preprocess=True,
                  data_path=None,
                  verbose=0,
                  new_folders=True,
                  image_column=None,
                  training_ratio=0.8,
                  fine_tune=False,
                  augmentation=True,
                  custom_arch=None,
                  pretrained=None,
                  epochs=10,
                  height=None,
                  width=None,
                  save_as_tfjs=None,
                  save_as_tflite=None,
                  generate_plots=True):
    '''
    Body of the convolutional function that is called in the neural network
    query if the data is presented in images.
    :param many parameters: used for preprocessing, tuning, plot generation,
        and parameterizing the convolutional neural network trained.
    :return: dictionary that holds all the information for the finished model.
    '''
    # data_path = get_folder_dir()

    logger("Generating datasets for classes")

    LR = 0.001
    plots = {}
    if pretrained:
        if not height:
            height = 224
        if not width:
            width = 224
        if height != 224 or width != 224:
            raise ValueError(
                "For pretrained models, both 'height' and 'width' must be 224.")

    if preprocess:
        if custom_arch:
            raise ValueError(
                "If 'custom_arch' is not None, 'preprocess' must be set to false.")

        read_mode_info = set_distinguisher(data_path, read_mode)
        read_mode = read_mode_info["read_mode"]

        training_path = "/proc_training_set"
        testing_path = "/proc_testing_set"

        if read_mode == "setwise":
            processInfo = setwise_preprocessing(data_path, new_folders, height, width)
            if not new_folders:
                training_path = "/training_set"
                testing_path = "/testing_set"

        # if image dataset in form of csv
        elif read_mode == "csvwise":
            if training_ratio <= 0 or training_ratio >= 1:
                raise BaseException("Test ratio must be between 0 and 1.")
            processInfo = csv_preprocessing(read_mode_info["csv_path"],
                                            data_path, instruction, image_column,
                                            training_ratio, height, width)

        # if image dataset in form of one folder containing class folders
        elif read_mode == "classwise":
            if training_ratio <= 0 or training_ratio >= 1:
                raise BaseException("Test ratio must be between 0 and 1.")
            processInfo = classwise_preprocessing(data_path, training_ratio, height, width)
    else:
        training_path = "/training_set"
        testing_path = "/testing_set"
        processInfo = already_processed(data_path)

    num_channels = 3
    color_mode = 'rgb'
    if processInfo["gray_scale"]:
        num_channels = 1
        color_mode = 'grayscale'

    input_shape = (processInfo["height"], processInfo["width"], num_channels)
    input_single = (processInfo["height"], processInfo["width"])
    num_classes = processInfo["num_categories"]
    loss_func = ""
    output_layer_activation = ""

    if num_classes > 2:
        loss_func = "categorical_crossentropy"
        output_layer_activation = "softmax"
    elif num_classes == 2:
        num_classes = 1
        loss_func = "binary_crossentropy"
        output_layer_activation = "sigmoid"

    logger("Creating convolutional neural network dynamically")

    # Convolutional Neural Network
    # Build model based on custom_arch configuration if given
    if custom_arch:
        with open(custom_arch, "r") as f:
            custom_arch_dict = json.load(f)
            custom_arch_json_string = json.dumps(custom_arch_dict)
            model = model_from_json(custom_arch_json_string)

    # Build an existing state-of-the-art model
    elif pretrained:
        arch_lower = pretrained.get('arch').lower()

        # If the user specifies pretrained['weights'] as 'imagenet',
        # weights pretrained on ImageNet will be used
        if 'weights' in pretrained and pretrained.get('weights') == 'imagenet':
            # Load ImageNet-pretrained weights
            if arch_lower == "vggnet16":
                base_model = VGG16(include_top=False, weights='imagenet',
                                   input_shape=input_shape)
                x = Flatten()(base_model.output)
                x = Dense(4096)(x)
                x = Dropout(0.5)(x)
                x = Dense(4096)(x)
                x = Dropout(0.5)(x)
                pred = Dense(num_classes, activation=output_layer_activation)(x)
                model = Model(base_model.input, pred)
            elif arch_lower == "vggnet19":
                base_model = VGG19(include_top=False, weights='imagenet',
                                   input_shape=input_shape)
                x = Flatten()(base_model.output)
                x = Dense(4096)(x)
                x = Dropout(0.5)(x)
                x = Dense(4096)(x)
                x = Dropout(0.5)(x)
                pred = Dense(num_classes, activation=output_layer_activation)(x)
                model = Model(base_model.input, pred)
            elif arch_lower == "resnet50":
                base_model = ResNet50(include_top=False, weights='imagenet',
                                      input_shape=input_shape)
                x = GlobalAveragePooling2D()(base_model.output)
                x = Dropout(0.5)(x)
                pred = Dense(num_classes, activation=output_layer_activation)(x)
                model = Model(base_model.input, pred)
            elif arch_lower == "resnet101":
                base_model = ResNet101(include_top=False, weights='imagenet',
                                       input_shape=input_shape)
                x = GlobalAveragePooling2D()(base_model.output)
                x = Dropout(0.5)(x)
                pred = Dense(num_classes, activation=output_layer_activation)(x)
                model = Model(base_model.input, pred)
            elif arch_lower == "resnet152":
                base_model = ResNet152(include_top=False, weights='imagenet',
                                       input_shape=input_shape)
                x = GlobalAveragePooling2D()(base_model.output)
                x = Dropout(0.5)(x)
                pred = Dense(num_classes, activation=output_layer_activation)(x)
                model = Model(base_model.input, pred)
            elif arch_lower == "mobilenet":
                base_model = MobileNet(include_top=False, weights='imagenet',
                                       input_shape=input_shape)
                x = fine_tuned_model(base_model)
                pred = Dense(num_classes, activation=output_layer_activation)(x)
                model = Model(base_model.input, pred)
            elif arch_lower == "mobilenetv2":
                base_model = MobileNetV2(include_top=False, weights='imagenet',
                                         input_shape=input_shape)
                x = fine_tuned_model(base_model)
                pred = Dense(num_classes, activation=output_layer_activation)(x)
                model = Model(base_model.input, pred)
            elif arch_lower == "densenet121":
                base_model = DenseNet121(include_top=False, weights='imagenet',
                                         input_shape=input_shape)
                x = fine_tuned_model(base_model)
                pred = Dense(num_classes, activation=output_layer_activation)(x)
                model = Model(base_model.input, pred)
            elif arch_lower == "densenet169":
                base_model = DenseNet169(include_top=False, weights='imagenet',
                                         input_shape=input_shape)
                x = fine_tuned_model(base_model)
                pred = Dense(num_classes, activation=output_layer_activation)(x)
                model = Model(base_model.input, pred)
            elif arch_lower == "densenet201":
                base_model = DenseNet201(include_top=False, weights='imagenet',
                                         input_shape=input_shape)
                x = fine_tuned_model(base_model)
                pred = Dense(num_classes, activation=output_layer_activation)(x)
                model = Model(base_model.input, pred)
            else:
                raise ModuleNotFoundError("arch '" + pretrained.get('arch') + "' not supported.")
        else:
            # Randomly initialized weights
            if arch_lower == "vggnet16":
                model = VGG16(include_top=True, weights=None, classes=num_classes,
                              classifier_activation=output_layer_activation)
            elif arch_lower == "vggnet19":
                model = VGG19(include_top=True, weights=None, classes=num_classes,
                              classifier_activation=output_layer_activation)
            elif arch_lower == "resnet50":
                model = ResNet50(include_top=True, weights=None, classes=num_classes)
            elif arch_lower == "resnet101":
                model = ResNet101(include_top=True, weights=None, classes=num_classes)
            elif arch_lower == "resnet152":
                model = ResNet152(include_top=True, weights=None, classes=num_classes)
            elif arch_lower == "mobilenet":
                model = MobileNet(include_top=True, weights=None, classes=num_classes)
            elif arch_lower == "mobilenetv2":
                model = MobileNetV2(include_top=True, weights=None, classes=num_classes)
            elif arch_lower == "densenet121":
                model = DenseNet121(include_top=True, weights=None, classes=num_classes)
            elif arch_lower == "densenet169":
                model = DenseNet169(include_top=True, weights=None, classes=num_classes)
            elif arch_lower == "densenet201":
                model = DenseNet201(include_top=True, weights=None, classes=num_classes)
            else:
                raise ModuleNotFoundError("arch '" + pretrained.get('arch') + "' not supported.")
    else:
        model = Sequential()
        # model.add(Conv2D(64, kernel_size=3, activation="relu", input_shape=input_shape))
        # model.add(MaxPooling2D(pool_size=(2, 2)))
        # model.add(Conv2D(64, kernel_size=3, activation="relu"))
        # model.add(MaxPooling2D(pool_size=(2, 2)))
        # model.add(Flatten())
        # model.add(Dense(num_classes, activation="softmax"))
        # model.compile(optimizer="adam", loss=loss_func, metrics=['accuracy'])
        model.add(Conv2D(filters=64, kernel_size=5, activation="relu",
                         input_shape=input_shape))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Conv2D(filters=64, kernel_size=3, activation="relu"))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(0.25))
        model.add(Conv2D(filters=64, kernel_size=3, activation="relu"))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Flatten())
        model.add(Dense(units=256, activation="relu"))
        model.add(Dropout(0.25))
        model.add(Dense(units=num_classes, activation="softmax"))

    if pretrained and 'weights' in pretrained and pretrained.get('weights') == 'imagenet':
        # Freeze the pretrained backbone for the first training phase
        for layer in base_model.layers:
            layer.trainable = False

    opt = Adam(learning_rate=LR)
    model.compile(optimizer=opt, loss=loss_func, metrics=['accuracy'])

    logger("Located image data")

    if augmentation:
        train_data = ImageDataGenerator(rescale=1. / 255,
                                        shear_range=0.2,
                                        zoom_range=0.2,
                                        horizontal_flip=True)
        test_data = ImageDataGenerator(rescale=1. / 255)
        logger('Dataset augmented through zoom, shear, flip, and rescale')
    else:
        train_data = ImageDataGenerator()
        test_data = ImageDataGenerator()

    logger("->", "Optimal image size identified: {}".format(input_shape))

    X_train = train_data.flow_from_directory(
        data_path + training_path,
        target_size=input_single,
        color_mode=color_mode,
        batch_size=(16 if processInfo["train_size"] >= 16 else 1),
        class_mode=loss_func[:loss_func.find("_")])
    X_test = test_data.flow_from_directory(
        data_path + testing_path,
        target_size=input_single,
        color_mode=color_mode,
        batch_size=(16 if processInfo["test_size"] >= 16 else 1),
        class_mode=loss_func[:loss_func.find("_")])

    if epochs <= 0:
        raise BaseException("Number of epochs has to be greater than 0.")

    print("\n")
    logger('Training image model')
    # model.summary()

    history = model.fit_generator(
        X_train,
        steps_per_epoch=X_train.n // X_train.batch_size,
        validation_data=X_test,
        validation_steps=X_test.n // X_test.batch_size,
        epochs=epochs,
        verbose=verbose)

    if fine_tune:
        logger('->', 'Training accuracy: {}'.format(history.history['accuracy'][-1]))
        logger('->', 'Validation accuracy: {}'.format(history.history['val_accuracy'][-1]))

        # Unfreeze the backbone and continue training at a lower learning rate
        for layer in base_model.layers:
            layer.trainable = True
        opt = Adam(learning_rate=LR / 10)
        model.compile(optimizer=opt, loss=loss_func, metrics=['accuracy'])

        print("\n\n")
        logger('Training fine tuned model')

        fine_tuning_epoch = epochs + 10
        history_fine = model.fit_generator(
            X_train,
            steps_per_epoch=X_train.n // X_train.batch_size,
            validation_data=X_test,
            validation_steps=X_test.n // X_test.batch_size,
            epochs=fine_tuning_epoch,
            initial_epoch=history.epoch[-1],
            verbose=verbose)

        # frozen model acc and loss history
        acc = history.history['accuracy']
        val_acc = history.history['val_accuracy']
        loss = history.history['loss']
        val_loss = history.history['val_loss']

        # fine tuned model acc and loss history
        acc += history_fine.history['accuracy']
        val_acc += history_fine.history['val_accuracy']
        loss += history_fine.history['loss']
        val_loss += history_fine.history['val_loss']

        if generate_plots:
            plots = generate_fine_tuned_classification_plots(acc, val_acc, loss, val_loss, epochs)

    models = []
    losses = []
    accuracies = []
    model_data = []

    model_data.append(model)
    models.append(history)
    losses.append(history.history["val_loss"][-1])
    accuracies.append(history.history['val_accuracy'][-1])

    # final_model = model_data[accuracies.index(max(accuracies))]
    # final_hist = models[accuracies.index(max(accuracies))]

    if generate_plots and not fine_tune:
        plots = generate_classification_plots(models[-1])

    print("\n")
    logger('->', 'Final training accuracy: {}'.format(history.history['accuracy'][-1]))
    logger('->', 'Final validation accuracy: {}'.format(history.history['val_accuracy'][-1]))

    # storing values in the model dictionary
    number_of_examples = len(X_test.filenames)
    number_of_generator_calls = math.ceil(number_of_examples / (1.0 * X_test.batch_size))

    test_labels = []
    for i in range(0, int(number_of_generator_calls)):
        test_labels.extend(np.array(X_test[i][1]))

    predIdx = model.predict(X_test)

    if output_layer_activation == "sigmoid":
        real = [int(x) for x in test_labels]
        ans = []
        for i in range(len(predIdx)):
            ans.append(int(round(predIdx[i][0])))
    elif output_layer_activation == "softmax":
        real = []
        for ans in test_labels:
            real.append(ans.argmax())
        ans = []
        for r in predIdx:
            ans.append(r.argmax())
    else:
        print("NOT THE CASE")

    logger("Stored model under 'convolutional_NN' key")

    if save_as_tfjs:
        tfjs.converters.save_keras_model(model, "tfjsmodel")
        logger("Saved tfjs model under 'tfjsmodel' directory")

    if save_as_tflite:
        converter = tf.lite.TFLiteConverter.from_keras_model(model)
        tflite_model = converter.convert()
        open("model.tflite", "wb").write(tflite_model)
        logger("Saved tflite model as 'model.tflite' ")

    clearLog()
    K.clear_session()

    return {
        'id': generate_id(),
        'data_type': read_mode,
        'data_path': data_path,
        'data': {'train': X_train, 'test': X_test},
        'shape': input_shape,
        'res': {'real': real, 'ans': ans},
        'model': model,
        'plots': plots,
        'losses': {
            'training_loss': history.history['loss'],
            'val_loss': history.history['val_loss']
        },
        'accuracy': {
            'training_accuracy': history.history['accuracy'],
            'validation_accuracy': history.history['val_accuracy']
        },
        'num_classes': (2 if num_classes == 1 else num_classes),
        'data_sizes': {
            'train_size': processInfo['train_size'],
            'test_size': processInfo['test_size']
        }
    }
def build_model(name='vgg16', filepath=None, training=False, continuing=True):
    model = None
    base = None
    shape = config.IMAGE_DIMENSIONS
    checkpoint = ModelCheckpoint(filepath, monitor='val_acc', verbose=1,
                                 save_best_only=True, mode='max')

    if os.path.exists(filepath) and training and continuing:
        model = load_model(filepath)
        return model, checkpoint, shape

    name = name.lower()
    if name == 'vgg16':
        base = VGG16()
    elif name == 'vgg19':
        base = VGG19()
    elif name == 'xception':
        base = Xception()
        shape = config.IMAGE_DIMENSIONS_299
    elif name == 'inceptionv3':
        base = InceptionV3()
        shape = config.IMAGE_DIMENSIONS_299
    elif name == 'resnet50':
        base = ResNet50()
    elif name == 'mobilenetv2':
        base = MobileNetV2()
    elif name == 'densenet121':
        base = DenseNet121()
    elif name == 'densenet169':
        base = DenseNet169()
    elif name == 'densenet201':
        base = DenseNet201()
    elif name == 'inceptionresnetv2':
        base = InceptionResNetV2()
        shape = config.IMAGE_DIMENSIONS_299
    elif name == 'nasnetmobile':
        base = NASNetMobile()
    elif name == 'control':
        input = Input(shape=config.IMAGE_SHAPE)
        base = Conv2D(input_shape=config.IMAGE_SHAPE, filters=16,
                      kernel_size=3, activation='relu')(input)
        base = MaxPooling2D()(base)
        base = Flatten()(base)
        base = Model(inputs=input, outputs=base)

    if name != 'control':
        for layer in base.layers:
            layer.trainable = False

    x = Dense(1024, activation='relu')(base.output)
    x = BatchNormalization()(x)
    x = Dropout(0.7)(x)
    x = Dense(512, activation='relu')(x)
    x = BatchNormalization()(x)
    x = Dropout(0.5)(x)
    x = Dense(2, activation='softmax')(x)
    model = Model(inputs=base.input, outputs=x)

    if os.path.exists(filepath):
        model.load_weights(filepath)

    return model, checkpoint, shape
def eval(args=None):
    args = parser.parse_args()
    # load up our csv with validation factors
    data_dir = join(getcwd(), DATA_DIR)
    eval_csv = join(data_dir, EVAL_CSV)
    true_labels = []
    ###########################################
    df = pd.read_csv(args.input_filename, names=['img', 'label'], header=None)
    samples = [tuple(x) for x in df.values]
    # for img, label in samples:
    #     # assert ("negative" in img) is (label is 0)
    #     enc = ImageString(img)
    #     true_labels.append(enc._parse_normal_label())
    #     cat_dir = join(proc_val_dir, enc.normal)
    #     if not os.path.exists(cat_dir):
    #         os.makedirs(cat_dir)
    #     shutil.copy2(enc.img_filename, join(cat_dir, enc.flat_file_name()))
    ###########################################
    eval_datagen = ImageDataGenerator(
        rescale=1. / 255
        # , histogram_equalization=True
    )
    eval_generator = eval_datagen.flow_from_directory(
        EVAL_DIR,
        class_mode='binary',
        shuffle=False,
        target_size=(IMG_HEIGHT, IMG_WIDTH),
        batch_size=BATCH_SIZE)
    n_samples = eval_generator.samples
    base_model = DenseNet169(input_shape=DIMS, weights='imagenet', include_top=False)
    x = base_model.output
    x = GlobalAveragePooling2D(name='avg_pool')(x)  # comment out for ResNet
    # x = WildcatPool2d()(x)
    x = Dense(1, activation='sigmoid', name='predictions')(x)
    model = Model(inputs=base_model.input, outputs=x)
    model.load_weights(MODEL_TO_EVAL8)
    model.compile(optimizer=Adam(lr=1e-3),
                  loss=binary_crossentropy,
                  # loss=kappa_error,
                  metrics=['binary_accuracy'])
    score, acc = model.evaluate_generator(eval_generator, n_samples / BATCH_SIZE)
    print(model.metrics_names)
    print('==> Metrics with eval')
    print("loss :{:0.4f} \t Accuracy:{:0.4f}".format(score, acc))
    y_pred = model.predict_generator(eval_generator, n_samples / BATCH_SIZE)
    # print(y_pred)
    # df_filenames = pd.Series(np.array(eval_generator.filenames), name='filenames')
    # df_classes = pd.Series(np.array(y_pred), name='classes')
    # prediction_data = pd.concat([df_filenames, df_classes])
    # prediction_data.to_csv(args.output_path + "/prediction.csv")
    mura = Mura(eval_generator.filenames,
                y_true=eval_generator.classes,
                y_pred1=y_pred, y_pred2=y_pred, y_pred3=y_pred,
                y_pred4=y_pred, y_pred5=y_pred,
                output_path=args.output_path)
    print(mura.metrics_by_encounter())
def Dense_Net(trainable=None, net="DenseNet121"):
    # Preprocessing the dataset into Keras-feedable format
    train_datagen = ImageDataGenerator(rotation_range=rotation,
                                       width_shift_range=width_shift,
                                       height_shift_range=height_shift,
                                       rescale=scale,
                                       shear_range=shear,
                                       zoom_range=zoom,
                                       horizontal_flip=horizontal,
                                       fill_mode=fill,
                                       validation_split=validation)
    test_datagen = ImageDataGenerator(rescale=scale)

    train_generator = train_datagen.flow_from_directory(path,
                                                        target_size=target,
                                                        batch_size=batch,
                                                        class_mode='categorical',
                                                        subset='training')
    validation_generator = train_datagen.flow_from_directory(path,
                                                             target_size=target,
                                                             batch_size=batch,
                                                             class_mode='categorical',
                                                             subset='validation')

    models_list = ['DenseNet121', 'DenseNet169', 'DenseNet201']

    # Loading the DenseNet model
    if net == "DenseNet121":
        densenet = DenseNet121(include_top=False, weights='imagenet',
                               input_shape=input_sh, pooling=pooling_model)
    if net == "DenseNet169":
        densenet = DenseNet169(include_top=False, weights='imagenet',
                               input_shape=input_sh, pooling=pooling_model)
    if net == "DenseNet201":
        densenet = DenseNet201(include_top=False, weights='imagenet',
                               input_shape=input_sh, pooling=pooling_model)
    if net not in models_list:
        raise ValueError('Please provide a valid model name: one of {}'.format(models_list))

    output = densenet.layers[-1].output
    if pooling_model is None:
        output = keras.layers.Flatten()(output)
    densenet = Model(densenet.input, outputs=output)
    print(densenet.summary())
    print('\n\n\n')

    # If fine tuning was not requested
    if trainable is None:
        model = Sequential()
        model.add(densenet)
        model.add(Dense(hidden, activation='relu', input_dim=input_sh))
        model.add(Dropout(dropout_num))
        model.add(Dense(hidden, activation='relu'))
        model.add(Dropout(dropout_num))
        if classes == 1:
            model.add(Dense(classes, activation='sigmoid', name='Output'))
        else:
            model.add(Dense(classes, activation='softmax', name='Output'))

        # Here the DenseNet layers are not trainable
        for layer in densenet.layers:
            layer.trainable = False
        print("The model summary of Densenet -->\n\n\n")
        for i, layer in enumerate(densenet.layers):
            print(i, layer.name, layer.trainable)

        model.compile(loss=loss_param,  # Change according to data
                      optimizer=optimizers.RMSprop(),
                      metrics=['accuracy'])
        print("The summary of final Model \n\n\n")
        print(model.summary())
        print('\n\n\n')

        fit_history = model.fit_generator(
            train_generator,
            steps_per_epoch=len(train_generator.filenames) // batch,
            epochs=epoch,
            shuffle=True,
            validation_data=validation_generator,
            validation_steps=len(train_generator.filenames) // batch,
            class_weight=n,
            callbacks=[EarlyStopping(patience=patience_param, restore_best_weights=True),
                       ReduceLROnPlateau(patience=patience_param)])

        os.chdir(output_path)
        model.save("model.h5")
        print(fit_history.history.keys())

        plt.figure(1, figsize=(15, 8))
        plt.subplot(221)
        plt.plot(fit_history.history['accuracy'])
        plt.plot(fit_history.history['val_accuracy'])
        plt.title('model accuracy')
        plt.ylabel('accuracy')
        plt.xlabel('epoch')
        plt.legend(['train', 'valid'])
        plt.subplot(222)
        plt.plot(fit_history.history['loss'])
        plt.plot(fit_history.history['val_loss'])
        plt.title('model loss')
        plt.ylabel('loss')
        plt.xlabel('epoch')
        plt.legend(['train', 'valid'])
        plt.show()

    if trainable is not None:
        # Make the last block of the conv base trainable:
        for layer in densenet.layers[:trainable]:
            layer.trainable = False
        for layer in densenet.layers[trainable:]:
            layer.trainable = True
        print('Last block of the conv_base is now trainable')
        for i, layer in enumerate(densenet.layers):
            print(i, layer.name, layer.trainable)

        model = Sequential()
        model.add(densenet)
        model.add(Dense(hidden, activation='relu', input_dim=input_sh))
        model.add(Dropout(dropout_num))
        model.add(Dense(hidden, activation='relu'))
        model.add(Dropout(dropout_num))
        model.add(Dense(hidden, activation='relu'))
        model.add(Dropout(dropout_num))
        if classes == 1:
            model.add(Dense(classes, activation='sigmoid', name='Output'))
        else:
            model.add(Dense(classes, activation='softmax', name='Output'))

        # In this the Densenet layers are not trainable
        for layer in densenet.layers:
            layer.trainable = False
        print("The model summary of Densenet -->\n\n\n")

        model.compile(loss=loss_param,  # Change according to data
                      optimizer=optimizers.RMSprop(),
                      metrics=['accuracy'])
        print("The summary of final Model \n\n\n")
        print(model.summary())
        print('\n\n\n')

        fit_history = model.fit_generator(
            train_generator,
            steps_per_epoch=len(train_generator.filenames) // batch,
            epochs=epoch,
            shuffle=True,
            validation_data=validation_generator,
            validation_steps=len(train_generator.filenames) // batch,
            class_weight=n,
            callbacks=[EarlyStopping(patience=patience_param, restore_best_weights=True),
                       ReduceLROnPlateau(patience=patience_param)])

        os.chdir(output_path)
        model.save("model.h5")
        print(fit_history.history.keys())

        plt.figure(1, figsize=(15, 8))
        plt.subplot(221)
        plt.plot(fit_history.history['accuracy'])
        plt.plot(fit_history.history['val_accuracy'])
        plt.title('model accuracy')
        plt.ylabel('accuracy')
        plt.xlabel('epoch')
        plt.legend(['train', 'valid'])
        plt.subplot(222)
        plt.plot(fit_history.history['loss'])
        plt.plot(fit_history.history['val_loss'])
        plt.title('model loss')
        plt.ylabel('loss')
        plt.xlabel('epoch')
        plt.legend(['train', 'valid'])
        plt.show()
def transform_dl_classification(dataframe, spark):
    classes = [
        CLASSNAME_NORMAL, CLASSNAME_COVID19, CLASSNAME_LUNG_OPACITY,
        CLASSNAME_VIRAL_PNEUMONIA
    ]
    batch_size = 16
    epochs = 50

    udf_function_get_hdfs_origin = udf(hdfs_origin, StringType())
    udf_function_classify = udf(classify, StringType())

    # Preparing the distributed dataframe
    dataframe_keras = dataframe.withColumn("height", dataframe.image.height) \
        .withColumn("width", dataframe.image.width) \
        .withColumn("n_channels", dataframe.image.nChannels) \
        .withColumn("class_name", udf_function_classify("label")) \
        .withColumn("origin", udf_function_get_hdfs_origin("image"))
    dataframe_keras = dataframe_keras.filter(
        func.col("class_name") != CLASSNAME_INVALID)
    dataframe_keras = dataframe_keras.drop("image", "label")
    dataframe_keras.cache()
    dataframe_keras_master = dataframe_keras.toPandas()

    # Data generators, based on the distributed dataframe, batch_size and
    # the classes to predict
    [train_datagen, train_gen] = train_generator_from_dataframe(
        dataframe_keras_master, batch_size, classes)
    [test_datagen, test_gen] = test_generator_from_dataframe(
        dataframe_keras_master, batch_size, classes)

    # Constructing the neural net, based on transfer learning
    dense169 = DenseNet169(input_shape=(299, 299, 3),
                           include_top=False,
                           weights='imagenet')
    dense169.trainable = False
    model = Sequential([
        dense169,
        Flatten(),
        Dense(units=256, activation='relu'),
        Dense(units=256, activation='relu'),
        Dense(units=4, activation='softmax')
    ])

    # Compiling the model and initiating training
    model.compile(loss='categorical_crossentropy',
                  optimizer=Adam(),
                  metrics=['accuracy', 'mse'])
    model.fit(train_gen,
              steps_per_epoch=len(train_gen) // batch_size,
              validation_steps=len(test_gen) // batch_size,
              validation_data=test_gen,
              epochs=epochs,
              callbacks=[model_callbacks()])

    predictions_y = model.predict(test_gen)
    [conf_matrix, accuracy] = model_efficacy(predictions_y, test_gen, classes)

    model.save('./outputs/model/')

    return [
        spark.createDataFrame(conf_matrix),
        spark.createDataFrame(accuracy)
    ]
        0: 1.,
        1: 5.,  # weigh the covid class 5x more than the others
        2: 1.,
        3: 1.
    },
    callbacks=[checkpoint]
)

"""#V7: DenseNet169 FineTune"""

from keras.applications import DenseNet121, DenseNet169, DenseNet201
from keras.layers import Conv2D, MaxPooling2D, Dense, Dropout, Input, Flatten, GlobalMaxPooling2D, BatchNormalization, InputLayer
from keras.optimizers import Adam
from keras import Sequential, Model

densenet = DenseNet169(include_top=False, input_shape=(IMAGE_SHAPE, IMAGE_SHAPE, 3))
x = densenet.layers[-1].output
x = Flatten()(x)
x = Dense(512, activation='relu')(x)
x = Dropout(0.5)(x)
x = Dense(256, activation='relu')(x)
x = Dropout(0.5)(x)
x = Dense(4, activation='softmax')(x)
model = Model(inputs=densenet.inputs, outputs=x)
model.compile(optimizer=Adam(lr=3e-5),
              loss='categorical_crossentropy',
              metrics=['accuracy'],
              weighted_metrics=['accuracy'])

from keras.callbacks import ModelCheckpoint
checkpoint = ModelCheckpoint(
    "model_checkpoints/v8/densenet169_finetune_weights_{epoch:02d}-{val_accuracy:.2f}.hdf5",
    save_best_only=True)
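# A hedged sketch of how the V7 fine-tuning model might then be trained.
# train_generator/val_generator, the epoch count, and the class weights are
# assumptions carried over from the preceding cell, not original code.
history = model.fit(
    train_generator,
    validation_data=val_generator,
    epochs=20,
    class_weight={0: 1., 1: 5., 2: 1., 3: 1.},  # same covid up-weighting as above
    callbacks=[checkpoint],
)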
def __init__(self,
             image_size=299,
             batch_size=64,
             num_classes=100,
             trainable=True,
             load_trained=False,
             max_trainable=False,
             pretrained_model='pretrained.h5',
             init_lr=0.001,
             n_chanels=3,
             optimizer='adam',
             init_epoch=0,
             max_epoch=100,
             net_type=0):
    try:
        os.mkdir("out_model")
        os.mkdir("logs")
    except:
        print("Created output directory !")

    self.image_size = image_size
    self.batch_size = batch_size
    self.init_lr = init_lr
    self.max_epoch = max_epoch
    self.init_epoch = init_epoch
    self.net_type = net_type
    self.model = None
    self.pre_process = None

    input_shape = (image_size, image_size, n_chanels)
    if net_type == 0:
        self.model = DenseNet121(input_shape=input_shape, include_top=False,
                                 weights='imagenet', pooling='max')
        self.pre_process = keras.applications.densenet.preprocess_input
    elif net_type == 1:
        self.model = DenseNet169(input_shape=input_shape, include_top=False,
                                 weights='imagenet', pooling='max')
        self.pre_process = keras.applications.densenet.preprocess_input
    elif net_type == 2:
        self.model = DenseNet201(input_shape=input_shape, include_top=False,
                                 weights='imagenet', pooling='max')
        self.pre_process = keras.applications.densenet.preprocess_input
    elif net_type == 3:
        self.model = ResNet50(input_shape=input_shape, include_top=False,
                              weights='imagenet', pooling='max')
        self.pre_process = keras.applications.resnet50.preprocess_input
    elif net_type == 4:
        self.model = InceptionV3(input_shape=input_shape, include_top=False,
                                 weights='imagenet', pooling='max')
        self.pre_process = keras.applications.inception_v3.preprocess_input
    elif net_type == 5:
        self.model = InceptionResNetV2(input_shape=input_shape, include_top=False,
                                       weights='imagenet', pooling='max')
        self.pre_process = keras.applications.inception_resnet_v2.preprocess_input
    elif net_type == 6:
        self.model = NASNetLarge(input_shape=input_shape, include_top=False,
                                 weights='imagenet', pooling='max')
        self.pre_process = keras.applications.nasnet.preprocess_input
    elif net_type == 7:
        self.model = NASNetMobile(input_shape=input_shape, include_top=False,
                                  weights='imagenet', pooling='max')
        self.pre_process = keras.applications.nasnet.preprocess_input

    x = self.model.output
    # x = GlobalAveragePooling2D()(x)
    x = Dense(1024, activation='relu')(x)  # add a fully-connected layer
    self.predictions = Dense(num_classes, activation='softmax', name='out_put')(x)
    self.model = Model(inputs=self.model.input, outputs=self.predictions)

    if load_trained:
        self.model.load_weights(pretrained_model)
        print("Load pretrained model successfully!")

    if trainable == False:
        for layer in self.model.layers:
            layer.trainable = False
        print("Use model for inference is activated!")

    if trainable and not max_trainable:
        for layer in self.model.layers[:-5]:
            layer.trainable = False
        for layer in self.model.layers[-5:]:
            layer.trainable = True
        print("Train last layers is activated!")

    if max_trainable:
        for layer in self.model.layers:
            layer.trainable = True
        print("Train whole network is activated!")

    if optimizer == 'adam':
        opt = Adam(lr=init_lr, beta_1=0.9, beta_2=0.999, decay=1e-6)
    else:
        opt = SGD(lr=init_lr, decay=1e-6, momentum=0.9, nesterov=True)

    self.model.compile(optimizer=opt,
                       loss='categorical_crossentropy',
                       metrics=['accuracy'])

    self.earlyStopping = callbacks.EarlyStopping(monitor='val_acc',
                                                 min_delta=0.001,
                                                 patience=10,
                                                 verbose=1)
    self.tensorBoard = callbacks.TensorBoard('./logs',
                                             batch_size=batch_size,
                                             write_grads=True,
                                             write_images=True)
    self.checkpoint = callbacks.ModelCheckpoint(
        './out_model/weights.' + type_models[self.net_type] +
        '.{epoch:02d}-{acc:.2f}-{val_acc:.2f}.hdf5',
        monitor='val_acc',
        verbose=1,
        save_best_only=True,
        save_weights_only=False,
        mode='auto',
        period=1)
    self.lrController = callbacks.ReduceLROnPlateau(monitor='val_acc',
                                                    factor=0.5,
                                                    patience=3,
                                                    verbose=1,
                                                    mode='auto',
                                                    min_delta=0.0001,
                                                    cooldown=0,
                                                    min_lr=0.00001)
    self.history_ = callbacks.History()
    self.callBackList = [
        self.earlyStopping, self.tensorBoard, self.checkpoint,
        self.lrController, self.history_
    ]

    # [self.train_loss, self.train_metrics] = 2 * [None]
    self.history = None
    self.dataGenerator = None
eval_csv = join(data_dir, EVAL_CSV)
df = pd.read_csv(eval_csv, names=['img', 'label'], header=None)
eval_imgs = df.img.values.tolist()
eval_labels = df.label.values.tolist()
eval_datagen = ImageDataGenerator(rescale=1. / 255)
eval_generator = eval_datagen.flow_from_directory(EVAL_DIR,
                                                  class_mode='binary',
                                                  shuffle=False,
                                                  target_size=(IMG_HEIGHT, IMG_WIDTH),
                                                  batch_size=BATCH_SIZE)
n_samples = eval_generator.samples
base_model = DenseNet169(input_shape=DIMS, weights='imagenet', include_top=False)
x = base_model.output
x = GlobalAveragePooling2D(name='avg_pool')(x)  # comment out for ResNet
x = Dense(1, activation='sigmoid', name='predictions')(x)
model = Model(inputs=base_model.input, outputs=x)
model.load_weights(MODEL_TO_EVAL1)
model.compile(optimizer=Adam(lr=1e-3),
              loss=binary_crossentropy,
              metrics=['binary_accuracy'])
score, acc = model.evaluate_generator(eval_generator, n_samples / BATCH_SIZE)
# print(model.metrics_names)
print('==> Metrics with eval')
print("loss :{:0.4f} \t Accuracy:{:0.4f}".format(score, acc))
y_pred1 = model.predict_generator(eval_generator, n_samples / BATCH_SIZE)
VGG16_top = VGG16(include_top=False, input_shape=(224, 224, 3))
VGG19_top = VGG19(include_top=False, input_shape=(224, 224, 3))
Res50_top = ResNet50(include_top=False, input_shape=(224, 224, 3))
Xception_top = Xception(include_top=False, input_shape=(299, 299, 3))
InceptionV3_top = InceptionV3(include_top=False, input_shape=(299, 299, 3))
InceptionResNetV2_top = InceptionResNetV2(include_top=False, input_shape=(299, 299, 3))

# Less commonly used pretrained models; Keras also provides pretrained weights for these
from keras.applications import MobileNet
from keras.applications import DenseNet121, DenseNet169, DenseNet201
from keras.applications import NASNetLarge, NASNetMobile

Mobile_base = MobileNet(include_top=True, input_shape=(224, 224, 3))
Dense121_base = DenseNet121(include_top=True, input_shape=(224, 224, 3))
Dense169_base = DenseNet169(include_top=True, input_shape=(224, 224, 3))
Dense201_base = DenseNet201(include_top=True, input_shape=(224, 224, 3))
NASNetLarge_base = NASNetLarge(include_top=True, input_shape=(331, 331, 3))
NASNetMobile_base = NASNetMobile(include_top=True, input_shape=(224, 224, 3))

# -------------------------------------------------------------------------
# Networks without the top (classification) layers
Mobile_top = MobileNet(include_top=False, input_shape=(224, 224, 3))
Dense121_top = DenseNet121(include_top=False, input_shape=(224, 224, 3))
Dense169_top = DenseNet169(include_top=False, input_shape=(224, 224, 3))
Dense201_top = DenseNet201(include_top=False, input_shape=(224, 224, 3))
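# The include_top=False variants are typically used as frozen feature
# extractors. A minimal sketch (the image path is a placeholder):
import numpy as np
from keras.applications.densenet import preprocess_input
from keras.preprocessing import image

img = image.load_img('example.jpg', target_size=(224, 224))  # placeholder path
batch = preprocess_input(np.expand_dims(image.img_to_array(img), axis=0))
features = Dense169_top.predict(batch)
print(features.shape)  # (1, 7, 7, 1664) for DenseNet169 at 224x224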
print(np.array(X).shape)
print(np.array(Y).shape)
print(Y[0])

"""# Build model

build the model and get features
"""

x = np.array(X)
y = np.array(Y)

# initialize the pretrained model
pre_trained_models = {}
pre_trained_models["DenseNet169"] = DenseNet169(include_top=False,
                                                input_shape=(224, 224, 3),
                                                pooling="avg")
print(np.array(x).shape)

"""# Split train and test data

split and make flattened features
"""

X_train, X_test, Y_train, Y_test = train_test_split(x, y, test_size=0.2)

X_train = pre_trained_models["DenseNet169"].predict(X_train)
X_test = pre_trained_models["DenseNet169"].predict(X_test)

flatten_feature_train = []
class create_net:
    def net(self, init1, init2, case, height, channels, classes, width):
        # model = VGG16(include_top=False, weights='imagenet')
        name = case
        if self.channels == 3:
            init_weights = 'imagenet'
            i_t = True
            pool = None
            cl = 1000
        else:
            init_weights = None
            i_t = True
            pool = None
            cl = 2000

        if self.main_model == "vgg16":
            init_model = VGG16(include_top=i_t, weights=init_weights,
                               input_shape=(height, width, self.channels))
            model = self.fine_tuned(init_model)
        elif self.main_model == "vgg19":
            init_model = VGG19(include_top=i_t, weights=init_weights,
                               input_shape=(height, width, self.channels))
            model = self.fine_tuned(init_model)
        elif self.main_model == "resnet50":
            # weights initialized from imagenet
            init_model = ResNet50(include_top=i_t, weights=init_weights,
                                  input_shape=(height, width, self.channels))
            model = self.fine_tuned(init_model)
        elif self.main_model == "resnet101":
            init_model = ResNet101(include_top=i_t, weights=init_weights,
                                   input_shape=(height, width, self.channels))
            model = self.fine_tuned(init_model)
        elif self.main_model == "resnet152":
            init_model = ResNet152(include_top=i_t, weights=init_weights,
                                   input_shape=(height, width, self.channels))
            model = self.fine_tuned(init_model)
        elif self.main_model == "densenet121":
            init_model = DenseNet121(include_top=i_t, weights=init_weights,
                                     input_shape=(height, width, self.channels),
                                     pooling=pool, classes=cl)
            model = self.fine_tuned(init_model)
        elif self.main_model == "densenet169":
            init_model = DenseNet169(include_top=i_t, weights=init_weights,
                                     input_shape=(height, width, self.channels),
                                     pooling=pool, classes=cl)
            model = self.fine_tuned(init_model)
        elif self.main_model == "densenet201":
            init_model = DenseNet201(include_top=i_t, weights=init_weights,
                                     input_shape=(height, width, self.channels),
                                     pooling=pool, classes=cl)
            model = self.fine_tuned(init_model)
        elif self.main_model == "mobilenet":
            init_model = MobileNet(include_top=i_t, weights=init_weights,
                                   input_shape=(height, width, self.channels))
            model = self.fine_tuned(init_model)
        elif self.main_model == 'denseres171':
            init_weights = np.array([
                str(self.store_model + "/weights_main_resnet50.h5"),
                str(self.store_model + "/weights_main_densenet121.h5")
            ])
            model = self.denseres_171(i_t, init_weights, pool, cl, height, width)
        else:
            print('Error: no main model file')

        if self.load_weights_main:
            logging.info("Load main weights from: {}".format(
                self.store_model + self.load_weights_main))
            model.load_weights(self.store_model + self.load_weights_main)

        for p in model.layers:
            print(p.name.title(), p.input_shape, p.output_shape)
        return model
def generate_model_base(preset, width, height, channel, weights_init):
    '''
    Create a model.

    # Arguments
        preset: name of the preset model
        width: input image width
        height: input image height
        channel: number of input image channels
        class_num: number of classification classes
        weights_init: initial weights (None or 'imagenet')

    # Returns
        a keras.models.Model object
    '''
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
    # os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
    from keras.layers import Dense, BatchNormalization, Dropout, Input, Conv2D
    # from keras.layers import GlobalAveragePooling2D
    from keras.models import Model

    input_tensor = Input(shape=(width, height, channel))
    conv_base = None
    # output_layer = None
    prediction_layer = None

    if preset.upper() == "bench".upper():
        conv_base = create_bench_model(input_tensor)
        prediction_layer = conv_base
    elif preset.upper() == "VGG16".upper():
        from keras.applications import VGG16
        conv_base = None
        if channel == 3:
            conv_base = VGG16(weights=weights_init, include_top=True,
                              input_shape=(width, height, channel))
        else:
            conv_base = VGG16(weights=weights_init, include_top=True,
                              input_shape=(width, height, 3))
        conv_base.layers.pop(0)
        conv_base.layers.pop(0)
        input_layer = Input(shape=(width, height, channel), name='multi_input')
        block1_conv1_new = Conv2D(64, (3, 3), activation='relu', padding='same',
                                  kernel_initializer='glorot_uniform',
                                  name='block1_conv1_new')
        conv_base = insert_intermediate_layer_in_keras(
            conv_base, [0, 0], [input_layer, block1_conv1_new])
        conv_base.layers.pop()
        output_layer = conv_base.layers[-1]
        x = output_layer.get_output_at(-1)
        # x = output_layer.output
        x = BatchNormalization(name='fc_bachnorm')(x)
        x = Dropout(0.2, name='fc_dropout')(x)
        prediction_layer = Dense(class_num, activation='softmax',
                                 kernel_initializer='glorot_uniform',
                                 name='prediction')(x)
    elif preset.upper() == "VGG19".upper():
        from keras.applications import VGG19
        conv_base = None
        if channel == 3:
            conv_base = VGG19(weights=weights_init, include_top=True,
                              input_shape=(width, height, channel))
        else:
            conv_base = VGG19(weights=weights_init, include_top=True,
                              input_shape=(width, height, 3))
        conv_base.layers.pop(0)
        conv_base.layers.pop(0)
        input_layer = Input(shape=(width, height, channel), name='multi_input')
        block1_conv1_new = Conv2D(64, (3, 3), activation='relu', padding='same',
                                  kernel_initializer='glorot_uniform',
                                  name='block1_conv1_new')
        conv_base = insert_intermediate_layer_in_keras(
            conv_base, [0, 0], [input_layer, block1_conv1_new])
        conv_base.layers.pop()
        output_layer = conv_base.layers[-1]
        x = output_layer.get_output_at(-1)
        # x = output_layer.output
        x = BatchNormalization(name='fc_bachnorm')(x)
        x = Dropout(0.2, name='fc_dropout')(x)
        prediction_layer = Dense(class_num, activation='softmax',
                                 kernel_initializer='glorot_uniform',
                                 name='prediction')(x)
    elif preset.upper() == "VGG16BN".upper():
        from model import VGG16BN
        conv_base = None
        if channel == 3:
            conv_base = VGG16BN(weights=weights_init, include_top=False,
                                pooling='avg', kernel_initializer='glorot_uniform',
                                input_shape=(width, height, channel))
        else:
            conv_base = VGG16BN(weights=weights_init, include_top=False,
                                pooling='avg', kernel_initializer='glorot_uniform',
                                input_shape=(width, height, 3))
        conv_base.layers.pop(0)
        conv_base.layers.pop(0)
        input_layer = Input(shape=(width, height, channel), name='multi_input')
        block1_conv1_new = Conv2D(64, (3, 3), activation='relu', padding='same',
                                  kernel_initializer='glorot_uniform',
                                  name='block1_conv1_new')
        conv_base = insert_intermediate_layer_in_keras(
            conv_base, [0, 0], [input_layer, block1_conv1_new])
        # conv_base.layers.pop()
        output_layer = conv_base.layers[-1]
        x = output_layer.get_output_at(-1)
        # x = output_layer.output
        x = BatchNormalization(name='fc_bachnorm')(x)
        x = Dropout(0.2, name='fc_dropout')(x)
        prediction_layer = Dense(class_num, activation='softmax',
                                 kernel_initializer='glorot_uniform',
                                 name='prediction')(x)
    elif preset.upper() == "VGG19BN".upper():
        from model import VGG19BN
        conv_base = None
        if channel == 3:
            conv_base = VGG19BN(weights=weights_init, include_top=False,
                                pooling='avg', kernel_initializer='glorot_uniform',
                                input_shape=(width, height, channel))
        else:
            conv_base = VGG19BN(weights=weights_init, include_top=False,
                                pooling='avg', kernel_initializer='glorot_uniform',
                                input_shape=(width, height, 3))
        conv_base.layers.pop(0)
        conv_base.layers.pop(0)
        input_layer = Input(shape=(width, height, channel), name='multi_input')
        block1_conv1_new = Conv2D(64, (3, 3), activation='relu', padding='same',
                                  kernel_initializer='glorot_uniform',
                                  name='block1_conv1_new')
        conv_base = insert_intermediate_layer_in_keras(
            conv_base, [0, 0], [input_layer, block1_conv1_new])
        # conv_base.layers.pop()
        output_layer = conv_base.layers[-1]
        x = output_layer.get_output_at(-1)
        # x = output_layer.output
        x = BatchNormalization(name='fc_bachnorm')(x)
        x = Dropout(0.2, name='fc_dropout')(x)
        prediction_layer = Dense(class_num, activation='softmax',
                                 kernel_initializer='glorot_uniform',
                                 name='prediction')(x)
    elif preset.upper() == "ResNet20".upper():
        # from keras.applications import ResNet50
        from model.resnet import ResNet20
        conv_base = ResNet20(weights=weights_init, include_top=True,
                             input_shape=(width, height, channel),
                             input_tensor=input_tensor)
        conv_base.layers.pop()
        output_layer = conv_base.layers[-1]
        x = output_layer.output
        x = BatchNormalization(name='fc_bachnorm')(x)
        # x = Dropout(0.5, name='fc_dropout')(x)
        # x = GlobalAveragePooling2D(name='avg_pool')(x)
        prediction_layer = Dense(class_num, activation='softmax',
                                 kernel_initializer='glorot_uniform',
                                 name='prediction')(x)
    elif preset.upper() == "ResNet50".upper():
        # from keras.applications import ResNet50
        from model.resnet import ResNet50
        conv_base = ResNet50(weights=weights_init, include_top=True,
                             input_shape=(width, height, channel),
                             input_tensor=input_tensor)
        conv_base.layers.pop()
        output_layer = conv_base.layers[-1]
        x = output_layer.output
        x = BatchNormalization(name='fc_bachnorm')(x)
        # x = Dropout(0.5, name='fc_dropout')(x)
        # x = GlobalAveragePooling2D(name='avg_pool')(x)
        prediction_layer = Dense(class_num, activation='softmax',
                                 kernel_initializer='glorot_uniform',
                                 name='prediction')(x)
    elif preset.upper() == "ResNet101".upper():
        from model.resnet import ResNet101
        conv_base = ResNet101(weights=weights_init, include_top=True,
                              input_shape=(width, height, channel),
                              input_tensor=input_tensor)
        conv_base.layers.pop()
        output_layer = conv_base.layers[-1]
        x = output_layer.output
        x = BatchNormalization(name='fc_bachnorm')(x)
        x = Dropout(0.5, name='fc_dropout')(x)
        # x = GlobalAveragePooling2D(name='avg_pool')(x)
        prediction_layer = Dense(class_num, activation='softmax',
                                 kernel_initializer='glorot_uniform',
                                 name='prediction')(x)
    elif preset.upper() == "ResNet152".upper():
        from model.resnet import ResNet152
        conv_base = ResNet152(weights=weights_init, include_top=True,
                              input_shape=(width, height, channel),
                              input_tensor=input_tensor)
        conv_base.layers.pop()
        output_layer = conv_base.layers[-1]
        x = output_layer.output
        x = BatchNormalization(name='fc_bachnorm')(x)
        x = Dropout(0.5, name='fc_dropout')(x)
        # x = GlobalAveragePooling2D(name='avg_pool')(x)
        prediction_layer = Dense(class_num, activation='softmax',
                                 kernel_initializer='glorot_uniform',
                                 name='prediction')(x)
    elif preset.upper() == "ResNet50V2".upper():
        from model.resnet_v2 import ResNet50V2
        conv_base = ResNet50V2(weights=weights_init, include_top=True,
                               input_shape=(width, height, channel),
                               input_tensor=input_tensor)
        conv_base.layers.pop()
        output_layer = conv_base.layers[-1]
        x = output_layer.output
        x = BatchNormalization(name='fc_bachnorm')(x)
        x = Dropout(0.5, name='fc_dropout')(x)
        # x = GlobalAveragePooling2D(name='avg_pool')(x)
        prediction_layer = Dense(class_num, activation='softmax',
                                 kernel_initializer='glorot_uniform',
                                 name='prediction')(x)
    elif preset.upper() == "ResNet101V2".upper():
        from model.resnet_v2 import ResNet101V2
        conv_base = ResNet101V2(weights=weights_init, include_top=True,
                                input_shape=(width, height, channel),
                                input_tensor=input_tensor)
        conv_base.layers.pop()
        output_layer = conv_base.layers[-1]
        x = output_layer.output
        x = BatchNormalization(name='fc_bachnorm')(x)
        x = Dropout(0.5, name='fc_dropout')(x)
        # x = GlobalAveragePooling2D(name='avg_pool')(x)
        prediction_layer = Dense(class_num, activation='softmax',
                                 kernel_initializer='glorot_uniform',
                                 name='prediction')(x)
    elif preset.upper() == "ResNet152V2".upper():
        from model.resnet_v2 import ResNet152V2
        conv_base = ResNet152V2(weights=weights_init, include_top=True,
                                input_shape=(width, height, channel),
                                input_tensor=input_tensor)
        conv_base.layers.pop()
        output_layer = conv_base.layers[-1]
        x = output_layer.output
        x = BatchNormalization(name='fc_bachnorm')(x)
        x = Dropout(0.5, name='fc_dropout')(x)
        # x = GlobalAveragePooling2D(name='avg_pool')(x)
        prediction_layer = Dense(class_num, activation='softmax',
                                 kernel_initializer='glorot_uniform',
                                 name='prediction')(x)
    elif preset.upper() == "ResNeXt50".upper():
        from model.resnext import ResNeXt50
        conv_base = ResNeXt50(weights=weights_init, include_top=True,
                              input_shape=(width, height, channel),
                              input_tensor=input_tensor)
        conv_base.layers.pop()
        output_layer = conv_base.layers[-1]
        x = output_layer.output
        x = BatchNormalization(name='fc_bachnorm')(x)
        x = Dropout(0.5, name='fc_dropout')(x)
        # x = GlobalAveragePooling2D(name='avg_pool')(x)
        prediction_layer = Dense(class_num, activation='softmax',
                                 kernel_initializer='glorot_uniform',
                                 name='prediction')(x)
    elif preset.upper() == "ResNeXt101".upper():
        from model.resnext import ResNeXt101
        conv_base = ResNeXt101(weights=weights_init, include_top=True,
                               input_shape=(width, height, channel),
                               input_tensor=input_tensor)
        conv_base.layers.pop()
        output_layer = conv_base.layers[-1]
        x = output_layer.output
        x = BatchNormalization(name='fc_bachnorm')(x)
        x = Dropout(0.5, name='fc_dropout')(x)
        # x = GlobalAveragePooling2D(name='avg_pool')(x)
        prediction_layer = Dense(class_num, activation='softmax',
                                 kernel_initializer='glorot_uniform',
                                 name='prediction')(x)
    elif preset.upper() == "InceptionV3".upper():
        from keras.applications import InceptionV3
        conv_base = InceptionV3(weights=weights_init, include_top=True,
                                input_tensor=input_tensor)
        conv_base.layers.pop()
        output_layer = conv_base.layers[-1]
        x = output_layer.output
        x = BatchNormalization(name='fc_bachnorm')(x)
        # x = GlobalAveragePooling2D(name='avg_pool')(x)
        prediction_layer = Dense(class_num, activation='softmax',
                                 kernel_initializer='glorot_uniform',
                                 name='prediction')(x)
    elif preset.upper() == "InceptionResNetV2".upper():
        from keras.applications import InceptionResNetV2
        conv_base = InceptionResNetV2(weights=weights_init, include_top=True,
                                      input_tensor=input_tensor)
        conv_base.layers.pop()
        output_layer = conv_base.layers[-1]
        x = output_layer.output
        x = BatchNormalization(name='fc_bachnorm')(x)
        # x = GlobalAveragePooling2D(name='avg_pool')(x)
        prediction_layer = Dense(class_num, activation='softmax',
                                 kernel_initializer='glorot_uniform',
                                 name='prediction')(x)
    elif preset.upper() == "DenseNet121".upper():
        from keras.applications import DenseNet121
        conv_base = DenseNet121(weights=weights_init, include_top=True,
                                input_tensor=input_tensor)
        conv_base.layers.pop()
        output_layer = conv_base.layers[-1]
        x = output_layer.output
        x = BatchNormalization(name='fc_bachnorm')(x)
        # x = GlobalAveragePooling2D(name='avg_pool')(x)
        prediction_layer = Dense(class_num, activation='softmax',
                                 kernel_initializer='glorot_uniform',
                                 name='prediction')(x)
    elif preset.upper() == "DenseNet169".upper():
        from keras.applications import DenseNet169
        conv_base = DenseNet169(weights=weights_init, include_top=True,
                                input_tensor=input_tensor)
        conv_base.layers.pop()
        output_layer = conv_base.layers[-1]
        x = output_layer.output
        x = BatchNormalization(name='fc_bachnorm')(x)
        # x = GlobalAveragePooling2D(name='avg_pool')(x)
        prediction_layer = Dense(class_num, activation='softmax',
                                 kernel_initializer='glorot_uniform',
                                 name='prediction')(x)
    elif preset.upper() == "DenseNet201".upper():
        from keras.applications import DenseNet201
        conv_base = DenseNet201(weights=weights_init, include_top=True,
                                input_tensor=input_tensor)
        conv_base.layers.pop()
        output_layer = conv_base.layers[-1]
        x = output_layer.output
        x = BatchNormalization(name='fc_bachnorm')(x)
        # x = GlobalAveragePooling2D(name='avg_pool')(x)
        prediction_layer = Dense(class_num, activation='softmax',
                                 kernel_initializer='glorot_uniform',
                                 name='prediction')(x)
    elif preset.upper() == "Xception".upper():
        from keras.applications import Xception
        conv_base = Xception(weights=weights_init, include_top=True,
                             input_tensor=input_tensor)
        conv_base.layers.pop()
        output_layer = conv_base.layers[-1]
        x = output_layer.output
        x = BatchNormalization(name='fc_bachnorm')(x)
        # x = GlobalAveragePooling2D(name='avg_pool')(x)
        prediction_layer = Dense(class_num, activation='softmax',
                                 kernel_initializer='glorot_uniform',
                                 name='prediction')(x)
    elif preset.upper() == "SEDenseNetImageNet121".upper():
        from model import SEDenseNetImageNet121
        conv_base = SEDenseNetImageNet121(weights=weights_init, include_top=True,
                                          input_tensor=input_tensor)
        conv_base.layers.pop()
        output_layer = conv_base.layers[-1]
        x = output_layer.output
        x = BatchNormalization(name='fc_bachnorm')(x)
        # x = GlobalAveragePooling2D(name='avg_pool')(x)
        prediction_layer = Dense(class_num, activation='softmax',
                                 kernel_initializer='glorot_uniform',
                                 name='prediction')(x)
    elif preset.upper() == "SEDenseNetImageNet169".upper():
        from model import SEDenseNetImageNet169
        conv_base = SEDenseNetImageNet169(weights=weights_init, include_top=True,
                                          input_tensor=input_tensor)
        conv_base.layers.pop()
        output_layer = conv_base.layers[-1]
        x = output_layer.output
        x = BatchNormalization(name='fc_bachnorm')(x)
        # x = GlobalAveragePooling2D(name='avg_pool')(x)
        prediction_layer = Dense(class_num, activation='softmax',
                                 kernel_initializer='glorot_uniform',
                                 name='prediction')(x)
    elif preset.upper() == "SEDenseNetImageNet201".upper():
        from model import SEDenseNetImageNet201
        conv_base = SEDenseNetImageNet201(weights=weights_init, include_top=True,
                                          input_tensor=input_tensor)
        conv_base.layers.pop()
        output_layer = conv_base.layers[-1]
        x = output_layer.output
        x = BatchNormalization(name='fc_bachnorm')(x)
        # x = GlobalAveragePooling2D(name='avg_pool')(x)
        prediction_layer = Dense(class_num, activation='softmax',
                                 kernel_initializer='glorot_uniform',
                                 name='prediction')(x)
    elif preset.upper()
== "SEDenseNetImageNet264".upper(): from model import SEDenseNetImageNet264 conv_base = SEDenseNetImageNet264(weights=weights_init, include_top=True, input_tensor=input_tensor ) conv_base.layers.pop() output_layer = conv_base.layers[-1] x = output_layer.output x = BatchNormalization(name='fc_bachnorm')(x) # x = GlobalAveragePooling2D(name='avg_pool')(x) prediction_layer = Dense(class_num, activation='softmax', kernel_initializer='glorot_uniform', name='prediction')(x) elif preset.upper() == "SEDenseNetImageNet161".upper(): from model import SEDenseNetImageNet161 conv_base = SEDenseNetImageNet161(weights=weights_init, include_top=True, input_tensor=input_tensor ) conv_base.layers.pop() output_layer = conv_base.layers[-1] x = output_layer.output x = BatchNormalization(name='fc_bachnorm')(x) # x = GlobalAveragePooling2D(name='avg_pool')(x) prediction_layer = Dense(class_num, activation='softmax', kernel_initializer='glorot_uniform', name='prediction')(x) elif preset.upper() == "SEInceptionResNetV2".upper(): from model import SEInceptionResNetV2 conv_base = SEInceptionResNetV2(weights=weights_init, include_top=True, input_tensor=input_tensor ) conv_base.layers.pop() output_layer = conv_base.layers[-1] x = output_layer.output x = BatchNormalization(name='fc_bachnorm')(x) # x = GlobalAveragePooling2D(name='avg_pool')(x) prediction_layer = Dense(class_num, activation='softmax', kernel_initializer='glorot_uniform', name='prediction')(x) elif preset.upper() == "SEInceptionV3".upper(): from model import SEInceptionV3 conv_base = SEInceptionV3(weights=weights_init, include_top=True, input_tensor=input_tensor ) conv_base.layers.pop() output_layer = conv_base.layers[-1] x = output_layer.output x = BatchNormalization(name='fc_bachnorm')(x) # x = GlobalAveragePooling2D(name='avg_pool')(x) prediction_layer = Dense(class_num, activation='softmax', kernel_initializer='glorot_uniform', name='prediction')(x) elif preset.upper() == "SEMobileNet".upper(): from model import SEMobileNet conv_base = SEMobileNet(weights=weights_init, include_top=True, input_tensor=input_tensor ) conv_base.layers.pop() output_layer = conv_base.layers[-1] x = output_layer.output x = BatchNormalization(name='fc_bachnorm')(x) # x = GlobalAveragePooling2D(name='avg_pool')(x) prediction_layer = Dense(class_num, activation='softmax', kernel_initializer='glorot_uniform', name='prediction')(x) elif preset.upper() == "SEResNet6".upper(): from model import SEResNet6 conv_base = SEResNet6(weights=weights_init, include_top=True, input_tensor=input_tensor ) conv_base.layers.pop() output_layer = conv_base.layers[-1] x = output_layer.output x = BatchNormalization(name='fc_bachnorm')(x) # x = GlobalAveragePooling2D(name='avg_pool')(x) prediction_layer = Dense(class_num, activation='softmax', kernel_initializer='glorot_uniform', name='prediction')(x) elif preset.upper() == "SEResNet8".upper(): from model import SEResNet8 conv_base = SEResNet8(weights=weights_init, include_top=True, input_tensor=input_tensor ) conv_base.layers.pop() output_layer = conv_base.layers[-1] x = output_layer.output x = BatchNormalization(name='fc_bachnorm')(x) # x = GlobalAveragePooling2D(name='avg_pool')(x) prediction_layer = Dense(class_num, activation='softmax', kernel_initializer='glorot_uniform', name='prediction')(x) elif preset.upper() == "SEResNet10".upper(): from model import SEResNet10 conv_base = SEResNet10(weights=weights_init, include_top=True, input_tensor=input_tensor ) conv_base.layers.pop() output_layer = conv_base.layers[-1] x = output_layer.output x = 
BatchNormalization(name='fc_bachnorm')(x) # x = GlobalAveragePooling2D(name='avg_pool')(x) prediction_layer = Dense(class_num, activation='softmax', kernel_initializer='glorot_uniform', name='prediction')(x) elif preset.upper() == "SEResNet18".upper(): from model import SEResNet18 conv_base = SEResNet18(weights=weights_init, include_top=True, input_tensor=input_tensor ) conv_base.layers.pop() output_layer = conv_base.layers[-1] x = output_layer.output x = BatchNormalization(name='fc_bachnorm')(x) # x = GlobalAveragePooling2D(name='avg_pool')(x) prediction_layer = Dense(class_num, activation='softmax', kernel_initializer='glorot_uniform', name='prediction')(x) elif preset.upper() == "SEResNet34".upper(): from model import SEResNet34 conv_base = SEResNet34(weights=weights_init, include_top=True, input_tensor=input_tensor ) conv_base.layers.pop() output_layer = conv_base.layers[-1] x = output_layer.output x = BatchNormalization(name='fc_bachnorm')(x) # x = GlobalAveragePooling2D(name='avg_pool')(x) prediction_layer = Dense(class_num, activation='softmax', kernel_initializer='glorot_uniform', name='prediction')(x) elif preset.upper() == "SEResNet50".upper(): from model import SEResNet50 conv_base = SEResNet50(weights=weights_init, include_top=True, input_tensor=input_tensor ) conv_base.layers.pop() output_layer = conv_base.layers[-1] x = output_layer.output x = BatchNormalization(name='fc_bachnorm')(x) # x = GlobalAveragePooling2D(name='avg_pool')(x) prediction_layer = Dense(class_num, activation='softmax', kernel_initializer='glorot_uniform', name='prediction')(x) elif preset.upper() == "SEResNet101".upper(): from model import SEResNet101 conv_base = SEResNet101(weights=weights_init, include_top=True, input_tensor=input_tensor ) conv_base.layers.pop() output_layer = conv_base.layers[-1] x = output_layer.output x = BatchNormalization(name='fc_bachnorm')(x) # x = GlobalAveragePooling2D(name='avg_pool')(x) prediction_layer = Dense(class_num, activation='softmax', kernel_initializer='glorot_uniform', name='prediction')(x) elif preset.upper() == "SEResNet154".upper(): from model import SEResNet154 conv_base = SEResNet154(weights=weights_init, include_top=True, input_tensor=input_tensor ) conv_base.layers.pop() output_layer = conv_base.layers[-1] x = output_layer.output x = BatchNormalization(name='fc_bachnorm')(x) # x = GlobalAveragePooling2D(name='avg_pool')(x) prediction_layer = Dense(class_num, activation='softmax', kernel_initializer='glorot_uniform', name='prediction')(x) elif preset.upper() == "SEResNext".upper(): from model import SEResNext conv_base = SEResNext(weights=weights_init, include_top=True, input_tensor=input_tensor ) conv_base.layers.pop() output_layer = conv_base.layers[-1] x = output_layer.output x = BatchNormalization(name='fc_bachnorm')(x) # x = GlobalAveragePooling2D(name='avg_pool')(x) prediction_layer = Dense(class_num, activation='softmax', kernel_initializer='glorot_uniform', name='prediction')(x) else: raise ValueError('unknown model name : {}'.format(preset)) # x = output_layer.output # # x = Flatten()(x) # # x = Dense(512, activation='relu', kernel_initializer='glorot_uniform')(x) # # # x = Dropout(0.7)(x) # # x = BatchNormalization(name='fc_bachnorm')(x) # prediction_layer = Dense(class_num, activation='softmax', kernel_initializer='glorot_uniform', name='prediction')(x) model = Model(inputs=conv_base.input, outputs=prediction_layer, name='classification_model') # #weights_filepath = 'work/test/vgg19_weights_tf_dim_ordering_tf_kernels.h5' # weights_filepath = 
'work/test/vgg19_weights_tf_dim_ordering_tf_kernels_notop.h5' # model.load_weights(weights_filepath, by_name=True, skip_mismatch=True) return model
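# `insert_intermediate_layer_in_keras` is called in the VGG19BN branch above
# but is not defined in this file. Below is a minimal sketch of one possible
# implementation, assuming the wrapped model has a strictly linear layer graph
# (true for VGG-style backbones, not for residual or densely connected ones).
# Only the name and argument order are taken from the call site; the body is
# an assumption.
from keras.layers import Layer
from keras.models import Model


def insert_intermediate_layer_in_keras(model, positions, new_layers):
    """Rebuild `model` with each of `new_layers` spliced in before the layer
    currently at the matching index in `positions` (index 0 prepends)."""
    layers = list(model.layers)
    # Insert back-to-front so earlier indices stay valid; equal positions end
    # up in the order given by `new_layers`.
    for pos, new_layer in reversed(list(zip(positions, new_layers))):
        layers.insert(pos, new_layer)
    # The first element may be an Input tensor (as at the call site above) or
    # an InputLayer instance; recover the symbolic tensor either way.
    first = layers[0]
    x = first.output if isinstance(first, Layer) else first
    inputs = first.input if isinstance(first, Layer) else first
    for layer in layers[1:]:
        x = layer(x)
    return Model(inputs=inputs, outputs=x)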
def build_network(self, model_name, fine_tune):
    name = model_name.lower()
    if name == 'vgg16':
        base_model = VGG16(include_top=False, input_shape=(None, None, 3),
                           pooling='avg')
        for layer in base_model.layers:
            # With fine_tune=True only the last convolutional block (block5)
            # is trained; otherwise the whole backbone is trainable.
            layer.trainable = layer.name.startswith('block5') if fine_tune else True
        x = base_model.output
        x = Dense(1024, activation='relu')(x)
        x = Dropout(0.5)(x)
        x = Dense(512, activation='relu')(x)
        predictions = Dense(1, activation='sigmoid')(x)
        model = Model(base_model.input, predictions)
        model.summary()
        return model

    # The remaining backbones all share the same binary-classification head.
    keras_modules = dict(backend=keras.backend, layers=keras.layers,
                         models=keras.models, utils=keras.utils)
    if name == 'resnet50':
        base_model = ResNet50(include_top=False, input_shape=(None, None, 3),
                              pooling='avg', **keras_modules)
    # elif name == 'resnet34':
    #     base_model = ResNet34(include_top=False, input_shape=self.shape,
    #                           pooling='avg')
    elif name == 'resnet101':
        base_model = ResNet101(include_top=False, input_shape=(None, None, 3),
                               pooling='avg', **keras_modules)
    elif name == 'resnet152':
        base_model = ResNet152(include_top=False, input_shape=(None, None, 3),
                               pooling='avg', **keras_modules)
    elif name == 'inceptionresnetv2':
        base_model = InceptionResNetV2(include_top=False,
                                       input_shape=(None, None, 3),
                                       pooling='avg')
    elif name == 'xception':
        base_model = Xception(include_top=False, input_shape=(None, None, 3),
                              pooling='avg')
    elif name == 'densenet121':
        base_model = DenseNet121(include_top=False,
                                 input_shape=(None, None, 3), pooling='avg')
    elif name == 'densenet169':
        base_model = DenseNet169(include_top=False,
                                 input_shape=(None, None, 3), pooling='avg')
    elif name == 'densenet201':
        base_model = DenseNet201(include_top=False,
                                 input_shape=(None, None, 3), pooling='avg')
    elif name == 'nasnetlarge':
        base_model = NASNetLarge(include_top=False,
                                 input_shape=(None, None, 3), pooling='avg')
    elif name == 'vgg19':
        base_model = VGG19(include_top=False, input_shape=(None, None, 3),
                           pooling='avg')
    else:
        base_model = NASNetMobile(include_top=False,
                                  input_shape=(None, None, 3), pooling='avg')

    x = base_model.output
    x = Dense(1024, activation='relu')(x)
    x = Dropout(0.5)(x)
    x = Dense(1024, activation='relu')(x)
    predictions = Dense(1, activation='sigmoid')(x)
    model = Model(base_model.input, predictions)
    if fine_tune:
        # Freeze the backbone so only the new head is trained.
        for layer in base_model.layers:
            layer.trainable = False
    model.summary()
    return model
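# Hypothetical driver for build_network() above. The active branches read
# nothing from `self` (only the commented-out resnet34 branch would), so any
# object exposing the method will do; `demo_compile` and its argument are
# illustrative names, not part of the original code base.
import keras


def demo_compile(trainer):
    # fine_tune=True freezes the DenseNet backbone so only the new sigmoid
    # head trains; compile after freezing so the trainable flags are captured
    # by the compiled training function.
    model = trainer.build_network('densenet169', fine_tune=True)
    model.compile(optimizer=keras.optimizers.Adam(lr=1e-4),
                  loss='binary_crossentropy',
                  metrics=['accuracy'])
    return model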