def create_backbone_model(input_shape):
    # Very good efficiency/cost ratio for this model.
    # I should test it without avg pooling, but time is short and results are
    # good enough.
    return EfficientNetB0(
        include_top=False,
        weights='imagenet',
        pooling='avg',
        input_shape=input_shape)
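# A minimal usage sketch (added for illustration, not part of the original
# snippet): the 224x224x3 input size and the 10-class head are assumptions.
import tensorflow as tf

backbone = create_backbone_model((224, 224, 3))
backbone.trainable = False  # keep the pretrained ImageNet weights frozen
classifier = tf.keras.Sequential([
    backbone,
    tf.keras.layers.Dense(10, activation='softmax'),
])
classifier.compile(optimizer='adam', loss='sparse_categorical_crossentropy')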
def define_classifier_architecture(architecture_name,
                                   image_size,
                                   weights,
                                   classifier_kwargs=None):
    # Guard against the default: unpacking **None below would raise a TypeError.
    classifier_kwargs = classifier_kwargs or {}
    if architecture_name == 'MobileNet':
        model = MobileNetV2(input_shape=image_size,
                            include_top=False,
                            weights=weights,
                            **classifier_kwargs)
    elif architecture_name == 'VGG16':
        model = VGG16(input_shape=image_size,
                      include_top=False,
                      weights=weights,
                      **classifier_kwargs)
    elif architecture_name == 'VGG19':
        model = VGG19(input_shape=image_size,
                      include_top=False,
                      weights=weights,
                      **classifier_kwargs)
    elif architecture_name == 'NASNetMobile':
        model = NASNetMobile(input_shape=image_size,
                             include_top=False,
                             weights=weights,
                             **classifier_kwargs)
    elif architecture_name == 'NASNetLarge':
        model = NASNetLarge(input_shape=image_size,
                            include_top=False,
                            weights=weights,
                            **classifier_kwargs)
    elif architecture_name == 'InceptionV3':
        model = InceptionV3(input_shape=image_size,
                            include_top=False,
                            weights=weights,
                            **classifier_kwargs)
    elif architecture_name == 'InceptionResNetV2':
        model = InceptionResNetV2(input_shape=image_size,
                                  include_top=False,
                                  weights=weights,
                                  **classifier_kwargs)
    elif architecture_name == 'Resnet50':
        model = ResNet50(input_shape=image_size,
                         include_top=False,
                         weights=weights,
                         **classifier_kwargs)
    elif architecture_name == 'EfficientNetB0':
        model = EfficientNetB0(input_shape=image_size,
                               include_top=False,
                               weights=weights,
                               **classifier_kwargs)
    else:
        raise ValueError(
            f"Classifier '{architecture_name}' is not recognized or not implemented.")

    return model
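# A minimal usage sketch (assumption, not from the original code): build a
# frozen EfficientNetB0 backbone for 224x224 RGB inputs, passing pooling='avg'
# through classifier_kwargs.
backbone = define_classifier_architecture(architecture_name='EfficientNetB0',
                                           image_size=(224, 224, 3),
                                           weights='imagenet',
                                           classifier_kwargs={'pooling': 'avg'})
backbone.trainable = False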
def __init__(self, image_shape, name, trainable=False):
    super(BaseModel, self).__init__(name=name)
    if name == "EfficientNetB0":
        self.base_model = EfficientNetB0(include_top=False,
                                         input_shape=image_shape)
        self.preprocess_input = preprocess_input_efficientnetB0
    elif name == "InceptionV3":
        self.base_model = InceptionV3(include_top=False,
                                      input_shape=image_shape)
        self.preprocess_input = preprocess_input_inception_v3
    elif name == "VGG16":
        self.base_model = VGG16(include_top=False, input_shape=image_shape)
        self.preprocess_input = preprocess_input_vgg16
    elif name == "VGG19":
        self.base_model = VGG19(include_top=False, input_shape=image_shape)
        self.preprocess_input = preprocess_input_vgg19
    else:
        # Without this guard, an unknown name would fail later with an AttributeError.
        raise ValueError(f"Unsupported base model: {name}")

    self.base_model.trainable = trainable
def create_efficientnetb0(height, width, pretrained: bool, mode: XModelMode = XModelMode.SIMPLE):
    shape = (height, width, 3)

    # Monkey-patch Keras so that layers created after this point use the locally
    # imported BatchNormalization implementation.
    tf.keras.layers.BatchNormalization = BatchNormalization

    base_model = EfficientNetB0(input_shape=shape,
                                include_top=False,
                                weights="imagenet" if pretrained else None)
    inputs = tf.keras.Input(shape=shape, name="input")
    base_model, features = upsample(
        base_model,
        inputs,
        ["block2b_activation", "block3b_activation", "block5c_activation", "top_activation"],  # 144, 240, 672, 1280
        # ["block2b_add", "block3b_add", "block5c_add", "block6d_add"],  # 24, 40, 112, 192
        # resnet50: 256, 512, 1024, 2048
        mode,
    )
    return base_model, inputs, features
def __init__(self, is_new):
    """
    This model will be used to classify Temple Run images into the action
    that should be taken at this point in the game.
    """
    super(Model, self).__init__()
    print("Setting up model...")

    # hyperparameters
    self.batch_size = 20
    self.num_classes = 6
    self.img_height = 180
    self.img_width = 96
    self.image_shape = (self.img_height, self.img_width, 3)
    self.class_names = [
        'jmp', 'lean_left', 'lean_right', 'slide', 'turn_left', 'turn_right'
    ]

    # create the model
    if is_new:  # check if making a new model or loading an old (trained) model
        self.normalize = tf.keras.layers.experimental.preprocessing.Rescaling(
            1. / 255, input_shape=self.image_shape)  # normalizing layer
        # self.base_model = ResNet50(input_shape=self.image_shape, include_top=False, weights='imagenet')  # resnet50
        self.base_model = EfficientNetB0(input_shape=self.image_shape,
                                         include_top=False,
                                         weights='imagenet')  # efficientnet-b0
        self.base_model.trainable = False  # freeze the base layers
        self.pool_layer = tf.keras.layers.GlobalAveragePooling2D()
        self.out_layer = tf.keras.layers.Dense(self.num_classes,
                                               activation='relu')
        self.model = tf.keras.Sequential(
            [self.base_model, self.pool_layer, self.out_layer])  # final model
        print(self.model.summary())
    else:
        self.model = tf.keras.models.load_model(
            '../models/tflow_model5')  # load the model from saved version
        print(self.model.summary())
def build_efn_b0_model(num_classes):
    inputs = tf.keras.layers.Input(shape=(50, 50, 3))
    efn_b0_model = EfficientNetB0(include_top=False,
                                  input_tensor=inputs,
                                  weights="imagenet")

    # Freeze the pretrained weights
    efn_b0_model.trainable = False

    # Rebuild top
    x = tf.keras.layers.GlobalAveragePooling2D(name="avg_pool")(efn_b0_model.output)
    x = tf.keras.layers.BatchNormalization()(x)

    top_dropout_rate = 0.5
    x = tf.keras.layers.Dropout(top_dropout_rate, name="top_dropout")(x)
    outputs = tf.keras.layers.Dense(num_classes, activation="softmax", name="pred")(x)

    # Compile
    efn_b0_model = tf.keras.Model(inputs, outputs, name="EfficientNet")
    optimizer = tf.keras.optimizers.Adam(learning_rate=1e-2)
    efn_b0_model.compile(
        optimizer=optimizer,
        loss="sparse_categorical_crossentropy",
        metrics=["accuracy"]
    )
    return efn_b0_model
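# A usage sketch (assumption): build the model for a hypothetical 4-class task
# and run one epoch on random data just to illustrate the expected shapes;
# keras EfficientNet rescales inputs internally, so raw [0, 255] values are fine.
import numpy as np

model = build_efn_b0_model(num_classes=4)
x_dummy = (np.random.rand(8, 50, 50, 3) * 255.0).astype('float32')
y_dummy = np.random.randint(0, 4, size=(8,))
model.fit(x_dummy, y_dummy, epochs=1, batch_size=4)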
    seed = seed,
    subset = 'validation'
)  # Found 72000 images belonging to 1 classes.

test_data = test_gen.flow_from_directory(
    'C:/LPD_competition/t/test',
    target_size = (128, 128),
    class_mode = None,
    batch_size = batch,
    seed = seed,
    shuffle = False
)

#2. Model
eff = EfficientNetB0(include_top = False, input_shape = (128, 128, 3))
eff.trainable = False

model = Sequential()
model.add(eff)
model.add(MaxPooling2D(2, padding = 'same'))
model.add(Conv2D(1280, 2, padding = 'same'))
model.add(MaxPooling2D(2, padding = 'same'))
model.add(BatchNormalization())
model.add(Flatten())
model.add(Dense(5120, activation = 'relu', kernel_initializer = 'he_normal'))
model.add(Dropout(dropout))
model.add(Dense(2560, activation = 'relu', kernel_initializer = 'he_normal'))
model.add(Dense(1280, activation = 'relu', kernel_initializer = 'he_normal'))
model.add(Dense(1000, activation = 'softmax'))
)
'''

import tensorflow as tf
#from tensorflow.keras.applications.resnet50 import ResNet50
from tensorflow.keras.applications.efficientnet import EfficientNetB0
from tensorflow.keras.preprocessing import image
#from tensorflow.keras.applications.resnet50 import preprocess_input, decode_predictions
from tensorflow.keras.applications.efficientnet import preprocess_input, decode_predictions
import numpy as np

#model = ResNet50(weights='imagenet')
#model = EfficientNetB0(
#model = tf.keras.applications.EfficientNetB0(
model = EfficientNetB0(
    weights='imagenet'
)
print(model.summary())

#img_path = 'elephant.jpg'
img_path = 'dog.jpg'
img = image.load_img(img_path, target_size=(224, 224))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)

preds = model.predict(x)
# Decode the results into a list of tuples (class, description, probability)
# (one list per sample in the batch)
print('Predicted:', decode_predictions(preds, top=3)[0])
# Prediction: [(u'n02504013', u'Indian_elephant', 0.82658225), (u'n01871265', u'tusker', 0.1122357), (u'n02504458', u'African_elephant', 0.061040461)]
def create_feature_extactor(config: Dict):
    """
    Create the feature extractor based on pretrained existing keras models.
    :param config: dict holding the model and data config
    :return: feature extractor model
    """
    input_shape = (config["data"]["image_target_size"][0],
                   config["data"]["image_target_size"][1], 3)
    feature_extractor_type = config["model"]["feature_extractor"]["type"]
    weights = "imagenet"

    feature_extractor = Sequential(name='feature_extractor')
    if feature_extractor_type == "mobilenetv2":
        feature_extractor.add(
            MobileNetV2(include_top=False,
                        input_shape=input_shape,
                        weights=None,
                        pooling='avg'))
    elif feature_extractor_type == "efficientnetb0":
        feature_extractor.add(
            EfficientNetB0(include_top=False,
                           input_shape=input_shape,
                           weights=weights,
                           pooling='avg'))
    elif feature_extractor_type == "efficientnetb1":
        feature_extractor.add(
            EfficientNetB1(include_top=False,
                           input_shape=input_shape,
                           weights=weights,
                           pooling='avg'))
    elif feature_extractor_type == "efficientnetb2":
        feature_extractor.add(
            EfficientNetB2(include_top=False,
                           input_shape=input_shape,
                           weights=weights,
                           pooling='avg'))
    elif feature_extractor_type == "efficientnetb3":
        feature_extractor.add(
            EfficientNetB3(include_top=False,
                           input_shape=input_shape,
                           weights=weights,
                           pooling='avg'))
    elif feature_extractor_type == "efficientnetb4":
        feature_extractor.add(
            EfficientNetB4(include_top=False,
                           input_shape=input_shape,
                           weights=weights,
                           pooling='avg'))
    elif feature_extractor_type == "efficientnetb5":
        feature_extractor.add(
            EfficientNetB5(include_top=False,
                           input_shape=input_shape,
                           weights=weights,
                           pooling='avg'))
    elif feature_extractor_type == "efficientnetb6":
        feature_extractor.add(
            EfficientNetB6(include_top=False,
                           input_shape=input_shape,
                           weights=weights,
                           pooling='avg'))
    elif feature_extractor_type == "efficientnetb7":
        feature_extractor.add(
            EfficientNetB7(include_top=False,
                           input_shape=input_shape,
                           weights=weights,
                           pooling='avg'))
    elif feature_extractor_type == "resnet50":
        feature_extractor.add(
            ResNet50(include_top=False,
                     input_shape=input_shape,
                     weights=weights,
                     pooling='avg'))
    elif feature_extractor_type == "simple_cnn":
        feature_extractor.add(tf.keras.layers.Input(shape=input_shape))
        feature_extractor.add(
            SeparableConv2D(64,
                            kernel_size=3,
                            activation='relu',
                            input_shape=input_shape))
        for i in range(3):
            feature_extractor.add(
                SeparableConv2D(32, kernel_size=3, activation='relu'))
            feature_extractor.add(
                SeparableConv2D(32, kernel_size=3, activation='relu'))
            feature_extractor.add(MaxPool2D(pool_size=(2, 2)))
        feature_extractor.add(
            SeparableConv2D(32, kernel_size=3, activation='relu'))
        feature_extractor.add(
            SeparableConv2D(32, kernel_size=3, activation='relu'))
    elif feature_extractor_type == "fsconv":
        feature_extractor.add(tf.keras.layers.Input(shape=input_shape))
        feature_extractor.add(Conv2D(32, kernel_size=3, activation='relu'))
        feature_extractor.add(MaxPool2D(strides=(2, 2)))
        feature_extractor.add(Conv2D(124, kernel_size=3, activation='relu'))
        feature_extractor.add(MaxPool2D(strides=(2, 2)))
        feature_extractor.add(Conv2D(512, kernel_size=3, activation='relu'))
        feature_extractor.add(MaxPool2D(strides=(2, 2)))
    elif feature_extractor_type == "mnist_cnn":
        input_shape = (config["data"]["image_target_size"][0],
                       config["data"]["image_target_size"][1], 1)
        # feature_extractor.add(tf.keras.layers.Input(shape=input_shape))
        # feature_extractor.add(Conv2D(8, kernel_size=3, activation='relu', input_shape=input_shape))
        # feature_extractor.add(MaxPool2D(strides=(2, 2)))
        # feature_extractor.add(Conv2D(16, kernel_size=3, activation='relu', input_shape=input_shape))
        # feature_extractor.add(MaxPool2D(strides=(2, 2)))
        feature_extractor.add(Flatten(input_shape=(28, 28)))
        feature_extractor.add(tf.keras.layers.Dense(128, activation='relu'))
        feature_extractor.add(tf.keras.layers.Dense(64, activation='relu'))
    else:
        raise Exception("Choose valid model architecture!")

    if config["model"]["feature_extractor"]["global_max_pooling"]:
        feature_extractor.add(GlobalMaxPool2D())

    if config["model"]["feature_extractor"]["num_output_features"] > 0:
        activation = config["model"]["feature_extractor"]["output_activation"]
        feature_extractor.add(
            Dense(config["model"]["feature_extractor"]["num_output_features"],
                  activation=activation))

    # feature_extractor.build(input_shape=input_shape)
    return feature_extractor
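# A usage sketch (assumption): a minimal config dict containing only the keys
# this function reads; the concrete values are illustrative.
example_config = {
    "data": {"image_target_size": (224, 224)},
    "model": {
        "feature_extractor": {
            "type": "efficientnetb0",
            "global_max_pooling": False,   # the backbone already pools with 'avg'
            "num_output_features": 128,
            "output_activation": "relu",
        }
    },
}
feature_extractor = create_feature_extactor(example_config)
feature_extractor.summary()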
def download_for_url(self, path: str, **kwargs):
    """
    Download the file at the given URL
    :param path: the path to download
    :param kwargs: various kwargs for customizing the underlying behavior of
                   the model download and setup
    :return: the absolute path to the model
    """
    path_split = path.split('/')
    type = path_split[0]
    weights_file = path_split[1]
    # '_no_top' weight files correspond to models without the classification head.
    include_top = 'no_top' not in weights_file
    if type == 'vgg19':
        ret = VGG19(include_top=include_top, **kwargs)
    elif type == 'vgg16':
        ret = VGG16(include_top=include_top, **kwargs)
    elif type == 'resnet50':
        ret = ResNet50(include_top=include_top, **kwargs)
    elif type == 'resnet101':
        ret = ResNet101(include_top=include_top, **kwargs)
    elif type == 'resnet152':
        ret = ResNet152(include_top=include_top, **kwargs)
    elif type == 'resnet50v2':
        ret = ResNet50V2(include_top=include_top, **kwargs)
    elif type == 'resnet101v2':
        ret = ResNet101V2(include_top=include_top, **kwargs)
    elif type == 'resnet152v2':
        ret = ResNet152V2(include_top=include_top, **kwargs)
    elif type == 'densenet121':
        ret = DenseNet121(include_top=include_top, **kwargs)
    elif type == 'densenet169':
        ret = DenseNet169(include_top=include_top, **kwargs)
    elif type == 'densenet201':
        ret = DenseNet201(include_top=include_top, **kwargs)
    elif type == 'inceptionresnetv2':
        ret = InceptionResNetV2(include_top=include_top, **kwargs)
    elif type == 'efficientnetb0':
        ret = EfficientNetB0(include_top=include_top, **kwargs)
    elif type == 'efficientnetb1':
        ret = EfficientNetB1(include_top=include_top, **kwargs)
    elif type == 'efficientnetb2':
        ret = EfficientNetB2(include_top=include_top, **kwargs)
    elif type == 'efficientnetb3':
        ret = EfficientNetB3(include_top=include_top, **kwargs)
    elif type == 'efficientnetb4':
        ret = EfficientNetB4(include_top=include_top, **kwargs)
    elif type == 'efficientnetb5':
        ret = EfficientNetB5(include_top=include_top, **kwargs)
    elif type == 'efficientnetb6':
        ret = EfficientNetB6(include_top=include_top, **kwargs)
    elif type == 'efficientnetb7':
        ret = EfficientNetB7(include_top=include_top, **kwargs)
    elif type == 'mobilenet':
        ret = MobileNet(include_top=include_top, **kwargs)
    elif type == 'mobilenetv2':
        ret = MobileNetV2(include_top=include_top, **kwargs)
    # MobileNetV3() missing 2 required positional arguments: 'stack_fn' and 'last_point_ch'
    #elif type == 'mobilenetv3':
    #    mobile_net = MobileNetV3(include_top=include_top, **kwargs)
    elif type == 'inceptionv3':
        ret = InceptionV3(include_top=include_top, **kwargs)
    elif type == 'nasnet':
        ret = NASNetLarge(include_top=include_top, **kwargs)
    elif type == 'nasnet_mobile':
        ret = NASNetMobile(include_top=include_top, **kwargs)
    elif type == 'xception':
        ret = Xception(include_top=include_top, **kwargs)
    model_path = os.path.join(keras_path, weights_file)
    ret.save(model_path)
    return model_path
              metrics=['accuracy'])

model.fit(train_images, train_labels, epochs=30)
'''

import tensorflow as tf
from tensorflow.keras.applications.efficientnet import EfficientNetB0
from tensorflow.keras.preprocessing import image
#from tensorflow.keras.applications.resnet50 import preprocess_input, decode_predictions
from tensorflow.keras.applications.efficientnet import preprocess_input, decode_predictions
import numpy as np
import os  # needed for os.path.dirname below

#model = ResNet50(weights='imagenet')
#model = EfficientNetB0(
#model = tf.keras.applications.EfficientNetB0(
model = EfficientNetB0(weights=None, input_shape=(96, 96, 3), classes=10)

checkpoint_path = "training_1/cp.ckpt"
checkpoint_dir = os.path.dirname(checkpoint_path)
latest = tf.train.latest_checkpoint(checkpoint_dir)
print(latest)
model.load_weights(latest)
print(model.summary())

train_images = preprocess_input(train_images)

model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
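# A possible continuation (assumption, not part of the original snippet):
# evaluate the restored weights; test_images / test_labels are assumed to exist
# alongside the train_images / train_labels used above.
test_images = preprocess_input(test_images)
loss, acc = model.evaluate(test_images, test_labels, verbose=2)
print("Restored model accuracy: {:.4f}".format(acc))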
def train(args):
    '''
    Trains model and saves it to <model_save> directory
    '''
    if args['resume_training'] is None:
        efficientnet_backbone = EfficientNetB0(include_top=False,
                                               weights='imagenet',
                                               input_shape=(224, 224, 3),
                                               pooling='max')

        for layer in efficientnet_backbone.layers[:-20]:
            layer.trainable = False

        fcl = Dense(360,
                    activation='softmax',
                    kernel_initializer='glorot_uniform',
                    name='output')(efficientnet_backbone.output)
        model = Model(inputs=efficientnet_backbone.input, outputs=fcl)
    else:
        model = load_model(args['resume_training'],
                           custom_objects={'angle_loss': angle_loss})

    # Used for iterative training strategy
    # model = load_model("model_4.h5", custom_objects={'angle_loss': angle_loss})
    # for layer in model.layers[-25:]:
    #     layer.trainable = True

    total_img_paths = glob.glob(os.path.join(args['image_dir'], "*.jpg"))
    random.shuffle(total_img_paths)
    split = int(args['val_split'] * len(total_img_paths))

    # Splitting into training and validation sets
    val_img_paths = total_img_paths[:split]
    train_img_paths = total_img_paths[split:]

    # Defining training and validation generators
    train_gen = DataGenerator(train_img_paths,
                              rotate=True,
                              batch_size=args['batch_size'],
                              preprocess_function=preprocess_input,
                              dim=args['img_size'],
                              shuffle=True,
                              show_intermediate=False,
                              regress=args['regress'])
    val_gen = DataGenerator(val_img_paths,
                            rotate=True,
                            batch_size=args['batch_size'],
                            preprocess_function=preprocess_input,
                            dim=args['img_size'],
                            shuffle=True,
                            show_intermediate=False,
                            regress=args['regress'])

    checkpoint_dir = '../model_checkpoints' if args[
        'resume_training'] is None else args['resume_training']

    # Defining callbacks
    rlr = ReduceLROnPlateau(monitor='val_angle_loss',
                            patience=1,
                            verbose=1,
                            min_lr=1e-6)
    es = EarlyStopping(monitor='val_angle_loss',
                       patience=2,
                       verbose=1,
                       restore_best_weights=True)
    tsb = TensorBoard(log_dir=args['tb_dir'],
                      histogram_freq=0,
                      write_images=True,
                      write_graph=False,
                      update_freq='batch')
    ckpt = ModelCheckpoint(filepath=checkpoint_dir,
                           monitor='val_angle_loss',
                           verbose=1,
                           save_best_only=True)

    # Compiling the model
    model.compile(optimizer='adam',
                  loss='categorical_crossentropy',
                  metrics=[angle_loss])
    print(model.summary())

    # Start training
    print("Starting training..")
    model.fit(train_gen,
              steps_per_epoch=len(train_img_paths) // args['batch_size'],
              epochs=args['n_epochs'],
              callbacks=[es, rlr, ckpt, tsb],
              validation_data=val_gen,
              validation_batch_size=args['batch_size'],
              validation_steps=len(val_img_paths) // args['batch_size'])

    # Saving the trained model
    print("Saving model to:", args['model_save'])
    model.save(os.path.join(args['model_save'], "model.h5"))
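# A usage sketch (assumption): train() is presumably driven by an argparse CLI
# that is not shown here, so this dict only illustrates the keys it reads; all
# paths and hyperparameter values are placeholders.
if __name__ == '__main__':
    train({
        'resume_training': None,
        'image_dir': 'data/images',
        'val_split': 0.2,
        'batch_size': 32,
        'img_size': (224, 224),
        'regress': False,
        'tb_dir': 'logs',
        'n_epochs': 10,
        'model_save': 'saved_models',
    })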
    seed = seed,
    subset = 'validation'
)  # Found 72000 images belonging to 1 classes.

test_data = test_gen.flow_from_directory(
    'C:/data/LPD_competition/test',
    target_size = (256, 256),
    class_mode = None,
    batch_size = batch,
    seed = seed,
    shuffle = False
)

#2. Model
eff = EfficientNetB0(include_top = False, input_shape = (256, 256, 3))
eff.trainable = False

model = Sequential()
model.add(eff)
# model.add(MaxPooling2D(2, padding = 'same'))
# model.add(Conv2D(1280, 2, padding = 'same'))
# model.add(MaxPooling2D(2, padding = 'same'))
# model.add(BatchNormalization())
model.add(Flatten())
model.add(Dense(1500, activation = 'relu', kernel_initializer = 'he_normal'))
# model.add(Dropout(dropout))
# model.add(Dense(2560, activation = 'relu', kernel_initializer = 'he_normal'))
# model.add(Dense(1280, activation = 'relu', kernel_initializer = 'he_normal'))
model.add(Dense(1000, activation = 'softmax'))
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt

from pathlib import Path
from typing import List, Dict
from datetime import datetime

from fastapi import FastAPI, BackgroundTasks, File, UploadFile
from tensorflow.keras.applications.efficientnet import EfficientNetB0, decode_predictions

IMAGE_SIZE = (224, 224)

plt.switch_backend('Agg')

app = FastAPI()
model = EfficientNetB0(weights=None)
model.load_weights('weights/effnet-b0.ckpt')


def save_prediction(image: np.ndarray, classes: List[str], probs: List[float],
                    savepath: Path) -> None:
    fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 5))
    ax1.set_title('Input image')
    ax1.imshow(image)
    ax2.set_title('Top probabilities')
    ax2.barh(classes, probs)
    ax2.invert_yaxis()
    fig.tight_layout()
    plt.savefig(savepath)
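# A hedged sketch (not part of the original file) of a /predict endpoint that
# ties the pieces above together: read an uploaded image, run the model, and
# plot the top-3 classes in the background. Pillow is an extra dependency
# assumed here, and the 'predictions' output directory is an assumption.
from io import BytesIO
from PIL import Image


@app.post('/predict')
async def predict(background_tasks: BackgroundTasks, file: UploadFile = File(...)):
    raw = await file.read()
    image = np.array(Image.open(BytesIO(raw)).convert('RGB').resize(IMAGE_SIZE))

    preds = model.predict(image[np.newaxis, ...])
    top = decode_predictions(preds, top=3)[0]
    classes = [name for _, name, _ in top]
    probs = [float(prob) for _, _, prob in top]

    savepath = Path('predictions') / f'{datetime.now():%Y%m%d_%H%M%S}.png'
    savepath.parent.mkdir(exist_ok=True)
    background_tasks.add_task(save_prediction, image, classes, probs, savepath)
    return {'classes': classes, 'probs': probs}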