Example #1
# Assumed imports; WIDTH, HEIGHT and SEED are module-level constants defined
# elsewhere in the source file.
import tensorflow as tf
from tensorflow.keras import layers
from tensorflow.keras.applications import (
    EfficientNetB0, EfficientNetB1, EfficientNetB2, EfficientNetB3,
    EfficientNetB4, EfficientNetB5, EfficientNetB6, EfficientNetB7)

# Map variant names to constructors instead of a long if/elif chain.
EFFICIENTNETS = {
    "B0": EfficientNetB0, "B1": EfficientNetB1, "B2": EfficientNetB2,
    "B3": EfficientNetB3, "B4": EfficientNetB4, "B5": EfficientNetB5,
    "B6": EfficientNetB6, "B7": EfficientNetB7,
}

def build_model(num_classes, variant="B7"):
    # The original parameter was also named "model", which shadowed the
    # backbone; renamed to "variant" and given an explicit failure mode.
    if variant not in EFFICIENTNETS:
        raise ValueError(f"Unknown EfficientNet variant: {variant!r}")
    inputs = layers.Input(shape=(WIDTH, HEIGHT, 3))
    base = EFFICIENTNETS[variant](include_top=False,
                                  input_tensor=inputs,
                                  weights="imagenet")
    # Freeze the pretrained weights
    base.trainable = False

    # Rebuild the top
    x = layers.GlobalAveragePooling2D(name="avg_pool")(base.output)
    x = layers.BatchNormalization()(x)

    top_dropout_rate = 0.2
    x = layers.Dropout(top_dropout_rate, name="top_dropout", seed=SEED)(x)
    outputs = layers.Dense(num_classes,
                           activation="softmax",
                           name="predictions")(x)

    # Compile
    model = tf.keras.Model(inputs, outputs, name="EfficientNet")
    optimizer = tf.keras.optimizers.Adam(learning_rate=1e-2)
    model.compile(
        optimizer=optimizer,
        loss="sparse_categorical_crossentropy",
        metrics=[tf.keras.metrics.SparseTopKCategoricalAccuracy(k=1)])
    return model
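A minimal usage sketch; WIDTH, HEIGHT, SEED and the class count below are illustrative values, not from the source:

WIDTH, HEIGHT, SEED = 224, 224, 42  # illustrative module-level constants
model = build_model(num_classes=10, variant="B0")
model.summary()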
Example #2
# Assumed imports for this example.
import tensorflow as tf
from tensorflow.keras import Input, Model
from tensorflow.keras.layers import Dense
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.applications import (EfficientNetB0, EfficientNetB4,
                                           EfficientNetB5)

def get_parent(input_shape):
    # Pick the EfficientNet backbone matching the input resolution.
    if input_shape[0] == 256:
        parent = EfficientNetB0(include_top=False)
    elif input_shape[0] == 384:
        parent = EfficientNetB4(include_top=False)
    elif input_shape[0] == 512:
        parent = EfficientNetB5(include_top=False)
    else:
        print('Not a valid input shape for an EfficientNet parent')
        parent = None
    return parent

def model_dense(image_shape=(512, 512, 3)):
    in_src = Input(shape=image_shape)
    #d = BatchNormalization()(in_src)
    m = EfficientNetB5(include_top=False,
                       weights='imagenet',
                       input_shape=(512, 512, 3))(in_src)
    # Optionally freeze all but the last 46 backbone layers. Note: as written,
    # m is the backbone's output tensor, so this loop would need a reference
    # to the backbone model itself to work.
    # for layer in m.layers[:len(m.layers) - 46]:
    #     layer.trainable = False
    #x = tf.keras.layers.GlobalMaxPool2D()(model)
    x = tf.keras.layers.GlobalAveragePooling2D()(m)
    #x = Flatten()(model)
    x = Dense(2048, activation='relu')(x)
    x = Dense(1024, activation='relu')(x)
    x = Dense(3, activation='softmax')(x)
    model = Model(in_src, x)
    model.load_weights("saved-model-05-0.35.hdf5")
    opt = Adam(learning_rate=0.0002, beta_1=0.5)  # 'lr' is a deprecated alias
    model.compile(loss='categorical_crossentropy',
                  optimizer=opt,
                  metrics=['accuracy'])
    return model
Example #4
# Assumed imports; img_size is defined elsewhere in the source file.
import tensorflow as tf
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Dropout
from tensorflow.keras.applications import EfficientNetB5
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau

filepath = "best.model.hdf5"
checkpoint = ModelCheckpoint(filepath,
                             save_best_only=True,
                             save_weights_only=True,
                             monitor='val_loss',
                             mode='min', verbose=1)

early_stopping = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=15)

reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.3,
                              patience=3,
                              mode='min', verbose=1)

model = Sequential()
model.add(EfficientNetB5(include_top=False, weights="imagenet",
                         input_shape=(img_size, img_size, 3)))
model.add(tf.keras.layers.AveragePooling2D(pool_size=(3, 3)))
model.add(tf.keras.layers.Flatten())
model.add(Dropout(0.5))
model.add(tf.keras.layers.Dense(128, activation="relu"))
model.add(tf.keras.layers.Dense(64, activation="relu"))
model.add(tf.keras.layers.Dense(5, activation="softmax"))
model.compile(optimizer='adam',
              loss="categorical_crossentropy",
              metrics=["accuracy"])

model.summary()

#class_weights = class_weight.compute_class_weight(class_weight='balanced', classes=np.unique(train_data.classes), y=train_data.classes)
class_weights = {0:3.85975197,1: 1.95299487,2: 1.81047065,3: 0.32543726,4: 1.6563135}
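For context, a hedged sketch of how these pieces would typically be wired together; train_data appears in the commented line above, while the validation generator and epoch count are assumptions:

# Hypothetical training call; valid_data and epochs=30 are assumptions.
model.fit(train_data,
          validation_data=valid_data,  # assumed validation generator
          epochs=30,
          class_weight=class_weights,
          callbacks=[checkpoint, early_stopping, reduce_lr])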
Example #5
# Assumed imports; idg and idg2 are ImageDataGenerator instances defined in
# the clipped portion of the source.
from sklearn.model_selection import train_test_split
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.layers import (Conv2D, GlobalAveragePooling2D,
                                     Flatten, Dense, GaussianDropout)

# Reconstruction of the clipped split call (names and other arguments assumed;
# only shuffle=True survives in the source):
x_train, x_val, y_train, y_val = train_test_split(x, y, shuffle=True)

#control
image_size = (160, 160, 3)
bts = 32
optimizer = Adam(learning_rate=0.001)

train_generator = idg.flow(x_train, y_train, batch_size=bts, seed=2048)
valid_generator = idg2.flow(x_val, y_val)
test_generator = idg2.flow(target)

#2. MODEL
from tensorflow.keras.applications import EfficientNetB5
from tensorflow.keras import regularizers
TF = EfficientNetB5(weights="imagenet",
                    include_top=False,
                    input_shape=image_size)
TF.trainable = True
x = TF.output
x = Conv2D(256,
           2,
           padding='same',
           activation='swish',
           activity_regularizer=regularizers.l2(1e-4),
           kernel_regularizer=regularizers.l2(1e-4))(x)
x = GlobalAveragePooling2D()(x)
x = Flatten()(x)  # no-op after GlobalAveragePooling2D; kept from the source

x = Dense(3096, activation='swish')(x)
x = GaussianDropout(rate=0.2)(x)
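The listing is clipped at this point; a hedged completion in the usual pattern, where the class count and loss are assumptions:

# Hypothetical completion of the clipped head; class count and loss assumed.
from tensorflow.keras import Model
outputs = Dense(10, activation='softmax')(x)
model = Model(inputs=TF.input, outputs=outputs)
model.compile(optimizer=optimizer,  # Adam(learning_rate=0.001) defined above
              loss='categorical_crossentropy',
              metrics=['accuracy'])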
Example #6
# Assumed imports; scaler and the x/y arrays come from the clipped portion
# of the source.
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Flatten, Dense
from tensorflow.keras.applications import EfficientNetB5

x_val = scaler.transform(x_val)

#to_categorical
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)
y_val = to_categorical(y_val)

#reshape
x_train = x_train.reshape(-1, 32, 32, 3)
x_test = x_test.reshape(-1, 32, 32, 3)
x_val = x_val.reshape(-1, 32, 32, 3)
print(x_train.shape, x_test.shape, x_val.shape)

#2. Modeling
TF = EfficientNetB5(weights='imagenet',
                    include_top=False,
                    input_shape=(32, 32, 3))  # 16 layers
TF.trainable = False  # use the pretrained weights without training them
model = Sequential()
model.add(TF)
model.add(Flatten())
model.add(Dense(32, activation='relu'))
model.add(Dense(16, activation='relu'))
model.add(Dense(10, activation='softmax'))  # 10 classes, e.g. when using MNIST
model.summary()
print(len(TF.weights))  # 26
print(len(TF.trainable_weights))  # 0

# Compile, train
from tensorflow.keras.callbacks import EarlyStopping
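The example is clipped after this import; a hedged completion of the compile/train step, with loss, batch size, epochs and patience as assumptions:

# Hypothetical completion; all hyperparameters below are assumptions.
es = EarlyStopping(monitor='val_loss', patience=10, mode='min')
model.compile(loss='categorical_crossentropy', optimizer='adam',
              metrics=['accuracy'])
model.fit(x_train, y_train, epochs=100, batch_size=32,
          validation_data=(x_val, y_val), callbacks=[es])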
Example #7
# Assumed imports; "K" is taken to alias tensorflow.keras, matching the
# K.Input / K.layers usage below.
from tensorflow import keras as K
from tensorflow.keras.models import Sequential
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.applications import (
    EfficientNetB0, EfficientNetB1, EfficientNetB2, EfficientNetB3,
    EfficientNetB4, EfficientNetB5, EfficientNetB6, EfficientNetB7)

def create_efficientnet(width, height, depth, model_base,
                        first_layers_to_freeze, num_classes, learning_rate,
                        epochs):
    inputShape = (height, width, depth)

    inputs = K.Input(shape=inputShape)

    if model_base == "b0":
        effnet = EfficientNetB0(include_top=False,
                                input_tensor=inputs,
                                weights="imagenet")
    elif model_base == "b1":
        effnet = EfficientNetB1(include_top=False,
                                input_tensor=inputs,
                                weights="imagenet")
    elif model_base == "b2":
        effnet = EfficientNetB2(include_top=False,
                                input_tensor=inputs,
                                weights="imagenet")
    elif model_base == "b3":
        effnet = EfficientNetB3(include_top=False,
                                input_tensor=inputs,
                                weights="imagenet")
    elif model_base == "b4":
        effnet = EfficientNetB4(include_top=False,
                                input_tensor=inputs,
                                weights="imagenet")
    elif model_base == "b5":
        effnet = EfficientNetB5(include_top=False,
                                input_tensor=inputs,
                                weights="imagenet")
    elif model_base == "b6":
        effnet = EfficientNetB6(include_top=False,
                                input_tensor=inputs,
                                weights="imagenet")
    else:
        effnet = EfficientNetB7(include_top=False,
                                input_tensor=inputs,
                                weights="imagenet")

    # # Print architecture of effnet
    # for i, layer in enumerate(effnet.layers[:]):
    # 	print(i, layer.name, layer.output_shape)
    # print(f"Effnet len: {len(effnet.layers[:])}")

    # b0: 20; b2: 33; b4: 236; b6: 45; b7: 265
    for i, layer in enumerate(effnet.layers[:first_layers_to_freeze]):
        layer.trainable = False
    for i, layer in enumerate(effnet.layers[first_layers_to_freeze:]):
        if not isinstance(layer, K.layers.BatchNormalization):
            layer.trainable = True

    model = Sequential()
    model.add(effnet)
    model.add(K.layers.Dropout(0.25))
    model.add(K.layers.Dense(effnet.layers[-1].output_shape[3]))
    model.add(K.layers.LeakyReLU())
    model.add(K.layers.GlobalAveragePooling2D())
    model.add(K.layers.BatchNormalization())
    model.add(K.layers.Dropout(0.5))
    model.add(K.layers.Dense(num_classes, activation='softmax'))

    # Freeze the batchnorm layer of our model
    for i, layer in enumerate(model.layers[:]):
        if isinstance(layer, K.layers.BatchNormalization):
            layer.trainable = False

    # 'lr' is a deprecated alias; 'decay' is the legacy time-based decay
    # argument (available in tf.keras optimizers up to TF 2.10).
    opt = Adam(learning_rate=learning_rate, decay=learning_rate / epochs)
    model.compile(loss="categorical_crossentropy",
                  optimizer=opt,
                  metrics=["accuracy"])

    model.summary()

    return model
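A hedged usage sketch of this function; every value below is illustrative, with the freeze count taken from the b0 note in the comment above:

# Hypothetical call; argument values are illustrative only.
model = create_efficientnet(width=224, height=224, depth=3,
                            model_base="b0", first_layers_to_freeze=20,
                            num_classes=5, learning_rate=1e-3, epochs=30)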
Example #8
# Assumed imports; idg and idg2 are ImageDataGenerator instances defined in
# the clipped portion of the source.
from sklearn.model_selection import train_test_split
from tensorflow.keras import Model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.layers import GlobalAveragePooling2D, Flatten, Dense, Dropout

# Reconstruction of the clipped split call (left-hand side and input names
# assumed; only the trailing arguments survive in the source):
x_train, x_val, y_train, y_val = train_test_split(x, y,
                                                  train_size=0.9,
                                                  random_state=128,
                                                  shuffle=True)

#control
bts = 128
optimizer = Adam(learning_rate=1e-3)

train_generator = idg.flow(x_train, y_train, batch_size=bts, seed=2048)
valid_generator = idg2.flow(x_val, y_val)
test_generator = idg2.flow(target)

#2. MODEL
from tensorflow.keras.applications import EfficientNetB5
TF = EfficientNetB5(include_top=False,
                    weights='imagenet',
                    input_shape=x_train.shape[1:])
TF.trainable = True
x = TF.output
x = GlobalAveragePooling2D()(x)
x = Flatten()(x)
x = Dense(2048, activation='relu')(x)
x = Dropout(0.3)(x)
x = Dense(1000, activation='softmax')(x)
model = Model(inputs=TF.input, outputs=x)
model.summary()

#COMPILE
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau
model.compile(loss='categorical_crossentropy',
              optimizer=optimizer,
              metrics=['accuracy'])  # metrics argument assumed; the call is clipped in the source
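The listing cuts off here; a hedged sketch of the training step the callback imports suggest, with all callback settings and the epoch count as assumptions:

# Hypothetical training step; all hyperparameters below are assumptions.
es = EarlyStopping(monitor='val_loss', patience=20, mode='min')
lr = ReduceLROnPlateau(monitor='val_loss', factor=0.3, patience=5, mode='min')
cp = ModelCheckpoint('best.hdf5', save_best_only=True, monitor='val_loss', mode='min')
model.fit(train_generator, validation_data=valid_generator,
          epochs=100, callbacks=[es, lr, cp])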
Example #9
# Assumed imports; X_train/X_test, y_train/y_test and num_classes come from
# the clipped portion of the source.
import tensorflow as tf
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.applications import EfficientNetB5

training_datagen = ImageDataGenerator(rotation_range=10,
                                      brightness_range=(0.9, 1.1),  # bounds reordered to (min, max)
                                      shear_range=10.0,
                                      zoom_range=[0.8, 1.2],
                                      vertical_flip=True,
                                      fill_mode='wrap',
                                      width_shift_range=0.2,
                                      height_shift_range=0.2,
                                      horizontal_flip=True)

valid_datagen = ImageDataGenerator()

t_gen = training_datagen.flow(X_train, y_train, batch_size=4)
v_gen = valid_datagen.flow(X_test, y_test, batch_size=4)
#t_gen = training_datagen.flow(X_train, y_train)
#v_gen = valid_datagen.flow(X_test, y_test)

image_input = tf.keras.Input(shape=(456, 456, 3))
#model = ResNet50(weights='imagenet')
model = EfficientNetB5(input_tensor=image_input, include_top=False, weights="imagenet")
model.summary()
model.trainable = False
last_layer = model.output
last_layer = tf.keras.layers.GlobalAveragePooling2D(name='avg_pool_last')(last_layer)
#x = tf.keras.layers.Flatten(name='flatten')(last_layer)
last_layer = tf.keras.layers.BatchNormalization(name='batch_last')(last_layer)
top_dropout_rate = 0.2
last_layer = tf.keras.layers.Dropout(top_dropout_rate, name='top_dropout')(last_layer)
out = tf.keras.layers.Dense(num_classes, activation='softmax', name='output_layer')(last_layer)
#image_input = Input(shape=(500, 500, 3))
custom_resnet_model = tf.keras.Model(inputs=image_input, outputs=out)
custom_resnet_model.summary()

#for layer in custom_resnet_model.layers[:-4]:
#	layer.trainable = False
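The commented-out lines above hint at a later fine-tuning phase; a hedged sketch of one way to do it, where the layer count and learning rate are assumptions:

# Hypothetical fine-tuning step; layer count and learning rate are assumptions.
model.trainable = True                 # unfreeze the EfficientNetB5 backbone
for layer in model.layers[:-20]:       # keep all but the last 20 layers frozen
    layer.trainable = False
custom_resnet_model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=1e-5),
                            loss='categorical_crossentropy',
                            metrics=['accuracy'])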
Example #10
# Assumed imports, as in the earlier variant of this function ("K" again
# taken to alias tensorflow.keras).
from tensorflow import keras as K
from tensorflow.keras.models import Sequential
from tensorflow.keras.applications import (
    EfficientNetB0, EfficientNetB1, EfficientNetB2, EfficientNetB3,
    EfficientNetB4, EfficientNetB5, EfficientNetB6, EfficientNetB7)

def create_efficientnet(width, height, depth, model_base,
                        first_layers_to_freeze):
    inputShape = (height, width, depth)

    inputs = K.Input(shape=inputShape)

    if model_base == "b0":
        effnet = EfficientNetB0(include_top=False,
                                input_tensor=inputs,
                                weights="imagenet")
    elif model_base == "b1":
        effnet = EfficientNetB1(include_top=False,
                                input_tensor=inputs,
                                weights="imagenet")
    elif model_base == "b2":
        effnet = EfficientNetB2(include_top=False,
                                input_tensor=inputs,
                                weights="imagenet")
    elif model_base == "b3":
        effnet = EfficientNetB3(include_top=False,
                                input_tensor=inputs,
                                weights="imagenet")
    elif model_base == "b4":
        effnet = EfficientNetB4(include_top=False,
                                input_tensor=inputs,
                                weights="imagenet")
    elif model_base == "b5":
        effnet = EfficientNetB5(include_top=False,
                                input_tensor=inputs,
                                weights="imagenet")
    elif model_base == "b6":
        effnet = EfficientNetB6(include_top=False,
                                input_tensor=inputs,
                                weights="imagenet")
    else:
        effnet = EfficientNetB7(include_top=False,
                                input_tensor=inputs,
                                weights="imagenet")

    # # Print architecture of effnet
    # for i, layer in enumerate(effnet.layers[:]):
    # 	print(i, layer.name, layer.output_shape)

    # b0: 20; b2: 33; b4: 147; b6: 45; b7: 265

    for i, layer in enumerate(effnet.layers[:first_layers_to_freeze]):
        layer.trainable = False
    for i, layer in enumerate(effnet.layers[first_layers_to_freeze:]):
        layer.trainable = True

    effnet.summary()

    model = Sequential()
    model.add(effnet)
    model.add(K.layers.Dropout(0.25))
    model.add(K.layers.Dense(effnet.layers[-1].output_shape[3]))
    model.add(K.layers.LeakyReLU())
    model.add(K.layers.GlobalAveragePooling2D())
    model.add(K.layers.Dropout(0.5))
    model.add(K.layers.Dense(1, activation='linear'))

    return model
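This variant returns an uncompiled single-output regression model; a hedged compile sketch, with the loss, optimizer and argument values as assumptions (the freeze count follows the b4 note in the comment above):

# Hypothetical compile step; all settings below are assumptions.
model = create_efficientnet(224, 224, 3, "b4", first_layers_to_freeze=147)
model.compile(loss='mse',
              optimizer=K.optimizers.Adam(learning_rate=1e-3),
              metrics=['mae'])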