    def __init__(self,
                 model_id='mobilenet_v1',
                 input_shape=(256, 256, 3),
                 output_stride=16):
        assert model_id == 'mobilenet_v1'

        super(MobileNetV1, self).__init__()
        self.output_stride = output_stride
        # Note: despite the class name and the assert above, the backbone
        # feature extractor used here is EfficientNetB2.
        self.features = EfficientNetB2(
            input_shape=input_shape,
            include_top=False,
            weights='imagenet',
            classifier_activation=None,
        )
        # self.features.trainable = False

        self.Dblock1 = DetectionBlock(144)
        self.concat1 = tf.keras.layers.Concatenate()
        self.Dblock2 = DetectionBlock(144)
        self.concat2 = tf.keras.layers.Concatenate()
        self.Dblock3 = DetectionBlock(144)

        self.conv1 = Conv2D(17, 1, 1, padding='same')
        self.bn1 = BatchNormalization()
        self.dconv1 = Conv2DTranspose(17, 4, 2, padding='same')
        self.dconv2 = Conv2DTranspose(17, 4, 2, padding='same')
        self.dconv3 = Conv2DTranspose(17, 4, 2, padding='same')
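DetectionBlock is not defined in this snippet; a minimal sketch of a plausible stand-in, assuming a plain Conv-BN-ReLU block with the given filter count (the original implementation may differ):

# Hypothetical stand-in for the undefined DetectionBlock; the original is not
# shown. Assumed here: Conv -> BN -> ReLU with `filters` output channels.
class DetectionBlock(tf.keras.layers.Layer):
    def __init__(self, filters):
        super().__init__()
        self.conv = tf.keras.layers.Conv2D(filters, 3, padding='same', use_bias=False)
        self.bn = tf.keras.layers.BatchNormalization()

    def call(self, x, training=False):
        return tf.nn.relu(self.bn(self.conv(x), training=training))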
Example #2
def make_model(ensemble_size=3):
    input_layer = layers.Input(shape=(256, 256, 3))
    ensemble = []
    for i in range(ensemble_size):
        ensemble.append(
            EfficientNetB2(weights='imagenet',
                           include_top=False,
                           drop_connect_rate=0.4,
                           pooling='avg',
                           input_tensor=input_layer))
        # Give each ensemble member's layers unique names to avoid
        # duplicate-layer-name clashes in the combined model.
        for layer in ensemble[i].layers:
            layer._name = str(layer._name) + '_' + str(i)
        freeze_model(ensemble[i], 4)

    ensemble_outputs = [ensemble[i].output for i in range(ensemble_size)]
    x = layers.Concatenate()(ensemble_outputs)
    x = layers.GaussianNoise(1.0)(x)
    x = layers.Dense(2, activation='softmax', kernel_regularizer='l1_l2')(x)

    model = Model(input_layer, x)
    #model.summary()
    model.compile(optimizer=optimizers.Adam(1e-4),
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])
    return model
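freeze_model is called here (and again in a later example) but never defined; a minimal sketch, assuming the integer argument is the number of trailing layers to keep trainable:

# Hypothetical freeze_model: the original is not shown. Assumed semantics:
# freeze everything except the last `num_trainable` layers.
def freeze_model(model, num_trainable):
    for layer in model.layers[:-num_trainable]:
        layer.trainable = False
    for layer in model.layers[-num_trainable:]:
        layer.trainable = True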
Example #3
def EfficientNet(cfg):
    regularizer = l2(cfg.TRAIN.WD)

    if cfg.MODEL.SIZE == 0:
        backbone = EfficientNetB0(include_top=False,
                                  input_shape=tuple(cfg.DATASET.INPUT_SHAPE))
    elif cfg.MODEL.SIZE == 1:
        backbone = EfficientNetB1(include_top=False,
                                  input_shape=tuple(cfg.DATASET.INPUT_SHAPE))
    elif cfg.MODEL.SIZE == 2:
        backbone = EfficientNetB2(include_top=False,
                                  input_shape=tuple(cfg.DATASET.INPUT_SHAPE))
    elif cfg.MODEL.SIZE == 3:
        backbone = EfficientNetB3(include_top=False,
                                  input_shape=tuple(cfg.DATASET.INPUT_SHAPE))
    elif cfg.MODEL.SIZE == 4:
        backbone = EfficientNetB4(include_top=False,
                                  input_shape=tuple(cfg.DATASET.INPUT_SHAPE))
    else:
        raise ValueError(f'Unsupported cfg.MODEL.SIZE: {cfg.MODEL.SIZE}')

    backbone = add_regularization(backbone, regularizer)

    d, w, _ = scaling_parameters(cfg.DATASET.INPUT_SHAPE)

    width_coefficient = cfg.MODEL.WIDTH_COEFFICIENT * w
    depth_divisor = cfg.MODEL.DEPTH_DIVISOR
    head_filters = cfg.MODEL.HEAD_CHANNELS
    head_kernel = cfg.MODEL.HEAD_KERNEL
    head_activation = cfg.MODEL.HEAD_ACTIVATION
    keypoints = cfg.DATASET.OUTPUT_SHAPE[-1]
    regularizer = l2(cfg.TRAIN.WD)

    x = backbone.layers[-1].output
    for i in range(cfg.MODEL.HEAD_BLOCKS):
        x = layers.Conv2DTranspose(round_filters(head_filters,
                                                 width_coefficient,
                                                 depth_divisor),
                                   head_kernel,
                                   strides=2,
                                   padding='same',
                                   use_bias=False,
                                   kernel_initializer=CONV_KERNEL_INITIALIZER,
                                   kernel_regularizer=regularizer,
                                   name='head_block{}_conv'.format(i + 1))(x)
        x = layers.BatchNormalization(name='head_block{}_bn'.format(i + 1))(x)
        x = layers.Activation(head_activation,
                              name='head_block{}_activation'.format(i + 1))(x)

    x = layers.Conv2D(keypoints,
                      cfg.MODEL.FINAL_KERNEL,
                      padding='same',
                      use_bias=True,
                      kernel_initializer=DENSE_KERNEL_INITIALIZER,
                      kernel_regularizer=regularizer,
                      name='final_conv')(x)

    return Model(backbone.input, x, name=f'EfficientNetLite_{cfg.MODEL.SIZE}')
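add_regularization and scaling_parameters are project-specific helpers not shown here. round_filters presumably follows the standard EfficientNet width-scaling rule; for reference, the usual implementation:

def round_filters(filters, width_coefficient, depth_divisor):
    """Round a filter count per the EfficientNet width-scaling rule."""
    filters *= width_coefficient
    new_filters = max(depth_divisor,
                      int(filters + depth_divisor / 2) // depth_divisor * depth_divisor)
    # Make sure that rounding down does not drop filters by more than 10%.
    if new_filters < 0.9 * filters:
        new_filters += depth_divisor
    return int(new_filters)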
Example #4
def build_model(num_classes, model="B7"):
    inputs = layers.Input(shape=(WIDTH, HEIGHT, 3))
    if model == "B7":
        model = EfficientNetB7(include_top=False,
                               input_tensor=inputs,
                               weights="imagenet")
    elif model == "B6":
        model = EfficientNetB6(include_top=False,
                               input_tensor=inputs,
                               weights="imagenet")
    elif model == "B5":
        model = EfficientNetB5(include_top=False,
                               input_tensor=inputs,
                               weights="imagenet")
    elif model == "B4":
        model = EfficientNetB4(include_top=False,
                               input_tensor=inputs,
                               weights="imagenet")
    elif model == "B3":
        model = EfficientNetB3(include_top=False,
                               input_tensor=inputs,
                               weights="imagenet")
    elif model == "B2":
        model = EfficientNetB2(include_top=False,
                               input_tensor=inputs,
                               weights="imagenet")
    elif model == "B1":
        model = EfficientNetB1(include_top=False,
                               input_tensor=inputs,
                               weights="imagenet")
    elif model == "B0":
        model = EfficientNetB0(include_top=False,
                               input_tensor=inputs,
                               weights="imagenet")
    # Freeze the pretrained weights
    model.trainable = False

    # Rebuild top
    x = layers.GlobalAveragePooling2D(name="avg_pool")(model.output)
    x = layers.BatchNormalization()(x)

    top_dropout_rate = 0.2
    x = layers.Dropout(top_dropout_rate, name="top_dropout", seed=SEED)(x)
    outputs = layers.Dense(num_classes,
                           activation="softmax",
                           name="predictions")(x)

    # Compile
    model = tf.keras.Model(inputs, outputs, name="EfficientNet")
    optimizer = tf.keras.optimizers.Adam(learning_rate=1e-2)
    model.compile(
        optimizer=optimizer,
        loss="sparse_categorical_crossentropy",
        metrics=[tf.keras.metrics.SparseTopKCategoricalAccuracy(k=1)])
    return model
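WIDTH, HEIGHT, and SEED are module-level globals this snippet assumes; a usage sketch under that assumption (the values below are hypothetical):

WIDTH, HEIGHT, SEED = 224, 224, 42  # hypothetical values; not in the original
model = build_model(num_classes=10, model="B2")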
Example #5
def build_network(image_size, n_classes):
    image = layers.Input([*image_size, 3], dtype=tf.float32, name='input')

    effnet = EfficientNetB2(include_top=False,
                            weights='imagenet',
                            pooling='avg')
    feature = effnet(image)
    feature = layers.Dropout(0.5)(feature)
    logit = layers.Dense(n_classes)(feature)
    model = tf.keras.Model(inputs=image, outputs=logit)
    return model
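Since the final Dense layer has no activation, the returned model emits raw logits; a usage sketch pairing it with a from_logits loss (260x260 is B2's native resolution):

model = build_network(image_size=(260, 260), n_classes=10)
model.compile(optimizer='adam',
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
              metrics=['accuracy'])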
Example #6
    def make_conv_base(self, weights):
        cb = EfficientNetB2(weights='imagenet',
                            include_top=False,
                            drop_connect_rate=0.4,
                            pooling='avg',
                            input_shape=(self.tile_size, self.tile_size, 3))
        x = cb.output
        x = layers.Dropout(0.3)(x)
        x = layers.Dense(2, activation='softmax', kernel_regularizer='l1_l2')(x)

        model = Model(cb.input, x)
        model.compile(optimizer=optimizers.Adam(), loss='sparse_categorical_crossentropy', metrics=['accuracy'])
        model.load_weights(weights)
        self.conv_base = Model(model.input, model.layers[-3].output)
Example #7
def spy_model(name):
    "Creates the model."
    with tf.compat.v1.Session(graph=tf.Graph()) as session:
        if name == "MobileNet":
            model = MobileNet()
        elif name == "EfficientNetB2":
            model = EfficientNetB2()
        else:
            raise ValueError("Unknown model name %r." % name)

        graph_def = tf.compat.v1.graph_util.convert_variables_to_constants(
            sess=session,
            input_graph_def=session.graph_def,
            output_node_names=[model.output.op.name])

    return graph_def, model
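A usage sketch; under TF2, this v1-Session graph-freezing pattern generally needs eager execution disabled first (an assumption about the caller's environment):

import tensorflow as tf

tf.compat.v1.disable_eager_execution()  # usually required under TF2 for v1 Sessions
graph_def, model = spy_model("EfficientNetB2")
print(model.output.op.name)  # name of the frozen output node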
Example #8
def make_model(weights=None):
    cb = EfficientNetB2(weights='imagenet',
                        include_top=False,
                        drop_connect_rate=0.4,
                        pooling='avg',
                        input_shape=(256, 256, 3))
    x = cb.output
    x = layers.GaussianDropout(0.3)(x)
    x = layers.Dense(2, activation='softmax', kernel_regularizer='l1_l2')(x)
    model = Model(cb.input, x)
    #model.summary()
    model.compile(optimizer=optimizers.Adam(1e-4),
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])
    if weights:
        model.load_weights(weights)
    return model
Example #9
def make_model(num_classes):
    reg = regularizers.l1_l2(0.01, 0.01)
    cb = EfficientNetB2(weights='imagenet', include_top=False, drop_connect_rate=0.4, pooling='avg', input_shape=(256, 256, 3))
    #cb.trainable = False
    freeze_model(cb, 3)
    x = cb.output
    #x = layers.GaussianNoise(0.5)(x)
    #x = layers.BatchNormalization()(x)
    #x = layers.Dropout(0.3)(x)
    #x = layers.Dense(128, activation='relu', kernel_regularizer=reg)(x)
    x = layers.Dropout(0.4)(x)
    x = layers.Dense(num_classes, activation='softmax', kernel_regularizer=reg)(x)
    model = Model(cb.input, x)
    #model.summary()
    #pr = tf.keras.metrics.AUC(name='PR', curve='PR')
    model.compile(optimizer=optimizers.Adam(1e-4), loss='sparse_categorical_crossentropy', metrics=['accuracy'])
    return model
Example #10
File: models.py  Project: azeus404/thesis
def EfficientNetB2model(no_classes, shape):
    """
    EfficientNetB2
    https://keras.io/examples/vision/image_classification_efficientnet_fine_tuning/
    Uses a fixed input size 224,224
    """
    base_model = EfficientNetB2(include_top=False,
                                weights='imagenet',
                                input_shape=shape)
    base_model.trainable = False
    inputs = Input(shape=shape)
    x = base_model(inputs, training=False)
    x = GlobalAveragePooling2D()(x)
    x = Dense(1024, activation='relu')(x)
    predictions = Dense(no_classes, activation='softmax',
                        name='predictions')(x)
    model = Model(inputs, outputs=predictions)
    return model
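The function returns an uncompiled model; a usage sketch (260x260 is B2's native resolution):

model = EfficientNetB2model(no_classes=5, shape=(260, 260, 3))
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])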
Example #11
def getEfficientNetModelB2():
    model = EfficientNetB2(include_top=True,
                           weights=None,
                           classes=3,
                           classifier_activation='softmax')
    return model
Example #12
def make_feature_extractor():
    conv_base = EfficientNetB2(weights='imagenet',
                               include_top=False,
                               input_shape=(image_rez, image_rez, 3))
    output = layers.GlobalAveragePooling2D()(conv_base.output)
    return Model(conv_base.input, output), output.shape[-1]
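image_rez is a global the snippet assumes; a usage sketch that also captures the feature dimension (the pooled EfficientNetB2 feature vector has 1408 channels):

image_rez = 260  # assumed global; not defined in the original snippet
extractor, n_features = make_feature_extractor()
print(n_features)  # 1408 for EfficientNetB2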
Example #13
test_value = df[df['date'] >=  '2020-09-01']

x_pred = test_value.iloc[:,1:-1].astype('int64').to_numpy()
y_pred = test_value.iloc[:,-1].astype('int64').to_numpy()
x_pred = x_pred.reshape(x_pred.shape[0], 7,1,3)

def RMSE(y_test, y_predict): 
    return np.sqrt(mean_squared_error(y_test, y_predict)) 

x_train, x_val, y_train, y_val = train_test_split(x_train, y_train,  train_size=0.8, random_state = 77, shuffle=True ) 
x_train = x_train.reshape(x_train.shape[0], 7, 1, 3)
print(x_train.shape, x_val.shape, x_pred.shape)

# 2. Build the model
leaky_relu = tf.nn.leaky_relu
# Note: Keras EfficientNet models require input sizes of at least 32x32, so an
# input_shape of (7, 1, 3) will raise a ValueError here; the data would need to
# be reshaped or padded up to at least 32x32 first.
mobile = EfficientNetB2(include_top=False, weights='imagenet', input_shape=(7, 1, 3))
# mobile = VGG16(include_top=False,weights='imagenet',input_shape=x_train.shape[1:])
mobile.trainable = True
a = mobile.output
a = GlobalAveragePooling2D()(a)
a = Flatten()(a)
a = Dense(1024, activation= 'swish')(a)
a = Dropout(0.2)(a)
a = Dense(1, activation= 'swish')(a)
model = Model(inputs = mobile.input, outputs = a)

# 3. Compile and train
modelpath = '../data/h5/regressor_LSTM.hdf5'
# es= EarlyStopping(monitor='val_loss', patience=10)
reduce_lr = ReduceLROnPlateau(monitor='val_loss', patience=5, factor=0.5, verbose=1)
Example #14
kfold = KFold(n_splits=5, shuffle=True)

train_generator = idg.flow(x_train, y_train, batch_size=32)
# seed => random_state
valid_generator = idg2.flow(x_valid, y_valid)
test_generator = x_pred

mc = ModelCheckpoint('C:/LPD_competition/lotte_m2_projcet.h5',
                     save_best_only=True,
                     verbose=1)

from tensorflow.keras.models import Model
from tensorflow.keras.layers import GlobalAveragePooling2D, Flatten, BatchNormalization, Dense, Activation
from tensorflow.keras.applications import VGG19, MobileNet, InceptionResNetV2, InceptionV3, EfficientNetB2
mobile_net = EfficientNetB2(weights="imagenet",
                            include_top=False,
                            input_shape=(128, 128, 3))

# for layer in mobile_net.layers:
# layer.trainable = False

top_model = mobile_net.output
top_model = GlobalAveragePooling2D()(top_model)
# top_model = Flatten()(top_model)
top_model = Dense(4048, activation="swish")(top_model)
# top_model = Dense(1024, activation="relu")(top_model)
# top_model = Dense(512, activation="relu")(top_model)
top_model = Dense(1000, activation="softmax")(top_model)

model = Model(inputs=mobile_net.input, outputs=top_model)
Example #15
# construct the training image generator for data augmentation
aug = ImageDataGenerator(rotation_range=20,
                         zoom_range=0.15,
                         width_shift_range=0.2,
                         height_shift_range=0.2,
                         shear_range=0.15,
                         horizontal_flip=True,
                         fill_mode="nearest")

# load the EfficientNetB2 network (the MobileNetV2 version is left commented
# out below), ensuring the head FC layers are left off
# baseModel = MobileNetV2(weights="imagenet", include_top=False,
# 	input_tensor=Input(shape=(224, 224, 3)))

baseModel = EfficientNetB2(weights='imagenet',
                           include_top=False,
                           input_tensor=Input(shape=(260, 260, 3)))
# construct the head of the model that will be placed on top of
# the base model
headModel = baseModel.output
headModel = AveragePooling2D(pool_size=(7, 7))(headModel)
headModel = Flatten(name="flatten")(headModel)
headModel = Dense(128, activation="relu")(headModel)
headModel = Dropout(0.5)(headModel)
headModel = Dense(2, activation="softmax")(headModel)

# place the head FC model on top of the base model (this will become
# the actual model we will train)
model = Model(inputs=baseModel.input, outputs=headModel)

# loop over all layers in the base model and freeze them so they will
# not be updated during training
for layer in baseModel.layers:
    layer.trainable = False
Example #16
x_train, x_valid, y_train, y_valid = train_test_split(x,
                                                      y,
                                                      train_size=0.8,
                                                      shuffle=True,
                                                      random_state=SEED)

train_generator = idg.flow(x_train, y_train, batch_size=8)
valid_generator = idg2.flow(x_valid, y_valid)
test_generator = idg3.flow(x_pred, shuffle=False)

from keras.utils.generic_utils import get_custom_objects
from tensorflow.python.keras.activations import swish
from keras.layers import Activation
get_custom_objects().update({'swish': Activation(swish)})

md = EfficientNetB2(input_shape=IMAGE_SIZE,
                    weights="imagenet",
                    include_top=False)
for layer in md.layers:
    layer.trainable = True
c = md.output
c = GlobalAveragePooling2D()(c)
c = Flatten()(c)
c = Dense(6048, activation='swish')(c)
c = Dropout(0.2)(c)
c = Dense(1000, activation="softmax")(c)
model = Model(inputs=md.input, outputs=c)

from tensorflow.keras.callbacks import EarlyStopping, ReduceLROnPlateau
cp = ModelCheckpoint('C:/LPD_competition/lotte_projcet0323.h5',
                     save_best_only=True,
                     verbose=1)
Example #17

#EfficientNetB0
model = EfficientNetB0()

model.trainable = True

model.summary()
print(len(model.weights))
print(len(model.trainable_weights))


#EfficientNetB1
model = EfficientNetB1()

model.trainable = True

model.summary()
print(len(model.weights))
print(len(model.trainable_weights))

#EfficientNetB2
model = EfficientNetB2()

model.trainable = True

model.summary()
print(len(model.weights))
print(len(model.trainable_weights))
Example #18
import cv2

from PIL import Image

from tensorflow.keras.applications import EfficientNetB2
from tensorflow.keras.applications.efficientnet import preprocess_input
from keras.layers import Conv2D, Dense, Flatten, BatchNormalization,\
    Activation, GaussianDropout, GlobalAveragePooling2D, Input, Dropout
from keras.models import Model
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import EarlyStopping, ReduceLROnPlateau, ModelCheckpoint

from sklearn.model_selection import train_test_split, KFold

eff = EfficientNetB2(
    include_top=False,
    input_shape=(256, 256, 3)
)

eff.trainable = True

es = EarlyStopping(
    monitor='val_loss',
    patience=30,
    verbose=1
)

rl = ReduceLROnPlateau(
    monitor='val_loss',
    patience=10,
    verbose=1
)
Example #19
def create_efficientnet(width, height, depth, model_base,
                        first_layers_to_freeze, num_classes, learning_rate,
                        epochs):
    inputShape = (height, width, depth)

    inputs = K.Input(shape=inputShape)

    if model_base == "b0":
        effnet = EfficientNetB0(include_top=False,
                                input_tensor=inputs,
                                weights="imagenet")
    elif model_base == "b1":
        effnet = EfficientNetB1(include_top=False,
                                input_tensor=inputs,
                                weights="imagenet")
    elif model_base == "b2":
        effnet = EfficientNetB2(include_top=False,
                                input_tensor=inputs,
                                weights="imagenet")
    elif model_base == "b3":
        effnet = EfficientNetB3(include_top=False,
                                input_tensor=inputs,
                                weights="imagenet")
    elif model_base == "b4":
        effnet = EfficientNetB4(include_top=False,
                                input_tensor=inputs,
                                weights="imagenet")
    elif model_base == "b5":
        effnet = EfficientNetB5(include_top=False,
                                input_tensor=inputs,
                                weights="imagenet")
    elif model_base == "b6":
        effnet = EfficientNetB6(include_top=False,
                                input_tensor=inputs,
                                weights="imagenet")
    else:
        effnet = EfficientNetB7(include_top=False,
                                input_tensor=inputs,
                                weights="imagenet")

    # # Print architecture of effnet
    # for i, layer in enumerate(effnet.layers[:]):
    # 	print(i, layer.name, layer.output_shape)
    # print(f"Effnet len: {len(effnet.layers[:])}")

    # b0: 20; b2: 33; b4: 236; b6: 45; b7: 265
    for i, layer in enumerate(effnet.layers[:first_layers_to_freeze]):
        layer.trainable = False
    for i, layer in enumerate(effnet.layers[first_layers_to_freeze:]):
        if not isinstance(layer, K.layers.BatchNormalization):
            layer.trainable = True

    model = Sequential()
    model.add(effnet)
    model.add(K.layers.Dropout(0.25))
    model.add(K.layers.Dense(effnet.layers[-1].output_shape[3]))
    model.add(K.layers.LeakyReLU())
    model.add(K.layers.GlobalAveragePooling2D())
    model.add(K.layers.BatchNormalization())
    model.add(K.layers.Dropout(0.5))
    model.add(K.layers.Dense(num_classes, activation='softmax'))

    # Freeze the batchnorm layer of our model
    for i, layer in enumerate(model.layers[:]):
        if isinstance(layer, K.layers.BatchNormalization):
            layer.trainable = False

    opt = Adam(lr=learning_rate, decay=learning_rate / epochs)
    model.compile(loss="categorical_crossentropy",
                  optimizer=opt,
                  metrics=["accuracy"])

    model.summary()

    return model
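A usage sketch, taking the b2 freeze point from the comment above (the other arguments are placeholders):

model = create_efficientnet(width=224, height=224, depth=3,
                            model_base="b2", first_layers_to_freeze=33,
                            num_classes=5, learning_rate=1e-3, epochs=30)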
Example #20
x_train, x_valid, y_train, y_valid = train_test_split(x,
                                                      y,
                                                      train_size=0.9,
                                                      shuffle=True,
                                                      random_state=66)

train_generator = idg.flow(x_train, y_train, batch_size=28, seed=2048)
# seed => random_state
valid_generator = idg2.flow(x_valid, y_valid)
test_generator = idg2.flow(x_pred)

mc = ModelCheckpoint('C:/LPD_competition/lotte_0322_2.h5',
                     save_best_only=True,
                     verbose=1)
mobile = EfficientNetB2(include_top=False,
                        weights='imagenet',
                        input_shape=x_train.shape[1:])
mobile.trainable = True
a = mobile.output
a = GlobalAveragePooling2D()(a)
a = Flatten()(a)
a = Dense(4048, activation='swish')(a)
a = Dropout(0.2)(a)
a = Dense(1000, activation='softmax')(a)

model = Model(inputs=mobile.input, outputs=a)

# early_stopping = EarlyStopping(patience= 30)
# lr = ReduceLROnPlateau(patience= 15, factor=0.5)

# model.compile(loss='categorical_crossentropy', optimizer=SGD(lr=0.1), metrics=['acc'])
Example #21
def create_efficientnet(width, height, depth, model_base,
                        first_layers_to_freeze):
    inputShape = (height, width, depth)

    inputs = K.Input(shape=inputShape)

    if model_base == "b0":
        effnet = EfficientNetB0(include_top=False,
                                input_tensor=inputs,
                                weights="imagenet")
    elif model_base == "b1":
        effnet = EfficientNetB1(include_top=False,
                                input_tensor=inputs,
                                weights="imagenet")
    elif model_base == "b2":
        effnet = EfficientNetB2(include_top=False,
                                input_tensor=inputs,
                                weights="imagenet")
    elif model_base == "b3":
        effnet = EfficientNetB3(include_top=False,
                                input_tensor=inputs,
                                weights="imagenet")
    elif model_base == "b4":
        effnet = EfficientNetB4(include_top=False,
                                input_tensor=inputs,
                                weights="imagenet")
    elif model_base == "b5":
        effnet = EfficientNetB5(include_top=False,
                                input_tensor=inputs,
                                weights="imagenet")
    elif model_base == "b6":
        effnet = EfficientNetB6(include_top=False,
                                input_tensor=inputs,
                                weights="imagenet")
    else:
        effnet = EfficientNetB7(include_top=False,
                                input_tensor=inputs,
                                weights="imagenet")

    # # Print architecture of effnet
    # for i, layer in enumerate(effnet.layers[:]):
    # 	print(i, layer.name, layer.output_shape)

    # b0: 20; b2: 33; b4: 147; b6: 45; b7: 265

    for i, layer in enumerate(effnet.layers[:first_layers_to_freeze]):
        layer.trainable = False
    for i, layer in enumerate(effnet.layers[first_layers_to_freeze:]):
        layer.trainable = True

    effnet.summary()

    model = Sequential()
    model.add(effnet)
    model.add(K.layers.Dropout(0.25))
    model.add(K.layers.Dense(effnet.layers[-1].output_shape[3]))
    model.add(K.layers.LeakyReLU())
    model.add(K.layers.GlobalAveragePooling2D())
    model.add(K.layers.Dropout(0.5))
    model.add(K.layers.Dense(1, activation='linear'))

    return model
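Unlike the classifier variant above, this one ends in Dense(1, activation='linear') and is returned uncompiled; a usage sketch for regression (optimizer settings here are placeholders):

model = create_efficientnet(224, 224, 3, model_base="b2",
                            first_layers_to_freeze=33)
model.compile(optimizer=Adam(learning_rate=1e-4), loss='mse', metrics=['mae'])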