Example #1
def EffNetB5(freeze=False):
    model = EfficientNetB5(input_shape=(456, 456, 3),
                           weights='imagenet',
                           include_top=False,
                           pooling=None)
    for layer in model.layers:
        layer.trainable = not freeze
    inputs = model.input
    x = model.output
    x = GlobalAveragePooling2D()(x)
    out_layer = Dense(1, activation=None,
                      name='normal_regressor')(Dropout(0.4)(x))
    model = Model(inputs, out_layer)
    return model
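
# A minimal usage sketch (the optimizer, loss and metric below are
# illustrative): the function returns an uncompiled single-output regressor,
# so the usual next step is to compile it with a regression loss.
regressor = EffNetB5(freeze=True)
regressor.compile(optimizer='adam', loss='mse', metrics=['mae'])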
Example #2
def get_fe(fe, input_image):
    if fe == 'effnetb0':
        return EfficientNetB0(input_tensor=input_image, include_top=False), [
            'swish_last', 'block5_i_MB_swish_1', 'block3_i_MB_swish_1'
        ]
    elif fe == 'effnetb1':
        return EfficientNetB1(input_tensor=input_image, include_top=False), [
            'swish_last', 'block5_i_MB_swish_1', 'block3_i_MB_swish_1'
        ]
    elif fe == 'effnetb2':
        return EfficientNetB2(input_tensor=input_image, include_top=False), [
            'swish_last', 'block5_i_MB_swish_1', 'block3_i_MB_swish_1'
        ]
    elif fe == 'effnetb3':
        return EfficientNetB3(input_tensor=input_image, include_top=False), [
            'swish_last', 'block5_i_MB_swish_1', 'block3_i_MB_swish_1'
        ]
    elif fe == 'effnetb4':
        return EfficientNetB4(input_tensor=input_image, include_top=False), [
            'swish_last', 'block5_i_MB_swish_1', 'block3_i_MB_swish_1'
        ]
    elif fe == 'effnetb5':
        return EfficientNetB5(input_tensor=input_image, include_top=False), [
            'swish_last', 'block5_i_MB_swish_1', 'block3_i_MB_swish_1'
        ]
    elif fe == 'd53':
        return d53(input_image)
    elif fe == 'mnetv2':
        mnet = MobileNetV2(input_tensor=input_image, weights='imagenet')
        return mnet, [
            'out_relu', 'block_13_expand_relu', 'block_6_expand_relu'
        ]
    elif fe == 'mnet':
        mnet = MobileNet(input_tensor=input_image, weights='imagenet')
        return mnet, ['conv_pw_13_relu', 'conv_pw_11_relu', 'conv_pw_5_relu']
    elif fe == 'r50':
        r50 = ResNet50(input_tensor=input_image, weights='imagenet')
        return r50, ['activation_49', 'activation_40', 'activation_22']
    raise ValueError('Unknown feature extractor: {}'.format(fe))
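
# A minimal usage sketch (the input size and the 'effnetb0' choice are
# illustrative, and keras.layers.Input is assumed to be imported): get_fe
# returns the backbone together with the names of three feature layers that
# can be fed into a detection/segmentation head.
input_image = Input(shape=(416, 416, 3))
backbone, feature_layer_names = get_fe('effnetb0', input_image)
features = [backbone.get_layer(name).output for name in feature_layer_names]
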
def get_model(model_name="vgg16"):
    tf.keras.backend.clear_session()
    if (model_name == "vgg16"):
        from tensorflow.keras.applications.vgg16 import preprocess_input
        model = VGG16(weights='imagenet', include_top=False)
        return (model, preprocess_input)
    elif (model_name == "vgg19"):
        return (tf.keras.applications.vgg19.VGG19(weights='imagenet',
                                                  include_top=False),
                tf.keras.applications.vgg19.preprocess_input)
    elif (model_name == "resnet50"):
        return (tf.keras.applications.resnet50.ResNet50(weights="imagenet",
                                                        include_top=False),
                tf.keras.applications.resnet50.preprocess_input)
    elif (model_name == "mobilenet"):
        return (tf.keras.applications.mobilenet.MobileNet(weights="imagenet",
                                                          include_top=False),
                tf.keras.applications.mobilenet.preprocess_input)
    # elif (model_name == "mobilenetv2"):
    #     return (tf.keras.applications.mobilenet_v2.MobileNetV2(weights="imagenet", include_top=False), tf.keras.applications.mobilenet_v2.preprocess_input)
    elif (model_name == "xception"):
        return (tf.keras.applications.xception.Xception(weights="imagenet",
                                                        include_top=False),
                tf.keras.applications.xception.preprocess_input)
    elif (model_name == "densenet121"):
        return (tf.keras.applications.densenet.DenseNet121(weights="imagenet",
                                                           include_top=False),
                tf.keras.applications.densenet.preprocess_input)
    elif (model_name == "inceptionv3"):
        return (tf.keras.applications.inception_v3.InceptionV3(
            weights="imagenet", include_top=False),
                tf.keras.applications.inception_v3.preprocess_input)
    elif (model_name == "efficientnetb0"):
        return (EfficientNetB0(weights='imagenet',
                               include_top=False), effpreprocess)
    elif (model_name == "efficientnetb5"):
        return (EfficientNetB5(weights='imagenet',
                               include_top=False), effpreprocess)
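
# A minimal usage sketch (numpy is assumed to be imported as np and the dummy
# batch is illustrative): get_model returns the backbone together with its
# matching preprocess_input, which should be applied before predict().
model, preprocess = get_model("resnet50")
dummy_batch = preprocess(np.random.rand(1, 224, 224, 3) * 255.0)
features = model.predict(dummy_batch)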
Example #4
                '../input/aptos2019-blindness-detection/train_images/' +
                sample + '.png')
            img = cv2.resize(img, (SIZE, SIZE))
            batch_images.append(img)
        batch_images = np.array(batch_images, np.float32) / 255
        batch_y = np.array(batch_y, np.float32)
        return batch_images, batch_y


#%% [markdown]
# ## Base model - EfficientNetB5

#%%
in_lay = Input(shape=(SIZE, SIZE, 3))  # keep the input consistent with the base model's input_shape
base_model = EfficientNetB5(weights=None,
                            input_shape=(SIZE, SIZE, 3),
                            include_top=False)
base_model.load_weights(
    "../input/efficientnet-keras-weights-b0b5/efficientnet-b5_imagenet_1000_notop.h5"
)
pt_depth = base_model.get_output_shape_at(0)[-1]
pt_features = base_model(in_lay)
bn_features = BatchNormalization()(pt_features)

#%% [markdown]
# ## Attention model

#%%
# here we apply an attention mechanism to turn pixels in the GAP on and off
attn_layer = Conv2D(64, kernel_size=(1, 1), padding='same',
                    activation='relu')(Dropout(0.5)(bn_features))
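
# The snippet is cut off here; what follows is a hedged sketch (layer sizes
# are illustrative, and multiply/Lambda/GlobalAveragePooling2D are assumed to
# be imported from keras.layers) of how such an attention head is commonly
# finished: squeeze the map to a single sigmoid channel, broadcast it back to
# pt_depth channels with a frozen ones-kernel 1x1 convolution, and use it to
# re-weight the features before global average pooling.
attn_layer = Conv2D(16, kernel_size=(1, 1), padding='same',
                    activation='relu')(attn_layer)
attn_layer = Conv2D(1, kernel_size=(1, 1), padding='valid',
                    activation='sigmoid')(attn_layer)
up_c2 = Conv2D(pt_depth, kernel_size=(1, 1), padding='same',
               activation='linear', use_bias=False,
               kernel_initializer='ones')
up_c2.trainable = False
attn_layer = up_c2(attn_layer)
mask_features = multiply([attn_layer, bn_features])
gap_features = GlobalAveragePooling2D()(mask_features)
gap_mask = GlobalAveragePooling2D()(attn_layer)
# rescale by the mean attention weight so the feature scale does not depend
# on how much of the image is attended to
gap = Lambda(lambda x: x[0] / x[1], name='RescaleGAP')([gap_features, gap_mask])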
Example #5
    BASE_MODEL = VGG19(include_top=False, weights="imagenet",
                       input_shape=(HEIGHT, WIDTH, DEPTH))
    preprocessing_function = vgg19_preprocess_input
elif NAME == "VGG16":
    HEIGHT, WIDTH = 224, 224
    BASE_MODEL = VGG16(include_top=False, weights="imagenet",
                       input_shape=(HEIGHT, WIDTH, DEPTH))
    preprocessing_function = vgg16_preprocess_input
elif NAME == "Densenet":
    HEIGHT, WIDTH = 128, 128
    BASE_MODEL = DenseNet201(include_top=False, weights="imagenet",
                             input_shape=(HEIGHT, WIDTH, DEPTH))
    preprocessing_function = densenet_preprocess_input
elif NAME == "efficientnet":
    HEIGHT, WIDTH = 128, 128
    BASE_MODEL = EfficientNetB5(include_top=False, weights="imagenet",
                                input_shape=(HEIGHT, WIDTH, DEPTH))
    preprocessing_function = preprocess_input
else:
    HEIGHT, WIDTH = 224, 224
    BASE_MODEL = VGG19(include_top=False, weights="imagenet",
                       input_shape=(HEIGHT, WIDTH, DEPTH))
    preprocessing_function = vgg19_preprocess_input
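
# A hedged sketch (the ImageDataGenerator import and the split ratio are
# assumptions) of how the selected backbone's preprocessing function is
# typically wired into the input pipeline:
DATAGEN = ImageDataGenerator(preprocessing_function=preprocessing_function,
                             validation_split=0.2)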


##################################################################################################
# Read details from CSV
###################################################################################################

DATASET = pd.read_csv(TRAINING_CSV, dtype=str)

# DEBUG_MODE SET
Example #6
        def _create_model():
            print('Creating model')
            # load the pretrained model, without the classification (top) layers
            if self.transfer_model == 'Xception':
                base_model = tf.keras.applications.Xception(
                    weights='imagenet',
                    include_top=False,
                    input_shape=(*self.target_size, 3))
                based_model_last_block = 116  # last block 126, two blocks 116
            elif self.transfer_model == 'Inception_Resnet':
                base_model = tf.keras.applications.InceptionResNetV2(
                    weights='imagenet',
                    include_top=False,
                    input_shape=(*self.target_size, 3))
                based_model_last_block = 287  # last block 630, two blocks 287
            elif self.transfer_model == 'Resnet':
                base_model = tf.keras.applications.ResNet50(
                    weights='imagenet',
                    include_top=False,
                    input_shape=(*self.target_size, 3))
                based_model_last_block = 155  # last block 165, two blocks 155
            elif self.transfer_model == 'B0':
                base_model = EfficientNetB0(weights='imagenet',
                                            include_top=False,
                                            input_shape=(*self.target_size, 3))
                based_model_last_block = 213  # last block 229, two blocks 213
            elif self.transfer_model == 'B3':
                base_model = EfficientNetB3(weights='imagenet',
                                            include_top=False,
                                            input_shape=(*self.target_size, 3))
                based_model_last_block = 354  # last block 370, two blocks 354
            elif self.transfer_model == 'B5':
                base_model = EfficientNetB5(weights='imagenet',
                                            include_top=False,
                                            input_shape=(*self.target_size, 3))
                based_model_last_block = 417  # last block 559, two blocks 417
            else:
                base_model = tf.keras.applications.InceptionV3(
                    weights='imagenet',
                    include_top=False,
                    input_shape=(*self.target_size, 3))
                based_model_last_block = 249  # last block 280, two blocks 249

            # Set only the top layers as trainable (if we want to do fine-tuning,
            # we can train the base layers as a second step)
            base_model.trainable = False

            # Target size inferred from the base model
            self.target_size = base_model.input_shape[1:3]

            # Add the classification layers using Keras functional API
            x = base_model.output
            x = tf.keras.layers.GlobalAveragePooling2D()(x)
            # Hidden layer for classification
            if hidden_size == 0:
                x = tf.keras.layers.Dropout(rate=dropout)(x)
            elif bn_after_ac:
                x = tf.keras.layers.Dense(
                    hidden_size,
                    activation=activation,
                    kernel_regularizer=tf.keras.regularizers.l2(
                        l=l2_lambda))(x)
                x = tf.keras.layers.BatchNormalization()(x)
                x = tf.keras.layers.Dropout(rate=dropout)(x)
            else:
                x = tf.keras.layers.Dense(
                    hidden_size,
                    use_bias=False,
                    kernel_regularizer=tf.keras.regularizers.l2(
                        l=l2_lambda))(x)
                # scale: When the next layer is linear (also e.g. nn.relu), this can be disabled since the scaling can be done by the next layer.
                x = tf.keras.layers.BatchNormalization(
                    scale=activation != 'relu')(x)
                x = tf.keras.layers.Activation(activation=activation)(x)
                x = tf.keras.layers.Dropout(rate=dropout)(x)

            predictions = tf.keras.layers.Dense(len(self.categories),
                                                activation='softmax')(
                                                    x)  # Output layer
            # Define the loss, the metrics and the optimizer
            loss = 'sparse_categorical_crossentropy'
            metrics = ['sparse_categorical_accuracy']
            optimizer = tf.keras.optimizers.Adam(lr=learning_rate)

            return tf.keras.Model(
                inputs=base_model.input, outputs=predictions
            ), base_model, based_model_last_block, loss, metrics, optimizer
            'beta_regularizer': regularizers.serialize(self.beta_regularizer),
            'gamma_regularizer':
            regularizers.serialize(self.gamma_regularizer),
            'beta_constraint': constraints.serialize(self.beta_constraint),
            'gamma_constraint': constraints.serialize(self.gamma_constraint)
        }
        base_config = super(GroupNormalization, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))

    def compute_output_shape(self, input_shape):
        return input_shape


# Load in EfficientNetB5
effnet = EfficientNetB5(weights=None,
                        include_top=False,
                        input_shape=(IMG_WIDTH, IMG_HEIGHT, CHANNELS))
effnet.load_weights(
    '../input/efficientnet-keras-weights-b0b5/efficientnet-b5_imagenet_1000_notop.h5'
)
# Replace all Batch Normalization layers by Group Normalization layers
for i, layer in enumerate(effnet.layers):
    if "batch_normalization" in layer.name:
        effnet.layers[i] = GroupNormalization(groups=32,
                                              axis=-1,
                                              epsilon=0.00001)
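# Caveat: assigning into effnet.layers only swaps entries in the Python layer
# list; the graph built above still runs the original BatchNormalization
# layers unless the model is rebuilt (e.g. recreated layer by layer) with the
# GroupNormalization replacements.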


def build_model():
    """
    A custom implementation of EfficientNetB5
                (0.8, 1.2))),  # scale the contrast of 25% of the images by a random factor in [0.8, 1.2]
            iaa.Sometimes(0.25, iaa.Multiply(
                (0.8, 1.2)))  # multiply the pixel values of 25% of the images by a factor in [0.8, 1.2] to adjust brightness or color
        ])
        imgs_aug0 = data_preprocessor1.augment_images(imgs)
        imgs_aug1 = imgs_aug0[:, 140:-140, 140:-140, :]
        imgs_aug2 = data_preprocessor2.augment_images(imgs_aug1)
        plt.imshow(np.squeeze(imgs_aug2, 0))
        plt.show()
        yield imgs_aug2.astype(np.float32) - x_mean, np_utils.to_categorical(
            labels, num_classes=50)


# datagen(train_path, batch_size, x_mean)

model = EfficientNetB5(weights='imagenet')
# list.pop() only removes entries from model.layers; the graph is unchanged,
# so x below is the third-to-last layer's output (the pooled features).
model.layers.pop()
model.layers.pop()
x = model.layers[-1].output
out = Dense(50, activation=None)(x)
out = Activation('softmax')(out)
model = Model(inputs=model.input, outputs=out)
# plot_model(model, to_file='model.png',show_shapes=True)

sgd = optimizers.SGD(lr=0.0001, decay=0, momentum=0.9)
model.compile(loss='categorical_crossentropy',
              optimizer=sgd,
              metrics=[metrics.mae, metrics.categorical_accuracy])

filepath = "weights-50classes-alien-{epoch:02d}.h5"
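
# The snippet ends here; a hedged sketch of a typical continuation (the
# ModelCheckpoint import and the epoch/step counts are assumptions):
checkpoint = ModelCheckpoint(filepath, save_weights_only=True)
model.fit_generator(datagen(train_path, batch_size, x_mean),
                    steps_per_epoch=100, epochs=10,
                    callbacks=[checkpoint])
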
    x_train3[i] = cv2.cvtColor(s, cv2.COLOR_GRAY2RGB)

# converted to RGB space for testing data
test3 = np.full((np.size(test, 0), imag_h, imag_w, 3), 0.0)
for i, s in enumerate(test):
    test3[i] = cv2.cvtColor(s, cv2.COLOR_GRAY2RGB)

# one-hot encoding
num_cls = 5
Y_train = np_utils.to_categorical(y_train, num_cls)
Y_test = np_utils.to_categorical(y_test, num_cls)
#------------------------------------end of importing data-----------------------------#

#------------------------------------start building model-----------------------------#
model = EfficientNetB5(weights=None,
                       input_shape=(imag_h, imag_w, 3),
                       include_top=False)

# Optionally control which layers are trainable and which are frozen (ENet is a pre-trained model)
# for layer in model.layers:
#     layer.trainable=False

ENet_out = model.output
ENet_out = MaxPooling2D(pool_size=(2, 2))(ENet_out)
ENet_out = Flatten()(ENet_out)

Hidden1_in = Dense(1024, activation="relu")(ENet_out)
Hidden1_in = Dropout(0.5)(Hidden1_in)
# predictions = Dense(units = 1, activation="sigmoid")(Hidden1_in)
predictions = Dense(units=num_cls, activation="softmax")(Hidden1_in)
model_f = Model(inputs=model.input, outputs=predictions)
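
# The snippet ends before training; a hedged sketch of the usual next steps
# (the optimizer, epoch count and batch size are illustrative):
model_f.compile(optimizer='adam', loss='categorical_crossentropy',
                metrics=['accuracy'])
model_f.fit(x_train3, Y_train, validation_data=(test3, Y_test),
            epochs=10, batch_size=8)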
Example #10
            iaa.Resize({
                "height": 456,
                "width": 456
            }),
            iaa.Sometimes(0.25, iaa.ContrastNormalization(
                (0.8, 1.2))),  # scale the contrast of 25% of the images by a random factor in [0.8, 1.2]
            iaa.Sometimes(0.25, iaa.Multiply(
                (0.8, 1.2)))  # multiply the pixel values of 25% of the images by a factor in [0.8, 1.2] to adjust brightness or color
        ])
        imgs_aug0 = data_preprocessor1.augment_images(imgs)
        imgs_aug1 = imgs_aug0[:, 140:-140, 140:-140, :]
        imgs_aug2 = data_preprocessor2.augment_images(imgs_aug1)
        yield imgs_aug2.astype(np.float32) - x_mean, labels


model = EfficientNetB5(weights='imagenet', classes=1000)
# list.pop() only edits model.layers; the graph is unchanged, so x below is
# the third-to-last layer's output (the pooled features)
model.layers.pop()
model.layers.pop()
x = model.layers[-1].output
out = Dense(emb_size, activation='softmax')(x)
model = Model(inputs=model.input, outputs=out)
# plot_model(model, to_file='model.png',show_shapes=True)

# sgd = optimizers.SGD(lr=0.0001, decay=0, momentum=0.9)
model.compile(loss=batch_all_triplet_loss,
              optimizer='adam',
              metrics=[triplet_accuracy, mean_norm])

filepath = "weights-50classes-alien-{epoch:02d}.h5"

callbacks_list = []
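
# The callback list is left empty in the snippet; a hedged sketch of how it is
# commonly populated (ModelCheckpoint is assumed to be imported), reusing the
# filepath pattern defined above:
callbacks_list.append(ModelCheckpoint(filepath, save_weights_only=True))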