Example #1
def create_model():

    base_model = efn.EfficientNetB4(weights="imagenet",
                                    include_top=False,
                                    input_shape=(512, 512, 3),
                                    pooling='avg')
    x = base_model.output
    x = Dense(1, activation="relu")(x)
    model = Model(inputs=base_model.input, outputs=x)
    model.load_weights('./Experiments/PH_1/EX_55/EX_55.hdf5')
    #    model.save_weights("tmp.h5")

    #     first: train only the top layers (which were randomly initialized)
    #     i.e. freeze all convolutional EfficientNet layers
    #    for layer in base_model.layers:
    #        layer.trainable = False

    #    regularizer = l2(l2=0.005)
    #    for layer in model.layers:
    #        #'kernel_regularizer'
    #        for attr in ['bias_regularizer']:
    #            if hasattr(layer, attr) and layer.trainable:
    #                setattr(layer, attr, regularizer)
    #
    #
    #    out = model_from_json(model.to_json())
    #    out.load_weights("tmp.h5", by_name=True)
    return model
def get_model(model='b2', shape=(320,320)):
    K.clear_session()
    h,w = shape
    if model == 'b0':
        base_model = efn.EfficientNetB0(weights='imagenet', include_top=False, pooling='avg', input_shape=(h, w, 3))

    elif model == 'b1':
        base_model = efn.EfficientNetB1(weights='imagenet', include_top=False, pooling='avg', input_shape=(h, w, 3))

    elif model == 'b2':
        base_model = efn.EfficientNetB2(weights='imagenet', include_top=False, pooling='avg', input_shape=(h, w, 3))

    elif model == 'b3':
        base_model =  efn.EfficientNetB3(weights='imagenet', include_top=False, pooling='avg', input_shape=(h, w, 3))

    elif model == 'b4':
        base_model =  efn.EfficientNetB4(weights='imagenet', include_top=False, pooling='avg', input_shape=(h, w, 3))

    elif model == 'b5':
        base_model =  efn.EfficientNetB5(weights='imagenet', include_top=False, pooling='avg', input_shape=(h, w, 3))

    elif model == 'b6':
        base_model =  efn.EfficientNetB6(weights='imagenet', include_top=False, pooling='avg', input_shape=(h, w, 3))

    else:
        base_model =  efn.EfficientNetB7(weights='imagenet', include_top=False, pooling='avg', input_shape=(h, w, 3))


    x = base_model.output
    y_pred = Dense(4, activation='sigmoid')(x)
    return Model(inputs=base_model.input, outputs=y_pred)
Example #3
def get_efficientnet_model(
    model_name='efficientnetb0',
    input_shape=(224, 224, 3),
    input_tensor=None,
    include_top=True,
    classes=1000,
    weights='imagenet',
):

    layer_names = [
        'block3a_expand_activation',  #C2
        'block4a_expand_activation',  #C3
        'block6a_expand_activation',  #C4
        'top_activation'  #C5
    ]

    Args = {
        'input_shape': input_shape,
        'weights': weights,
        'include_top': include_top,
        'input_tensor': input_tensor
    }

    if model_name == 'efficientnetb0':
        backbone = efn.EfficientNetB0(**Args)

    elif model_name == 'efficientnetb1':
        backbone = efn.EfficientNetB1(**Args)

    elif model_name == 'efficientnetb2':
        backbone = efn.EfficientNetB2(**Args)

    elif model_name == 'efficientnetb3':
        backbone = efn.EfficientNetB3(**Args)

    elif model_name == 'efficientnetb4':
        backbone = efn.EfficientNetB4(**Args)

    elif model_name == 'efficientnetb5':
        backbone = efn.EfficientNetB5(**Args)

    elif model_name == 'efficientnetb6':
        backbone = efn.EfficientNetB6(**Args)

    elif model_name == 'efficientnetb7':
        backbone = efn.EfficientNetB7(**Args)

    else:
        raise ValueError('No such model {}'.format(model_name))

    several_layers = []

    several_layers.append(backbone.get_layer(layer_names[0]).output)
    several_layers.append(backbone.get_layer(layer_names[1]).output)
    several_layers.append(backbone.get_layer(layer_names[2]).output)
    several_layers.append(backbone.get_layer(layer_names[3]).output)

    model = keras.models.Model(inputs=[backbone.input], outputs=several_layers)
    return model
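A hypothetical usage sketch (not part of the original snippet): the four returned outputs are the C2-C5 feature maps named in layer_names, which a feature-pyramid-style decoder can unpack directly.

backbone = get_efficientnet_model(model_name='efficientnetb0',
                                  input_shape=(224, 224, 3),
                                  include_top=False,
                                  weights='imagenet')
c2, c3, c4, c5 = backbone.outputs  # progressively coarser feature maps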
def create_model():
    K.clear_session()
    
    base_model =  efn.EfficientNetB4(weights = 'imagenet', include_top = False, pooling = 'avg', input_shape = input_image_shape)
    x = base_model.output
    x = Dropout(0.2)(x)
    y_pred = Dense(6, activation = 'sigmoid')(x)

    return Model(inputs = base_model.input, outputs = y_pred)
def effnet_retinanet(num_classes, backbone='EfficientNetB0', inputs=None, modifier=None, **kwargs):
    """ Constructs a retinanet model using a resnet backbone.

    Args
        num_classes: Number of classes to predict.
        backbone: Which backbone to use (one of ('resnet50', 'resnet101', 'resnet152')).
        inputs: The inputs to the network (defaults to a Tensor of shape (None, None, 3)).
        modifier: A function handler which can modify the backbone before using it in retinanet (this can be used to freeze backbone layers for example).

    Returns
        RetinaNet model with a ResNet backbone.
    """
    # choose default input
    if inputs is None:
        if keras.backend.image_data_format() == 'channels_first':
            inputs = keras.layers.Input(shape=(3, None, None))
        else:
            # inputs = keras.layers.Input(shape=(224, 224, 3))
            inputs = keras.layers.Input(shape=(None, None, 3))

    # get last conv layer from the end of each block [28x28, 14x14, 7x7]
    if backbone == 'EfficientNetB0':
        model = efn.EfficientNetB0(input_tensor=inputs, include_top=False, weights=None)
    elif backbone == 'EfficientNetB1':
        model = efn.EfficientNetB1(input_tensor=inputs, include_top=False, weights=None)
    elif backbone == 'EfficientNetB2':
        model = efn.EfficientNetB2(input_tensor=inputs, include_top=False, weights=None)
    elif backbone == 'EfficientNetB3':
        model = efn.EfficientNetB3(input_tensor=inputs, include_top=False, weights=None)
    elif backbone == 'EfficientNetB4':
        model = efn.EfficientNetB4(input_tensor=inputs, include_top=False, weights=None)
    elif backbone == 'EfficientNetB5':
        model = efn.EfficientNetB5(input_tensor=inputs, include_top=False, weights=None)
    elif backbone == 'EfficientNetB6':
        model = efn.EfficientNetB6(input_tensor=inputs, include_top=False, weights=None)
    elif backbone == 'EfficientNetB7':
        model = efn.EfficientNetB7(input_tensor=inputs, include_top=False, weights=None)
    else:
        raise ValueError('Backbone (\'{}\') is invalid.'.format(backbone))

    layer_outputs = ['block4a_expand_activation', 'block6a_expand_activation', 'top_activation']

    layer_outputs = [
        model.get_layer(name=layer_outputs[0]).output,  # 28x28
        model.get_layer(name=layer_outputs[1]).output,  # 14x14
        model.get_layer(name=layer_outputs[2]).output,  # 7x7
    ]
    # create the efficientnet backbone
    model = keras.models.Model(inputs=inputs, outputs=layer_outputs, name=model.name)

    # invoke modifier if given
    if modifier:
        model = modifier(model)

    # create the full model
    return retinanet.retinanet(inputs=inputs, num_classes=num_classes, backbone_layers=model.outputs, **kwargs)
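A hypothetical usage sketch (the keras-retinanet helpers such as retinanet.retinanet are assumed to be importable, as in the function above):

# build a detector with an EfficientNetB4 backbone and randomly initialized backbone weights
detection_model = effnet_retinanet(num_classes=20, backbone='EfficientNetB4')
detection_model.summary()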
Example #6
def create_model(path):
    
    base_model = efn.EfficientNetB4(weights=None,
                        include_top=False,
                        input_shape=(512, 512, 3), pooling='avg')
##    base_model = InceptionV3(weights=None , include_top=False, input_tensor=None, input_shape=(299, 299, 3), pooling='avg') 
    x = base_model.output
    x = Dense(1, activation="relu")(x)
    model = Model(inputs = base_model.input, outputs = x)
    model.summary()
    model.load_weights(path)
    return model
Example #7
def frozen_efficientnet(input_size,
                        n_classes,
                        local_weights="/efficientnet/EfficientNetB4.h5"):
    if local_weights and path.exists(local_weights):
        print(f'Using {local_weights} as local weights.')
        model_ = efn.EfficientNetB4(include_top=False,
                                    input_tensor=Input(shape=input_size),
                                    weights=local_weights)
    else:
        print(
            f'Could not find local weights {local_weights} for EfficientNetB4. Using remote weights.'
        )
        model_ = efn.EfficientNetB4(include_top=False,
                                    input_tensor=Input(shape=input_size))
    for layer in model_.layers:
        layer.trainable = False
    x = Flatten(input_shape=model_.output_shape[1:])(model_.layers[-1].output)
    x = Dense(n_classes, activation='softmax')(x)
    frozen_model = Model(model_.input, x)

    return frozen_model
Example #8
def construct_mlp(input_size, num_classes, num_frames,
                  dropout_size=0.5, ef_mode=4, l2_reg=1e-5):
    """
    Construct a MLP model for urban sound tagging.
    Parameters
    ----------
    num_frames
    input_size
    num_classes
    dropout_size
    ef_mode
    l2_reg
    Returns
    -------
    model
    """

    # Add hidden layers
    from keras.layers import Flatten, Conv1D, Conv2D, GlobalMaxPooling1D, GlobalAveragePooling1D, LSTM, Concatenate, GlobalAveragePooling2D, LeakyReLU

    import efficientnet.keras as efn

    if ef_mode == 0:
        base_model = efn.EfficientNetB0(weights='noisy-student', include_top=False, pooling='avg')
    elif ef_mode == 1:
        base_model = efn.EfficientNetB1(weights='noisy-student', include_top=False, pooling='avg')
    elif ef_mode == 2:
        base_model = efn.EfficientNetB2(weights='noisy-student', include_top=False, pooling='avg')
    elif ef_mode == 3:
        base_model = efn.EfficientNetB3(weights='noisy-student', include_top=False, pooling='avg')
    elif ef_mode == 4:
        base_model = efn.EfficientNetB4(weights='noisy-student', include_top=False, pooling='avg')  #imagenet or weights='noisy-student'
    elif ef_mode == 5:
        base_model = efn.EfficientNetB5(weights='noisy-student', include_top=False, pooling='avg')
    elif ef_mode == 6:
        base_model = efn.EfficientNetB6(weights='noisy-student', include_top=False, pooling='avg')
    elif ef_mode == 7:
        base_model = efn.EfficientNetB7(weights='noisy-student', include_top=False, pooling='avg')

    input1 = Input(shape=input_size, dtype='float32', name='input')
    input2 = Input(shape=(num_frames,85), dtype='float32', name='input2') #1621
    y = TimeDistributed(base_model)(input1)
    y = TimeDistributed(Dropout(dropout_size))(y)
    y = Concatenate()([y, input2])
    y = TimeDistributed(Dense(num_classes, activation='sigmoid', kernel_regularizer=regularizers.l2(l2_reg)))(y)
    y = AutoPool1D(axis=1, name='output')(y)

    m = Model(inputs=[input1, input2], outputs=y)
    m.summary()
    m.name = 'urban_sound_classifier'

    return m
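A hypothetical call (the shapes and class count below are illustrative assumptions, not values from the original source): each clip is split into num_frames image-like patches that feed the TimeDistributed EfficientNet, plus 85 auxiliary features per frame.

num_frames = 8
model = construct_mlp(input_size=(num_frames, 224, 224, 3),
                      num_classes=8,
                      num_frames=num_frames,
                      dropout_size=0.5,
                      ef_mode=4)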
Example #9
    def getB4Net(self, shape, model_name):
        effnet = efn.EfficientNetB4(weights=None,\
                                include_top=False,\
                                input_shape=shape)
        #effnet.load_weights(self.weight_path + 'efficientnet-b4_imagenet_1000_notop.h5')

        for i, layer in enumerate(effnet.layers):
            effnet.layers[i].name = str(model_name) + "_" + layer.name
            if "batch_normalization" in layer.name:
                effnet.layers[i] = GroupNormalization(groups=self.batch_size,
                                                      axis=-1,
                                                      epsilon=0.00001)
        return effnet
def model_fn(objective, optimizer, metrics):
    base_model = efn.EfficientNetB4(
        include_top=False,
        # base_model = seresnext50(include_top=False,
        # base_model = xception(include_top=False,
        # base_model = densenet201(include_top=False,
        # base_model = inceptionresnetv2(include_top=False,
        input_shape=(input_size, input_size, 3),
        classes=num_classes,
        weights='imagenet',
    )
    x = base_model.output
    x = GlobalAveragePooling2D()(x)
    predictions = Dense(9, activation='softmax')(x)
    model = Model(inputs=base_model.input, outputs=predictions)
    model.compile(loss=objective, optimizer=optimizer, metrics=metrics)
    model.summary()
    return model
def get_model(EfficientNet):
    K.clear_session()
    dic = {
        'EfficientNetB2':
        efn.EfficientNetB2(weights='imagenet',
                           include_top=False,
                           pooling='avg',
                           input_shape=(260, 260, 3)),
        'EfficientNetB4':
        efn.EfficientNetB4(weights='imagenet',
                           include_top=False,
                           pooling='avg',
                           input_shape=(260, 260, 3))
    }
    base_model = dic[EfficientNet]
    x = base_model.output
    y_pred = Dense(4, activation='sigmoid')(x)
    return Model(inputs=base_model.input, outputs=y_pred)
Example #12
def get_model_effnet(img_shape, img_input, weights, effnet_version):

    if effnet_version == 'B0':
        effnet = efn.EfficientNetB0(include_top=False, input_tensor=img_input, weights=weights, pooling=None, input_shape=img_shape)
    elif effnet_version == 'B1':
        effnet = efn.EfficientNetB1(include_top=False, input_tensor=img_input, weights=weights, pooling=None, input_shape=img_shape)
    elif effnet_version == 'B2':
        effnet = efn.EfficientNetB2(include_top=False, input_tensor=img_input, weights=weights, pooling=None, input_shape=img_shape)
    elif effnet_version == 'B3':
        effnet = efn.EfficientNetB3(include_top=False, input_tensor=img_input, weights=weights, pooling=None, input_shape=img_shape)
    elif effnet_version == 'B4':
        effnet = efn.EfficientNetB4(include_top=False, input_tensor=img_input, weights=weights, pooling=None, input_shape=img_shape)
    elif effnet_version == 'B5':
        effnet = efn.EfficientNetB5(include_top=False, input_tensor=img_input, weights=weights, pooling=None, input_shape=img_shape)
    elif effnet_version == 'B6':
        effnet = efn.EfficientNetB6(include_top=False, input_tensor=img_input, weights=weights, pooling=None, input_shape=img_shape)
    else:
        effnet = efn.EfficientNetB7(include_top=False, input_tensor=img_input, weights=weights, pooling=None, input_shape=img_shape)

    return effnet
Example #13
 def create_model(self,
                  total_model_weight_path=None,
                  base_model_weight_path=None,
                  bottleneck_weight_path=None,
                  input_layer=None):
     model = en.EfficientNetB4(include_top=False,
                               weights=None,
                               input_tensor=None,
                               input_shape=(self.size[0], self.size[1], 3))
     conf = model.layers[-3].get_config()
     conf['filters'] = 64
     x = model.layers[-4].output
     x = Conv2D.from_config(conf)(x)
     x = BatchNormalization(axis=3)(x)
     x = Activation(activation='swish')(x)
     x = GlobalMaxPooling2D()(x)
     x = Dense(64, activation='relu')(x)
     #x = Dense(256,activation='relu')(x)
     predictions = Dense(1, activation='sigmoid')(x)
     model = Model(model.input, predictions)
     #return model
     '''
     l = model.layers[:]
     l.append(Conv2D.from_config(conf))
     l.append(BatchNormalization(axis=3))
     l.append(Activation(activation='swish'))
     l.append(GlobalMaxPooling2D())
     l.append(Dense(64,activation='relu'))
     l.append(Dense(1, activation='sigmoid'))
     '''
     if total_model_weight_path:
         print("Load total weight from ", total_model_weight_path)
         model.load_weights(total_model_weight_path)
     model.summary()
     self.multi_gpus = True
     self.single_model = model
     self.model = multi_gpu_model(model, gpus=2)
     self.model.compile(loss=focal_loss(alpha=0.5, gamma=2),
                        optimizer=optimizers.Adam(lr=1e-3),
                        metrics=['accuracy'])
def EfficientUNet(input_shape):
    backbone = efn.EfficientNetB4(weights=None,
                                  include_top=False,
                                  input_shape=input_shape)

    input = backbone.input
    x00 = backbone.input  # (256, 512, 3)
    x10 = backbone.get_layer('stem_activation').output  # (128, 256, 4)
    x20 = backbone.get_layer('block2d_add').output  # (64, 128, 32)
    x30 = backbone.get_layer('block3d_add').output  # (32, 64, 56)
    x40 = backbone.get_layer('block5f_add').output  # (16, 32, 160)
    x50 = backbone.get_layer('block7b_add').output  # (8, 16, 448)

    x01 = H([x00, U(x10)], 'X01')
    x11 = H([x10, U(x20)], 'X11')
    x21 = H([x20, U(x30)], 'X21')
    x31 = H([x30, U(x40)], 'X31')
    x41 = H([x40, U(x50)], 'X41')

    x02 = H([x00, x01, U(x11)], 'X02')
    x12 = H([x11, U(x21)], 'X12')
    x22 = H([x21, U(x31)], 'X22')
    x32 = H([x31, U(x41)], 'X32')

    x03 = H([x00, x01, x02, U(x12)], 'X03')
    x13 = H([x12, U(x22)], 'X13')
    x23 = H([x22, U(x32)], 'X23')

    x04 = H([x00, x01, x02, x03, U(x13)], 'X04')
    x14 = H([x13, U(x23)], 'X14')

    x05 = H([x00, x01, x02, x03, x04, U(x14)], 'X05')

    x_out = Concatenate(name='bridge')([x01, x02, x03, x04, x05])
    x_out = Conv2D(4, (3, 3),
                   padding="same",
                   name='final_output',
                   activation="sigmoid")(x_out)

    return Model(inputs=input, outputs=x_out)
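The decoder above relies on helper functions H and U that are not shown in this snippet. Below is a minimal sketch under assumed semantics: U performs 2x upsampling and H concatenates its inputs and applies a small conv block. These definitions are illustrative guesses, not the original implementation.

from keras.layers import Activation, BatchNormalization, Concatenate, Conv2D, UpSampling2D

def U(x):
    # assumption: nearest-neighbour 2x upsampling of the decoder feature map
    return UpSampling2D(size=(2, 2))(x)

def H(tensors, name, filters=32):
    # assumption: merge the skip connections, then conv + batch norm + activation
    x = Concatenate()(tensors) if len(tensors) > 1 else tensors[0]
    x = Conv2D(filters, (3, 3), padding='same')(x)
    x = BatchNormalization()(x)
    return Activation('relu', name=name)(x)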
Example #15
def create_base_model(base_model_name, pretrained=True, IMAGE_SIZE=[300, 300]):
    if pretrained is False:
        weights = None
    else:
        weights = "imagenet"
    if base_model_name == 'B0':
        base = efn.EfficientNetB0(weights=weights,
                                  include_top=False,
                                  input_shape=[*IMAGE_SIZE, 3])
    elif base_model_name == 'B1':
        base = efn.EfficientNetB1(weights=weights,
                                  include_top=False,
                                  input_shape=[*IMAGE_SIZE, 3])
    elif base_model_name == 'B2':
        base = efn.EfficientNetB2(weights=weights,
                                  include_top=False,
                                  input_shape=[*IMAGE_SIZE, 3])
    elif base_model_name == 'B3':
        base = efn.EfficientNetB3(weights=weights,
                                  include_top=False,
                                  input_shape=[*IMAGE_SIZE, 3])
    elif base_model_name == 'B4':
        base = efn.EfficientNetB4(weights=weights,
                                  include_top=False,
                                  input_shape=[*IMAGE_SIZE, 3])
    elif base_model_name == 'B5':
        base = efn.EfficientNetB5(weights=weights,
                                  include_top=False,
                                  input_shape=[*IMAGE_SIZE, 3])
    elif base_model_name == 'B6':
        base = efn.EfficientNetB6(weights=weights,
                                  include_top=False,
                                  input_shape=[*IMAGE_SIZE, 3])
    elif base_model_name == 'B7':
        base = efn.EfficientNetB7(weights=weights,
                                  include_top=False,
                                  input_shape=[*IMAGE_SIZE, 3])
    base = remove_dropout(base)
    base.trainable = True
    return base
Example #16
def EfficientNet_(input_shapes, num_classes):
    efficient_net = efn.EfficientNetB4(include_top=False, weights="imagenet")
    # x = efficient_net.output

    image = Input((IMSIZE[0], IMSIZE[1], 3), name='RGB')
    x = efficient_net(image)
    x = GlobalAveragePooling2D()(x)
    x = Dropout(0.5)(x)
    x = Dense(512,
              activation='relu',
              kernel_initializer=TruncatedNormal(stddev=0.01))(x)
    x = Dropout(0.5)(x)
    x = Dense(128,
              activation='relu',
              kernel_initializer=TruncatedNormal(stddev=0.01))(x)
    x = Dropout(0.5)(x)
    x = Dense(32,
              activation='relu',
              kernel_initializer=TruncatedNormal(stddev=0.01))(x)
    pred = Dense(num_classes, activation='softmax')(x)
    model = Model(inputs=image, outputs=pred)
    return model
def model_fn(objective, optimizer, metrics):
    base_model = efn.EfficientNetB4(
        include_top=False,
        # base_model = seresnext50(include_top=False,
        # base_model = xception(include_top=False,
        # base_model = densenet201(include_top=False,
        # base_model = inceptionresnetv2(include_top=False,
        input_shape=(input_size, input_size, 3),
        classes=num_classes,
        weights='imagenet',
    )
    x = base_model.output
    x = GlobalAveragePooling2D()(x)
    predictions = Dense(num_classes, activation='softmax')(x)
    model1 = Model(inputs=base_model.input, outputs=predictions)
    #     model2 = multi_gpu_model(model1, gpus=3)
    #     model2 = model1
    model1.compile(loss=objective, optimizer=optimizer, metrics=metrics)
    lookahead = Lookahead(k=5, alpha=0.5)  # Initialize Lookahead
    lookahead.inject(model1)  # add into model
    model1.summary()
    return model1
Example #18
def model_fn(FLAGS, objective, optimizer, metrics):

    # base_model = efn.EfficientNetB3(include_top=False,
    #                                 input_shape=(FLAGS.input_size, FLAGS.input_size, 3),
    #                                 classes=FLAGS.num_classes, )
    # # input_size =  380
    model = efn.EfficientNetB4(
        include_top=False,
        input_shape=(FLAGS.input_size, FLAGS.input_size, 3),
        classes=FLAGS.num_classes,
    )
    # # input_size =  456
    # base_model = efn.EfficientNetB5(include_top=False,
    #                                 input_shape=(FLAGS.input_size, FLAGS.input_size, 3),
    #                                 classes=FLAGS.num_classes, )

    # # input_size =  528
    # base_model = efn.EfficientNetB6(include_top=False,
    #                                 input_shape=(FLAGS.input_size, FLAGS.input_size, 3),
    #                                 classes=FLAGS.num_classes, )

    # for i, layer in enumerate(model.layers):
    #     if "batch_normalization" in layer.name:
    #         model.layers[i] = GroupNormalization(groups=32, axis=-1, epsilon=0.00001)
    x = model.output

    # insert bilinear pooling here
    # x = bilinear_pooling(x)

    # x = GlobalAveragePooling2D()(x)
    # x = Dropout(0.4)(x)
    predictions = Dense(FLAGS.num_classes, activation='softmax')(
        x)  # activation="linear",activation='softmax'
    model = Model(inputs=model.input, outputs=predictions)

    model.compile(loss=objective, optimizer=optimizer, metrics=metrics)
    model.summary()
    return model
Example #19
            'beta_regularizer': regularizers.serialize(self.beta_regularizer),
            'gamma_regularizer':
            regularizers.serialize(self.gamma_regularizer),
            'beta_constraint': constraints.serialize(self.beta_constraint),
            'gamma_constraint': constraints.serialize(self.gamma_constraint)
        }
        base_config = super(GroupNormalization, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))

    def compute_output_shape(self, input_shape):
        return input_shape


# Load in EfficientNetB4
effnet = efn.EfficientNetB4(weights='imagenet',
                            include_top=False,
                            input_shape=(size, size, chinnels))

# Replace all Batch Normalization layers by Group Normalization layers
for i, layer in enumerate(effnet.layers):
    if "batch_normalization" in layer.name:
        effnet.layers[i] = GroupNormalization(groups=32,
                                              axis=-1,
                                              epsilon=0.00001)


def build_model():
    """
    A custom implementation of EfficientNetB4
    for the APTOS 2019 competition
    (Regression)
Example #20
model = None

if b_name == "0":
    model = efn.EfficientNetB0(weights=weights)

if b_name == "1":
    model = efn.EfficientNetB1(weights=weights)

if b_name == "2":
    model = efn.EfficientNetB2(weights=weights)

if b_name == "3":
    model = efn.EfficientNetB3(weights=weights)

if b_name == "4":
    model = efn.EfficientNetB4(weights=weights)

if b_name == "5":
    model = efn.EfficientNetB5(weights=weights)

if b_name == "6":
    model = efn.EfficientNetB6(weights=weights)

if b_name == "7":
    model = efn.EfficientNetB7(weights=weights)

image_size = model.input_shape[1]


def read_image(path):
    try:
Example #21
def create_model(dim=(256, 256), weights=np.ones(5), split=False):

    f_loss = categorical_focal_loss(alpha=weights)

    IMG_WIDTH, IMG_HEIGHT, CHANNELS = *dim, 3
    input_shape = (IMG_WIDTH, IMG_HEIGHT, CHANNELS)
    elu = keras.layers.ELU(alpha=1.0)

    # create the base pre-trained model
    # Load in EfficientNetB4
    effnet = efn.EfficientNetB4(weights=None,
                                include_top=False,
                                input_shape=(IMG_WIDTH, IMG_HEIGHT, CHANNELS))
    effnet.load_weights(
        '/media/parth/DATA/datasets/aptos_2019/efficientnet-b4_imagenet_1000_notop.h5'
    )

    # Replace all Batch Normalization layers by Group Normalization layers
    for i, layer in enumerate(effnet.layers):
        if "batch_normalization" in layer.name:
            effnet.layers[i] = GroupNormalization(groups=32,
                                                  axis=-1,
                                                  epsilon=0.00001)

    if split == True:

        input1 = Input(input_shape)
        input2 = Input(input_shape)
        input3 = Input(input_shape)
        input4 = Input(input_shape)
        conv1 = Conv2D(16, 3, padding='same')(input1)
        conv2 = Conv2D(16, 3, padding='same')(input2)
        conv3 = Conv2D(16, 3, padding='same')(input3)
        conv4 = Conv2D(16, 3, padding='same')(input4)
        concat = concatenate([conv1, conv2, conv3, conv4])
        enet_input = Conv2D(3, 3, padding='same')(concat)
        x = effnet(enet_input)
        x = GlobalAveragePooling2D()(x)
        x = Dense(256)(x)
        x = Dropout(0.25)(x)
        x = Dense(5)(x)
        predictions = Softmax()(x)

        model = Model(inputs=[input1, input2, input3, input4],
                      outputs=predictions)
        model.compile(loss=f_loss,
                      optimizer=RAdam(learning_rate=0.00005),
                      metrics=[f_loss, 'acc'])
        print(model.summary())

        return model

    else:

        x = effnet.output
        x = GlobalAveragePooling2D()(x)
        x = Dense(256)(x)
        x = Dropout(0.25)(x)
        x = Dense(5)(x)
        predictions = Softmax()(x)

        model = Model(inputs=effnet.input, outputs=predictions)
        model.compile(loss=f_loss,
                      optimizer=RAdam(lr=0.00005),
                      metrics=[f_loss, 'acc'])
        print(model.summary())

        return model
Example #22
def get_backbone(name):
    """ Chooses a backbone/ base network.

        Args:
            name: the name of the base network.

        Returns:
            backbone: the Keras model of the chosen network.
    """
    if name == 'EfficientNetB0':
        backbone = efn.EfficientNetB0(include_top=c.INCLUDE_TOP,
                                      weights=c.WEIGHTS,
                                      input_shape=c.INPUT_SHAPE,
                                      pooling=c.POOLING)
    elif name == 'EfficientNetB1':
        backbone = efn.EfficientNetB1(include_top=c.INCLUDE_TOP,
                                      weights=c.WEIGHTS,
                                      input_shape=c.INPUT_SHAPE,
                                      pooling=c.POOLING)
    elif name == 'EfficientNetB2':
        backbone = efn.EfficientNetB2(include_top=c.INCLUDE_TOP,
                                      weights=c.WEIGHTS,
                                      input_shape=c.INPUT_SHAPE,
                                      pooling=c.POOLING)
    elif name == 'EfficientNetB3':
        backbone = efn.EfficientNetB3(include_top=c.INCLUDE_TOP,
                                      weights=c.WEIGHTS,
                                      input_shape=c.INPUT_SHAPE,
                                      pooling=c.POOLING)
    elif name == 'EfficientNetB4':
        backbone = efn.EfficientNetB4(include_top=c.INCLUDE_TOP,
                                      weights=c.WEIGHTS,
                                      input_shape=c.INPUT_SHAPE,
                                      pooling=c.POOLING)
    elif name == 'EfficientNetB5':
        backbone = efn.EfficientNetB5(include_top=c.INCLUDE_TOP,
                                      weights=c.WEIGHTS,
                                      input_shape=c.INPUT_SHAPE,
                                      pooling=c.POOLING)
    elif name == 'EfficientNetB6':
        backbone = efn.EfficientNetB6(include_top=c.INCLUDE_TOP,
                                      weights=c.WEIGHTS,
                                      input_shape=c.INPUT_SHAPE,
                                      pooling=c.POOLING)
    elif name == 'EfficientNetB7':
        backbone = efn.EfficientNetB7(include_top=c.INCLUDE_TOP,
                                      weights=c.WEIGHTS,
                                      input_shape=c.INPUT_SHAPE,
                                      pooling=c.POOLING)
    elif name == 'VGG16':
        backbone = VGG16(weights=c.WEIGHTS,
                         include_top=c.INCLUDE_TOP,
                         input_shape=c.INPUT_SHAPE,
                         pooling=c.POOLING)
    elif name == 'ResNet50':
        backbone = ResNet50(include_top=c.INCLUDE_TOP,
                            weights=c.WEIGHTS,
                            input_shape=c.INPUT_SHAPE,
                            pooling=c.POOLING)
    elif name == 'InceptionV3':
        backbone = InceptionV3(include_top=c.INCLUDE_TOP,
                               weights=c.WEIGHTS,
                               input_shape=c.INPUT_SHAPE,
                               pooling=c.POOLING)
    elif name == 'DenseNet201':
        backbone = DenseNet201(weights=c.WEIGHTS,
                               include_top=c.INCLUDE_TOP,
                               input_shape=c.INPUT_SHAPE,
                               pooling=c.POOLING)
    else:
        backbone = None
    try:
        backbone.trainable = True
        return backbone
    except Exception as e:
        print(str(e))
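A hypothetical usage sketch (the c config module is assumed to define INCLUDE_TOP, WEIGHTS, INPUT_SHAPE and POOLING, as referenced above):

backbone = get_backbone('EfficientNetB4')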
                                            subset='validation',
                                            target_size=(imgSize, imgSize),
                                            batch_size=batch_size,
                                            class_mode='categorical',
                                            shuffle=True)
nb_train_samples = len(train_generator.filenames)
nb_validation_samples = len(val_generator.filenames)
CLASS_COUNT = len(train_generator.class_indices)
train_labels = train_generator.classes
train_labels = to_categorical(train_labels, num_classes=CLASS_COUNT)

#model
print('Generating training model...')
# base_model = ResNet50(weights='imagenet',include_top=False,input_shape=(224, 224, 3),pooling='max')
base_model = efn.EfficientNetB4(weights='imagenet',
                                include_top=False,
                                pooling='avg')
base_model.trainable = False
model = Sequential([
    base_model,
    # Dense(256, activation='relu'),
    Dense(128, activation='relu'),
    Dense(CLASS_COUNT, activation='softmax'),
])
model.summary()
model.compile(loss='categorical_crossentropy',
              optimizer=Adam(lr=0.0001, decay=1e-6),
              metrics=['accuracy'])

#training data...
print('Training model...')
Example #24
    callbacks=[densenet_time, densenet_stopping, densenet_checkpoint]
)


# ### EfficientNet

# EfficientNet is a lightweight CNN architecture designed to require significantly less compute than other state-of-the-art architectures on common transfer-learning datasets.

# Pre-built Keras EfficientNet models are used from the efficientnet library, available on [GitHub](https://github.com/qubvel/efficientnet) and installable via PyPI.
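# A minimal install-and-import sketch (assumed to match the `efn` alias used in the cells below; the PyPI package name is `efficientnet`):
#
#     pip install efficientnet
#     import efficientnet.keras as efn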

# In[24]:


efficientnet_base = efn.EfficientNetB4(
    include_top=False,
    weights='imagenet',
    input_shape=(224, 224, 3),
    pooling='avg'
)
output = Dense(15, activation='sigmoid')(efficientnet_base.output)

efficientnet = Model(inputs=efficientnet_base.input, outputs=output)


# In[25]:


efficientnet.summary()


# In[26]:
Example #25
def model(input_form="all", aux_size=0, hyperparameters=dict()):
    print("using the following hyperparameters: {}".format(hyperparameters))

    if input_form == "features":
        return features_model(aux_size, hyperparameters)

    parameters = INPUT_FORM_PARAMETERS[input_form]

    inputs = list()
    outputs = list()

    # retrieving the hyperparameters
    DROPOUT = hyperparameters.get("dropout", 0.5)
    OPTIMIZER = hyperparameters.get("optimizer", "sgd-0001-0.9")
    DEEP_DENSE_TOP = hyperparameters.get("deep-dense-top", True)
    CONVNET_FREEZE_PERCENT = hyperparameters.get("convnet-freeze-percent", 0.0)

    #skip for now
    '''
    if parameters["t2"]:
        convnet = applications.ResNet50(
            weights="imagenet",
            include_top=False,
            input_shape=(config.IMAGE_SIZE, config.IMAGE_SIZE, 3),
        )
        for layer in convnet.layers:
            layer.name = "{}_t2".format(layer.name)
        apply_layer_freeze(convnet, CONVNET_FREEZE_PERCENT)
        out = convnet.output
        out = Flatten()(out)
        inputs.append(convnet.input)
        outputs.append(out)
    '''
    if parameters["t1"]:
        # init EfficientNetB4
        convnet = efn.EfficientNetB4(
            weights="imagenet",
            include_top=False,
            input_shape=(config.IMAGE_SIZE, config.IMAGE_SIZE, 3),
        )
        apply_layer_freeze(convnet, CONVNET_FREEZE_PERCENT)
        out = convnet.output
        out = Flatten()(out)
        inputs.append(convnet.input)
        outputs.append(out)

    if len(outputs) > 1:
        out = concatenate(outputs)
    else:
        out = outputs[0]

    out = Dense(256,
                activation="relu",
                kernel_regularizer=l1_l2(l1=0.01, l2=0.01))(out)
    out = BatchNormalization()(out)

    if DEEP_DENSE_TOP:
        out = Dropout(DROPOUT)(out)
        out = Dense(128,
                    activation="relu",
                    kernel_regularizer=l1_l2(l1=0.01, l2=0.01))(out)
        out = BatchNormalization()(out)
        out = Dropout(DROPOUT)(out)
        out = Dense(64,
                    activation="relu",
                    kernel_regularizer=l1_l2(l1=0.01, l2=0.01))(out)
        out = BatchNormalization()(out)
        out = Dropout(DROPOUT)(out)
        out = Dense(32,
                    activation="relu",
                    kernel_regularizer=l1_l2(l1=0.01, l2=0.01))(out)
        out = BatchNormalization()(out)
        out = Dropout(DROPOUT)(out)

    if parameters["features"]:
        aux_input = Input(shape=(aux_size, ), name='aux_input')
        inputs.append(aux_input)
        out = concatenate([out, aux_input])

    out = Dense(16,
                activation="relu",
                kernel_regularizer=l1_l2(l1=0.01, l2=0.01))(out)
    out = BatchNormalization()(out)
    predictions = Dense(1,
                        activation="sigmoid",
                        kernel_regularizer=l1_l2(l1=0.01, l2=0.01))(out)

    # creating the final model
    if len(inputs) > 1:
        model = Model(inputs=inputs, outputs=predictions)
    else:
        model = Model(inputs=inputs[0], outputs=predictions)

    # compile the model
    model.compile(loss="binary_crossentropy",
                  optimizer=OPTIMIZERS[OPTIMIZER](),
                  metrics=["accuracy"])

    return model
Example #26
import matplotlib.pyplot as plt
import random
from pylab import rcParams
import os
import math

policy = mixed_precision.Policy('mixed_float16')
mixed_precision.set_policy(policy)
tf.keras.regularizers.l2(l2=0.01)

datagen = ImageDataGenerator(rescale=1. / 255, horizontal_flip=True)
train_csv = pd.read_csv(r"/content/train.csv")
train_csv["label"] = train_csv["label"].astype(str)

base_model = efn.EfficientNetB4(weights='noisy-student',
                                input_shape=(512, 512, 3),
                                include_top=True)

train = train_csv.iloc[:int(len(train_csv) * 0.9), :]
test = train_csv.iloc[int(len(train_csv) * 0.9):, :]
print((len(train), len(test)))
base_model.trainable = True

fold_number = 0

n_splits = 5
oof_accuracy = []

first_decay_steps = 500
lr = (tf.keras.experimental.CosineDecayRestarts(0.04, first_decay_steps))
opt = tf.keras.optimizers.SGD(lr, momentum=0.9)
Example #27
import os
import numpy as np
import efficientnet.keras as efn

from skimage.io import imread
from efficientnet.keras import center_crop_and_resize, preprocess_input
from keras.applications.imagenet_utils import decode_predictions

model = efn.EfficientNetB4(weights='noisy-student')

image_size = model.input_shape[1]


def read_image(path):
    try:
        return preprocess_input(
            center_crop_and_resize(imread(path)[:, :, :3],
                                   image_size=image_size))
    except:
        return None


from time import time


def predictor(in_paths=[], batch_size=2):
    in_images = [read_image(in_path) for in_path in in_paths]

    bad_indices = {
        i
        for i, in_image in enumerate(in_images) if in_image is None
Example #28
def build_model(input_shape, args):
    D = args.d
    F = args.f
    V = args.v

    input_tensor = Input(shape=input_shape)

    if args.tf == "in":
        base_model = InceptionV3(weights='imagenet', include_top=False, input_tensor=input_tensor)
        #pi = in_pi
    elif args.tf == "inr":
        base_model = InceptionResNetV2(weights='imagenet', include_top=False, input_tensor=input_tensor)
        #pi = inr_pi
    elif args.tf == "vg":
        base_model = VGG16(weights='imagenet', include_top=False, input_tensor=input_tensor)
        #pi = vg_pi
    elif args.tf == "xc":
        base_model = Xception(weights='imagenet', include_top=False, input_tensor=input_tensor)
        #pi = xc_pi
    elif args.tf == "re":
        base_model = ResNet50(weights='imagenet', include_top=False, input_tensor=input_tensor)
        #pi = re_pi
    elif args.tf == "de":
        base_model = DenseNet121(weights='imagenet', include_top=False, input_tensor=input_tensor)
        #pi = de_pi
    elif args.tf == "mo":
        base_model = MobileNet(weights='imagenet', include_top=False, input_tensor=input_tensor)
        #pi = mo_pi
    elif args.tf.find("ef") > -1:
        if args.tf == "ef0":
            base_model = efn.EfficientNetB0(weights='imagenet', include_top=False, input_tensor=input_tensor)
        elif args.tf == "ef1":
            base_model = efn.EfficientNetB1(weights='imagenet', include_top=False, input_tensor=input_tensor)
        elif args.tf == "ef2":
            base_model = efn.EfficientNetB2(weights='imagenet', include_top=False, input_tensor=input_tensor)
        elif args.tf == "ef3":
            base_model = efn.EfficientNetB3(weights='imagenet', include_top=False, input_tensor=input_tensor)
        elif args.tf == "ef4":
            base_model = efn.EfficientNetB4(weights='imagenet', include_top=False, input_tensor=input_tensor)
        elif args.tf == "ef5":
            base_model = efn.EfficientNetB5(weights='imagenet', include_top=False, input_tensor=input_tensor)
        elif args.tf == "ef6":
            base_model = efn.EfficientNetB6(weights='imagenet', include_top=False, input_tensor=input_tensor)
        elif args.tf == "ef7":
            base_model = efn.EfficientNetB7(weights='imagenet', include_top=False, input_tensor=input_tensor)
    else:
        print("unknown network type:", args.tf)
        exit()

    x = base_model.output
    x = GlobalAveragePooling2D()(x)
    x = Dense(F, activation='relu')(x)
    if D > 0:
        x = Dropout(D)(x)
 
    pred = Dense(nb_classes, activation='softmax')(x)

    model = Model(inputs=base_model.input, outputs=pred)

    layer_num = len(base_model.layers)
    for layer in base_model.layers[:int(layer_num * V)]:
        layer.trainable = False

    return model #, pi