def get_model(model='b2', shape=(320,320)):
    K.clear_session()
    h,w = shape
    if model == 'b0':
        base_model = efn.EfficientNetB0(weights='imagenet', include_top=False, pooling='avg', input_shape=(h, w, 3))

    elif model == 'b1':
        base_model = efn.EfficientNetB1(weights='imagenet', include_top=False, pooling='avg', input_shape=(h, w, 3))

    elif model == 'b2':
        base_model = efn.EfficientNetB2(weights='imagenet', include_top=False, pooling='avg', input_shape=(h, w, 3))

    elif model == 'b3':
        base_model = efn.EfficientNetB3(weights='imagenet', include_top=False, pooling='avg', input_shape=(h, w, 3))

    elif model == 'b4':
        base_model = efn.EfficientNetB4(weights='imagenet', include_top=False, pooling='avg', input_shape=(h, w, 3))

    elif model == 'b5':
        base_model = efn.EfficientNetB5(weights='imagenet', include_top=False, pooling='avg', input_shape=(h, w, 3))

    elif model == 'b6':
        base_model = efn.EfficientNetB6(weights='imagenet', include_top=False, pooling='avg', input_shape=(h, w, 3))

    else:
        base_model = efn.EfficientNetB7(weights='imagenet', include_top=False, pooling='avg', input_shape=(h, w, 3))


    x = base_model.output
    y_pred = Dense(4, activation='sigmoid')(x)
    return Model(inputs=base_model.input, outputs=y_pred)
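A minimal usage sketch for get_model above; the imports behind the efn / K / Dense / Model names and the compile settings are illustrative assumptions, not part of the original snippet:

import efficientnet.keras as efn
from keras import backend as K
from keras.layers import Dense
from keras.models import Model

model = get_model(model='b4', shape=(380, 380))               # any of 'b0' .. 'b7'
model.compile(optimizer='adam', loss='binary_crossentropy')   # 4 sigmoid outputs -> multi-label head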
    def load_model(self):
        FACTOR = 0.70
        HEIGHT = 137
        WIDTH = 236
        HEIGHT_NEW = int(HEIGHT * FACTOR)
        WIDTH_NEW = int(WIDTH * FACTOR)
        # the scaled sizes above are immediately overridden by a fixed 128x128 input
        HEIGHT_NEW = 128
        WIDTH_NEW = 128

        # base_model = EfficientNetB3(include_top=False, weights='imagenet', input_shape=(HEIGHT_NEW, WIDTH_NEW, 3))
        base_model = efn.EfficientNetB2(include_top=False, weights='imagenet', input_shape=(HEIGHT_NEW, WIDTH_NEW, 3))
        # base_model.trainable=False
        x = base_model.output
        x = layers.GlobalAveragePooling2D()(x)
        
        grapheme_root = layers.Dense(168, activation = 'softmax', name = 'root')(x)
        vowel_diacritic = layers.Dense(11, activation = 'softmax', name = 'vowel')(x)
        consonant_diacritic = layers.Dense(7, activation = 'softmax', name = 'consonant')(x)

        model = Model(inputs=base_model.input,outputs = [grapheme_root, vowel_diacritic, consonant_diacritic])
        # for layer in base_model.layers:
        #     layer.trainable = True
        model.compile(optimizer='adam', loss = {'root' : 'categorical_crossentropy', 
                    'vowel' : 'categorical_crossentropy', 
                    'consonant': 'categorical_crossentropy'},
                    loss_weights = {'root' : 0.5,
                            'vowel' : 0.25,
                            'consonant': 0.25},
                    metrics={'root' : 'accuracy', 
                    'vowel' : 'accuracy', 
                    'consonant': 'accuracy'})
        model.summary()

        return model
Example #3
def get_efficientnet_model(
    model_name='efficientnetb0',
    input_shape=(224, 224, 3),
    input_tensor=None,
    include_top=True,
    classes=1000,
    weights='imagenet',
):

    layer_names = [
        'block3a_expand_activation',  #C2
        'block4a_expand_activation',  #C3
        'block6a_expand_activation',  #C4
        'top_activation'  #C5
    ]

    Args = {
        'input_shape': input_shape,
        'weights': weights,
        'include_top': include_top,
        'input_tensor': input_tensor
    }

    if model_name == 'efficientnetb0':
        backbone = efn.EfficientNetB0(**Args)

    elif model_name == 'efficientnetb1':
        backbone = efn.EfficientNetB1(**Args)

    elif model_name == 'efficientnetb2':
        backbone = efn.EfficientNetB2(**Args)

    elif model_name == 'efficientnetb3':
        backbone = efn.EfficientNetB3(**Args)

    elif model_name == 'efficientnetb4':
        backbone = efn.EfficientNetB4(**Args)

    elif model_name == 'efficientnetb5':
        backbone = efn.EfficientNetB5(**Args)

    elif model_name == 'efficientnetb6':
        backbone = efn.EfficientNetB6(**Args)

    elif model_name == 'efficientnetb7':
        backbone = efn.EfficientNetB7(**Args)

    else:
        raise ValueError('No such model {}'.format(model_name))

    several_layers = []

    several_layers.append(backbone.get_layer(layer_names[0]).output)
    several_layers.append(backbone.get_layer(layer_names[1]).output)
    several_layers.append(backbone.get_layer(layer_names[2]).output)
    several_layers.append(backbone.get_layer(layer_names[3]).output)

    model = keras.models.Model(inputs=[backbone.input], outputs=several_layers)
    return model
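A hedged usage sketch: the four outputs returned above are the C2-C5 feature maps usually fed into an FPN-style decoder; the variant and input size below are arbitrary.

backbone = get_efficientnet_model('efficientnetb3',
                                  input_shape=(512, 512, 3),
                                  include_top=False)
c2, c3, c4, c5 = backbone.outputs   # increasingly coarse feature maps, C2 first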
Example #4
def get_model():
    K.clear_session()
    base_model = efn.EfficientNetB2(weights='imagenet',
                                    include_top=False,
                                    pooling='avg',
                                    input_shape=(260, 260, 3))
    x = base_model.output
    y_pred = Dense(4, activation='sigmoid')(x)
    return Model(inputs=base_model.input, outputs=y_pred)
def create_model():
    K.clear_session()
    
    base_model =  efn.EfficientNetB2(weights = 'imagenet', include_top = False, pooling = 'avg', input_shape = SHAPE)
    x = base_model.output
    x = Dropout(0.15)(x)
    y_pred = Dense(6, activation = 'sigmoid')(x)

    return Model(inputs = base_model.input, outputs = y_pred)
def effnet_retinanet(num_classes, backbone='EfficientNetB0', inputs=None, modifier=None, **kwargs):
    """ Constructs a retinanet model using a resnet backbone.

    Args
        num_classes: Number of classes to predict.
        backbone: Which backbone to use (one of ('resnet50', 'resnet101', 'resnet152')).
        inputs: The inputs to the network (defaults to a Tensor of shape (None, None, 3)).
        modifier: A function handler which can modify the backbone before using it in retinanet (this can be used to freeze backbone layers for example).

    Returns
        RetinaNet model with a ResNet backbone.
    """
    # choose default input
    if inputs is None:
        if keras.backend.image_data_format() == 'channels_first':
            inputs = keras.layers.Input(shape=(3, None, None))
        else:
            # inputs = keras.layers.Input(shape=(224, 224, 3))
            inputs = keras.layers.Input(shape=(None, None, 3))

    # get last conv layer from the end of each block [28x28, 14x14, 7x7]
    if backbone == 'EfficientNetB0':
        model = efn.EfficientNetB0(input_tensor=inputs, include_top=False, weights=None)
    elif backbone == 'EfficientNetB1':
        model = efn.EfficientNetB1(input_tensor=inputs, include_top=False, weights=None)
    elif backbone == 'EfficientNetB2':
        model = efn.EfficientNetB2(input_tensor=inputs, include_top=False, weights=None)
    elif backbone == 'EfficientNetB3':
        model = efn.EfficientNetB3(input_tensor=inputs, include_top=False, weights=None)
    elif backbone == 'EfficientNetB4':
        model = efn.EfficientNetB4(input_tensor=inputs, include_top=False, weights=None)
    elif backbone == 'EfficientNetB5':
        model = efn.EfficientNetB5(input_tensor=inputs, include_top=False, weights=None)
    elif backbone == 'EfficientNetB6':
        model = efn.EfficientNetB6(input_tensor=inputs, include_top=False, weights=None)
    elif backbone == 'EfficientNetB7':
        model = efn.EfficientNetB7(input_tensor=inputs, include_top=False, weights=None)
    else:
        raise ValueError('Backbone (\'{}\') is invalid.'.format(backbone))

    layer_outputs = ['block4a_expand_activation', 'block6a_expand_activation', 'top_activation']

    layer_outputs = [
        model.get_layer(name=layer_outputs[0]).output,  # 28x28
        model.get_layer(name=layer_outputs[1]).output,  # 14x14
        model.get_layer(name=layer_outputs[2]).output,  # 7x7
    ]
    # create the EfficientNet backbone model exposing the selected pyramid-level outputs
    model = keras.models.Model(inputs=inputs, outputs=layer_outputs, name=model.name)

    # invoke modifier if given
    if modifier:
        model = modifier(model)

    # create the full model
    return retinanet.retinanet(inputs=inputs, num_classes=num_classes, backbone_layers=model.outputs, **kwargs)
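The modifier argument described in the docstring can be used, for example, to freeze the backbone; a sketch under the assumption that the usual keras-retinanet training setup surrounds it, with an arbitrary class count:

def freeze_backbone(model):
    # freeze every layer of the EfficientNet feature extractor
    for layer in model.layers:
        layer.trainable = False
    return model

retinanet_model = effnet_retinanet(num_classes=80,
                                   backbone='EfficientNetB2',
                                   modifier=freeze_backbone)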
Example #7
def construct_mlp(input_size, num_classes, num_frames,
                  dropout_size=0.5, ef_mode=4, l2_reg=1e-5):
    """
    Construct a MLP model for urban sound tagging.
    Parameters
    ----------
    num_frames
    input_size
    num_classes
    dropout_size
    ef_mode
    l2_reg
    Returns
    -------
    model
    """

    # Keras layers and utilities used below; AutoPool1D is assumed to come from the
    # project's autopool module and to already be importable.
    from keras.layers import (Flatten, Conv1D, Conv2D, GlobalMaxPooling1D, GlobalAveragePooling1D,
                              LSTM, Concatenate, GlobalAveragePooling2D, LeakyReLU,
                              Input, TimeDistributed, Dropout, Dense)
    from keras.models import Model
    from keras import regularizers

    import efficientnet.keras as efn

    if ef_mode == 0:
        base_model = efn.EfficientNetB0(weights='noisy-student', include_top=False, pooling='avg')
    elif ef_mode == 1:
        base_model = efn.EfficientNetB1(weights='noisy-student', include_top=False, pooling='avg')
    elif ef_mode == 2:
        base_model = efn.EfficientNetB2(weights='noisy-student', include_top=False, pooling='avg')
    elif ef_mode == 3:
        base_model = efn.EfficientNetB3(weights='noisy-student', include_top=False, pooling='avg')
    elif ef_mode == 4:
        base_model = efn.EfficientNetB4(weights='noisy-student', include_top=False, pooling='avg')  #imagenet or weights='noisy-student'
    elif ef_mode == 5:
        base_model = efn.EfficientNetB5(weights='noisy-student', include_top=False, pooling='avg')
    elif ef_mode == 6:
        base_model = efn.EfficientNetB6(weights='noisy-student', include_top=False, pooling='avg')
    elif ef_mode == 7:
        base_model = efn.EfficientNetB7(weights='noisy-student', include_top=False, pooling='avg')

    input1 = Input(shape=input_size, dtype='float32', name='input')
    input2 = Input(shape=(num_frames,85), dtype='float32', name='input2') #1621
    y = TimeDistributed(base_model)(input1)
    y = TimeDistributed(Dropout(dropout_size))(y)
    y = Concatenate()([y, input2])
    y = TimeDistributed(Dense(num_classes, activation='sigmoid', kernel_regularizer=regularizers.l2(l2_reg)))(y)
    y = AutoPool1D(axis=1, name='output')(y)

    m = Model(inputs=[input1, input2], outputs=y)
    m.summary()
    m.name = 'urban_sound_classifier'

    return m
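A hypothetical call; the frame count, patch size and tag count are assumptions chosen only to match the shapes the function expects (input1 is (num_frames, H, W, 3), input2 is (num_frames, 85)):

model = construct_mlp(input_size=(8, 128, 128, 3),
                      num_classes=23,
                      num_frames=8,
                      dropout_size=0.5,
                      ef_mode=4)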
Example #8
    def getB2Net(self, shape, model_name):
        effnet = efn.EfficientNetB2(weights=None,\
                                include_top=False,\
                                input_shape=shape)
        #effnet.load_weights(self.weight_path + 'efficientnet-b2_imagenet_1000_notop.h5')

        for i, layer in enumerate(effnet.layers):
            effnet.layers[i].name = str(model_name) + "_" + layer.name
            if "batch_normalization" in layer.name:
                effnet.layers[i] = GroupNormalization(groups=self.batch_size,
                                                      axis=-1,
                                                      epsilon=0.00001)
        return effnet
Example #9
def create_model(input_shape, n_out):
    # input_tensor = Input(shape=input_shape)
    # base_model = ResNet50(include_top=False,
    #                       weights=None,
    #                       input_tensor=input_tensor)
    # base_model.load_weights('D:/Diabetic_Retinopathy/Resnet50_bestqwk.h5')
    # ResNet18, preprocess_input = Classifiers.get('resnet18')
    # base_model = ResNet18((SIZE, SIZE, 3), weights='imagenet', include_top=False)
    base_model = efn.EfficientNetB2(input_shape=(SIZE, SIZE, 3),
                                    weights='imagenet',
                                    include_top=False)
    x = GlobalAveragePooling2D()(base_model.output)
    x = Dropout(0.5)(x)
    x = Dense(1024, activation='relu')(x)
    x = Dropout(0.5)(x)
    final_output = Dense(n_out, activation="softmax", name='final_output')(x)
    # model = Model(input_tensor, final_output)
    model = Model(inputs=[base_model.input], outputs=[final_output])
    return model
Example #10
def get_model_effnet(img_shape, img_input, weights, effnet_version):

    if effnet_version == 'B0':
        effnet = efn.EfficientNetB0(include_top=False, input_tensor=img_input, weights=weights, pooling=None, input_shape=img_shape)
    elif effnet_version == 'B1':
        effnet = efn.EfficientNetB1(include_top=False, input_tensor=img_input, weights=weights, pooling=None, input_shape=img_shape)
    elif effnet_version == 'B2':
        effnet = efn.EfficientNetB2(include_top=False, input_tensor=img_input, weights=weights, pooling=None, input_shape=img_shape)
    elif effnet_version == 'B3':
        effnet = efn.EfficientNetB3(include_top=False, input_tensor=img_input, weights=weights, pooling=None, input_shape=img_shape)
    elif effnet_version == 'B4':
        effnet = efn.EfficientNetB4(include_top=False, input_tensor=img_input, weights=weights, pooling=None, input_shape=img_shape)
    elif effnet_version == 'B5':
        effnet = efn.EfficientNetB5(include_top=False, input_tensor=img_input, weights=weights, pooling=None, input_shape=img_shape)
    elif effnet_version == 'B6':
        effnet = efn.EfficientNetB6(include_top=False, input_tensor=img_input, weights=weights, pooling=None, input_shape=img_shape)
    else:
        effnet = efn.EfficientNetB7(include_top=False, input_tensor=img_input, weights=weights, pooling=None, input_shape=img_shape)

    return effnet
Example #11
def create_base_model(base_model_name, pretrained=True, IMAGE_SIZE=[300, 300]):
    if pretrained is False:
        weights = None
    else:
        weights = "imagenet"
    if base_model_name == 'B0':
        base = efn.EfficientNetB0(weights=weights,
                                  include_top=False,
                                  input_shape=[*IMAGE_SIZE, 3])
    elif base_model_name == 'B1':
        base = efn.EfficientNetB1(weights=weights,
                                  include_top=False,
                                  input_shape=[*IMAGE_SIZE, 3])
    elif base_model_name == 'B2':
        base = efn.EfficientNetB2(weights=weights,
                                  include_top=False,
                                  input_shape=[*IMAGE_SIZE, 3])
    elif base_model_name == 'B3':
        base = efn.EfficientNetB3(weights=weights,
                                  include_top=False,
                                  input_shape=[*IMAGE_SIZE, 3])
    elif base_model_name == 'B4':
        base = efn.EfficientNetB4(weights=weights,
                                  include_top=False,
                                  input_shape=[*IMAGE_SIZE, 3])
    elif base_model_name == 'B5':
        base = efn.EfficientNetB5(weights=weights,
                                  include_top=False,
                                  input_shape=[*IMAGE_SIZE, 3])
    elif base_model_name == 'B6':
        base = efn.EfficientNetB6(weights=weights,
                                  include_top=False,
                                  input_shape=[*IMAGE_SIZE, 3])
    elif base_model_name == 'B7':
        base = efn.EfficientNetB7(weights=weights,
                                  include_top=False,
                                  input_shape=[*IMAGE_SIZE, 3])
    base = remove_dropout(base)
    base.trainable = True
    return base
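A sketch of attaching a classification head to the returned base, assuming efn is the standalone-Keras efficientnet module and that the remove_dropout helper used inside create_base_model is in scope; the 5-class softmax head is illustrative:

from keras.layers import GlobalAveragePooling2D, Dense
from keras.models import Sequential

base = create_base_model('B2', pretrained=True, IMAGE_SIZE=[300, 300])
clf = Sequential([
    base,
    GlobalAveragePooling2D(),
    Dense(5, activation='softmax'),
])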
Example #12
        return loss

    return focal_loss


def custom_loss(y_true, y_pred):
    ls = 0.1
    classes = 5
    y_true = tf.cast(y_true, dtype=tf.float32)
    y_pred = tf.cast(y_pred, dtype=tf.float32)
    # label smoothing: smooth the ground-truth labels, not the predictions
    y_true = (1 - ls) * y_true + ls / classes
    loss = tf.keras.losses.categorical_crossentropy(y_true, y_pred)
    return loss


base_model = efn.EfficientNetB2(weights='noisy-student',
                                include_top=False,  # needed for the non-default 512x512 input below
                                input_shape=(512, 512, 3))

base_model.trainable = True

model = tf.keras.Sequential([
    tf.keras.layers.Input((512, 512, 3)),
    tf.keras.layers.BatchNormalization(renorm=True), base_model,
    BatchNormalization(),
    tf.keras.layers.LeakyReLU(),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(256),
    BatchNormalization(),
    tf.keras.layers.LeakyReLU(),
    tf.keras.layers.Dense(128),
    BatchNormalization(),
    tf.keras.layers.LeakyReLU(),
def UEfficientNet(input_shape=(None, None, 3), dropout_rate=0.1):

    backbone = efn.EfficientNetB2(weights=None,
                                  include_top=False,
                                  input_shape=input_shape)
    #     backbone.load_weights("../input/efficientnet-keras-weights-b0b5/efficientnet-b2_imagenet_1000_notop.h5")
    input = backbone.input
    start_neurons = 8

    i = 2
    lr = []
    for l in backbone.layers:
        if l.name == 'block{}a_expand_activation'.format(i):
            lr.append(l)
            i += 1

    conv4 = lr[-1].output
    conv4 = LeakyReLU(alpha=0.1)(conv4)
    pool4 = MaxPooling2D((2, 2))(conv4)
    pool4 = Dropout(dropout_rate)(pool4)

    # Middle
    convm = Conv2D(start_neurons * 32, (3, 3), activation=None,
                   padding="same")(pool4)
    convm = residual_block(convm, start_neurons * 32)
    convm = residual_block(convm, start_neurons * 32)
    convm = LeakyReLU(alpha=0.1)(convm)

    deconv4 = Conv2DTranspose(start_neurons * 16, (3, 3),
                              strides=(2, 2),
                              padding="same")(convm)
    deconv4_up1 = Conv2DTranspose(start_neurons * 16, (3, 3),
                                  strides=(2, 2),
                                  padding="same")(deconv4)
    deconv4_up2 = Conv2DTranspose(start_neurons * 16, (3, 3),
                                  strides=(2, 2),
                                  padding="same")(deconv4_up1)
    deconv4_up3 = Conv2DTranspose(start_neurons * 16, (3, 3),
                                  strides=(2, 2),
                                  padding="same")(deconv4_up2)
    uconv4 = concatenate([deconv4, conv4])
    uconv4 = Dropout(dropout_rate)(uconv4)

    uconv4 = Conv2D(start_neurons * 16, (3, 3),
                    activation=None,
                    padding="same")(uconv4)
    uconv4 = residual_block(uconv4, start_neurons * 16)
    #     uconv4 = residual_block(uconv4,start_neurons * 16)
    uconv4 = LeakyReLU(alpha=0.1)(uconv4)  #conv1_2

    deconv3 = Conv2DTranspose(start_neurons * 8, (3, 3),
                              strides=(2, 2),
                              padding="same")(uconv4)
    deconv3_up1 = Conv2DTranspose(start_neurons * 8, (3, 3),
                                  strides=(2, 2),
                                  padding="same")(deconv3)
    deconv3_up2 = Conv2DTranspose(start_neurons * 8, (3, 3),
                                  strides=(2, 2),
                                  padding="same")(deconv3_up1)
    conv3 = lr[-2].output
    uconv3 = concatenate([deconv3, deconv4_up1, conv3])
    uconv3 = Dropout(dropout_rate)(uconv3)

    uconv3 = Conv2D(start_neurons * 8, (3, 3), activation=None,
                    padding="same")(uconv3)
    uconv3 = residual_block(uconv3, start_neurons * 8)
    #     uconv3 = residual_block(uconv3,start_neurons * 8)
    uconv3 = LeakyReLU(alpha=0.1)(uconv3)

    deconv2 = Conv2DTranspose(start_neurons * 4, (3, 3),
                              strides=(2, 2),
                              padding="same")(uconv3)
    deconv2_up1 = Conv2DTranspose(start_neurons * 4, (3, 3),
                                  strides=(2, 2),
                                  padding="same")(deconv2)
    conv2 = lr[-4].output
    uconv2 = concatenate([deconv2, deconv3_up1, deconv4_up2, conv2])

    uconv2 = Dropout(0.1)(uconv2)
    uconv2 = Conv2D(start_neurons * 4, (3, 3), activation=None,
                    padding="same")(uconv2)
    uconv2 = residual_block(uconv2, start_neurons * 4)
    #     uconv2 = residual_block(uconv2,start_neurons * 4)
    uconv2 = LeakyReLU(alpha=0.1)(uconv2)

    deconv1 = Conv2DTranspose(start_neurons * 2, (3, 3),
                              strides=(2, 2),
                              padding="same")(uconv2)
    conv1 = lr[-5].output
    uconv1 = concatenate(
        [deconv1, deconv2_up1, deconv3_up2, deconv4_up3, conv1])

    uconv1 = Dropout(0.1)(uconv1)
    uconv1 = Conv2D(start_neurons * 2, (3, 3), activation=None,
                    padding="same")(uconv1)
    uconv1 = residual_block(uconv1, start_neurons * 2)
    #     uconv1 = residual_block(uconv1,start_neurons * 2)
    uconv1 = LeakyReLU(alpha=0.1)(uconv1)

    uconv0 = Conv2DTranspose(start_neurons * 1, (3, 3),
                             strides=(2, 2),
                             padding="same")(uconv1)
    uconv0 = Dropout(0.1)(uconv0)
    uconv0 = Conv2D(start_neurons * 1, (3, 3), activation=None,
                    padding="same")(uconv0)
    uconv0 = residual_block(uconv0, start_neurons * 1)
    #     uconv0 = residual_block(uconv0,start_neurons * 1)
    uconv0 = LeakyReLU(alpha=0.1)(uconv0)

    uconv0 = Dropout(dropout_rate / 2)(uconv0)
    uconv0 = Conv2DTranspose(start_neurons * 1, (3, 3),
                             strides=(2, 2),
                             padding="same")(uconv0)
    output_layer = Conv2D(4, (1, 1), padding="same",
                          activation="sigmoid")(uconv0)

    model = Model(input, output_layer)
    model.name = 'u-efficientnet'

    return model
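An illustrative instantiation; the input size and compile settings are assumptions, and residual_block plus the Keras layers used above must already be in scope for UEfficientNet itself to run:

seg_model = UEfficientNet(input_shape=(256, 256, 3), dropout_rate=0.1)
seg_model.compile(optimizer='adam', loss='binary_crossentropy')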
Example #14
def build_model(input_shape, args):
    D = args.d
    F = args.f
    V = args.v

    input_tensor = Input(shape=input_shape)

    if args.tf == "in":
        base_model = InceptionV3(weights='imagenet', include_top=False, input_tensor=input_tensor)
        #pi = in_pi
    elif args.tf == "inr":
        base_model = InceptionResNetV2(weights='imagenet', include_top=False, input_tensor=input_tensor)
        #pi = inr_pi
    elif args.tf == "vg":
        base_model = VGG16(weights='imagenet', include_top=False, input_tensor=input_tensor)
        #pi = vg_pi
    elif args.tf == "xc":
        base_model = Xception(weights='imagenet', include_top=False, input_tensor=input_tensor)
        #pi = xc_pi
    elif args.tf == "re":
        base_model = ResNet50(weights='imagenet', include_top=False, input_tensor=input_tensor)
        #pi = re_pi
    elif args.tf == "de":
        base_model = DenseNet121(weights='imagenet', include_top=False, input_tensor=input_tensor)
        #pi = de_pi
    elif args.tf == "mo":
        base_model = MobileNet(weights='imagenet', include_top=False, input_tensor=input_tensor)
        #pi = mo_pi
    elif args.tf.find("ef") > -1:
        if args.tf == "ef0":
            base_model = efn.EfficientNetB0(weights='imagenet', include_top=False, input_tensor=input_tensor)
        elif args.tf == "ef1":
            base_model = efn.EfficientNetB1(weights='imagenet', include_top=False, input_tensor=input_tensor)
        elif args.tf == "ef2":
            base_model = efn.EfficientNetB2(weights='imagenet', include_top=False, input_tensor=input_tensor)
        elif args.tf == "ef3":
            base_model = efn.EfficientNetB3(weights='imagenet', include_top=False, input_tensor=input_tensor)
        elif args.tf == "ef4":
            base_model = efn.EfficientNetB4(weights='imagenet', include_top=False, input_tensor=input_tensor)
        elif args.tf == "ef5":
            base_model = efn.EfficientNetB5(weights='imagenet', include_top=False, input_tensor=input_tensor)
        elif args.tf == "ef6":
            base_model = efn.EfficientNetB6(weights='imagenet', include_top=False, input_tensor=input_tensor)
        elif args.tf == "ef7":
            base_model = efn.EfficientNetB7(weights='imagenet', include_top=False, input_tensor=input_tensor)
    else:
        print("unknown network type:", args.tf)
        exit()

    x = base_model.output
    x = GlobalAveragePooling2D()(x)
    x = Dense(F, activation='relu')(x)
    if D > 0:
        x = Dropout(D)(x)
 
    pred = Dense(nb_classes, activation='softmax')(x)

    model = Model(inputs=base_model.input, outputs=pred)

    layer_num = len(base_model.layers)
    for layer in base_model.layers[:int(layer_num * V)]:
        layer.trainable = False

    return model #, pi
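A hypothetical call showing what the args object has to provide (d = dropout rate, f = width of the dense layer, v = fraction of backbone layers to freeze, tf = backbone code); nb_classes is read as a module-level global:

from argparse import Namespace

nb_classes = 10                                    # assumed global used by build_model
args = Namespace(d=0.3, f=256, v=0.5, tf='ef3')    # illustrative hyper-parameters
model = build_model(input_shape=(300, 300, 3), args=args)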
Example #15
#import multiprocessing

weights = os.getenv("WEIGHTS", "noisy-student")

b_name = os.getenv("B", "2")

model = None

if b_name == "0":
    model = efn.EfficientNetB0(weights=weights)

if b_name == "1":
    model = efn.EfficientNetB1(weights=weights)

if b_name == "2":
    model = efn.EfficientNetB2(weights=weights)

if b_name == "3":
    model = efn.EfficientNetB3(weights=weights)

if b_name == "4":
    model = efn.EfficientNetB4(weights=weights)

if b_name == "5":
    model = efn.EfficientNetB5(weights=weights)

if b_name == "6":
    model = efn.EfficientNetB6(weights=weights)

if b_name == "7":
    model = efn.EfficientNetB7(weights=weights)
Example #16
def get_backbone(name):
    """ Chooses a backbone/ base network.

        Args:
            name: the name of the base network.

        Returns:
            backbone: the Keras model of the chosen network.
    """
    if name == 'EfficientNetB0':
        backbone = efn.EfficientNetB0(include_top=c.INCLUDE_TOP,
                                      weights=c.WEIGHTS,
                                      input_shape=c.INPUT_SHAPE,
                                      pooling=c.POOLING)
    elif name == 'EfficientNetB1':
        backbone = efn.EfficientNetB1(include_top=c.INCLUDE_TOP,
                                      weights=c.WEIGHTS,
                                      input_shape=c.INPUT_SHAPE,
                                      pooling=c.POOLING)
    elif name == 'EfficientNetB2':
        backbone = efn.EfficientNetB2(include_top=c.INCLUDE_TOP,
                                      weights=c.WEIGHTS,
                                      input_shape=c.INPUT_SHAPE,
                                      pooling=c.POOLING)
    elif name == 'EfficientNetB3':
        backbone = efn.EfficientNetB3(include_top=c.INCLUDE_TOP,
                                      weights=c.WEIGHTS,
                                      input_shape=c.INPUT_SHAPE,
                                      pooling=c.POOLING)
    elif name == 'EfficientNetB4':
        backbone = efn.EfficientNetB4(include_top=c.INCLUDE_TOP,
                                      weights=c.WEIGHTS,
                                      input_shape=c.INPUT_SHAPE,
                                      pooling=c.POOLING)
    elif name == 'EfficientNetB5':
        backbone = efn.EfficientNetB5(include_top=c.INCLUDE_TOP,
                                      weights=c.WEIGHTS,
                                      input_shape=c.INPUT_SHAPE,
                                      pooling=c.POOLING)
    elif name == 'EfficientNetB6':
        backbone = efn.EfficientNetB6(include_top=c.INCLUDE_TOP,
                                      weights=c.WEIGHTS,
                                      input_shape=c.INPUT_SHAPE,
                                      pooling=c.POOLING)
    elif name == 'EfficientNetB7':
        backbone = efn.EfficientNetB7(include_top=c.INCLUDE_TOP,
                                      weights=c.WEIGHTS,
                                      input_shape=c.INPUT_SHAPE,
                                      pooling=c.POOLING)
    elif name == 'VGG16':
        backbone = VGG16(weights=c.WEIGHTS,
                         include_top=c.INCLUDE_TOP,
                         input_shape=c.INPUT_SHAPE,
                         pooling=c.POOLING)
    elif name == 'ResNet50':
        backbone = ResNet50(include_top=c.INCLUDE_TOP,
                            weights=c.WEIGHTS,
                            input_shape=c.INPUT_SHAPE,
                            pooling=c.POOLING)
    elif name == 'InceptionV3':
        backbone = InceptionV3(include_top=c.INCLUDE_TOP,
                               weights=c.WEIGHTS,
                               input_shape=c.INPUT_SHAPE,
                               pooling=c.POOLING)
    elif name == 'DenseNet201':
        backbone = DenseNet201(weights=c.WEIGHTS,
                               include_top=c.INCLUDE_TOP,
                               input_shape=c.INPUT_SHAPE,
                               pooling=c.POOLING)
    else:
        backbone = None
    try:
        backbone.trainable = True
        return backbone
    except Exception as e:
        print(str(e))
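A short lookup example; the configuration values (c.WEIGHTS, c.INPUT_SHAPE, c.POOLING, c.INCLUDE_TOP) come from the project's config module and are not shown in this snippet:

backbone = get_backbone('EfficientNetB2')
if backbone is not None:
    print(backbone.name, backbone.output_shape)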
Example #17
def get_b2_backbone():
    backbone = efn.EfficientNetB2(input_shape=(128, 128, 3), include_top=False,  weights='imagenet')
    backbone_output = GlobalAveragePooling2D()(backbone.output)
    return backbone, backbone_output