Example #1
def create_model(model_name):
  if model_name == 'efn_b4':
    model = efn.EfficientNetB4(weights=None, classes=4)
  elif model_name == 'efn_b4_p':
    model = tf.keras.models.Sequential()
    model.add(efn.EfficientNetB4(input_shape=(380, 380, 3), weights='imagenet', include_top=False))
  elif model_name == 'efn_b5_p':
    model = tf.keras.models.Sequential()
    model.add(efn.EfficientNetB5(input_shape=(456, 456, 3), weights='imagenet', include_top=False))
  elif model_name == 'resnet18':
    model = ResNet([2, 2, 2, 2], input_shape=(224, 224, 3))
  elif model_name == 'densenet121_p':
    model = tf.keras.models.Sequential()
    model.add(DenseNet121(input_shape=(224, 224, 3), weights='imagenet', include_top=False))
  elif model_name == 'densenet201_p':
    model = tf.keras.models.Sequential()
    model.add(DenseNet201(input_shape=(224, 224, 3), weights='imagenet', include_top=False))

  if model_name.split('_')[-1] == 'p':
    model.add(GlobalAveragePooling2D())
    model.add(Dense(128, activation='relu'))
    model.add(Dense(64, activation='relu'))
    model.add(Dense(4, activation='softmax'))
  model.summary()
  return model
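A minimal usage sketch for the factory above, assuming efficientnet.tfkeras is imported as efn and the Keras layers used by the snippet are imported as shown (these imports are assumptions, not part of the original example):

import tensorflow as tf
import efficientnet.tfkeras as efn
from tensorflow.keras.layers import Dense, GlobalAveragePooling2D

# Hypothetical usage: build the pooled EfficientNetB4 variant and compile it for 4-class training.
model = create_model('efn_b4_p')
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])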
Example #2
def create_model(model_name, input_shape=(IMG_SIZE, IMG_SIZE, 3)):
    if model_name == 'efn_b4':
        model = efn.EfficientNetB4(weights=None, classes=4)
    elif model_name == 'efn_b4_p':
        model = tf.keras.models.Sequential()
        model.add(
            efn.EfficientNetB4(input_shape=input_shape,
                               weights='imagenet',
                               include_top=False))
    elif model_name == 'efn_b5_p':
        model = tf.keras.models.Sequential()
        model.add(
            efn.EfficientNetB5(input_shape=input_shape,
                               weights='imagenet',
                               include_top=False))
    elif model_name == 'efn_b6_p':
        model = tf.keras.models.Sequential()
        model.add(
            efn.EfficientNetB6(input_shape=input_shape,
                               weights='imagenet',
                               include_top=False))
    elif model_name == 'efn_b7_p':
        model = tf.keras.models.Sequential()
        model.add(
            efn.EfficientNetB7(input_shape=input_shape,
                               weights='imagenet',
                               include_top=False))
    elif model_name == 'densenet121_p':
        model = tf.keras.models.Sequential()
        model.add(
            DenseNet121(input_shape=input_shape,
                        weights='imagenet',
                        include_top=False))
    elif model_name == 'densenet201_p':
        model = tf.keras.models.Sequential()
        model.add(
            DenseNet201(input_shape=input_shape,
                        weights='imagenet',
                        include_top=False))
    elif model_name == 'inceptionResV2_p':
        model = tf.keras.models.Sequential()
        model.add(
            InceptionResNetV2(input_shape=input_shape,
                              weights='imagenet',
                              include_top=False))
    if model_name.split('_')[-1] == 'p':
        model.add(GlobalAveragePooling2D())
        #model.add(Dense(128, activation='relu'))
        #model.add(Dense(64, activation='relu'))
        model.add(Dense(4, activation='softmax'))
    model.summary()
    return model
Example #3
def build_backbone_net_graph(input_tensor, architecture, weights=None):
    """
    Build basic feature extraction networks.
    :param input_tensor: Input of the backbone network; should be a tensor or a tf.keras.layers.Input.
    :param architecture: The architecture name of the backbone network.
    :param weights: Pre-trained weights to initialize from. One of 'imagenet' (pre-training on ImageNet),
                    'noisy-student', None (random initialization),
                    or the path to a weights file to be loaded.
    :return: The EfficientNet backbone model (without the classification top).
    """
    assert architecture in ['efficientnet-b0', 'efficientnet-b1',
                            'efficientnet-b2', 'efficientnet-b3',
                            'efficientnet-b4', 'efficientnet-b5',
                            'efficientnet-b6', 'efficientnet-b7',
                            'efficientnet-l2']

    if architecture == 'efficientnet-b0':
        return efn.EfficientNetB0(include_top=False, weights=weights,
                                  input_tensor=input_tensor,
                                  input_shape=[None, None, 3])
    elif architecture == 'efficientnet-b1':
        return efn.EfficientNetB1(include_top=False, weights=weights,
                                  input_tensor=input_tensor,
                                  input_shape=[None, None, 3])
    elif architecture == 'efficientnet-b2':
        return efn.EfficientNetB2(include_top=False, weights=weights,
                                  input_tensor=input_tensor,
                                  input_shape=[None, None, 3])
    elif architecture == 'efficientnet-b3':
        return efn.EfficientNetB3(include_top=False, weights=weights,
                                  input_tensor=input_tensor,
                                  input_shape=[None, None, 3])
    elif architecture == 'efficientnet-b4':
        return efn.EfficientNetB4(include_top=False, weights=weights,
                                  input_tensor=input_tensor,
                                  input_shape=[None, None, 3])
    elif architecture == 'efficientnet-b5':
        return efn.EfficientNetB5(include_top=False, weights=weights,
                                  input_tensor=input_tensor,
                                  input_shape=[None, None, 3])
    elif architecture == 'efficientnet-b6':
        return efn.EfficientNetB6(include_top=False, weights=weights,
                                  input_tensor=input_tensor,
                                  input_shape=[None, None, 3])
    elif architecture == 'efficientnet-b7':
        return efn.EfficientNetB7(include_top=False, weights=weights,
                                  input_tensor=input_tensor,
                                  input_shape=[None, None, 3])
    elif architecture == 'efficientnet-l2':
        return efn.EfficientNetL2(include_top=False, weights=weights,
                                  input_tensor=input_tensor,
                                  input_shape=[None, None, 3])
    else:
        raise ValueError("Argument architecture should be in "
                         "[efficientnet-b0, efficientnet-b1, "
                         "efficientnet-b2, efficientnet-b3, efficientnet-b4, efficientnet-b5, "
                         "efficientnet-b6, efficientnet-b7, efficientnet-l2] "
                         "but got %s" % architecture)
Example #4
def build_COVIDNet(num_classes=3, flatten=True, checkpoint='',args=None):
    
    if args.model == 'resnet50v2':
        base_model = ResNet50V2(include_top=False, weights='imagenet', input_shape=(args.img_size, args.img_size, 3))
        x = base_model.output
    
    if args.model =='mobilenetv2':
        base_model = MobileNetV2(include_top=False, weights='imagenet', input_shape=(args.img_size, args.img_size, 3))
        x = base_model.output
    
    if args.model == 'custom':
        base_model = covidnet(input_tensor=None, input_shape=(args.img_size, args.img_size, 3), classes=3)
        x = base_model.output
        
    if args.model == 'EfficientNet':
        import efficientnet.tfkeras as efn
        base_model = efn.EfficientNetB4(weights=None, include_top=True, input_shape=(args.img_size, args.img_size, 3), classes=3)
        x = base_model.output
    
    
    if flatten:
        x = Flatten()(x)
    else:
        # x = GlobalAveragePooling2D()(x)
        x = GlobalMaxPool2D()(x)
    
    if args.datapipeline == 'covidx':
        x = Dense(1024, activation='relu',kernel_regularizer=tf.keras.regularizers.l2(0.0001))(x)
    x = Dense(256, activation='relu',kernel_regularizer=tf.keras.regularizers.l2(0.0001))(x)
    # x = Dropout(0.2)(x)
    predictions = Dense(num_classes, activation='softmax',name=f'FC_{num_classes}')(x)
    model = Model(inputs=base_model.input, outputs=predictions)
    if len(checkpoint):
        model.load_weights(checkpoint)
    return model
Example #5
def build_model():
    inp = tf.keras.Input(shape=(DIM, DIM, 1))
    inp2 = tf.keras.layers.Concatenate()([inp, inp, inp])
    # base_model = efn.EfficientNetB4(weights='imagenet',include_top=False, input_shape=(DIM,DIM,3))
    base_model = efn.EfficientNetB4(weights=None,
                                    include_top=False,
                                    input_shape=(DIM, DIM, 3))
    base_model.load_weights('../input/tf-efficientnet-b4/efnB4.h5')

    x = base_model(inp2)
    x = tf.keras.layers.GlobalAveragePooling2D()(x)
    x1 = tf.keras.layers.Dense(168,
                               activation='softmax',
                               name='x1',
                               dtype='float32')(x)
    x2 = tf.keras.layers.Dense(11,
                               activation='softmax',
                               name='x2',
                               dtype='float32')(x)
    x3 = tf.keras.layers.Dense(7,
                               activation='softmax',
                               name='x3',
                               dtype='float32')(x)

    model = tf.keras.Model(inputs=inp, outputs=[x1, x2, x3])
    opt = tf.keras.optimizers.Adam(lr=0.00001)
    wgt = {'x1': 1.5, 'x2': 1.0, 'x3': 1.0}
    model.compile(loss='categorical_crossentropy',
                  optimizer=opt,
                  metrics=['categorical_accuracy'],
                  loss_weights=wgt)

    return model
Example #6
def create_cnn_model():

    model = keras.models.Sequential()
    pre_trained_model = efn.EfficientNetB4(input_shape=(*IMG_SIZE, 3),
                                           include_top=False,
                                           weights='noisy-student')

    # freeze the batch normalisation layers
    for layer in reversed(pre_trained_model.layers):
        if isinstance(layer, tf.keras.layers.BatchNormalization):
            layer.trainable = False
        else:
            layer.trainable = True

    model.add(pre_trained_model)
    model.add(layers.Dropout(0.4))
    model.add(layers.GlobalAveragePooling2D())
    model.add(layers.Dropout(0.4))
    model.add(layers.Dense(5, activation='softmax'))

    # add metrics
    metrics = [
        tf.keras.metrics.CategoricalAccuracy(name='accuracy'),
    ]

    optimizer = tf.keras.optimizers.Adam()
    loss = tf.keras.losses.CategoricalCrossentropy()

    model.compile(optimizer=optimizer, loss=loss, metrics=metrics)
    print(model.summary())
    return model
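Freezing only the BatchNormalization layers keeps their moving statistics fixed while the convolutional weights are fine-tuned, which tends to stabilize training with small batches. A minimal training sketch, assuming IMG_SIZE and the snippet's imports are in place and that train_ds/val_ds are tf.data datasets yielding (image, one-hot label) pairs (both names are placeholders, not from the original source):

# Hypothetical usage; train_ds and val_ds are assumed tf.data.Dataset objects.
model = create_cnn_model()
history = model.fit(train_ds, validation_data=val_ds, epochs=5)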
Example #7
def build_model():
    inp = tf.keras.Input(shape=(DIM, DIM, 1))
    inp2 = tf.keras.layers.Concatenate()([inp, inp, inp])
    # 3 channels
    base_model = efn.EfficientNetB4(weights=None,
                                    include_top=False,
                                    input_shape=(DIM, DIM, 3))
    base_model.load_weights(os.path.join(model_dir, weights_file))

    x = base_model(inp2)
    x = tf.keras.layers.GlobalAveragePooling2D()(x)
    x1 = tf.keras.layers.Dense(168,
                               activation='softmax',
                               name='x1',
                               dtype='float32')(x)
    # Explicit due to mixed precision setup at top
    x2 = tf.keras.layers.Dense(11,
                               activation='softmax',
                               name='x2',
                               dtype='float32')(x)
    x3 = tf.keras.layers.Dense(7,
                               activation='softmax',
                               name='x3',
                               dtype='float32')(x)

    model = tf.keras.Model(inputs=inp, outputs=[x1, x2, x3])
    opt = tf.keras.optimizers.Adam(lr=0.00001)
    wgt = {'x1': 1.5, 'x2': 1.0, 'x3': 1.0}
    # Due to x2 factor in recall eval for grapheme root
    model.compile(loss='categorical_crossentropy',
                  optimizer=opt,
                  metrics=['categorical_accuracy'],
                  loss_weights=wgt)

    return model
Example #8
def effnet_retinanet(num_classes, backbone='EfficientNetB0', inputs=None, modifier=None, **kwargs):
    """ Constructs a retinanet model using a resnet backbone.

    Args
        num_classes: Number of classes to predict.
        backbone: Which backbone to use (one of ('resnet50', 'resnet101', 'resnet152')).
        inputs: The inputs to the network (defaults to a Tensor of shape (None, None, 3)).
        modifier: A function handler which can modify the backbone before using it in retinanet (this can be used to freeze backbone layers for example).

    Returns
        RetinaNet model with a ResNet backbone.
    """
    # choose default input
    if inputs is None:
        if keras.backend.image_data_format() == 'channels_first':
            inputs = keras.layers.Input(shape=(3, None, None))
        else:
            # inputs = keras.layers.Input(shape=(224, 224, 3))
            inputs = keras.layers.Input(shape=(None, None, 3))

    # get last conv layer from the end of each block [28x28, 14x14, 7x7]
    if backbone == 'EfficientNetB0':
        model = efn.EfficientNetB0(input_tensor=inputs, include_top=False, weights=None)
    elif backbone == 'EfficientNetB1':
        model = efn.EfficientNetB1(input_tensor=inputs, include_top=False, weights=None)
    elif backbone == 'EfficientNetB2':
        model = efn.EfficientNetB2(input_tensor=inputs, include_top=False, weights=None)
    elif backbone == 'EfficientNetB3':
        model = efn.EfficientNetB3(input_tensor=inputs, include_top=False, weights=None)
    elif backbone == 'EfficientNetB4':
        model = efn.EfficientNetB4(input_tensor=inputs, include_top=False, weights=None)
    elif backbone == 'EfficientNetB5':
        model = efn.EfficientNetB5(input_tensor=inputs, include_top=False, weights=None)
    elif backbone == 'EfficientNetB6':
        model = efn.EfficientNetB6(input_tensor=inputs, include_top=False, weights=None)
    elif backbone == 'EfficientNetB7':
        model = efn.EfficientNetB7(input_tensor=inputs, include_top=False, weights=None)
    else:
        raise ValueError('Backbone (\'{}\') is invalid.'.format(backbone))

    layer_outputs = ['block4a_expand_activation', 'block6a_expand_activation', 'top_activation']

    layer_outputs = [
        model.get_layer(name=layer_outputs[0]).output,  # 28x28
        model.get_layer(name=layer_outputs[1]).output,  # 14x14
        model.get_layer(name=layer_outputs[2]).output,  # 7x7
    ]
    # create the EfficientNet backbone model
    model = keras.Model(inputs=inputs, outputs=layer_outputs, name=model.name)

    # invoke modifier if given
    if modifier:
        model = modifier(model)

    # create the full model
    return retinanet.retinanet(inputs=inputs, num_classes=num_classes, backbone_layers=model.outputs, **kwargs)
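A hypothetical call of the constructor above; it assumes the surrounding keras-retinanet-style project (including the retinanet module it relies on) is importable:

# Hypothetical usage: a B4-backed RetinaNet for 80 object classes.
model = effnet_retinanet(num_classes=80, backbone='EfficientNetB4')
model.summary()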
Example #9
def get_efficientnet_model(model_version):
    if model_version == "B0": return efn.EfficientNetB0(weights='imagenet')
    elif model_version == "B1": return efn.EfficientNetB1(weights='imagenet')
    elif model_version == "B2": return efn.EfficientNetB2(weights='imagenet')
    elif model_version == "B3": return efn.EfficientNetB3(weights='imagenet')
    elif model_version == "B4": return efn.EfficientNetB4(weights='imagenet')
    elif model_version == "B5": return efn.EfficientNetB5(weights='imagenet')
    elif model_version == "B6": return efn.EfficientNetB6(weights='imagenet')
    elif model_version == "B7": return efn.EfficientNetB7(weights='imagenet')
    else: return efn.EfficientNetB0(weights='imagenet')
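A usage sketch for the helper above, assuming efficientnet.tfkeras is imported as efn and exposes preprocess_input; the random image is just a stand-in:

import numpy as np
import efficientnet.tfkeras as efn
from tensorflow.keras.applications.imagenet_utils import decode_predictions

# Hypothetical usage: run the full ImageNet classifier on one (random) 380x380 image.
model = get_efficientnet_model("B4")
img = np.random.rand(1, 380, 380, 3).astype("float32") * 255.0
preds = model.predict(efn.preprocess_input(img))
print(decode_predictions(preds, top=3))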
Example #10
 def get_efficientnet(self):
     models_dict ={
         'b0': efn.EfficientNetB0(input_shape=self.shape,weights=None,include_top=False),
         'b1': efn.EfficientNetB1(input_shape=self.shape,weights=None,include_top=False),
         'b2': efn.EfficientNetB2(input_shape=self.shape,weights=None,include_top=False),
         'b3': efn.EfficientNetB3(input_shape=self.shape,weights=None,include_top=False),
         'b4': efn.EfficientNetB4(input_shape=self.shape,weights=None,include_top=False),
         'b5': efn.EfficientNetB5(input_shape=self.shape,weights=None,include_top=False),
         'b6': efn.EfficientNetB6(input_shape=self.shape,weights=None,include_top=False),
         'b7': efn.EfficientNetB7(input_shape=self.shape,weights=None,include_top=False)
     }
     return models_dict[self.model_class]
Example #11
    def __init__(self, hparams):
        super(InputEmbedding, self).__init__()
        self.hparams = hparams
        if hparams.base_model_name == 'InceptionV3':
            base_model = tf.keras.applications.InceptionV3(include_top=False,
                                                           weights='imagenet')
            base_model_layers = [layer.name for layer in base_model.layers]
        elif hparams.base_model_name == 'InceptionResNetV2':
            base_model = tf.keras.applications.InceptionResNetV2(
                include_top=False, weights='imagenet')
            base_model_layers = [layer.name for layer in base_model.layers]
        elif hparams.base_model_name == 'EfficientNetB0':
            base_model = efn.EfficientNetB0(include_top=False,
                                            weights='imagenet')
            base_model_layers = [layer.name for layer in base_model.layers]
        elif hparams.base_model_name == 'EfficientNetB1':
            base_model = efn.EfficientNetB1(include_top=False,
                                            weights='imagenet')
            base_model_layers = [layer.name for layer in base_model.layers]
        elif hparams.base_model_name == 'EfficientNetB2':
            base_model = efn.EfficientNetB2(include_top=False,
                                            weights='imagenet')
            base_model_layers = [layer.name for layer in base_model.layers]
        elif hparams.base_model_name == 'EfficientNetB3':
            base_model = efn.EfficientNetB3(include_top=False,
                                            weights='imagenet')
            base_model_layers = [layer.name for layer in base_model.layers]
        elif hparams.base_model_name == 'EfficientNetB4':
            base_model = efn.EfficientNetB4(include_top=False,
                                            weights='imagenet')
            base_model_layers = [layer.name for layer in base_model.layers]
        elif hparams.base_model_name == 'EfficientNetB5':
            base_model = efn.EfficientNetB5(include_top=False,
                                            weights='imagenet')
            base_model_layers = [layer.name for layer in base_model.layers]
        elif hparams.base_model_name == 'EfficientNetB6':
            base_model = efn.EfficientNetB6(include_top=False,
                                            weights='imagenet')
            base_model_layers = [layer.name for layer in base_model.layers]
        elif hparams.base_model_name == 'EfficientNetB7':
            base_model = efn.EfficientNetB7(include_top=False,
                                            weights='imagenet')
            base_model_layers = [layer.name for layer in base_model.layers]

        assert hparams.end_point in base_model_layers, "no {} layer in {}".format(
            hparams.end_point, hparams.base_model_name)
        conv_tower_output = base_model.get_layer(hparams.end_point).output
        self.conv_model = tf.keras.models.Model(inputs=base_model.input,
                                                outputs=conv_tower_output)
        self.conv_out_shape = self.conv_model.predict(
            np.array([np.zeros(hparams.image_shape)])).shape
        self.encode_cordinate = EncodeCordinate(
            input_shape=self.conv_out_shape)
Example #12
def create_b4(include_top=False,
              input_shape=None,
              input_tensor=None,
              weights="noisy-student"):
    """ネットワークの作成。"""
    import efficientnet.tfkeras as efn

    return efn.EfficientNetB4(
        include_top=include_top,
        input_shape=input_shape,
        input_tensor=input_tensor,
        weights=weights,
    )
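A sketch of how create_b4 might be used to assemble a small classifier; the 4-class head below is illustrative and not from the original code:

import tensorflow as tf

# Hypothetical usage: noisy-student backbone plus a pooled softmax head.
backbone = create_b4(input_shape=(380, 380, 3), weights="noisy-student")
x = tf.keras.layers.GlobalAveragePooling2D()(backbone.output)
outputs = tf.keras.layers.Dense(4, activation="softmax")(x)
model = tf.keras.Model(backbone.input, outputs)
model.compile(optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"])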
Example #13
def create_combined_model():
    pre_trained_efn = efn.EfficientNetB4(input_shape=(*IMG_SIZE, 3),
                                         include_top=False,
                                         weights='noisy-student')

    # freeze the batch normalisation layers
    for layer in reversed(pre_trained_efn.layers):
        if isinstance(layer, tf.keras.layers.BatchNormalization):
            layer.trainable = False
        else:
            layer.trainable = True

    x = pre_trained_efn.output
    x = layers.Dropout(0.25)(x)
    x = layers.GlobalAveragePooling2D()(x)
    x = layers.Dropout(0.25)(x)
    prediction1 = layers.Dense(5, activation='softmax')(x)

    model_efn = Model(inputs=pre_trained_efn.input, outputs=prediction1)

    pre_trained_resnet = tf.keras.applications.ResNet50V2(
        input_shape=(*IMG_SIZE, 3), include_top=False, weights='imagenet')

    # freeze the batch normalisation layers
    for layer in reversed(pre_trained_resnet.layers):
        if isinstance(layer, tf.keras.layers.BatchNormalization):
            layer.trainable = False
        else:
            layer.trainable = True

    x = pre_trained_resnet.output
    x = layers.Dropout(0.25)(x)
    x = layers.GlobalAveragePooling2D()(x)
    x = layers.Dropout(0.25)(x)
    prediction2 = layers.Dense(5, activation='softmax')(x)

    model_res = Model(inputs=pre_trained_resnet.input, outputs=prediction2)

    merged = layers.Concatenate()([model_efn.output, model_res.output])

    merged = layers.Flatten()(merged)
    merged = layers.Dropout(0.5)(merged)
    merged = layers.Dense(1024, activation='relu')(merged)
    merged = layers.Dense(5, activation='softmax')(merged)

    optimizer = tf.keras.optimizers.Adam()
    loss = tf.keras.losses.CategoricalCrossentropy()
    model_fusion = Model([model_efn.input, model_res.input], merged)
    model_fusion.compile(optimizer=optimizer, loss=loss, metrics='accuracy')
    print(model_fusion.summary())
    return model_fusion
Example #14
 def __init__(self, outputs, trainable_blocks=[]):
     super(DenseEfficientNet, self).__init__()
     self.eff_net = efn.EfficientNetB4(
         include_top=False, 
         input_shape=(300, 300, 3), 
         weights='imagenet',
         pooling='avg'
     )
     for l in self.eff_net.layers:
         l.trainable = any([(b in l.name) for b in trainable_blocks]) or isinstance(l, tf.keras.layers.BatchNormalization)
     self.b_norm = tf.keras.layers.BatchNormalization()
     self.dense_1 = tf.keras.layers.Dense(2048, activation='tanh')
     self.dense_2 = tf.keras.layers.Dense(outputs)
     self.softmax = tf.keras.layers.Softmax()
Example #15
    def getEffTFModel(self, n=0):
        modelInput = tf.keras.Input(batch_input_shape=(None, 5, self.config['net_size'], self.config['net_size'], 3))
        modelInput0, modelInput1, modelInput2, modelInput3, modelInput4 = tf.split(modelInput, [1, 1, 1, 1, 1], 1)
        x0 = tf.squeeze(tf.keras.layers.Lambda(lambda x0: x0)(modelInput0))
        x1 = tf.squeeze(tf.keras.layers.Lambda(lambda x1: x1)(modelInput1))
        x2 = tf.squeeze(tf.keras.layers.Lambda(lambda x2: x2)(modelInput2))
        x3 = tf.squeeze(tf.keras.layers.Lambda(lambda x3: x3)(modelInput3))
        x4 = tf.squeeze(tf.keras.layers.Lambda(lambda x4: x4)(modelInput4))
        net = ''
        if n % 10 == 0:
            net = efn.EfficientNetB0(include_top=False, weights='imagenet', input_shape=(self.config['net_size'], self.config['net_size'], 3), pooling='avg')
        elif n % 10 == 1:
            net = efn.EfficientNetB1(include_top=False, weights='imagenet', input_shape=(self.config['net_size'], self.config['net_size'], 3), pooling='avg')
        elif n % 10 == 2:
            net = efn.EfficientNetB2(include_top=False, weights='imagenet', input_shape=(self.config['net_size'], self.config['net_size'], 3), pooling='avg')
        elif n % 10 == 3:
            net = efn.EfficientNetB3(include_top=False, weights='imagenet', input_shape=(self.config['net_size'], self.config['net_size'], 3), pooling='avg')
        elif n % 10 == 4:
            net = efn.EfficientNetB4(include_top=False, weights='imagenet', input_shape=(self.config['net_size'], self.config['net_size'], 3), pooling='avg')

        activation = tf.keras.layers.LeakyReLU()

        self.config['rnn_size'] = 256
        ret0 = net(x0)
        ret0 = Dense(self.config['rnn_size'], activation=activation)(ret0)
        ret0 = tf.expand_dims(ret0, axis=1)
        ret0 = self.transformer(ret0)
        ret1 = net(x1)
        ret1 = Dense(self.config['rnn_size'], activation=activation)(ret1)
        ret1 = tf.expand_dims(ret1, axis=1)
        ret1 = self.transformer(ret1)
        ret2 = net(x2)
        ret2 = Dense(self.config['rnn_size'], activation=activation)(ret2)
        ret2 = tf.expand_dims(ret2, axis=1)
        ret2 = self.transformer(ret2)
        ret3 = net(x3)
        ret3 = Dense(self.config['rnn_size'], activation=activation)(ret3)
        ret3 = tf.expand_dims(ret3, axis=1)
        ret3 = self.transformer(ret3)
        ret4 = net(x4)
        ret4 = Dense(self.config['rnn_size'], activation=activation)(ret4)
        ret4 = tf.expand_dims(ret4, axis=1)
        ret4 = self.transformer(ret4)
        ret = tf.concat([ret0, ret1, ret2, ret3, ret4], axis=1)
        print(ret)
        x = tf.keras.layers.Dense(self.config['rnn_size'], activation=activation)(ret)
        model = tf.keras.Model(modelInput, x)
        return model
Example #16
def predict_caption_for_image(image):
    global encoder
    global decoder
    global image_features_extract_model
    BUFFER_SIZE = 1000
    embedding_dim = 256
    units = 512
    vocab_size = 25243
    max_length = 47
    # Shape of the feature map extracted from EfficientNetB4 is (121, 1792)
    # These two variables represent that shape
    features_shape = 1792
    attention_features_shape = 121
    # Feel free to change these parameters according to your system's configuration
    with open('./saved_tokenizer/tokenizer.json') as f:
        data = json.load(f)
    tokenizer = tf.keras.preprocessing.text.tokenizer_from_json(data)

    image_model = eff.EfficientNetB4(weights='noisy-student',
                                     include_top=False,
                                     input_shape=(331, 331, 3))
    new_input = image_model.input
    hidden_layer = image_model.layers[-1].output

    image_features_extract_model = tf.keras.Model(new_input, hidden_layer)

    encoder = CNN_Encoder(embedding_dim)
    decoder = RNN_Decoder(embedding_dim, units, vocab_size)

    optimizer = tf.keras.optimizers.Adam()

    checkpoint_path = "./checkpoints_small/train"
    ckpt = tf.train.Checkpoint(encoder=encoder,
                               decoder=decoder,
                               optimizer=optimizer)
    ckpt_manager = tf.train.CheckpointManager(ckpt,
                                              checkpoint_path,
                                              max_to_keep=5)

    if ckpt_manager.latest_checkpoint:
        # restoring the latest checkpoint in checkpoint_path
        ckpt.restore(ckpt_manager.latest_checkpoint)

    results, attention_plot = predict_caption(image, max_length, encoder,
                                              decoder,
                                              image_features_extract_model,
                                              tokenizer)
    return ' '.join(results)
Example #17
def create_model(input_shape, c, wbifpn=False):
    '''model'''
    c1, c2, c3, c4 = c
    effnet = efn.EfficientNetB4(input_shape=input_shape,
                                weights=None,
                                include_top=False)
    p4 = effnet.get_layer('block2a_activation').output
    p5 = effnet.get_layer('block3a_activation').output
    p6 = effnet.get_layer('block4a_activation').output
    p7 = effnet.get_layer('block7a_activation').output
    features = (p7, p6, p5, p4)
    features = build_fpn(features, c1, wbifpn)
    features = build_fpn(features, c2, wbifpn)
    features = build_fpn(features, c3, wbifpn)
    features = build_fpn(features, c4, wbifpn)
    features = list(features)
    for i in range(1, 4):
        feature_curr = features[i]
        feature_past = features[i - 1]
        feature_past_up = UpSampling2D((2, 2))(feature_past)
        feature_past_up = Conv2D(
            c4, (3, 3),
            padding='same',
            activation='relu',
            kernel_initializer='glorot_uniform')(feature_past_up)
        if wbifpn:
            feature_final = Fuse(name='final{}'.format(str(i)))(
                [feature_curr, feature_past_up])
        else:
            feature_final = Add(name='final{}'.format(str(i)))(
                [feature_curr, feature_past_up])
        features[i] = feature_final
    if stride_obj == 2:
        features[-1] = UpSampling2D((2, 2))(features[-1])
        features[-1] = Conv2D(128, (3, 3),
                              activation='relu',
                              padding='same',
                              kernel_initializer='glorot_uniform')(
                                  features[-1])
    out = Conv2D(5, (3, 3),
                 activation='sigmoid',
                 kernel_initializer='glorot_uniform',
                 padding='same')(features[-1])
    prediction_model = tf.keras.models.Model(inputs=[effnet.input],
                                             outputs=out)
    prediction_model.load_weights(
        '/home/b170007ec/Programs/Manoj/DETECTOR/obj_model3.h5')
    return prediction_model
Example #18
def generate_base_model():
    conv_base = enet.EfficientNetB4(
        include_top=False, input_shape=(380, 380, 3), pooling="avg", weights="noisy-student",
    )
    conv_base.trainable = False

    x = conv_base.output
    x = Dropout(0.8)(x)
    preds = Dense(4, activation="sigmoid")(x)
    model = Model(inputs=conv_base.input, outputs=preds)

    model.compile(
        optimizer=keras.optimizers.Nadam(),
        loss="binary_crossentropy",
        metrics=[soft_acc_multi_output],
    )

    return model
Example #19
 def model_chooser(self, classes=2, weights=None):
     print("Model selection started.")
     name = self.model_name
     if(name=='C0'):
         self.model = efn.EfficientNetB0(include_top=True, weights=weights, classes=classes)
     elif(name=='C1'):
         self.model = efn.EfficientNetB1(include_top=True, weights=weights, classes=classes)
     elif(name=='C2'):
         self.model = efn.EfficientNetB2(include_top=True, weights=weights, classes=classes)
     elif(name=='C3'):
         self.model = efn.EfficientNetB3(include_top=True, weights=weights, classes=classes)
     elif(name=='C4'):
         self.model = efn.EfficientNetB4(include_top=True, weights=weights, classes=classes)
     elif(name=='C5'):
         self.model = efn.EfficientNetB5(include_top=True, weights=weights, classes=classes)
     
     if(classes==2):
         self.model.compile(optimizer="adam", loss="binary_crossentropy", metrics = ['acc'])
     elif(classes>2):
         self.model.compile(optimizer="adam", loss="categorical_crossentropy", metrics = ['acc'])      
Example #20
def create_model(input_shape ,wbifpn=False):
    '''model'''
    effnet = efn.EfficientNetB4(input_shape=input_shape,weights=None,include_top = False)
    x = up_image(effnet.output,c = 256)
    x = up_image(x,c = 128)
    x = up_image(x,c = 64)
    x = Conv2D(32,(2,2),kernel_initializer = 'glorot_uniform',padding='same',activation='relu')(x)
    x = Conv2D(3,(1,1),kernel_initializer='glorot_uniform',padding='same',activation='sigmoid')(x)
    #selfsupervision weights 
    arbi = Model(effnet.input,x)
    arbi.load_weights('/home/b170007ec/Programs/Manoj/DAE/model2_dae.h5')
    p4 = effnet.get_layer('block2a_activation').output
    p5 = effnet.get_layer('block3a_activation').output
    p6 = effnet.get_layer('block4a_activation').output
    p7 = effnet.get_layer('block7a_activation').output
    features = (p7,p6,p5,p4)
    features = build_fpn(features,8,wbifpn)
    features = build_fpn(features,16,wbifpn)
    features = build_fpn(features,32,wbifpn)
    features = build_fpn(features,64,wbifpn)
    features = list(features)
    for i in range(1,4):
        feature_curr = features[i]
        feature_past = features[i-1]
        feature_past_up = UpSampling2D((2,2))(feature_past)
        feature_past_up = Conv2D(64,(3,3),padding='same',activation='relu',kernel_initializer='glorot_uniform')(feature_past_up)
        if wbifpn:
            feature_final = Fuse(name='final{}'.format(str(i)))([feature_curr,feature_past_up])
        else:
            feature_final = Add(name='final{}'.format(str(i)))([feature_curr,feature_past_up])
        features[i] = feature_final
    if stride == 2:
        features[-1] = UpSampling2D((2,2))(features[-1])
        features[-1] = Conv2D(128,(3,3),activation='relu',padding='same',kernel_initializer='glorot_uniform')(features[-1])
    out = Conv2D(5,(3,3),activation='sigmoid',kernel_initializer='glorot_uniform',padding='same')(features[-1])
    zeros = tf.expand_dims(tf.zeros_like(out[...,0]),axis=-1)
    out_concat = tf.concat([zeros,out],axis = -1)
    prediction_model=tf.keras.models.Model(inputs=[effnet.input],outputs=out)
    model = Model(inputs = [effnet.input],outputs = out_concat)
    return model,prediction_model
Example #21
def create_base_model(base_model_name, pretrained=True, IMAGE_SIZE=[300, 300]):
    if pretrained is False:
        weights = None
    else:
        weights = "imagenet"
    if base_model_name == 'B0':
        base = efn.EfficientNetB0(weights=weights,
                                  include_top=False,
                                  input_shape=[*IMAGE_SIZE, 3])
    elif base_model_name == 'B1':
        base = efn.EfficientNetB1(weights=weights,
                                  include_top=False,
                                  input_shape=[*IMAGE_SIZE, 3])
    elif base_model_name == 'B2':
        base = efn.EfficientNetB2(weights=weights,
                                  include_top=False,
                                  input_shape=[*IMAGE_SIZE, 3])
    elif base_model_name == 'B3':
        base = efn.EfficientNetB3(weights=weights,
                                  include_top=False,
                                  input_shape=[*IMAGE_SIZE, 3])
    elif base_model_name == 'B4':
        base = efn.EfficientNetB4(weights=weights,
                                  include_top=False,
                                  input_shape=[*IMAGE_SIZE, 3])
    elif base_model_name == 'B5':
        base = efn.EfficientNetB5(weights=weights,
                                  include_top=False,
                                  input_shape=[*IMAGE_SIZE, 3])
    elif base_model_name == 'B6':
        base = efn.EfficientNetB6(weights=weights,
                                  include_top=False,
                                  input_shape=[*IMAGE_SIZE, 3])
    elif base_model_name == 'B7':
        base = efn.EfficientNetB7(weights=weights,
                                  include_top=False,
                                  input_shape=[*IMAGE_SIZE, 3])
    base = remove_dropout(base)
    base.trainable = True
    return base
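A usage sketch for create_base_model; it assumes the remove_dropout helper referenced above is importable, and the pooling/head shown is an assumption rather than part of the source:

import tensorflow as tf

# Hypothetical usage: B4 backbone at 380x380 with a 5-class softmax head.
base = create_base_model('B4', pretrained=True, IMAGE_SIZE=[380, 380])
x = tf.keras.layers.GlobalAveragePooling2D()(base.output)
outputs = tf.keras.layers.Dense(5, activation='softmax')(x)
model = tf.keras.Model(base.input, outputs)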
Example #22
def build_model(ALPHA, GAMMA):
    encoder = efn.EfficientNetB4(include_top=False,input_shape = (INPUT_SIZE,INPUT_SIZE,3))    
    #x_ = up_image(encoder.output,c = 256)
    #x_ = up_image(x_,c = 128)
    #x_ = up_image(x_,c = 64)
    #x_ = Conv2D(32,(2,2),kernel_initializer = 'glorot_uniform',padding='same',activation='relu')(x_)
    #x_ = Conv2D(3,(1,1),kernel_initializer='glorot_uniform',padding='same',activation='sigmoid')(x_)
    #arbi = Model(encoder.input,x_)
    #arbi.load_weights('/home/b170007ec/Programs/Manoj/DAE/model2_dae.h5')
    x = encoder.output
    x = Dropout(0.5)(x)
    x = Conv2D(512,(1,1),padding = 'same',kernel_initializer = 'glorot_uniform',activation='relu')(x)
    x = MaxPool2D((2,2))(x)
    x = Conv2D(256,(3,3),padding = 'same',kernel_initializer = 'glorot_uniform',activation='relu')(x)
    x = MaxPool2D((2,2))(x)
    x = Conv2D(128,(1,1),padding = 'same',kernel_initializer = 'glorot_uniform',activation='relu')(x)
    x = MaxPool2D((2,2))(x)
    x = Flatten()(x)
    x = Dense(3,activation='softmax')(x)
    model = Model(encoder.input,x)
    model.compile(optimizer=Adam(lr=LR),loss=focal_loss(ALPHA,GAMMA),metrics = ['acc',metric])
    return model
Example #23
def load_b4():
    cnn_net = efn.EfficientNetB4(weights='imagenet',include_top=False,input_shape=(380, 380, 3))
    model = build_model(cnn_net)
    model.load_weights('models/effnet_b4.h5')
    return model
Example #24
    def load_model(self):
        if not os.path.isfile('efn_b4.h5'):
            # base_model = efn.EfficientNetB4(weights='imagenet', include_top=False, input_shape=(self.input_shape[0], self.input_shape[1], 3), classes=self.num_classes)
            base_model = efn.EfficientNetB4(weights=None,
                                            include_top=True,
                                            input_shape=(self.input_shape[0],
                                                         self.input_shape[1],
                                                         3),
                                            classes=self.num_classes)
            if self.distributed_training is True and hvd.rank() == 0:
                base_model.save('efn_b4.h5')
        else:
            base_model = tf.keras.models.load_model('efn_b4.h5', compile=False)
        print(base_model.summary())
        if not self.use_noise:
            # x = base_model.output
            # x = tf.keras.layers.GlobalAveragePooling2D()(x)
            # x = tf.keras.layers.Dropout(0.3)(x)
            # predictions = tf.keras.layers.Dense(self.num_classes, activation='softmax')(x)
            # model = tf.keras.models.Model(inputs = base_model.input, outputs = predictions)
            # model = tf.keras.models.Model(inputs = base_model.input, outputs = base_model.outputs)
            model = tf.keras.models.Sequential()
            # model.add(tf.keras.layers.Lambda(lambda x: tf.repeat(x, 3, axis=-1), input_shape=self.input_shape))  # commented out since tf.repeat does not exist before 1.15
            model.add(
                tf.keras.layers.Lambda(
                    lambda x: tf.keras.backend.repeat_elements(x, 3, axis=-1),
                    input_shape=self.input_shape))
            model.add(base_model)
            # model.add(tf.keras.layers.GlobalAveragePooling2D())
            # model.add(tf.keras.layers.Dropout(0.3))
            # model.add(tf.keras.layers.Dense(self.num_classes, activation='softmax'))
        else:
            model = tf.keras.models.Sequential()
            # model.add(tf.keras.layers.Lambda(lambda x: tf.repeat(x, 3, axis=-1), input_shape=self.input_shape))  # commented out since tf.repeat does not exist before 1.15
            model.add(
                tf.keras.layers.Lambda(
                    lambda x: tf.keras.backend.repeat_elements(x, 3, axis=-1),
                    input_shape=self.input_shape))
            model.add(
                tf.keras.layers.GaussianNoise(0.5,
                                              input_shape=self.input_shape))
            model.add(base_model)
            # model.add(tf.keras.layers.GlobalAveragePooling2D(name="gap"))
            # model.add(tf.keras.layers.Dropout(0.3))
            # model.add(tf.keras.layers.Dense(self.num_classes, activation="softmax", name="fc_out"))

        if self.distributed_training is True:
            # opt = K.optimizers.SGD(0.001 * hvd.size())
            # opt = tf.keras.optimizers.Adam(hvd.size())
            opt = tf.keras.optimizers.Adadelta(1.0 * hvd.size())
            # Horovod: add Horovod Distributed Optimizer.
            opt = hvd.DistributedOptimizer(opt)
        else:
            opt = tf.keras.optimizers.Adam()

        if self.multi_gpu_training is True:
            # probe the number of GPUs
            from tensorflow.python.client import device_lib
            local_device_protos = device_lib.list_local_devices()
            gpu_list = [
                x.name for x in local_device_protos if x.device_type == 'GPU'
            ]
            self._n_gpus = len(gpu_list)
            print('Parallalizing the model on %d GPUs...' % self._n_gpus)
            parallel_model = tf.keras.utils.multi_gpu_model(model,
                                                            gpus=self._n_gpus)
            parallel_model.compile(
                loss=tf.keras.losses.sparse_categorical_crossentropy,
                optimizer=opt,
                metrics=['sparse_categorical_accuracy'])
            self._multi_gpu_model = parallel_model
            self.model = model
            print(parallel_model.summary())
        else:
            model.compile(loss=tf.keras.losses.sparse_categorical_crossentropy,
                          optimizer=opt,
                          metrics=['sparse_categorical_accuracy'])
            self.model = model
            if self.distributed_training is True:
                if hvd.rank() == 0:
                    print(model.summary())
            else:
                print(model.summary())
Example #25
def load_backbone(backbone_type="resnet50",
                  backbone_outputs=('C3', 'C4', 'C5', 'P6', 'P7'),
                  num_features=256):
    global BACKBONE_LAYERS
    inputs = Input((None, None, 3), name='images')
    if backbone_type.lower() == 'resnet50':
        preprocess = BackBonePreProcess(rgb=False,
                                        mean_shift=True,
                                        normalize=0)(inputs)
        model = ResNet50(input_tensor=preprocess, include_top=False)
    elif backbone_type.lower() == 'resnet50v2':
        preprocess = BackBonePreProcess(rgb=True, mean_shift=True,
                                        normalize=2)(inputs)
        resnet50v2, _ = Classifiers.get('resnet50v2')
        model = resnet50v2(input_tensor=preprocess,
                           include_top=False,
                           weights='imagenet')
    elif backbone_type.lower() == "resnet101v2":
        preprocess = BackBonePreProcess(rgb=True,
                                        mean_shift=False,
                                        normalize=2)(inputs)
        model = ResNet101V2(input_tensor=preprocess,
                            include_top=False,
                            backend=tf.keras.backend,
                            layers=tf.keras.layers,
                            models=tf.keras.models,
                            utils=tf.keras.utils)
    elif backbone_type.lower() == 'resnext50':
        preprocess = BackBonePreProcess(rgb=True, mean_shift=True,
                                        normalize=2)(inputs)
        model = ResNeXt50(input_tensor=preprocess, include_top=False)
    elif backbone_type.lower() == "seresnet50":
        preprocess = BackBonePreProcess(rgb=True, mean_shift=True,
                                        normalize=3)(inputs)
        seresnet50, _ = Classifiers.get('seresnet50')
        model = seresnet50(input_tensor=preprocess,
                           original_input=inputs,
                           include_top=False,
                           weights='imagenet')
    elif backbone_type.lower() == "seresnet34":
        preprocess = BackBonePreProcess(rgb=True,
                                        mean_shift=False,
                                        normalize=0)(inputs)
        seresnet34, _ = Classifiers.get('seresnet34')
        model = seresnet34(input_tensor=preprocess,
                           original_input=inputs,
                           include_top=False,
                           weights='imagenet')
    elif backbone_type.lower() == "seresnext50":
        preprocess = BackBonePreProcess(rgb=True, mean_shift=True,
                                        normalize=3)(inputs)
        seresnext50, _ = Classifiers.get('seresnext50')
        model = seresnext50(input_tensor=preprocess,
                            original_input=inputs,
                            include_top=False,
                            weights='imagenet')
    elif backbone_type.lower() == "vgg16":
        preprocess = BackBonePreProcess(rgb=False,
                                        mean_shift=True,
                                        normalize=0)(inputs)
        model = VGG16(input_tensor=preprocess, include_top=False)
    elif backbone_type.lower() == "mobilenet":
        preprocess = BackBonePreProcess(rgb=False,
                                        mean_shift=False,
                                        normalize=2)(inputs)
        model = MobileNet(input_tensor=preprocess,
                          include_top=False,
                          alpha=1.0)
    elif backbone_type.lower() == 'efficientnetb2':
        preprocess = BackBonePreProcess(rgb=True, mean_shift=True,
                                        normalize=3)(inputs)
        model = efn.EfficientNetB2(input_tensor=preprocess,
                                   include_top=False,
                                   weights='imagenet')
    elif backbone_type.lower() == 'efficientnetb3':
        preprocess = BackBonePreProcess(rgb=True, mean_shift=True,
                                        normalize=3)(inputs)
        model = efn.EfficientNetB3(input_tensor=preprocess,
                                   include_top=False,
                                   weights='imagenet')
    elif backbone_type.lower() == 'efficientnetb4':
        preprocess = BackBonePreProcess(rgb=True, mean_shift=True,
                                        normalize=3)(inputs)
        model = efn.EfficientNetB4(input_tensor=preprocess,
                                   include_top=False,
                                   weights='imagenet')
    else:
        raise NotImplementedError(
            f"backbone_type은 {BACKBONE_LAYERS.keys()} 중에서 하나가 되어야 합니다.")
    model.trainable = False

    # Fetch the outputs of the selected block layers
    features = []
    for key, layer_name in BACKBONE_LAYERS[backbone_type.lower()].items():
        if key in backbone_outputs:
            layer_tensor = model.get_layer(layer_name).output
            features.append(Identity(name=key)(layer_tensor))

    if backbone_type.lower() == "mobilenet":
        # Extra Layer for Feature Extracting
        Z6 = ZeroPadding2D(((0, 1), (0, 1)),
                           name=f'P6_zeropadding')(features[-1])
        P6 = Conv2D(num_features, (3, 3),
                    strides=(2, 2),
                    padding='valid',
                    activation='relu',
                    name=f'P6_conv')(Z6)
        if 'P6' in backbone_outputs:
            features.append(Identity(name='P6')(P6))
        G6 = GroupNormalization(name=f'P6_norm')(P6)
        Z7 = ZeroPadding2D(((0, 1), (0, 1)), name=f'P7_zeropadding')(G6)
        P7 = Conv2D(num_features, (3, 3),
                    strides=(2, 2),
                    padding='valid',
                    activation='relu',
                    name=f'P7_conv')(Z7)
        if 'P7' in backbone_outputs:
            features.append(Identity(name=f'P7')(P7))
    else:
        P6 = Conv2D(num_features, (3, 3),
                    strides=(2, 2),
                    padding='same',
                    activation='relu',
                    name=f'P6_conv')(features[-1])
        if 'P6' in backbone_outputs:
            features.append(Identity(name=f'P6')(P6))
        G6 = GroupNormalization(name=f'P6_norm')(P6)
        P7 = Conv2D(num_features, (3, 3),
                    strides=(2, 2),
                    padding='same',
                    activation='relu',
                    name=f'P7_conv')(G6)
        if 'P7' in backbone_outputs:
            features.append(Identity(name=f'P7')(P7))

    return Model(inputs, features, name=backbone_type)
Example #26
# for i in x_train:
#     plt.imshow(i)
#     plt.show()

# make sure they are RGB
#y = x_train[0]
# y[:,:,0] = 0
# plt.imshow(y)

# load the model
# include_top=False removes the ImageNet classification head, since there are only 2 categories here (not 1000)
# predefine the image shape
Image_size = (224, 224, 3)

efnet = efc.EfficientNetB4(input_shape=Image_size,
                           weights='imagenet',
                           include_top=False)

#build the model architecture again
x = efnet.input
y = efnet.output  # backbone feature map; the 2-way softmax head is added below
y = Flatten(name='flatten')(y)
# batch-normalize the flattened features to speed up training
y = BatchNormalization()(y)
y = Dense(32, activation='relu', name='FC1')(y)
y = Dropout(0.2)(y)
#y = Dense(512, activation='relu', name = 'FC2')(y)
#y = Dropout(0.5)(y)
y = Dense(2, activation='softmax', name='prediction')(y)
model = Model(inputs=x, outputs=y)
    def build(name, width, height, depth, n_classes, reg=0.8):
        """
        Args:
            name: name of the network
            width: width of the images
            height: height of the images
            depth: number of channels of the images
            n_classes: number of output classes
            reg: regularization value
        """

        # If Keras backend is TensorFlow
        inputShape = (height, width, depth)
        chanDim = -1

        # If Keras backend is Theano
        if K.image_data_format() == "channels_first":
            inputShape = (depth, height, width)
            chanDim = 1

        # Define the base model architecture
        if name == 'EfficientNetB0':
            base_model = efn.EfficientNetB0(weights='imagenet',
                                            include_top=False,
                                            input_shape=inputShape)
        elif name == 'EfficientNetB1':
            base_model = efn.EfficientNetB1(weights='imagenet',
                                            include_top=False,
                                            input_shape=inputShape)
        elif name == 'EfficientNetB2':
            base_model = efn.EfficientNetB2(weights='imagenet',
                                            include_top=False,
                                            input_shape=inputShape)
        elif name == 'EfficientNetB3':
            base_model = efn.EfficientNetB3(weights='imagenet',
                                            include_top=False,
                                            input_shape=inputShape)
        elif name == 'EfficientNetB4':
            base_model = efn.EfficientNetB4(weights='imagenet',
                                            include_top=False,
                                            input_shape=inputShape)
        elif name == 'EfficientNetB5':
            base_model = efn.EfficientNetB5(weights='imagenet',
                                            include_top=False,
                                            input_shape=inputShape)
        elif name == 'EfficientNetB6':
            base_model = efn.EfficientNetB6(weights='imagenet',
                                            include_top=False,
                                            input_shape=inputShape)
        elif name == 'ResNet50':
            base_model = ResNet50(weights='imagenet',
                                  include_top=False,
                                  input_shape=inputShape)
        elif name == 'DenseNet121':
            base_model = DenseNet121(weights='imagenet',
                                     include_top=False,
                                     input_shape=inputShape)

        #x1 = GlobalMaxPooling2D()(base_model.output)    # Compute the max pooling of the base model output
        #x2 = GlobalAveragePooling2D()(base_model.output)    # Compute the average pooling of the base model output
        #x3 = Flatten()(base_model.output)    # Flatten the base model output

        #x = Concatenate(axis=-1)([x1, x2, x3])

        x = GlobalAveragePooling2D()(base_model.output)
        x = Dropout(0.5)(x)
        """
        # First Dense => Relu => BN => DO
        fc_layer_1 = Dense(512, kernel_regularizer=l2(reg))(x)
        activation_1 = Activation('relu')(fc_layer_1)
        batch_norm_1 = BatchNormalization(axis=-1)(activation_1)
        dropout_1 = Dropout(0.5)(batch_norm_1)
        
        # First Dense => Relu => BN => DO
        fc_layer_2 = Dense(256, kernel_regularizer=l2(reg))(dropout_1)
        activation_2 = Activation('relu')(fc_layer_2)
        batch_norm_2 = BatchNormalization(axis=-1)(activation_2)
        dropout_2 = Dropout(0.5)(batch_norm_2)
        
        # Add the output layer
        output = Dense(n_classes, kernel_regularizer=l2(reg), activation='softmax')(dropout_2)
        """
        output = Dense(n_classes,
                       kernel_regularizer=l2(reg),
                       activation='softmax')(x)

        # Create the model
        model = Model(inputs=base_model.inputs, outputs=output)

        return model
def get_model(arch="b3", pretrained="imagenet", image_size=(128, 128, 3)):
    image_input = tf.keras.layers.Input(shape=image_size,
                                        dtype='float32',
                                        name='image_input')
    if arch.startswith("b2"):
        base_model = efn.EfficientNetB2(weights=pretrained,
                                        input_shape=image_size,
                                        include_top=False)
    elif arch.startswith("b3"):
        base_model = efn.EfficientNetB3(weights=pretrained,
                                        input_shape=image_size,
                                        include_top=False)
    elif arch.startswith("b4"):
        base_model = efn.EfficientNetB4(weights=pretrained,
                                        input_shape=image_size,
                                        include_top=False)
    elif arch.startswith("b5"):
        base_model = efn.EfficientNetB5(weights=pretrained,
                                        input_shape=image_size,
                                        include_top=False)
    elif arch.startswith("b6"):
        base_model = efn.EfficientNetB6(weights=pretrained,
                                        input_shape=image_size,
                                        include_top=False)
    elif arch.startswith("b7"):
        base_model = efn.EfficientNetB7(weights=pretrained,
                                        input_shape=image_size,
                                        include_top=False)
    else:
        raise ValueError("Unknown arch!")
    base_model.trainable = True
    tmp = base_model(image_input)
    hidden_dim = base_model.output_shape[-1]
    tmp = tf.keras.layers.GlobalAveragePooling2D()(tmp)
    tmp = tf.keras.layers.Dropout(0.5)(tmp)
    if arch.endswith("g"):
        prediction_0 = tf.keras.layers.Dense(CLASS_COUNTS[0],
                                             activation='softmax',
                                             name="root",
                                             dtype='float32')(SELayer(
                                                 hidden_dim, 8)(tmp))
        prediction_1 = tf.keras.layers.Dense(CLASS_COUNTS[1],
                                             activation='softmax',
                                             name="vowel",
                                             dtype='float32')(SELayer(
                                                 hidden_dim, 8)(tmp))
        prediction_2 = tf.keras.layers.Dense(CLASS_COUNTS[2],
                                             activation='softmax',
                                             name="consonant",
                                             dtype='float32')(SELayer(
                                                 hidden_dim, 8)(tmp))
    else:
        prediction_0 = tf.keras.layers.Dense(CLASS_COUNTS[0],
                                             activation='softmax',
                                             name="root",
                                             dtype='float32')(tmp)
        prediction_1 = tf.keras.layers.Dense(CLASS_COUNTS[1],
                                             activation='softmax',
                                             name="vowel",
                                             dtype='float32')(tmp)
        prediction_2 = tf.keras.layers.Dense(CLASS_COUNTS[2],
                                             activation='softmax',
                                             name="consonant",
                                             dtype='float32')(tmp)
    prediction = tf.keras.layers.Concatenate(axis=-1)(
        [prediction_0, prediction_1, prediction_2])
    return tf.keras.Model(image_input, prediction)
                X.append(x / 255)
                Y.append(y / 255)
            else:
                x, y = image_blackout(image, 120, 120)
                X.append(x / 255)
                Y.append(y / 255)
        return np.asarray(X), np.asarray(Y)


#################################
#####ARCITECTURE:
#################################

#efficientnet encoder
encoder = efn.EfficientNetB4(include_top=False,
                             weights='imagenet',
                             input_shape=(INPUT_SIZE, INPUT_SIZE, 3))


#supporting blocks
def up_image(input_1, c=None):
    x = UpSampling2D((2, 2))(input_1)
    x = Conv2D(c // 4, (1, 1),
               kernel_initializer='glorot_uniform',
               activation='relu')(x)
    return x


#architecture
input_1 = encoder.output
x = up_image(input_1, c=256)
X_datagen.fit(x_train, augment=True, seed=13)
Y_datagen.fit(y_train, augment=True, seed=13)
test_datagen.fit(X_test, augment=True, seed=13)
X_datagen_val.fit(x_test, augment=True, seed=13)
Y_datagen_val.fit(y_test, augment=True, seed=13)

X_train_augmented = X_datagen.flow(x_train,  batch_size=15, shuffle=True, seed=13)
Y_train_augmented = Y_datagen.flow(y_train,  batch_size=15, shuffle=True, seed=13)
test_augmented = test_datagen.flow(X_test, shuffle=False, seed=13)
X_train_augmented_val = X_datagen_val.flow(x_test,  batch_size=15, shuffle=True, seed=13)
Y_train_augmented_val = Y_datagen_val.flow(y_test,  batch_size=15, shuffle=True, seed=13)

train_generator = zip(X_train_augmented, Y_train_augmented)
val_generator = zip(X_train_augmented_val, Y_train_augmented_val)

base_model = efn.EfficientNetB4(weights='imagenet', include_top=False, input_shape=(IMG_WIDTH, IMG_HEIGHT, IMG_CHANNELS))
base_model.trainable = True

x10 = base_model.get_layer('stem_activation').output  # (128, 256, 4)
x20 = base_model.get_layer('block2d_add').output # (64, 128, 32)
x30 = base_model.get_layer('block3d_add').output  # (32, 64, 56)
x40 = base_model.get_layer('block5f_add').output  # (16, 32, 160)
x50 = base_model.get_layer('block7b_add').output  # (8, 16, 448)

tf.keras.backend.clear_session()
nb_filter = [32,64,128,256,512]
# Build U-Net++ model
#inputs = Input((IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS))
qq = base_model.layers[0].output
s = Lambda(lambda x: x / 255) (qq)