Example #1
def build_backbone_net_graph(input_tensor, architecture, weights=None):
    """
    Build basic feature extraction networks.
    :param input_tensor: Input of the basic networks, should be a tensor or tf.keras.layers.Input
    :param architecture: The architecture name of the basic network.
    :param weights: Pre-trained weights to initialize from. One of 'imagenet'
                    (pre-training on ImageNet), 'noisy-student',
                    None (random initialization),
                    or the path to a weights file to be loaded.
    :return: The corresponding EfficientNet model (without the classification top).
    """
    assert architecture in ['efficientnet-b0', 'efficientnet-b1',
                            'efficientnet-b2', 'efficientnet-b3',
                            'efficientnet-b4', 'efficientnet-b5',
                            'efficientnet-b6', 'efficientnet-b7',
                            'efficientnet-l2']

    if architecture == 'efficientnet-b0':
        return efn.EfficientNetB0(include_top=False, weights=weights,
                                  input_tensor=input_tensor,
                                  input_shape=[None, None, 3])
    elif architecture == 'efficientnet-b1':
        return efn.EfficientNetB1(include_top=False, weights=weights,
                                  input_tensor=input_tensor,
                                  input_shape=[None, None, 3])
    elif architecture == 'efficientnet-b2':
        return efn.EfficientNetB2(include_top=False, weights=weights,
                                  input_tensor=input_tensor,
                                  input_shape=[None, None, 3])
    elif architecture == 'efficientnet-b3':
        return efn.EfficientNetB3(include_top=False, weights=weights,
                                  input_tensor=input_tensor,
                                  input_shape=[None, None, 3])
    elif architecture == 'efficientnet-b4':
        return efn.EfficientNetB4(include_top=False, weights=weights,
                                  input_tensor=input_tensor,
                                  input_shape=[None, None, 3])
    elif architecture == 'efficientnet-b5':
        return efn.EfficientNetB5(include_top=False, weights=weights,
                                  input_tensor=input_tensor,
                                  input_shape=[None, None, 3])
    elif architecture == 'efficientnet-b6':
        return efn.EfficientNetB6(include_top=False, weights=weights,
                                  input_tensor=input_tensor,
                                  input_shape=[None, None, 3])
    elif architecture == 'efficientnet-b7':
        return efn.EfficientNetB7(include_top=False, weights=weights,
                                  input_tensor=input_tensor,
                                  input_shape=[None, None, 3])
    elif architecture == 'efficientnet-l2':
        return efn.EfficientNetL2(include_top=False, weights=weights,
                                  input_tensor=input_tensor,
                                  input_shape=[None, None, 3])
    else:
        raise ValueError("Argument architecture should in "
                         "[efficientnet-b0, efficientnet-b1, "
                         "efficientnet-b2, efficientnet-b3, efficientnet-b4, efficientnet-b5, "
                         "efficientnet-b7, efficientnet-b7, efficientnet-l2] "
                         "but get %s" % architecture)
Example #2
def get_model(height=112, width=112):
    base_model = efficientnet.EfficientNetB1(weights='imagenet', include_top=False, input_shape=(height, width, 3))
    x = base_model.output
    x = layers.GlobalAveragePooling2D()(x)
    x = layers.Dense(1024, activation='relu')(x)
    x = layers.Dense(8, activation='softmax', name='predictions')(x)
    model = Model(inputs=base_model.input, outputs=x)
    return model
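The classifier above still has to be compiled before training; a short sketch (the optimizer and loss here are illustrative, not taken from the source project):

model = get_model(height=112, width=112)
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
model.summary()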
Example #3
def effnet_retinanet(num_classes, backbone='EfficientNetB0', inputs=None, modifier=None, **kwargs):
    """ Constructs a retinanet model using a resnet backbone.

    Args
        num_classes: Number of classes to predict.
        backbone: Which backbone to use (one of 'EfficientNetB0' through 'EfficientNetB7').
        inputs: The inputs to the network (defaults to a Tensor of shape (None, None, 3)).
        modifier: A function handler which can modify the backbone before using it in retinanet (this can be used to freeze backbone layers for example).

    Returns
        RetinaNet model with an EfficientNet backbone.
    """
    # choose default input
    if inputs is None:
        if keras.backend.image_data_format() == 'channels_first':
            inputs = keras.layers.Input(shape=(3, None, None))
        else:
            # inputs = keras.layers.Input(shape=(224, 224, 3))
            inputs = keras.layers.Input(shape=(None, None, 3))

    # get last conv layer from the end of each block [28x28, 14x14, 7x7]
    if backbone == 'EfficientNetB0':
        model = efn.EfficientNetB0(input_tensor=inputs, include_top=False, weights=None)
    elif backbone == 'EfficientNetB1':
        model = efn.EfficientNetB1(input_tensor=inputs, include_top=False, weights=None)
    elif backbone == 'EfficientNetB2':
        model = efn.EfficientNetB2(input_tensor=inputs, include_top=False, weights=None)
    elif backbone == 'EfficientNetB3':
        model = efn.EfficientNetB3(input_tensor=inputs, include_top=False, weights=None)
    elif backbone == 'EfficientNetB4':
        model = efn.EfficientNetB4(input_tensor=inputs, include_top=False, weights=None)
    elif backbone == 'EfficientNetB5':
        model = efn.EfficientNetB5(input_tensor=inputs, include_top=False, weights=None)
    elif backbone == 'EfficientNetB6':
        model = efn.EfficientNetB6(input_tensor=inputs, include_top=False, weights=None)
    elif backbone == 'EfficientNetB7':
        model = efn.EfficientNetB7(input_tensor=inputs, include_top=False, weights=None)
    else:
        raise ValueError('Backbone (\'{}\') is invalid.'.format(backbone))

    layer_outputs = ['block4a_expand_activation', 'block6a_expand_activation', 'top_activation']

    layer_outputs = [
        model.get_layer(name=layer_outputs[0]).output,  # 28x28
        model.get_layer(name=layer_outputs[1]).output,  # 14x14
        model.get_layer(name=layer_outputs[2]).output,  # 7x7
    ]
    # create the EfficientNet backbone
    model = keras.Model(inputs=inputs, outputs=layer_outputs, name=model.name)

    # invoke modifier if given
    if modifier:
        model = modifier(model)

    # create the full model
    return retinanet.retinanet(inputs=inputs, num_classes=num_classes, backbone_layers=model.outputs, **kwargs)
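Independent of the RetinaNet head, the feature-pyramid extraction in the middle of this example can be sketched on its own; the layer names below follow the efficientnet package's naming used above and are worth double-checking against model.summary() for other variants:

import tensorflow as tf
import efficientnet.tfkeras as efn

inputs = tf.keras.layers.Input(shape=(None, None, 3))
base = efn.EfficientNetB0(input_tensor=inputs, include_top=False, weights=None)
names = ['block4a_expand_activation', 'block6a_expand_activation', 'top_activation']
# Three feature maps at strides 8, 16 and 32, as consumed by the FPN-style head.
pyramid = [base.get_layer(n).output for n in names]
feature_extractor = tf.keras.Model(inputs=inputs, outputs=pyramid)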
Example #4
def get_efficientnet_model(model_version):
    if model_version == "B0": return efn.EfficientNetB0(weights='imagenet')
    elif model_version == "B1": return efn.EfficientNetB1(weights='imagenet')
    elif model_version == "B2": return efn.EfficientNetB2(weights='imagenet')
    elif model_version == "B3": return efn.EfficientNetB3(weights='imagenet')
    elif model_version == "B4": return efn.EfficientNetB4(weights='imagenet')
    elif model_version == "B5": return efn.EfficientNetB5(weights='imagenet')
    elif model_version == "B6": return efn.EfficientNetB6(weights='imagenet')
    elif model_version == "B7": return efn.EfficientNetB7(weights='imagenet')
    else: return efn.EfficientNetB0(weights='imagenet')
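The final else silently falls back to B0 for unknown versions; a stricter variant (a sketch, same behavior for valid inputs) would raise instead:

def get_efficientnet_model_strict(model_version):
    models = {"B0": efn.EfficientNetB0, "B1": efn.EfficientNetB1, "B2": efn.EfficientNetB2,
              "B3": efn.EfficientNetB3, "B4": efn.EfficientNetB4, "B5": efn.EfficientNetB5,
              "B6": efn.EfficientNetB6, "B7": efn.EfficientNetB7}
    if model_version not in models:
        raise ValueError("Unknown EfficientNet version: %s" % model_version)
    return models[model_version](weights='imagenet')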
Example #5
    def efficientnetb1(self, input_shape, **kwargs):
        # x_input = tf.keras.Input(shape=input_shape, name="triplet")

        m = efn.EfficientNetB1(input_shape=(256, 256, 3), weights=None)
        # remove the output layer, leave the feature extraction part
        m_fe = tf.keras.Model(inputs=m.inputs, outputs=m.layers[-2].output)
        output = tf.keras.layers.Dense(1, activation="sigmoid")(m_fe.output)
        m = tf.keras.Model(inputs=m_fe.inputs, outputs=output)

        return m
Example #6
File: models.py  Project: makamoa/alfred
 def effnet_b1(self):
     conv_base = efn.EfficientNetB1(weights=None, input_shape=self.input_shape, include_top=False,
                                    pooling='avg')  # or weights='noisy-student'
     x = Flatten(name='flat')(conv_base.output)
     x = Dropout(0.5, name='drop1')(x)
     x = Dense(1024, activation='relu', name='dense1')(x)
     x = Dropout(0.5, name='drop2')(x)
     x = Dense(512, activation='relu', name='dense2')(x)
     x = Dense(self.nb_of_points, activation='sigmoid',name='dense3')(x)
     model = Model(conv_base.input, x)
     return model
Example #7
    def __init__(self, hparams):
        super(InputEmbedding, self).__init__()
        self.hparams = hparams
        if hparams.base_model_name == 'InceptionV3':
            base_model = tf.keras.applications.InceptionV3(include_top=False,
                                                           weights='imagenet')
            base_model_layers = [layer.name for layer in base_model.layers]
        elif hparams.base_model_name == 'InceptionResNetV2':
            base_model = tf.keras.applications.InceptionResNetV2(
                include_top=False, weights='imagenet')
            base_model_layers = [layer.name for layer in base_model.layers]
        elif hparams.base_model_name == 'EfficientNetB0':
            base_model = efn.EfficientNetB0(include_top=False,
                                            weights='imagenet')
            base_model_layers = [layer.name for layer in base_model.layers]
        elif hparams.base_model_name == 'EfficientNetB1':
            base_model = efn.EfficientNetB1(include_top=False,
                                            weights='imagenet')
            base_model_layers = [layer.name for layer in base_model.layers]
        elif hparams.base_model_name == 'EfficientNetB2':
            base_model = efn.EfficientNetB2(include_top=False,
                                            weights='imagenet')
            base_model_layers = [layer.name for layer in base_model.layers]
        elif hparams.base_model_name == 'EfficientNetB3':
            base_model = efn.EfficientNetB3(include_top=False,
                                            weights='imagenet')
            base_model_layers = [layer.name for layer in base_model.layers]
        elif hparams.base_model_name == 'EfficientNetB4':
            base_model = efn.EfficientNetB4(include_top=False,
                                            weights='imagenet')
            base_model_layers = [layer.name for layer in base_model.layers]
        elif hparams.base_model_name == 'EfficientNetB5':
            base_model = efn.EfficientNetB5(include_top=False,
                                            weights='imagenet')
            base_model_layers = [layer.name for layer in base_model.layers]
        elif hparams.base_model_name == 'EfficientNetB6':
            base_model = efn.EfficientNetB6(include_top=False,
                                            weights='imagenet')
            base_model_layers = [layer.name for layer in base_model.layers]
        elif hparams.base_model_name == 'EfficientNetB7':
            base_model = efn.EfficientNetB7(include_top=False,
                                            weights='imagenet')
            base_model_layers = [layer.name for layer in base_model.layers]

        assert hparams.end_point in base_model_layers, "no {} layer in {}".format(
            hparams.end_point, hparams.base_model_name)
        conv_tower_output = base_model.get_layer(hparams.end_point).output
        self.conv_model = tf.keras.models.Model(inputs=base_model.input,
                                                outputs=conv_tower_output)
        self.conv_out_shape = self.conv_model.predict(
            np.array([np.zeros(hparams.image_shape)])).shape
        self.encode_cordinate = EncodeCordinate(
            input_shape=self.conv_out_shape)
Example #8
 def get_efficientnet(self):
     models_dict ={
         'b0': efn.EfficientNetB0(input_shape=self.shape,weights=None,include_top=False),
         'b1': efn.EfficientNetB1(input_shape=self.shape,weights=None,include_top=False),
         'b2': efn.EfficientNetB2(input_shape=self.shape,weights=None,include_top=False),
         'b3': efn.EfficientNetB3(input_shape=self.shape,weights=None,include_top=False),
         'b4': efn.EfficientNetB4(input_shape=self.shape,weights=None,include_top=False),
         'b5': efn.EfficientNetB5(input_shape=self.shape,weights=None,include_top=False),
         'b6': efn.EfficientNetB6(input_shape=self.shape,weights=None,include_top=False),
         'b7': efn.EfficientNetB7(input_shape=self.shape,weights=None,include_top=False)
     }
     return models_dict[self.model_class]
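Note that the dictionary above instantiates all eight networks even though only one is returned; a lighter sketch keeping the same keys maps to the constructors instead and builds only the requested model:

 def get_efficientnet(self):
     constructors = {
         'b0': efn.EfficientNetB0, 'b1': efn.EfficientNetB1,
         'b2': efn.EfficientNetB2, 'b3': efn.EfficientNetB3,
         'b4': efn.EfficientNetB4, 'b5': efn.EfficientNetB5,
         'b6': efn.EfficientNetB6, 'b7': efn.EfficientNetB7,
     }
     # Only the selected architecture is actually constructed.
     return constructors[self.model_class](input_shape=self.shape, weights=None, include_top=False)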
Example #9
def create_b1(include_top=False,
              input_shape=None,
              input_tensor=None,
              weights="noisy-student"):
    """ネットワークの作成。"""
    import efficientnet.tfkeras as efn

    return efn.EfficientNetB1(
        include_top=include_top,
        input_shape=input_shape,
        input_tensor=input_tensor,
        weights=weights,
    )
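A usage sketch for create_b1; the input size is an illustrative assumption, and 'noisy-student' weights are fetched by the efficientnet package on first use:

base = create_b1(include_top=False, input_shape=(240, 240, 3), weights="noisy-student")
base.trainable = False  # e.g. freeze the backbone during an initial warm-up phase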
Example #10
    def getEffTFModel(self, n=0):
        modelInput = tf.keras.Input(batch_input_shape=(None, 5, self.config['net_size'], self.config['net_size'], 3))
        modelInput0, modelInput1, modelInput2, modelInput3, modelInput4 = tf.split(modelInput, [1, 1, 1, 1, 1], 1)
        x0 = tf.squeeze(tf.keras.layers.Lambda(lambda x0: x0)(modelInput0))
        x1 = tf.squeeze(tf.keras.layers.Lambda(lambda x1: x1)(modelInput1))
        x2 = tf.squeeze(tf.keras.layers.Lambda(lambda x2: x2)(modelInput2))
        x3 = tf.squeeze(tf.keras.layers.Lambda(lambda x3: x3)(modelInput3))
        x4 = tf.squeeze(tf.keras.layers.Lambda(lambda x4: x4)(modelInput4))
        net = None
        if n % 10 == 0:
            net = efn.EfficientNetB0(include_top=False, weights='imagenet', input_shape=(self.config['net_size'], self.config['net_size'], 3), pooling='avg')
        elif n % 10 == 1:
            net = efn.EfficientNetB1(include_top=False, weights='imagenet', input_shape=(self.config['net_size'], self.config['net_size'], 3), pooling='avg')
        elif n % 10 == 2:
            net = efn.EfficientNetB2(include_top=False, weights='imagenet', input_shape=(self.config['net_size'], self.config['net_size'], 3), pooling='avg')
        elif n % 10 == 3:
            net = efn.EfficientNetB3(include_top=False, weights='imagenet', input_shape=(self.config['net_size'], self.config['net_size'], 3), pooling='avg')
        elif n % 10 == 4:
            net = efn.EfficientNetB4(include_top=False, weights='imagenet', input_shape=(self.config['net_size'], self.config['net_size'], 3), pooling='avg')

        activation = tf.keras.layers.LeakyReLU()

        self.config['rnn_size'] = 256
        ret0 = net(x0)
        ret0 = Dense(self.config['rnn_size'], activation=activation)(ret0)
        ret0 = tf.expand_dims(ret0, axis=1)
        ret0 = self.transformer(ret0)
        ret1 = net(x1)
        ret1 = Dense(self.config['rnn_size'], activation=activation)(ret1)
        ret1 = tf.expand_dims(ret1, axis=1)
        ret1 = self.transformer(ret1)
        ret2 = net(x2)
        ret2 = Dense(self.config['rnn_size'], activation=activation)(ret2)
        ret2 = tf.expand_dims(ret2, axis=1)
        ret2 = self.transformer(ret2)
        ret3 = net(x3)
        ret3 = Dense(self.config['rnn_size'], activation=activation)(ret3)
        ret3 = tf.expand_dims(ret3, axis=1)
        ret3 = self.transformer(ret3)
        ret4 = net(x4)
        ret4 = Dense(self.config['rnn_size'], activation=activation)(ret4)
        ret4 = tf.expand_dims(ret4, axis=1)
        ret4 = self.transformer(ret4)
        ret = tf.concat([ret0, ret1, ret2, ret3, ret4], axis=1)
        print(ret)
        x = tf.keras.layers.Dense(self.config['rnn_size'], activation=activation)(ret)
        model = tf.keras.Model(modelInput, x)
        return model
Example #11
def get_model():

    with STRATEGY.scope():

        efnm = efn.EfficientNetB1(weights='noisy-student',
                                  include_top=False,
                                  input_shape=(IMG_SIZE[0], IMG_SIZE[1], 3))
        efnm.trainable = True

        model = tf.keras.models.Sequential([
            efnm,
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(CLASSES, activation='softmax')
        ])

        model.compile(loss=LOSS_FN, optimizer=OPTIMIZER, metrics=['accuracy'])

    return model
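get_model above reads several module-level globals; one plausible way to define them before calling it (all values are illustrative assumptions):

import tensorflow as tf
import efficientnet.tfkeras as efn

STRATEGY = tf.distribute.get_strategy()  # default strategy; swap in MirroredStrategy/TPUStrategy as needed
IMG_SIZE = (240, 240)
CLASSES = 5
LOSS_FN = 'sparse_categorical_crossentropy'
OPTIMIZER = tf.keras.optimizers.Adam(learning_rate=1e-4)

model = get_model()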
Example #12
 def model_chooser(self, classes=2, weights=None):
     print("Model selection started.")
     name = self.model_name
     if(name=='C0'):
         self.model = efn.EfficientNetB0(include_top=True, weights=weights, classes=classes)
     elif(name=='C1'):
         self.model = efn.EfficientNetB1(include_top=True, weights=weights, classes=classes)
     elif(name=='C2'):
         self.model = efn.EfficientNetB2(include_top=True, weights=weights, classes=classes)
     elif(name=='C3'):
         self.model = efn.EfficientNetB3(include_top=True, weights=weights, classes=classes)
     elif(name=='C4'):
         self.model = efn.EfficientNetB4(include_top=True, weights=weights, classes=classes)
     elif(name=='C5'):
         self.model = efn.EfficientNetB5(include_top=True, weights=weights, classes=classes)
     
     if(classes==2):
         self.model.compile(optimizer="adam", loss="binary_crossentropy", metrics = ['acc'])
     elif(classes>2):
         self.model.compile(optimizer="adam", loss="categorical_crossentropy", metrics = ['acc'])      
Example #13
def create_base_model(base_model_name, pretrained=True, IMAGE_SIZE=[300, 300]):
    if pretrained is False:
        weights = None
    else:
        weights = "imagenet"
    if base_model_name == 'B0':
        base = efn.EfficientNetB0(weights=weights,
                                  include_top=False,
                                  input_shape=[*IMAGE_SIZE, 3])
    elif base_model_name == 'B1':
        base = efn.EfficientNetB1(weights=weights,
                                  include_top=False,
                                  input_shape=[*IMAGE_SIZE, 3])
    elif base_model_name == 'B2':
        base = efn.EfficientNetB2(weights=weights,
                                  include_top=False,
                                  input_shape=[*IMAGE_SIZE, 3])
    elif base_model_name == 'B3':
        base = efn.EfficientNetB3(weights=weights,
                                  include_top=False,
                                  input_shape=[*IMAGE_SIZE, 3])
    elif base_model_name == 'B4':
        base = efn.EfficientNetB4(weights=weights,
                                  include_top=False,
                                  input_shape=[*IMAGE_SIZE, 3])
    elif base_model_name == 'B5':
        base = efn.EfficientNetB5(weights=weights,
                                  include_top=False,
                                  input_shape=[*IMAGE_SIZE, 3])
    elif base_model_name == 'B6':
        base = efn.EfficientNetB6(weights=weights,
                                  include_top=False,
                                  input_shape=[*IMAGE_SIZE, 3])
    elif base_model_name == 'B7':
        base = efn.EfficientNetB7(weights=weights,
                                  include_top=False,
                                  input_shape=[*IMAGE_SIZE, 3])
    base = remove_dropout(base)
    base.trainable = True
    return base
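A usage sketch for create_base_model; remove_dropout comes from the surrounding project and is assumed to be available, and the class count here is illustrative:

import tensorflow as tf

base = create_base_model('B1', pretrained=True, IMAGE_SIZE=[300, 300])
x = tf.keras.layers.GlobalAveragePooling2D()(base.output)
out = tf.keras.layers.Dense(4, activation='softmax')(x)  # 4-class head, illustrative
model = tf.keras.Model(base.input, out)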
Example #14
    def build(name, width, height, depth, n_classes, reg=0.8):
        """
        Args:
            name: name of the network
            width: width of the images
            height: height of the images
            depth: number of channels of the images
            n_classes: number of output classes
            reg: regularization value
        """

        # If Keras backend is TensorFlow
        inputShape = (height, width, depth)
        chanDim = -1

        # If Keras backend is Theano
        if K.image_data_format() == "channels_first":
            inputShape = (depth, height, width)
            chanDim = 1

        # Define the base model architecture
        if name == 'EfficientNetB0':
            base_model = efn.EfficientNetB0(weights='imagenet',
                                            include_top=False,
                                            input_shape=inputShape)
        elif name == 'EfficientNetB1':
            base_model = efn.EfficientNetB1(weights='imagenet',
                                            include_top=False,
                                            input_shape=inputShape)
        elif name == 'EfficientNetB2':
            base_model = efn.EfficientNetB2(weights='imagenet',
                                            include_top=False,
                                            input_shape=inputShape)
        elif name == 'EfficientNetB3':
            base_model = efn.EfficientNetB3(weights='imagenet',
                                            include_top=False,
                                            input_shape=inputShape)
        elif name == 'EfficientNetB4':
            base_model = efn.EfficientNetB4(weights='imagenet',
                                            include_top=False,
                                            input_shape=inputShape)
        elif name == 'EfficientNetB5':
            base_model = efn.EfficientNetB5(weights='imagenet',
                                            include_top=False,
                                            input_shape=inputShape)
        elif name == 'EfficientNetB6':
            base_model = efn.EfficientNetB6(weights='imagenet',
                                            include_top=False,
                                            input_shape=inputShape)
        elif name == 'ResNet50':
            base_model = ResNet50(weights='imagenet',
                                  include_top=False,
                                  input_shape=inputShape)
        elif name == 'DenseNet121':
            base_model = DenseNet121(weights='imagenet',
                                     include_top=False,
                                     input_shape=inputShape)

        #x1 = GlobalMaxPooling2D()(base_model.output)    # Compute the max pooling of the base model output
        #x2 = GlobalAveragePooling2D()(base_model.output)    # Compute the average pooling of the base model output
        #x3 = Flatten()(base_model.output)    # Flatten the base model output

        #x = Concatenate(axis=-1)([x1, x2, x3])

        x = GlobalAveragePooling2D()(base_model.output)
        x = Dropout(0.5)(x)
        """
        # First Dense => Relu => BN => DO
        fc_layer_1 = Dense(512, kernel_regularizer=l2(reg))(x)
        activation_1 = Activation('relu')(fc_layer_1)
        batch_norm_1 = BatchNormalization(axis=-1)(activation_1)
        dropout_1 = Dropout(0.5)(batch_norm_1)
        
        # First Dense => Relu => BN => DO
        fc_layer_2 = Dense(256, kernel_regularizer=l2(reg))(dropout_1)
        activation_2 = Activation('relu')(fc_layer_2)
        batch_norm_2 = BatchNormalization(axis=-1)(activation_2)
        dropout_2 = Dropout(0.5)(batch_norm_2)
        
        # Add the output layer
        output = Dense(n_classes, kernel_regularizer=l2(reg), activation='softmax')(dropout_2)
        """
        output = Dense(n_classes,
                       kernel_regularizer=l2(reg),
                       activation='softmax')(x)

        # Create the model
        model = Model(inputs=base_model.inputs, outputs=output)

        return model
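Assuming build is reachable in scope (e.g. as a @staticmethod on its unshown enclosing class), a call might look like the following sketch, with illustrative argument values:

model = build(name='EfficientNetB1', width=224, height=224, depth=3, n_classes=10, reg=0.01)
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])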
Example #15
# Imports assumed by this excerpt (not shown in the original file):
import tensorflow
from tensorflow.keras import layers, models
import efficientnet.tfkeras as efn
import pandas as pd
from tensorflow.python.client import device_lib

print(device_lib.list_local_devices())

acc_metric = tensorflow.keras.metrics.Accuracy()
bce_metric = tensorflow.keras.metrics.BinaryCrossentropy()
bce_loss = tensorflow.keras.losses.BinaryCrossentropy()

# Specify image size
IMG_WIDTH = 240
IMG_HEIGHT = 240
CHANNELS = 3

# loading pretrained conv base model
conv_base = efn.EfficientNetB1(weights="imagenet",
                               include_top=False,
                               input_shape=(IMG_WIDTH, IMG_HEIGHT, CHANNELS))

dropout_rate = 0.15
model = models.Sequential()
model.add(conv_base)
model.add(layers.GlobalAveragePooling2D(name="gap"))
model.add(layers.Dropout(dropout_rate, name="dropout_out"))
model.add(layers.Dense(1, activation="sigmoid"))
conv_base.trainable = True

print(model.summary())

print('Loading data files...')
data_path = 'D:/Data/deepfake-detection-challenge/dfdc_processed'
metadata = pd.read_csv(data_path + '/metadata_copy.csv')
Example #16
def create_model(is_twohundred=False, is_halffeatures=True):
    print('Loading base model (DenseNet)..')

    # Encoder Layers

    if is_twohundred:
        base_model = applications.DenseNet201(input_shape=(480, 640, 3), include_top=False)
    else:
        #base_model = applications.DenseNet169(input_shape=(480, 640, 3), include_top=False)
        base_model = efn.EfficientNetB1(weights='imagenet',include_top=False,input_shape=((480, 640, 3)))

    print('Base model loaded.')

    # Starting point for decoder
    base_model_output_shape = base_model.layers[-1].output.shape

    # Layer freezing?
    for layer in base_model.layers: layer.trainable = True

    # Starting number of decoder filters
    if is_halffeatures:
        decode_filters = int(int(base_model_output_shape[-1]) / 2)
    else:
        decode_filters = int(base_model_output_shape[-1])
    base_model.summary()
    # Define upsampling layer
    def upproject(tensor, filters, name, concat_with):
        up_i = BilinearUpSampling2D((2, 2), name=name + '_upsampling2d')(tensor)
        up_i = Concatenate(name=name + '_concat')(
            [up_i, base_model.get_layer(concat_with).output])  # Skip connection
        up_i = Conv2D(filters=filters, kernel_size=3, strides=1, padding='same', name=name + '_convA')(up_i)
        up_i = LeakyReLU(alpha=0.2)(up_i)
        up_i = Conv2D(filters=filters, kernel_size=3, strides=1, padding='same', name=name + '_convB')(up_i)
        up_i = LeakyReLU(alpha=0.2)(up_i)
        return up_i

    def global_non_local(X,cc):
        h, w , c = list(X.shape)[1], list(X.shape)[2], list(X.shape)[3]
        c=cc
        theta = Conv2D(c, kernel_size=(1,1), padding='same')(X)
        theta_rsh = Reshape((h*w, c))(theta)

        phi = Conv2D(c, kernel_size=(1,1), padding='same')(X)
        phi_rsh = Reshape((c, h*w))(phi)

        g = Conv2D(c, kernel_size=(1,1), padding='same')(X)
        g_rsh = Reshape((h*w, c))(g)

        theta_phi = tf.matmul(theta_rsh, phi_rsh)
        theta_phi = tf.keras.layers.Softmax()(theta_phi)

        theta_phi_g = tf.matmul(theta_phi, g_rsh)
        theta_phi_g = Reshape((h, w, c))(theta_phi_g)

        theta_phi_g = Conv2D(c*2, kernel_size=(1,1), padding='same')(theta_phi_g)

        out = Add()([theta_phi_g, X])

        return out

    non_local=global_non_local(base_model.output,decode_filters)

    # Decoder Layers
    decoder = Conv2D(filters=decode_filters, kernel_size=1, padding='same', input_shape=base_model_output_shape,
                     name='conv2')(non_local)

    decoder = upproject(decoder, int(decode_filters / 2), 'up1', concat_with='block5d_add')
    decoder = upproject(decoder, int(decode_filters / 4), 'up2', concat_with='block3c_add')

    # conv3 = Conv2D(filters=1, kernel_size=3, strides=1, padding='same', name='conv3_')(decoder)

    decoder = upproject(decoder, int(decode_filters / 8), 'up3', concat_with='block2c_add')
    # conv2 = Conv2D(filters=1, kernel_size=3, strides=1, padding='same', name='conv2_')(decoder)

    decoder = upproject(decoder, int(decode_filters / 16), 'up4', concat_with='block1b_add')
    # conv1 = Conv2D(filters=1, kernel_size=3, strides=1, padding='same', name='conv1_')(decoder)

    decoder = upproject(decoder, int(decode_filters / 32), 'up5', concat_with='input_1')
    conv0 = Conv2D(filters=1, kernel_size=3, strides=1, padding='same', name='conv0_')(decoder)

    '''
    decoder = Conv2D(filters=decode_filters, kernel_size=1, padding='same', input_shape=base_model_output_shape,
                     name='conv2')(base_model.output)
    decoder = upproject(decoder, int(decode_filters / 2), 'up1', concat_with='pool3_pool')
    decoder = upproject(decoder, int(decode_filters / 4), 'up2', concat_with='pool2_pool')
    decoder = upproject(decoder, int(decode_filters / 8), 'up3', concat_with='pool1')
    decoder = upproject(decoder, int(decode_filters / 16), 'up4', concat_with='conv1/relu')
    decoder = upproject(decoder, int(decode_filters / 32), 'up5', concat_with='input_1')
    conv0 = Conv2D(filters=1, kernel_size=3, strides=1, padding='same', name='conv0_')(decoder)
    '''

    predictions_raw_0 = conv0 * 1000
    # predictions_raw_1 = conv1 * 1000
    # predictions_raw_2 = conv2 * 1000
    # predictions_raw_3 = conv3 * 1000

    predictions_0 = tf.clip_by_value(predictions_raw_0, 1.0, 65535.0)
    # predictions_1 = tf.clip_by_value(predictions_raw_1, 1.0, 65535.0)
    # predictions_2 = tf.clip_by_value(predictions_raw_2, 1.0, 65535.0)
    # predictions_3     = tf.clip_by_value(predictions_raw_3, 1.0, 65535.0)

    final_outputs_0 = tf.cast(tf.clip_by_value(predictions_raw_0, 0, 65535.0), tf.int16)
    eval_outputs__0 = tf.clip_by_value(predictions_raw_0, 0.1, 65535.0)
    # final_outputs_1 = tf.cast(tf.clip_by_value(predictions_raw_1, 0, 65535.0), tf.int16)
    # final_outputs_2 = tf.cast(tf.clip_by_value(predictions_raw_2, 0, 65535.0), tf.int16)
    # final_outputs_3 = tf.cast(tf.clip_by_value(predictions_raw_3, 0, 65535.0), tf.int16)

    # Create the model
    model = Model(inputs=base_model.input, outputs=
    [predictions_0, final_outputs_0, eval_outputs__0])



    model.summary()
    #print(base_model.output)
    #print(base_model.layers[-1].output)
    print('Model created.')

    return model
#a=create_model()
Example #17
            Architecture.EFFNETB4: (efn.preprocess_input, True),
            Architecture.EFFNETB5: (efn.preprocess_input, True),
            Architecture.EFFNETB6: (efn.preprocess_input, True),
            Architecture.EFFNETB7: (efn.preprocess_input, True),
        }[self]


class Optimizers(enum.Enum):
    ADAM = 'adam'
    ADAMAX = 'adamax'
    ADADELTA = 'adadelta'
    ADAGRAD = 'adagrad'
    RMSPROP = 'rmsprop'
    SGD = 'sgd'
    NADAM = 'nadam'

    def __call__(self, *args, **kwargs):
        return {
            Optimizers.ADAM: tf.keras.optimizers.Adam,
            Optimizers.ADAMAX: tf.keras.optimizers.Adamax,
            Optimizers.ADADELTA: tf.keras.optimizers.Adadelta,
            Optimizers.ADAGRAD: tf.keras.optimizers.Adagrad,
            Optimizers.RMSPROP: tf.keras.optimizers.RMSprop,
            Optimizers.SGD: tf.keras.optimizers.SGD,
            Optimizers.NADAM: tf.keras.optimizers.Nadam,
        }[self]


if __name__ == '__main__':
    effnet_base = efn.EfficientNetB1(weights='imagenet', include_top=False,  input_shape=(256, 256, 3))
    print(effnet_base.summary())
Example #18
def get_model(config):

    # model = globals().get(config.MODEL.NAME)(1)
    print('model name:', config.MODEL.NAME)
    model_name = config.MODEL.NAME
    input_shape = (config.DATA.IMG_H, config.DATA.IMG_W, 3)
    pretrained_weight = config.MODEL.WEIGHT
    if pretrained_weight == 'None':
        pretrained_weight = None
    if 'EfficientNet' in model_name:
        ##keras.application
        if 'B7' in model_name:
            encoder = efn.EfficientNetB7(input_shape=input_shape,
                                         weights=pretrained_weight,
                                         include_top=False)
        elif 'B0' in model_name:
            encoder = efn.EfficientNetB0(input_shape=input_shape,
                                         weights=pretrained_weight,
                                         include_top=False)
        elif 'B1' in model_name:
            encoder = efn.EfficientNetB1(input_shape=input_shape,
                                         weights=pretrained_weight,
                                         include_top=False)
        elif 'B2' in model_name:
            encoder = efn.EfficientNetB2(input_shape=input_shape,
                                         weights=pretrained_weight,
                                         include_top=False)
        elif 'B3' in model_name:
            encoder = efn.EfficientNetB3(input_shape=input_shape,
                                         weights=pretrained_weight,
                                         include_top=False)
        elif 'B4' in model_name:
            encoder = efn.EfficientNetB4(input_shape=input_shape,
                                         weights=pretrained_weight,
                                         include_top=False)
        elif 'B5' in model_name:
            encoder = efn.EfficientNetB5(input_shape=input_shape,
                                         weights=pretrained_weight,
                                         include_top=False)
        elif 'B6' in model_name:
            encoder = efn.EfficientNetB6(input_shape=input_shape,
                                         weights=pretrained_weight,
                                         include_top=False)
        model = tf.keras.Sequential([
            encoder,
            tf.keras.layers.GlobalAveragePooling2D(),
            tf.keras.layers.Dense(len(CLASSES), activation='softmax')
        ])

    else:
        ##https://github.com/qubvel/classification_models
        #['resnet18', 'resnet34', 'resnet50', 'resnet101', 'resnet152', 'seresnet18', 'seresnet34', 'seresnet50',
        # 'seresnet101', 'seresnet152', 'seresnext50', 'seresnext101', 'senet154', 'resnet50v2', 'resnet101v2',
        # 'resnet152v2', 'resnext50', 'resnext101', 'vgg16', 'vgg19',
        # 'densenet121', 'densenet169', 'densenet201',
        # 'inceptionresnetv2', 'inceptionv3', 'xception', 'nasnetlarge', 'nasnetmobile', 'mobilenet', 'mobilenetv2']

        base_model, preprocess_input = Classifiers.get(model_name)
        base_model = base_model(input_shape=input_shape,
                                weights=pretrained_weight,
                                include_top=False)
        x = tf.keras.layers.GlobalAveragePooling2D()(base_model.output)

        output = tf.keras.layers.Dense(
            len(CLASSES), activation=config.MODEL.OUTPUT_ACTIVATION)(x)
        # if 'focal' in config.LOSS.NAME:
        #     if 'categorical_focal_loss' == config.LOSS.NAME:
        #     else:
        #         output = tf.keras.layers.Dense(len(CLASSES), activation='sigmoid')(x)
        # else:
        #     output = tf.keras.layers.Dense(len(CLASSES), activation='softmax')(x)
        model = tf.keras.models.Model(inputs=[base_model.input],
                                      outputs=[output])

    return model
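get_model expects a nested config object; a minimal stand-in covering only the fields read above can be built with types.SimpleNamespace (CLASSES and the efn/Classifiers imports are assumed to be defined as in the source project):

from types import SimpleNamespace

config = SimpleNamespace(
    MODEL=SimpleNamespace(NAME='EfficientNetB1', WEIGHT='imagenet'),
    DATA=SimpleNamespace(IMG_H=240, IMG_W=240),
)
model = get_model(config)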
Example #19
data = next(test_ds.as_numpy_iterator())
#
# @tf.custom_gradient
# def softhardthresh(x):
#     mask = tf.nn.sigmoid(x)
#     def grad(dy):
#         return dy * (mask * (1 - mask))
#     return tf.cast(mask >= 0.5, tf.float32), grad

actfun = 'swish'
with tf.device(args.device):

    model_ = efn.EfficientNetB1(
        weights=None,
        include_top=False,
        pooling='max',
        input_shape=data[0][1][0].shape)  # or weights='noisy-student'
    feats = layers.Dense(128, activation=actfun)(model_.output)
    output = layers.Dense(32)(feats)
    model_full = tf.keras.Model(model_.input,
                                tf.nn.tanh(output),
                                name='model_full')

    x = layers.Dense(32, activation=actfun)(output)
    output = layers.Dense(args.CLASS_NAMES.shape[0])(x)
    model_weaksup_full = tf.keras.Model(model_.input,
                                        output,
                                        name='class_model_full')

    model_ = efn.EfficientNetB0(