def double_conv_layer(x, filter_size, size, dropout, batch_norm=False):
    '''
    Construct a double convolutional layer with a residual (1x1) shortcut,
    using SAME padding and ReLU activation.
    :param x: input tensor
    :param filter_size: size of the convolutional filter
    :param size: number of filters
    :param dropout: dropout rate; if <= 0, dropout is disabled,
            if > 0 it is used as the rate
    :param batch_norm: if True, apply batch normalization
            after each convolution
    :return: output of the double convolutional layer
    '''
    axis = 3  # channels-last batch-norm axis
    conv = layers.Conv2D(size, (filter_size, filter_size), padding='same')(x)
    if batch_norm:
        conv = layers.BatchNormalization(axis=axis)(conv)
    conv = layers.Activation('relu')(conv)
    conv = layers.Conv2D(size, (filter_size, filter_size),
                         padding='same')(conv)
    if batch_norm:
        conv = layers.BatchNormalization(axis=axis)(conv)
    conv = layers.Activation('relu')(conv)
    if dropout > 0:
        conv = layers.Dropout(dropout)(conv)

    shortcut = layers.Conv2D(size, kernel_size=(1, 1), padding='same')(x)
    if batch_norm:
        shortcut = layers.BatchNormalization(axis=axis)(shortcut)

    res_path = layers.add([shortcut, conv])
    return res_path
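A minimal usage sketch (the input size and hyper-parameters below are illustrative, and layers/models are assumed to come from tf.keras as in the other examples):

from tensorflow.keras import layers, models

inputs = layers.Input(shape=(128, 128, 3))
x = double_conv_layer(inputs, filter_size=3, size=64, dropout=0.2, batch_norm=True)
model = models.Model(inputs, x)  # dropout=0.2 enables dropout; dropout=0 would skip it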
Example #2
    def feature_fusion_module_new(self, input, name, num_features):
        input_big = input[0]
        input_small = input[1]

        b_shape = input_big.get_shape()
        s_shape = input_small.get_shape()

        if b_shape[1].value > s_shape[1].value:
            up_sampled_input = keras_ly.UpSampling2D(size=(2, 2), name=name+'_upsample')(input_small)
        else:
            up_sampled_input = input_small

        concat_1 = tf.concat(axis=3, values=[input_big, up_sampled_input], name=name+'_concat')
        conv_1 = keras_ly.Conv2D(num_features, [3, 3], padding='SAME', name=name+'_conv1')(concat_1)
        conv_1_bn_relu = tf.nn.relu(slim.batch_norm(conv_1, fused=True))

        global_pool = tf.reduce_mean(conv_1_bn_relu, [1, 2], keep_dims=True)

        conv_2 = keras_ly.Conv2D(num_features, [1, 1], padding='SAME', name=name+'_conv2')(global_pool)
        conv_3 = keras_ly.Conv2D(num_features, [1, 1], padding='SAME', name=name+'_conv3')(conv_2)

        sigmoid = tf.sigmoid(conv_3, name=name+'_sigmoid')

        mul = tf.multiply(sigmoid, conv_1_bn_relu, name=name+'_multiply') #sigmoid * conv_1
        add_out = tf.add(conv_1_bn_relu, mul, name=name+'_add_out') # conv_1 + mul

        return add_out
Example #3
def VGG6(inputs, n_class=10):
    # Block 1
    x = layers.Conv2D(32, (3, 3),
                      activation='relu',
                      padding='same',
                      name='block1_conv1')(inputs)
    x = layers.Conv2D(32, (3, 3),
                      activation='relu',
                      padding='same',
                      name='block1_conv2')(x)
    x = layers.MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x)

    # Block 2
    x = layers.Conv2D(64, (3, 3),
                      activation='relu',
                      padding='same',
                      name='block2_conv1')(x)
    x = layers.Conv2D(64, (3, 3),
                      activation='relu',
                      padding='same',
                      name='block2_conv2')(x)
    x = layers.MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x)

    x = layers.Flatten(name='flatten')(x)
    x = layers.Dense(512, activation='relu', name='fc1')(x)
    features = layers.Dense(512, activation='relu', name='fc2')(x)
    outputs = layers.Dense(n_class, activation='softmax',
                           name='predictions')(features)

    return outputs
Example #4
    def feature_fusion_module(self, input, name):
        input_big = input[0]
        input_small = input[1]

        up_sampled_input = keras_ly.UpSampling2D(size=(2, 2),
                                                 name=name +
                                                 '_upsample')(input_small)

        concat_1 = tf.concat(axis=3,
                             values=[input_big, up_sampled_input],
                             name=name + '_concat')
        conv_1 = keras_ly.Conv2D(1024, [3, 3],
                                 padding='SAME',
                                 name=name + '_conv1')(concat_1)

        global_pool = tf.reduce_mean(conv_1, [1, 2], keep_dims=True)
        conv_2 = keras_ly.Conv2D(1024, [1, 1],
                                 padding='SAME',
                                 name=name + '_conv2')(global_pool)
        conv_3 = keras_ly.Conv2D(1024, [1, 1],
                                 padding='SAME',
                                 name=name + '_conv3')(conv_2)
        sigmoid = tf.sigmoid(conv_3, name=name + '_sigmoid')

        mul = tf.multiply(sigmoid, conv_1, name=name + '_multiply')
        add_out = tf.add(conv_1, mul, name=name + '_add_out')

        return add_out
Example #5
    def grouped_convolution(y, nb_channels, _strides):
        # when `cardinality` == 1 this is just a standard convolution
        if cardinality == 1:
            return layers.Conv2D(nb_channels,
                                 kernel_size=(3, 3),
                                 strides=_strides,
                                 padding='same')(y)

        assert not nb_channels % cardinality
        _d = nb_channels // cardinality

        # in a grouped convolution layer, input and output channels are divided into `cardinality` groups,
        # and convolutions are separately performed within each group
        groups = []
        for j in range(cardinality):
            # bind j as a default argument so each Lambda slices its own channel group
            group = layers.Lambda(lambda z, j=j: z[:, :, :, j * _d:(j + 1) * _d])(y)
            groups.append(
                layers.Conv2D(_d,
                              kernel_size=(3, 3),
                              strides=_strides,
                              padding='same')(group))

        # the grouped convolutional layer concatenates them as the outputs of the layer
        y = layers.concatenate(groups)

        return y
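A minimal usage sketch (values are illustrative; note that cardinality is captured from the enclosing scope, so it must be defined before this helper is called):

cardinality = 8  # illustrative; must divide nb_channels evenly

inputs = layers.Input(shape=(32, 32, 64))
out = grouped_convolution(inputs, nb_channels=64, _strides=(1, 1))
# each of the 8 groups convolves 64 / 8 = 8 channels independently,
# and the group outputs are concatenated back to 64 channels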
Example #6
def classifier_model():

    model = models.Sequential()
    model.add(
        layers.Conv2D(NUM_FILTERS_1, [3, 3],
                      strides=(2, 2),
                      padding='same',
                      activation='relu',
                      input_shape=(28, 28, 1),
                      kernel_initializer=initializers.glorot_normal(),
                      bias_initializer=initializers.Zeros()))
    model.add(
        layers.Conv2D(NUM_FILTERS_2, [3, 3],
                      strides=(2, 2),
                      padding='same',
                      activation='relu',
                      kernel_initializer=initializers.glorot_normal(),
                      bias_initializer=initializers.Zeros()))
    model.add(layers.Flatten())
    model.add(
        layers.Dense(NUM_CLASSES,
                     kernel_initializer=initializers.glorot_normal(),
                     bias_initializer=initializers.Zeros()))

    return model
Example #7
    def olliNetwork(self):
        self.model = models.Sequential()

        self.model.add(
            layers.Conv2D(64, (5, 5),
                          activation='relu',
                          input_shape=(48, 48, 1)))
        self.model.add(layers.Conv2D(64, (5, 5), activation='relu'))
        self.model.add(layers.MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))
        self.model.add(layers.Dropout(0.25))

        self.model.add(layers.Conv2D(64, (5, 5), activation='relu'))
        self.model.add(layers.Conv2D(64, (5, 5), activation='relu'))
        self.model.add(layers.MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))
        self.model.add(layers.Dropout(0.25))

        self.model.add(layers.Conv2D(128, (4, 4), activation='relu'))
        self.model.add(layers.MaxPooling2D(pool_size=(2, 2)))
        self.model.add(layers.Dropout(0.25))

        self.model.add(layers.Flatten())
        self.model.add(layers.Dense(3072, activation='relu'))
        self.model.add(layers.Dropout(0.5))
        self.model.add(layers.Dense(128, activation='relu'))
        self.model.add(layers.Dropout(0.5))
        self.model.add(layers.Dense(3, activation='softmax'))
Example #8
def attention_block(x, gating, inter_shape):
    shape_x = K.int_shape(x)
    shape_g = K.int_shape(gating)

    theta_x = layers.Conv2D(inter_shape, (2, 2),
                            strides=(2, 2),
                            padding='same')(x)  # 16
    shape_theta_x = K.int_shape(theta_x)

    phi_g = layers.Conv2D(inter_shape, (1, 1), padding='same')(gating)
    upsample_g = layers.Conv2DTranspose(
        inter_shape, (3, 3),
        strides=(shape_theta_x[1] // shape_g[1],
                 shape_theta_x[2] // shape_g[2]),
        padding='same')(phi_g)  # 16

    concat_xg = layers.add([upsample_g, theta_x])
    act_xg = layers.Activation('relu')(concat_xg)
    psi = layers.Conv2D(1, (1, 1), padding='same')(act_xg)
    sigmoid_xg = layers.Activation('sigmoid')(psi)
    shape_sigmoid = K.int_shape(sigmoid_xg)
    upsample_psi = layers.UpSampling2D(size=(shape_x[1] // shape_sigmoid[1],
                                             shape_x[2] // shape_sigmoid[2]))(
                                                 sigmoid_xg)  # 32

    upsample_psi = expend_as(upsample_psi, shape_x[3])

    y = layers.multiply([upsample_psi, x])

    result = layers.Conv2D(shape_x[3], (1, 1), padding='same')(y)
    result_bn = layers.BatchNormalization()(result)
    return result_bn
Example #9
    def _create_generator(self):
        inputs = layers.Input(shape=(self.args.latent_dims, ))

        x = layers.Dense(128 * 16 * 16)(inputs)
        x = layers.LeakyReLU()(x)
        x = layers.Reshape((16, 16, 128))(x)

        x = layers.Conv2D(256, kernel_size=5, strides=1, padding='same')(x)
        x = layers.LeakyReLU()(x)

        # use a kernel size that is a multiple of the stride to avoid checkerboard artifacts when up-sampling
        x = layers.Conv2DTranspose(256,
                                   kernel_size=4,
                                   strides=2,
                                   padding='same')(x)
        x = layers.LeakyReLU()(x)

        x = layers.Conv2D(256, kernel_size=5, padding='same')(x)
        x = layers.LeakyReLU()(x)

        x = layers.Conv2D(256, kernel_size=5, padding='same')(x)
        x = layers.LeakyReLU()(x)

        outputs = layers.Conv2D(CHANNELS,
                                kernel_size=7,
                                activation='tanh',
                                padding='same')(x)

        generator = models.Model(inputs, outputs)
        return generator
Example #10
def conv_block(input_tensor, num_filters):
    encoder = layers.Conv2D(num_filters, (3, 3), padding='same')(input_tensor)
    encoder = layers.BatchNormalization()(encoder)
    encoder = layers.Activation('relu')(encoder)
    encoder = layers.Conv2D(num_filters, (3, 3), padding='same')(encoder)
    encoder = layers.BatchNormalization()(encoder)
    encoder = layers.Activation('relu')(encoder)
    return encoder
Example #11
def decoder_block(input_tensor, concat_tensor, num_filters):
    decoder = layers.Conv2DTranspose(num_filters, (2, 2), strides=(2, 2), padding='same')(input_tensor)
    decoder = layers.concatenate([concat_tensor, decoder], axis=-1)
    decoder = layers.BatchNormalization()(decoder)
    decoder = layers.Activation('relu')(decoder)
    decoder = layers.Conv2D(num_filters, (3, 3), padding='same')(decoder)
    decoder = layers.BatchNormalization()(decoder)
    decoder = layers.Activation('relu')(decoder)
    decoder = layers.Conv2D(num_filters, (3, 3), padding='same')(decoder)
    decoder = layers.BatchNormalization()(decoder)
    decoder = layers.Activation('relu')(decoder)
    return decoder
Example #12
    def residual_block(y,
                       nb_channels_in,
                       nb_channels_out,
                       _strides=(1, 1),
                       _project_shortcut=False):
        """
        Our network consists of a stack of residual blocks. These blocks have the same topology,
        and are subject to two simple rules:
        - If producing spatial maps of the same size, the blocks share the same hyper-parameters (width and filter sizes).
        - Each time the spatial map is down-sampled by a factor of 2, the width of the blocks is multiplied by a factor of 2.
        """
        shortcut = y

        # we modify the residual building block as a bottleneck design to make the network more economical
        y = layers.Conv2D(nb_channels_in,
                          kernel_size=(1, 1),
                          strides=(1, 1),
                          padding='same')(y)
        y = add_common_layers(y)

        # ResNeXt (identical to ResNet when `cardinality` == 1)
        y = grouped_convolution(y, nb_channels_in, _strides=_strides)
        y = add_common_layers(y)

        y = layers.Conv2D(nb_channels_out,
                          kernel_size=(1, 1),
                          strides=(1, 1),
                          padding='same')(y)
        # batch normalization is employed after aggregating the transformations and before adding to the shortcut
        y = layers.BatchNormalization()(y)

        # identity shortcuts used directly when the input and output are of the same dimensions
        if _project_shortcut or _strides != (1, 1):
            # when the dimensions increase projection shortcut is used to match dimensions (done by 1×1 convolutions)
            # when the shortcuts go across feature maps of two sizes, they are performed with a stride of 2
            shortcut = layers.Conv2D(nb_channels_out,
                                     kernel_size=(1, 1),
                                     strides=_strides,
                                     padding='same')(shortcut)
            shortcut = layers.BatchNormalization()(shortcut)

        y = layers.add([shortcut, y])

        # the activation (LeakyReLU here) is performed right after each batch normalization,
        # except for the output of the block, where it is applied after adding the shortcut
        y = layers.LeakyReLU()(y)

        return y
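A minimal sketch of stacking these blocks under the two rules quoted in the docstring (channel counts are illustrative; cardinality, add_common_layers, and grouped_convolution are assumed to be in scope as above):

inputs = layers.Input(shape=(32, 32, 64))
y = residual_block(inputs, 64, 256, _project_shortcut=True)  # project shortcut: 64 -> 256 channels
y = residual_block(y, 64, 256)                               # same size: identity shortcut
y = residual_block(y, 128, 512, _strides=(2, 2))             # spatial map halves, block width doubles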
Example #13
    def attention_refinment_module(self, input, name):
        global_pool = tf.reduce_mean(input, [1, 2], keep_dims=True)
        #conv_1 = keras_ly.Conv2D(2048, [1, 1], padding='SAME', name=name+'_conv1')(global_pool)
        conv_1 = keras_ly.Conv2D(input.get_shape()[3], [1, 1], padding='SAME', name=name+'_conv1')(global_pool)
        sigmoid = tf.sigmoid(conv_1, name=name+'_sigmoid')
        mul_out = tf.multiply(input, sigmoid, name=name+'_multiply')

        return mul_out
Example #14
def create_model(dropout_rate):
    model = models.Sequential()
    model.add(
        layers.Conv2D(32, (3, 3), activation='relu',
                      input_shape=(150, 150, 3)))
    model.add(layers.MaxPool2D((2, 2)))
    model.add(layers.Conv2D(64, (3, 3), activation='relu'))
    model.add(layers.MaxPool2D((2, 2)))
    model.add(layers.Conv2D(128, (3, 3), activation='relu'))
    model.add(layers.MaxPool2D((2, 2)))
    model.add(layers.Conv2D(128, (3, 3), activation='relu'))
    model.add(layers.MaxPool2D((2, 2)))
    model.add(layers.Flatten())
    model.add(layers.Dropout(dropout_rate))
    model.add(layers.Dense(512, activation='relu'))
    model.add(layers.Dense(1, activation='sigmoid'))
    return model
Example #15
def conv2_bn(x, filts, k=3, stride=1, rate=1, name=None, pad='same'):
    x = l.Conv2D(filts, (k, k),
                 strides=(stride, stride),
                 dilation_rate=rate,
                 padding=pad,
                 name=name)(x)

    # build derived names only when a base name is given
    # (name=None would make the string concatenation fail)
    x = l.BatchNormalization(name=name + '_bn' if name else None)(x)
    x = l.Activation('relu', name=name + '_relu' if name else None)(x)
    return x
Example #16
def attention_block(x, gating, inter_shape, name):
    """
    self gated attention, attention mechanism on spatial dimension
    :param x: input feature map
    :param gating: gate signal, feature map from the lower layer
    :param inter_shape: intermedium channle numer
    :param name: name of attention layer, for output
    :return: attention weighted on spatial dimension feature map
    """

    shape_x = K.int_shape(x)
    shape_g = K.int_shape(gating)

    theta_x = layers.Conv2D(inter_shape, (2, 2),
                            strides=(2, 2),
                            padding='same')(x)  # 16
    shape_theta_x = K.int_shape(theta_x)

    phi_g = layers.Conv2D(inter_shape, (1, 1), padding='same')(gating)
    upsample_g = layers.Conv2DTranspose(
        inter_shape, (3, 3),
        strides=(shape_theta_x[1] // shape_g[1],
                 shape_theta_x[2] // shape_g[2]),
        padding='same')(phi_g)  # 16
    # upsample_g = layers.UpSampling2D(size=(shape_theta_x[1] // shape_g[1], shape_theta_x[2] // shape_g[2]),
    #                                  data_format="channels_last")(phi_g)

    concat_xg = layers.add([upsample_g, theta_x])
    act_xg = layers.Activation('relu')(concat_xg)
    psi = layers.Conv2D(1, (1, 1), padding='same')(act_xg)
    sigmoid_xg = layers.Activation('sigmoid')(psi)
    shape_sigmoid = K.int_shape(sigmoid_xg)
    upsample_psi = layers.UpSampling2D(size=(shape_x[1] // shape_sigmoid[1],
                                             shape_x[2] // shape_sigmoid[2]),
                                       name=name + '_weight')(sigmoid_xg)  # 32

    upsample_psi = expend_as(upsample_psi, shape_x[3])

    y = layers.multiply([upsample_psi, x])

    result = layers.Conv2D(shape_x[3], (1, 1), padding='same')(y)
    result_bn = layers.BatchNormalization()(result)
    return result_bn
Example #17
def classifier_model():  #Building of the CNN
    model = models.Sequential()

    model.add(
        layers.Conv2D(1, [2, 40],
                      input_shape=(1, 40, 173),
                      strides=(1, 1),
                      padding='valid',
                      activation='relu'))
    # model.add(layers.MaxPool1D(pool_size=2, strides=2, padding='valid'))
    model.add(
        layers.Conv2D(1, [2, 20],
                      strides=(1, 1),
                      padding='valid',
                      activation='relu',
                      kernel_initializer=initializers.glorot_normal(),
                      bias_initializer=initializers.Zeros()))
    # model.add(layers.MaxPool1D(pool_size=2, strides=2, padding='valid'))
    model.add(
        layers.Conv2D(1, [2, 10],
                      strides=(3, 3),
                      padding='valid',
                      activation='relu',
                      kernel_initializer=initializers.glorot_normal(),
                      bias_initializer=initializers.Zeros()))
    # model.add(layers.MaxPool1D(pool_size=2, strides=2, padding='valid'))
    model.add(layers.Flatten())
    model.add(
        layers.Dense(1,
                     kernel_initializer=initializers.glorot_normal(),
                     bias_initializer=initializers.Zeros()))

    model.summary()  # summary() prints the model itself and returns None
    return model
Example #18
def residual_layer(input_tensor, nb_in_filters=64, nb_bottleneck_filters=16, filter_sz=3, stage=0, reg=0.0):

    bn_name = 'bn' + str(stage)
    conv_name = 'conv' + str(stage)
    relu_name = 'relu' + str(stage)
    merge_name = 'add' + str(stage)

    # batchnorm-relu-conv, from nb_in_filters to nb_bottleneck_filters via 1x1 conv
    if stage>1: # first activation is just after conv1
        x = layers.BatchNormalization(axis=-1, name=bn_name+'a')(input_tensor)
        x = layers.Activation('relu', name=relu_name+'a')(x)
    else:
        x = input_tensor

    x = layers.Conv2D(nb_bottleneck_filters, (1, 1),
                      kernel_initializer='glorot_normal',
                      kernel_regularizer=regularizers.l2(reg),
                      use_bias=False,
                      name=conv_name+'a')(x)

    # batchnorm-relu-conv, from nb_bottleneck_filters to nb_bottleneck_filters via FxF conv
    x = layers.BatchNormalization(axis=-1, name=bn_name+'b')(x)
    x = layers.Activation('relu', name=relu_name+'b')(x)
    x = layers.Conv2D(nb_bottleneck_filters, (filter_sz, filter_sz),
                      padding='same',
                      kernel_initializer='glorot_normal',
                      kernel_regularizer=regularizers.l2(reg),
                      use_bias = False,
                      name=conv_name+'b')(x)

    # batchnorm-relu-conv, from nb_bottleneck_filters back to nb_in_filters via 1x1 conv
    x = layers.BatchNormalization(axis=-1, name=bn_name+'c')(x)
    x = layers.Activation('relu', name=relu_name+'c')(x)
    x = layers.Conv2D(nb_in_filters, (1, 1),
                      kernel_initializer='glorot_normal',
                      kernel_regularizer=regularizers.l2(reg),
                      name=conv_name+'c')(x)

    # merge
    x = layers.add([x, input_tensor], name=merge_name)

    return x
Example #19
def classifier_model():  # a Sequential model: a linear stack of layers

    # Define a Sequential model
    model = models.Sequential()
    # The first two layers are convolutional layers. For the first layer, we must specify the input shape.
    model.add(
        layers.Conv2D(
            NUM_FILTERS_1,
            3,
            strides=(2, 2),
            activation='relu',
            padding='same',
            input_shape=(28, 28, 1)))  # 2-D convolutional layer
    # a conv layer is defined by its number of filters and its kernel size
    # strides of (2, 2) shift the filter by 2, halving each spatial dimension
    # padding='same' zero-pads, so only the stride reduces the 28x28 input to 14x14
    # ReLU is the activation
    # only the first layer needs an explicit input shape

    model.add(
        layers.Conv2D(NUM_FILTERS_2,
                      3,
                      strides=(2, 2),
                      activation='relu',
                      padding='same'))
    # another convolutional layer: 3x3 filters, 2x2 strides;
    # the input shape is inferred from the previous layer
    # The final layer is a dense layer over a 1-D vector, so we must first
    # flatten the feature maps of the previous layer into a vector.
    model.add(layers.Flatten())
    model.add(layers.Dense(10))
    return model
Example #20
def classifier_model():  #Building of the CNN
    model = models.Sequential()

    model.add(
        layers.Conv2D(1, [2, 10],
                      input_shape=(40, 44, 1),
                      strides=(1, 1),
                      padding='valid',
                      activation='relu',
                      data_format='channels_last'))

    model.add(
        layers.MaxPooling2D(pool_size=(2, 2),
                            strides=None,
                            padding='valid',
                            data_format=None))

    model.add(
        layers.Conv2D(1, [2, 6],
                      strides=(1, 1),
                      padding='valid',
                      activation='relu'))

    model.add(
        layers.MaxPooling2D(pool_size=(2, 2),
                            strides=None,
                            padding='valid',
                            data_format=None))

    model.add(
        layers.Conv2D(1, [2, 3],
                      strides=(1, 1),
                      padding='valid',
                      activation='relu'))

    model.add(layers.Flatten())

    model.add(layers.Dense(1))

    model.summary()  # summary() prints the model itself and returns None
    return model
Example #21
def conv2d_bn(x, filters, num_row, num_col, padding='same', strides=(1, 1)):
    if K.image_data_format() == 'channels_first':
        bn_axis = 1
    else:
        bn_axis = 3

    x = layers.Conv2D(filters, (num_row, num_col),
                      strides=strides,
                      padding=padding)(x)  # use_bias=False,
    x = layers.BatchNormalization(axis=bn_axis)(x)  # scale=False,
    x = layers.Activation('relu')(x)
    return x
Example #22
    def _create_discriminator(self):
        inputs = layers.Input(shape=(HEIGHT, WIDTH, CHANNELS))

        x = layers.Conv2D(128, kernel_size=3)(inputs)
        x = layers.LeakyReLU()(x)

        x = layers.Conv2D(128, kernel_size=4, strides=2)(x)
        x = layers.LeakyReLU()(x)

        x = layers.Conv2D(128, kernel_size=4, strides=2)(x)
        x = layers.LeakyReLU()(x)

        x = layers.Conv2D(128, kernel_size=4, strides=2)(x)
        x = layers.LeakyReLU()(x)

        x = layers.Flatten()(x)
        x = layers.Dropout(self.args.dropout)(x)
        outputs = layers.Dense(1, activation='sigmoid')(x)

        discriminator = models.Model(inputs, outputs)
        return discriminator
Example #23
    def model_definition(self):
        self.model = models.Sequential()

        self.model.add(
            layers.Conv2D(64, (5, 5),
                          activation='relu',
                          input_shape=self.input_shape))
        self.model.add(layers.Conv2D(64, (5, 5), activation='relu'))
        self.model.add(layers.MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))
        self.model.add(layers.Dropout(0.25))

        self.model.add(layers.Conv2D(64, (5, 5), activation='relu'))
        self.model.add(layers.Conv2D(64, (5, 5), activation='relu'))
        self.model.add(layers.MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))
        self.model.add(layers.Dropout(0.25))

        self.model.add(layers.Conv2D(128, (3, 3), activation='relu'))
        self.model.add(layers.AveragePooling2D())
        self.model.add(layers.Conv2D(128, (1, 1), activation='relu'))
        self.model.add(layers.MaxPooling2D(pool_size=(2, 2)))
        self.model.add(layers.Dropout(0.25))

        self.model.add(layers.Flatten())
        self.model.add(layers.Dense(3072, activation='relu'))
        self.model.add(layers.Dropout(0.5))
        self.model.add(layers.Dense(128, activation='relu'))
        self.model.add(layers.Dropout(0.5))
        self.model.add(layers.Dense(3, activation='softmax'))

        optimizer = optimizers.Adamax()  # note: Adamax, not Adam
        self.model.compile(loss='categorical_crossentropy',
                           optimizer=optimizer,
                           metrics=['acc'])
Example #24
def gating_signal(input, out_size, batch_norm=False):
    """
    resize the down layer feature map into the same dimension as the up layer feature map
    using 1x1 conv
    :param input:   down-dim feature map
    :param out_size:output channel number
    :return: the gating feature map with the same dimension of the up layer feature map
    """
    x = layers.Conv2D(out_size, (1, 1), padding='same')(input)
    if batch_norm:
        x = layers.BatchNormalization()(x)
    x = layers.Activation('relu')(x)
    return x
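A minimal sketch of how gating_signal pairs with the attention_block from Example #16 (shapes are illustrative, and the expend_as helper used there is assumed to tile the single-channel attention map across the channel axis):

inputs = layers.Input(shape=(64, 64, 128))      # skip-connection feature map
deeper = layers.MaxPooling2D((2, 2))(inputs)     # stand-in for the lower-layer features
gate = gating_signal(deeper, 128, batch_norm=True)
attended = attention_block(inputs, gate, inter_shape=128, name='att1')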
Example #25
def keras_efficientnet(blocks_args, global_params, training=False):
    inp = layers.Input((224, 224, 3))
    x = layers.Conv2D(32, 3, padding='same', strides=2, name='stem_conv2d', use_bias=False)(inp)
    x = em.batchnorm(name='stem_tpu_batch_normalization')(x)
    x = layers.Lambda(lambda x: em.relu_fn(x))(x)
    idx = 0
    for block in blocks_args:
        x = el.mbConvBlock(x, block, global_params, idx, training=training)
        # x = MBConvBlock(block, global_params, idx)(x, training=training)
        idx += 1
        if block.num_repeat > 1:
            block = block._replace(
                input_filters=block.output_filters, strides=[1, 1])
        for _ in range(block.num_repeat - 1):
            x = el.mbConvBlock(x, block, global_params, idx, training=training)
            idx += 1
    x = layers.Conv2D(1280, 1, name='head_conv2d', use_bias=False)(x)
    x = em.batchnorm(name='head_tpu_batch_normalization')(x)
    x = layers.Lambda(lambda x: em.relu_fn(x))(x)
    x = layers.GlobalAveragePooling2D()(x)
    x = layers.Dense(1000, activation='softmax', name='head_dense', )(x)
    model = models.Model(inp, x, name='efficientnet-b0')
    return model
Example #26
def create_model(img_shape):
    inputs = layers.Input(shape=img_shape)
    encoder0_pool, encoder0 = encoder_block(inputs, 32)
    encoder1_pool, encoder1 = encoder_block(encoder0_pool, 64)
    encoder2_pool, encoder2 = encoder_block(encoder1_pool, 128)
    encoder3_pool, encoder3 = encoder_block(encoder2_pool, 256)
    encoder4_pool, encoder4 = encoder_block(encoder3_pool, 512)
    center = conv_block(encoder4_pool, 1024)
    decoder4 = decoder_block(center, encoder4, 512)
    decoder3 = decoder_block(decoder4, encoder3, 256)
    decoder2 = decoder_block(decoder3, encoder2, 128)
    decoder1 = decoder_block(decoder2, encoder1, 64)
    decoder0 = decoder_block(decoder1, encoder0, 32)
    outputs = layers.Conv2D(1, (1, 1), activation='sigmoid')(decoder0) # change to perceptron?
    model = models.Model(inputs=[inputs], outputs=[outputs])
    return model
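A usage sketch for completeness (the image size, optimizer, and loss are illustrative choices for the single-channel sigmoid output above, not prescribed by the example):

model = create_model((256, 256, 3))
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])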
Example #27
    def attention_refinment_module_new(self, input, name, last_arm=False):
        global_pool = tf.reduce_mean(input, [1, 2], keep_dims=True)
        conv_1 = keras_ly.Conv2D(input.get_shape()[3], [1, 1], padding='SAME', name=name+'_conv1')(global_pool)
        with tf.variable_scope(name+'_conv1_bn') as scope:
            conv_1_bn = slim.batch_norm(conv_1, fused=True, scope=scope)
        sigmoid = tf.sigmoid(conv_1_bn, name=name+'_sigmoid')
        mul_out = tf.multiply(input, sigmoid, name=name+'_multiply')

        if last_arm:
            glob_red = tf.reduce_mean(mul_out, [1, 2], keep_dims=True)
            out_scale = tf.multiply(glob_red, mul_out)
            print('last arm shape')
            print(input.shape)
            print(out_scale.shape)
            return out_scale
        else:
            return mul_out
Example #28
def conv_layer(layer_name, layer_num):
    w = get_weights(vgg_layers, layer_num)
    b = get_bias(vgg_layers, layer_num)

    filters = w[3]
    kernel_size = [w[0], w[1]]
    has_bias = False
    bias_initial = None
    if b is not None:  # truth-testing a multi-element NumPy array would raise ValueError
        has_bias = True
        bias_initial = tf.keras.initializers.Constant(b)
    conv = layers.Conv2D(filters=filters,
                         kernel_size=kernel_size,
                         strides=1,
                         padding='same',
                         use_bias=has_bias,
                         bias_initializer=bias_initial,
                         activation='relu')
    return conv
Example #29
    def shortcut(self, input, residual):
        """Adds a shortcut between input and residual block and merges them with "sum"
        """
        # Expand channels of shortcut to match residual.
        # Stride appropriately to match residual (width, height)
        # Should be int if network architecture is correctly configured.
        input_shape = K.int_shape(input)
        residual_shape = K.int_shape(residual)  # static shape as a tuple of ints

        stride_width = int(round(input_shape[ROW_AXIS] / residual_shape[ROW_AXIS]))
        stride_height = int(round(input_shape[COL_AXIS] / residual_shape[COL_AXIS]))
        equal_channels = input_shape[CHANNEL_AXIS] == residual_shape[CHANNEL_AXIS]

        shortcut = input
        # 1x1 conv if shape is different, else identity.
        if stride_width > 1 or stride_height > 1 or not equal_channels:
            shortcut = layers.Conv2D(filters=residual_shape[CHANNEL_AXIS],
                              kernel_size=(1, 1),
                              strides=(stride_width, stride_height),
                              padding="valid",
                              kernel_initializer="he_normal",
                              kernel_regularizer=regularizers.l2(0.0001))(input)

        return layers.add([shortcut, residual])
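A small worked case of the projection rule above (illustrative, assuming channels-last so that ROW_AXIS=1, COL_AXIS=2, CHANNEL_AXIS=3):

inputs = layers.Input(shape=(32, 32, 64))
residual = layers.Conv2D(128, (3, 3), strides=(2, 2), padding='same')(inputs)  # 16x16x128
# stride_width = round(32 / 16) = 2, stride_height = 2, and channels differ (64 vs 128),
# so shortcut() inserts a 1x1 Conv2D with strides (2, 2) and 128 filters before layers.add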
Example #30
    def create_model(self, img_shape, num_class):

        concat_axis = 3
        inputs = layers.Input(shape = img_shape)

        conv1 = layers.Conv2D(32, (3, 3), activation='relu', padding='same', name='conv1_1')(inputs)
        conv1 = layers.Conv2D(32, (3, 3), activation='relu', padding='same')(conv1)
        pool1 = layers.MaxPooling2D(pool_size=(2, 2))(conv1)
        conv2 = layers.Conv2D(64, (3, 3), activation='relu', padding='same')(pool1)
        conv2 = layers.Conv2D(64, (3, 3), activation='relu', padding='same')(conv2)
        pool2 = layers.MaxPooling2D(pool_size=(2, 2))(conv2)

        conv3 = layers.Conv2D(128, (3, 3), activation='relu', padding='same')(pool2)
        conv3 = layers.Conv2D(128, (3, 3), activation='relu', padding='same')(conv3)
        pool3 = layers.MaxPooling2D(pool_size=(2, 2))(conv3)

        conv4 = layers.Conv2D(256, (3, 3), activation='relu', padding='same')(pool3)
        conv4 = layers.Conv2D(256, (3, 3), activation='relu', padding='same')(conv4)
        pool4 = layers.MaxPooling2D(pool_size=(2, 2))(conv4)

        conv5 = layers.Conv2D(512, (3, 3), activation='relu', padding='same')(pool4)
        conv5 = layers.Conv2D(512, (3, 3), activation='relu', padding='same')(conv5)

        up_conv5 = layers.UpSampling2D(size=(2, 2))(conv5)
        ch, cw = self.get_crop_shape(conv4, up_conv5)
        crop_conv4 = layers.Cropping2D(cropping=(ch,cw))(conv4)
        up6 = layers.concatenate([up_conv5, crop_conv4], axis=concat_axis)
        conv6 = layers.Conv2D(256, (3, 3), activation='relu', padding='same')(up6)
        conv6 = layers.Conv2D(256, (3, 3), activation='relu', padding='same')(conv6)

        up_conv6 = layers.UpSampling2D(size=(2, 2))(conv6)
        ch, cw = self.get_crop_shape(conv3, up_conv6)
        crop_conv3 = layers.Cropping2D(cropping=(ch,cw))(conv3)
        up7 = layers.concatenate([up_conv6, crop_conv3], axis=concat_axis) 
        conv7 = layers.Conv2D(128, (3, 3), activation='relu', padding='same')(up7)
        conv7 = layers.Conv2D(128, (3, 3), activation='relu', padding='same')(conv7)

        up_conv7 = layers.UpSampling2D(size=(2, 2))(conv7)
        ch, cw = self.get_crop_shape(conv2, up_conv7)
        crop_conv2 = layers.Cropping2D(cropping=(ch,cw))(conv2)
        up8 = layers.concatenate([up_conv7, crop_conv2], axis=concat_axis)
        conv8 = layers.Conv2D(64, (3, 3), activation='relu', padding='same')(up8)
        conv8 = layers.Conv2D(64, (3, 3), activation='relu', padding='same')(conv8)

        up_conv8 = layers.UpSampling2D(size=(2, 2))(conv8)
        ch, cw = self.get_crop_shape(conv1, up_conv8)
        crop_conv1 = layers.Cropping2D(cropping=(ch,cw))(conv1)
        up9 = layers.concatenate([up_conv8, crop_conv1], axis=concat_axis)
        conv9 = layers.Conv2D(32, (3, 3), activation='relu', padding='same')(up9)
        conv9 = layers.Conv2D(32, (3, 3), activation='relu', padding='same')(conv9)

        # a final 1x1 conv maps the features to num_class output channels
        # (otherwise the num_class argument is unused)
        conv10 = layers.Conv2D(num_class, (1, 1))(conv9)

        model = models.Model(inputs=inputs, outputs=conv10)

        return model
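get_crop_shape is not defined in this example. A common implementation, shown here as a standalone sketch under that assumption (not necessarily the author's code), returns per-side crop amounts that shrink target to refer's spatial size, splitting any odd difference:

def get_crop_shape(target, refer):
    # width, the 3rd dimension
    cw = target.get_shape()[2].value - refer.get_shape()[2].value
    assert cw >= 0
    cw1, cw2 = cw // 2, cw - cw // 2
    # height, the 2nd dimension
    ch = target.get_shape()[1].value - refer.get_shape()[1].value
    assert ch >= 0
    ch1, ch2 = ch // 2, ch - ch // 2
    return (ch1, ch2), (cw1, cw2)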