Example #1
def identity_block(input_tensor, kernel_size, filters, stage, block):

    filters1, filters2, filters3 = filters

    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'

    x = Conv2D(filters1, (1, 1),
               kernel_initializer=RandomNormal(stddev=0.02),
               name=conv_name_base + '2a')(input_tensor)
    x = BatchNormalization(trainable=False, name=bn_name_base + '2a')(x)
    x = Activation('relu')(x)

    x = Conv2D(filters2,
               kernel_size,
               padding='same',
               kernel_initializer=RandomNormal(stddev=0.02),
               name=conv_name_base + '2b')(x)
    x = BatchNormalization(trainable=False, name=bn_name_base + '2b')(x)
    x = Activation('relu')(x)

    x = Conv2D(filters3, (1, 1),
               kernel_initializer=RandomNormal(stddev=0.02),
               name=conv_name_base + '2c')(x)
    x = BatchNormalization(trainable=False, name=bn_name_base + '2c')(x)

    x = layers.add([x, input_tensor])
    x = Activation('relu')(x)
    return x
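A minimal usage sketch for the block above (assuming the same Keras imports used in the snippet are in scope); note that the element-wise add requires the input tensor's channel count to equal filters3:

from tensorflow.keras import Input, Model

inputs = Input(shape=(56, 56, 256))
x = identity_block(inputs, 3, [64, 64, 256], stage=2, block='b')
x = identity_block(x, 3, [64, 64, 256], stage=2, block='c')
model = Model(inputs, x)
model.summary()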
Example #2
def residual(x, out_dim, name, stride=1):
    #-----------------------------------#
    #   Residual block structure
    #   Two variants:
    #   1. shortcut branch with a convolution: changes the channel dimension
    #   2. shortcut branch without a convolution: simply adds depth
    #-----------------------------------#
    shortcut = x
    num_channels = K.int_shape(shortcut)[-1]

    x = ZeroPadding2D(padding=1, name=name + '.pad1')(x)
    x = Conv2D(out_dim, 3, strides=stride, kernel_initializer=RandomNormal(stddev=0.02), use_bias=False, name=name + '.conv1')(x)
    x = BatchNormalization(epsilon=1e-5, name=name + '.bn1')(x)
    x = Activation('relu', name=name + '.relu1')(x)

    x = Conv2D(out_dim, 3, padding='same', kernel_initializer=RandomNormal(stddev=0.02), use_bias=False, name=name + '.conv2')(x)
    x = BatchNormalization(epsilon=1e-5, name=name + '.bn2')(x)

    if num_channels != out_dim or stride != 1:
        shortcut = Conv2D(out_dim, 1, strides=stride, kernel_initializer=RandomNormal(stddev=0.02), use_bias=False, name=name + '.shortcut.0')(
            shortcut)
        shortcut = BatchNormalization(epsilon=1e-5, name=name + '.shortcut.1')(shortcut)

    x = Add(name=name + '.add')([x, shortcut])
    x = Activation('relu', name=name + '.relu')(x)
    return x
Example #3
    def call(self, layer_input):

        # first convolutional layer
        x = tf.keras.layers.Conv2D(
            self.num_filters,
            self.filter_shape,
            strides=1,
            padding='same',
            kernel_initializer=RandomNormal(mean=0, stddev=0.02),
            bias_initializer=RandomNormal(mean=0, stddev=0.02))(layer_input)
        x = tf.keras.layers.BatchNormalization()(x)
        x = tf.keras.layers.Activation(tf.keras.activations.relu)(x)

        # second convolutional layer
        x = tf.keras.layers.Conv2D(
            self.num_filters,
            self.filter_shape,
            strides=1,
            padding='same',
            kernel_initializer=RandomNormal(mean=0, stddev=0.02),
            bias_initializer=RandomNormal(mean=0, stddev=0.02))(x)
        x = tf.keras.layers.BatchNormalization()(x)

        # Return elementwise summation of resblock input and output
        return tf.add(x, layer_input)
Example #4
def generator(img_shape):
    in_img = Input(shape=img_shape)
    e1 = encoder(in_img, 64, batchnorm=False)
    e2 = encoder(e1, 128)
    e3 = encoder(e2, 256)
    e4 = encoder(e3, 512)
    e5 = encoder(e4, 512)
    e6 = encoder(e5, 512)
    e7 = encoder(e6, 512)

    b = Conv2D(512, (4, 4),
               strides=(2, 2),
               padding='same',
               kernel_initializer=RandomNormal(stddev=0.02))(e7)
    b = LeakyReLU()(b)

    d1 = decoder(b, e7, 512)
    d2 = decoder(d1, e6, 512)
    d3 = decoder(d2, e5, 512)
    d4 = decoder(d3, e4, 512, dropout=False)
    d5 = decoder(d4, e3, 256, dropout=False)
    d6 = decoder(d5, e2, 128, dropout=False)
    d7 = decoder(d6, e1, 64, dropout=False)

    out = Conv2DTranspose(3, (4, 4),
                          strides=(2, 2),
                          padding='same',
                          kernel_initializer=RandomNormal(stddev=0.02))(d7)
    out = Activation('tanh')(out)

    model = Model(in_img, out)
    return model
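The generator above relies on encoder and decoder helpers that are not shown. The sketch below illustrates what they typically look like in a pix2pix-style U-Net; the exact normalization, dropout rate, and activation placement are assumptions, not the original author's code:

from tensorflow.keras.layers import (Conv2D, Conv2DTranspose, BatchNormalization,
                                     LeakyReLU, Activation, Dropout, Concatenate)
from tensorflow.keras.initializers import RandomNormal

def encoder(x, filters, batchnorm=True):
    # 4x4 strided convolution halves the spatial resolution
    x = Conv2D(filters, (4, 4), strides=(2, 2), padding='same',
               kernel_initializer=RandomNormal(stddev=0.02))(x)
    if batchnorm:
        x = BatchNormalization()(x)
    return LeakyReLU(alpha=0.2)(x)

def decoder(x, skip, filters, dropout=True):
    # 4x4 transposed convolution doubles the spatial resolution,
    # then concatenates the matching encoder feature map (U-Net skip connection)
    x = Conv2DTranspose(filters, (4, 4), strides=(2, 2), padding='same',
                        kernel_initializer=RandomNormal(stddev=0.02))(x)
    x = BatchNormalization()(x)
    if dropout:
        x = Dropout(0.5)(x)
    x = Concatenate()([x, skip])
    return Activation('relu')(x)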
Example #5
def get_generator_unet(n_block=3):
    input = Input(shape=(image_size, image_size, input_channel))
    # encoder
    e0 = Conv2D(64, kernel_size=4, padding='same', kernel_initializer=RandomNormal(
        mean=0.0, stddev=0.02), bias_initializer=Zeros())(input)  # use reflection padding instead
    e0 = BatchNormalization()(e0)
    e0 = Activation('relu')(e0)
    e1 = conv_block(e0, 128, downsample=True, dropout=False)  # 1/2
    e2 = conv_block(e1, 256, downsample=True, dropout=False)  # 1/4
    e3 = conv_block(e2, 512, downsample=True, dropout=False)  # 1/8
    e4 = conv_block(e3, 512, downsample=True, dropout=False)  # 1/16
    e5 = conv_block(e4, 512, downsample=True, dropout=False)  # 1/32
    e6 = conv_block(e5, 512, downsample=True, dropout=False)  # 1/64
    e7 = conv_block(e6, 512, downsample=True, dropout=False)  # 1/128
    # decoder
    d0 = conv_block(e7, 512, downsample=False, dropout=True)  # 1/64
    d1 = Concatenate(axis=-1)([d0, e6])
    d1 = conv_block(d1, 512, downsample=False, dropout=True)  # 1/32
    d2 = Concatenate(axis=-1)([d1, e5])
    d2 = conv_block(d2, 512, downsample=False, dropout=True)  # 1/16
    d3 = Concatenate(axis=-1)([d2, e4])
    d3 = conv_block(d3, 512, downsample=False, dropout=True)  # 1/8
    d4 = Concatenate(axis=-1)([d3, e3])
    d4 = conv_block(d4, 256, downsample=False, dropout=True)  # 1/4
    d5 = Concatenate(axis=-1)([d4, e2])
    d5 = conv_block(d5, 128, downsample=False, dropout=True)  # 1/2
    d6 = Concatenate(axis=-1)([d5, e1])
    d6 = conv_block(d6, 64, downsample=False, dropout=True)  # 1
    # out
    x = Conv2D(output_channel, kernel_size=3, padding='same', kernel_initializer=RandomNormal(
        mean=0.0, stddev=0.02), bias_initializer=Zeros())(d6)  # use reflection padding instead
    x = BatchNormalization()(x)
    x = Activation('tanh')(x)
    generator = Model(inputs=input, outputs=x)
    return generator
Example #6
def get_rpn(base_layers, num_anchors):
    #----------------------------------------------------#
    #   Consolidate features with a 512-channel 3x3 convolution
    #----------------------------------------------------#
    x = Conv2D(512, (3, 3),
               padding='same',
               activation='relu',
               kernel_initializer=RandomNormal(stddev=0.02),
               name='rpn_conv1')(base_layers)

    #----------------------------------------------------#
    #   Adjust the channel count with 1x1 convolutions to obtain the predictions
    #----------------------------------------------------#
    x_class = Conv2D(num_anchors, (1, 1),
                     activation='sigmoid',
                     kernel_initializer=RandomNormal(stddev=0.02),
                     name='rpn_out_class')(x)
    x_regr = Conv2D(num_anchors * 4, (1, 1),
                    activation='linear',
                    kernel_initializer=RandomNormal(stddev=0.02),
                    name='rpn_out_regress')(x)

    x_class = Reshape((-1, 1), name="classification")(x_class)
    x_regr = Reshape((-1, 4), name="regression")(x_regr)
    return [x_class, x_regr]
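A hypothetical way to attach these RPN heads to a backbone feature map (VGG16 and the 600x600 input size are illustrative assumptions; any convolutional feature extractor works):

from tensorflow.keras.applications import VGG16
from tensorflow.keras.models import Model

backbone = VGG16(include_top=False, input_shape=(600, 600, 3))
base_layers = backbone.get_layer('block5_conv3').output
rpn_class, rpn_regr = get_rpn(base_layers, num_anchors=9)
rpn_model = Model(backbone.input, [rpn_class, rpn_regr])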
Example #7
def get_generator(n_block=3):
    input = Input(shape=(image_size, image_size, input_channel))
    x = Conv2D(64, kernel_size=7, padding='same', kernel_initializer=RandomNormal(
        mean=0.0, stddev=0.02), bias_initializer=Zeros())(input)  # use reflection padding instead
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    # downsample
    x = Conv2D(128, kernel_size=3, strides=2, padding='same', kernel_initializer=RandomNormal(
        mean=0.0, stddev=0.02), bias_initializer=Zeros())(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    # downsample
    x = Conv2D(256, kernel_size=3, strides=2, padding='same', kernel_initializer=RandomNormal(
        mean=0.0, stddev=0.02), bias_initializer=Zeros())(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    for i in range(n_block):
        x = residual_block(x)
    # upsample
    x = Conv2DTranspose(128, kernel_size=3, strides=2, padding='same',
                        kernel_initializer=RandomNormal(mean=0.0, stddev=0.02), bias_initializer=Zeros())(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    # upsample
    x = Conv2DTranspose(64, kernel_size=3, strides=2, padding='same', kernel_initializer=RandomNormal(
        mean=0.0, stddev=0.02), bias_initializer=Zeros())(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    # out
    x = Conv2D(output_channel, kernel_size=7, padding='same', kernel_initializer=RandomNormal(
        mean=0.0, stddev=0.02), bias_initializer=Zeros())(x)  # use reflection padding instead
    x = BatchNormalization()(x)
    x = Activation('tanh')(x)
    generator = Model(inputs=input, outputs=x)
    return generator
Example #8
    def build_discriminator(self):
        with tf.name_scope('discriminator') as scope:
            model = Sequential(name=scope)
            model.add(
                Conv2D(64, (5, 5),
                       strides=(2, 2),
                       padding='same',
                       kernel_initializer=RandomNormal(mean=0.0, stddev=0.01)))
            model.add(LeakyReLU())
            model.add(Dropout(0.3))

            model.add(
                Conv2D(128, (5, 5),
                       strides=(2, 2),
                       padding='same',
                       kernel_initializer=RandomNormal(mean=0.0, stddev=0.01)))
            model.add(LeakyReLU())
            model.add(Dropout(0.3))

            model.add(Flatten())
            model.add(
                Dense(1,
                      activation='sigmoid',
                      kernel_initializer=RandomNormal(mean=0.0, stddev=0.01)))

            return model
Example #9
def Attention_CNN(input_shape=(20, 4, 1), filters=16, conv_1=7):
    X_input = Input(input_shape)

    # stage1
    X = Conv2D(filters=filters,
               kernel_size=(conv_1, 4),
               strides=(1, 1),
               name="conv1",
               padding='valid',
               activation='relu',
               kernel_initializer=RandomNormal(stddev=0.05))(X_input)
    #X = BatchNormalization(axis=3, name='bn_conv1')(X)
    X = Activation('relu')(X)
    y = se_block(X)
    X = add([X, y])
    X = MaxPooling2D(pool_size=(2, 1))(X)
    # output layers
    X = Flatten()(X)
    X = Dense(128,
              activation='relu',
              kernel_initializer=RandomNormal(stddev=0.05))(X)
    X = Dropout(0.4)(X)
    X = Dense(1,
              name='fc',
              activation='linear',
              kernel_initializer=RandomNormal(stddev=0.05))(X)

    model = Model(inputs=X_input, outputs=X, name='simplenet')

    return model
Example #10
def BuildInitialModel(input_dim):
    """
    Builds a model with a single hidden layer and a binary sigmoid output layer.

    # Arguments :
        input_dim - the dimension of the 1D input vector
    """
    l1init = RandomNormal(
        mean=0.0, stddev=0.1,
        seed=None)  # original model parameters for initializing input layer
    hiddeninit = RandomNormal(
        mean=0.0, stddev=0.05,
        seed=None)  # original model parameters for initializing hidden layers
    outputinit = RandomNormal(
        mean=0.0, stddev=0.001,
        seed=None)  # original model parameters for initializing output layer

    model = Sequential()
    model.add(
        Dense(300,
              input_dim=input_dim,
              activation='relu',
              kernel_initializer=l1init))
    model.add(Dropout(0.5))
    model.add(Dense(1, activation='sigmoid', kernel_initializer=outputinit))
    return model
Example #11
def _get_convolutional_network(kernel_regularizer_conv,
                               kernel_regularizer_dense):
    convolutional_net = Sequential()
    _add_convolutional_layer(convolutional_net, 64, (10, 10),
                             kernel_regularizer_conv, True)
    # convolutional_net.add(SpatialDropout2D(0.2))
    # convolutional_net.add(MaxPool2D(pool_size=(3, 3), strides=(3, 3)))

    _add_convolutional_layer(convolutional_net, 128, (7, 7),
                             kernel_regularizer_conv, True)
    # convolutional_net.add(SpatialDropout2D(0.5))
    # convolutional_net.add(MaxPool2D(pool_size=(3, 3), strides=(3, 3)))

    _add_convolutional_layer(convolutional_net, 128, (4, 4),
                             kernel_regularizer_conv, True)
    # convolutional_net.add(SpatialDropout2D(0.5))
    # convolutional_net.add(MaxPool2D())

    _add_convolutional_layer(convolutional_net, 256, (4, 4),
                             kernel_regularizer_conv, False)

    convolutional_net.add(Flatten())
    convolutional_net.add(
        Dense(units=4096,
              activation='sigmoid',
              kernel_initializer=RandomNormal(mean=0, stddev=0.01),
              bias_initializer=RandomNormal(mean=0.5, stddev=0.01),
              kernel_regularizer=l2(kernel_regularizer_dense)))
    # convolutional_net.add(BatchNormalization())

    return convolutional_net
Example #12
def conv_2d(*args, **kwargs):
    return Conv2D(
        *args, **kwargs,
        kernel_initializer=RandomNormal(0.0, 0.01),
        bias_initializer=RandomNormal(0.5, 0.01),
        kernel_regularizer=l2()
    )
Example #13
def construct_network2():
    model = tf.keras.models.Sequential([
        InputLayer(input_shape=(28, 28, 1)),
        Conv2D(20,
               3,
               kernel_initializer=RandomNormal(0, 0.01),
               padding="same",
               activation="relu"),
        BatchNormalization(),
        MaxPool2D(pool_size=2, strides=2, padding="same"),
        Conv2D(30,
               3,
               kernel_initializer=RandomNormal(0, 0.01),
               padding="same",
               activation="relu"),
        BatchNormalization(),
        MaxPool2D(pool_size=2, strides=2, padding="same"),
        Conv2D(50,
               3,
               kernel_initializer=RandomNormal(0, 0.01),
               padding="same",
               activation="relu"),
        BatchNormalization(),
        MaxPool2D(pool_size=2, strides=2, padding="same"),
        Flatten(),
        Dense(10, kernel_initializer=RandomNormal(0, 0.01)),
    ])
    model.compile(
        optimizer=SGD(learning_rate=0.01, momentum=0.9),
        loss=SparseCategoricalCrossentropy(from_logits=True),
        metrics=["accuracy"],
    )
    model.summary()
    return model
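A hypothetical training snippet for the model above, using the standard Keras MNIST loader (the normalization and batch size are assumptions); integer labels work directly because the loss is SparseCategoricalCrossentropy with from_logits=True:

from tensorflow.keras.datasets import mnist

(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train[..., None].astype("float32") / 255.0
x_test = x_test[..., None].astype("float32") / 255.0

model = construct_network2()
model.fit(x_train, y_train, batch_size=128, epochs=5,
          validation_data=(x_test, y_test))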
Example #14
    def __init__(self, filters, kernel_size):
        super(ConvBlock, self).__init__()
        self.filters = filters
        self.kernel_size = kernel_size

        self.stddev = _calc_initializer_stddev(filters, kernel_size)
        self.conv1 = layers.Conv2D(
            filters=self.filters,
            kernel_size=self.kernel_size,
            strides=1,
            padding='same',
            kernel_initializer=RandomNormal(stddev=self.stddev))
        self.bn1 = layers.BatchNormalization()
        self.relu1 = layers.ReLU()

        self.conv2 = layers.Conv2D(
            filters=self.filters,
            kernel_size=self.kernel_size,
            strides=1,
            padding='same',
            kernel_initializer=RandomNormal(stddev=self.stddev))
        self.bn2 = layers.BatchNormalization()
        self.relu2 = layers.ReLU()

        self.dropout = layers.Dropout(0.2)
Example #15
    def __design_generator(self):
        inputs = Input(shape=self.latent_features)
        x = Dense(units=self.latent_features * 32,
                  kernel_initializer=RandomNormal(stddev=0.02))(inputs)
        x = LeakyReLU(alpha=0.2)(x)
        x = Reshape(target_shape=(self.latent_features, 32))(x)
        x = Conv1DTranspose(filters=32,
                            kernel_size=4,
                            strides=2,
                            padding='same',
                            kernel_initializer=RandomNormal(stddev=0.02))(x)
        x = LeakyReLU(alpha=0.2)(x)
        x = Conv1DTranspose(filters=32,
                            kernel_size=4,
                            strides=2,
                            padding='same',
                            kernel_initializer=RandomNormal(stddev=0.02))(x)
        x = LeakyReLU(alpha=0.2)(x)
        x = Conv1DTranspose(filters=32,
                            kernel_size=4,
                            strides=2,
                            padding='same',
                            kernel_initializer=RandomNormal(stddev=0.02))(x)
        x = LeakyReLU(alpha=0.2)(x)
        x = Flatten()(x)
        outputs = Dense(units=self.features,
                        activation='tanh',
                        kernel_initializer=RandomNormal(stddev=0.02))(x)
        model = Model(inputs=inputs, outputs=outputs)
        return model
Example #16
def KochNet(input_shape=(105, 105, 3)):
    """
    The conv net used as backbone in
    [Siamese Neural Networks for One-shot Image Recognition](https://www.cs.cmu.edu/~rsalakhu/papers/oneshot1.pdf)
    """

    model = Sequential(name="koch_net")
    model.add(Input(input_shape))
    model.add(conv_2d(64, (10, 10)))
    model.add(MaxPooling2D())
    model.add(conv_2d(128, (7, 7), activation="relu"))
    model.add(MaxPooling2D())
    model.add(conv_2d(128, (4, 4), activation="relu"))
    model.add(MaxPooling2D())
    model.add(conv_2d(256, (4, 4), activation="relu"))
    model.add(Flatten())
    model.add(
        Dense(
            4096,
            activation="sigmoid",
            kernel_initializer=RandomNormal(0.0, 0.2),
            bias_initializer=RandomNormal(0.5, 0.01),
        ))

    return model
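A sketch of how this backbone is typically used in the Koch et al. siamese setup: twin embeddings joined by a component-wise L1 distance feeding a single sigmoid unit (the distance layer and the plain Dense head are assumptions based on the paper, not code from this repository):

from tensorflow.keras.layers import Input, Lambda, Dense
from tensorflow.keras.models import Model
from tensorflow.keras import backend as K

def siamese_net(input_shape=(105, 105, 3)):
    backbone = KochNet(input_shape)
    left, right = Input(input_shape), Input(input_shape)
    # component-wise |f(a) - f(b)| between the two 4096-d embeddings
    l1_distance = Lambda(lambda t: K.abs(t[0] - t[1]))([backbone(left), backbone(right)])
    out = Dense(1, activation='sigmoid')(l1_distance)
    return Model([left, right], out)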
Example #17
def create_generator():
    size = 4
    model = Sequential()
    model.add(
        Dense(units=size * size * 256,
              kernel_initializer=RandomNormal(0, 0.02),
              input_dim=noise_img_dim))
    model.add(LeakyReLU(0.2))
    model.add(Reshape(target_shape=(size, size, 256)))
    model.add(
        Conv2DTranspose(128, (4, 4),
                        strides=2,
                        padding="same",
                        kernel_initializer=RandomNormal(0, 0.02)))
    model.add(LeakyReLU(0.2))
    model.add(
        Conv2DTranspose(128, (4, 4),
                        strides=2,
                        padding="same",
                        kernel_initializer=RandomNormal(0, 0.02)))
    model.add(LeakyReLU(0.2))
    model.add(
        Conv2DTranspose(128, (4, 4),
                        strides=2,
                        padding="same",
                        kernel_initializer=RandomNormal(0, 0.02)))
    model.add(LeakyReLU(0.2))
    model.add(
        Conv2D(channels, (3, 3),
               padding="same",
               kernel_initializer=RandomNormal(0, 0.02)))
    model.add(Activation("tanh"))
    model.compile(optimizer=optimizer, loss="binary_crossentropy")
    return model
Example #18
def _depthwise_conv_block(inputs,
                          pointwise_conv_filters,
                          alpha,
                          depth_multiplier=1,
                          strides=(1, 1),
                          block_id=1):

    pointwise_conv_filters = int(pointwise_conv_filters * alpha)

    # depthwise separable convolution
    x = DepthwiseConv2D((3, 3),
                        padding='same',
                        depth_multiplier=depth_multiplier,
                        depthwise_initializer=RandomNormal(stddev=0.02),
                        strides=strides,
                        use_bias=False,
                        name='conv_dw_%d' % block_id)(inputs)
    x = BatchNormalization(name='conv_dw_%d_bn' % block_id)(x)
    x = Activation(relu6, name='conv_dw_%d_relu' % block_id)(x)

    # 1x1 convolution
    x = Conv2D(pointwise_conv_filters, (1, 1),
               kernel_initializer=RandomNormal(stddev=0.02),
               padding='same',
               use_bias=False,
               strides=(1, 1),
               name='conv_pw_%d' % block_id)(x)
    x = BatchNormalization(name='conv_pw_%d_bn' % block_id)(x)
    return Activation(relu6, name='conv_pw_%d_relu' % block_id)(x)
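relu6 is referenced above but not defined in this snippet; a minimal definition consistent with the MobileNet implementations is:

from tensorflow.keras import backend as K

def relu6(x):
    # ReLU capped at 6, as used throughout MobileNet
    return K.relu(x, max_value=6)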
Example #19
def create_embedding_dict(sparse_feature_columns,
                          varlen_sparse_feature_columns,
                          init_std,
                          seed,
                          l2_reg,
                          prefix='sparse_',
                          seq_mask_zero=True):
    sparse_embedding = {
        feat.embedding_name:
        Embedding(feat.vocabulary_size,
                  feat.embedding_dim,
                  embeddings_initializer=RandomNormal(mean=0.0,
                                                      stddev=init_std,
                                                      seed=seed),
                  embeddings_regularizer=l2(l2_reg),
                  name=prefix + '_emb_' + feat.embedding_name)
        for feat in sparse_feature_columns
    }

    if varlen_sparse_feature_columns and len(
            varlen_sparse_feature_columns) > 0:
        for feat in varlen_sparse_feature_columns:
            # if feat.name not in sparse_embedding:
            sparse_embedding[feat.embedding_name] = Embedding(
                feat.vocabulary_size,
                feat.embedding_dim,
                embeddings_initializer=RandomNormal(mean=0.0,
                                                    stddev=init_std,
                                                    seed=seed),
                embeddings_regularizer=l2(l2_reg),
                name=prefix + '_seq_emb_' + feat.name,
                mask_zero=seq_mask_zero)
    return sparse_embedding
Example #20
def create_model():
    model = Sequential()

    model.add(Conv2D(2, 3, activation = None,use_bias = False, \
                input_shape = (28, 28, 1), padding = "valid", \
                data_format='channels_last', dilation_rate = (1, 1), strides = (1, 1), \
                kernel_initializer = RandomNormal(mean = 0.0, stddev = 0.05, seed = int(time()) ) \
            ))

    model.add(ReLU(max_value=None, negative_slope=0, threshold=0))

    model.add(Conv2D(4, 3, activation = None,use_bias = False, \
                padding = "valid", \
                data_format='channels_last', dilation_rate = (1, 1), strides = (1, 1), \
                kernel_initializer = RandomNormal(mean = 0.0, stddev = 0.05, seed = int(time()) ) \
            ))

    model.add(ReLU(max_value=None, negative_slope=0, threshold=0))

    model.add(
        MaxPooling2D((2, 2),
                     strides=(2, 2),
                     padding="valid",
                     data_format="channels_last"))

    model.add(Conv2D(8, 3, activation = None,use_bias = False, \
                padding = "valid", \
                data_format='channels_last', dilation_rate = (1, 1), strides = (1, 1), \
                kernel_initializer = RandomNormal(mean = 0.0, stddev = 0.05, seed = int(time()) ) \
            ))

    model.add(ReLU(max_value=None, negative_slope=0, threshold=0))

    model.add(Conv2D(16, 3, activation = None,use_bias = False, \
                padding = "valid", \
                data_format='channels_last', dilation_rate = (1, 1), strides = (1, 1), \
                kernel_initializer = RandomNormal(mean = 0.0, stddev = 0.05, seed = int(time()) ) \
            ))

    model.add(ReLU(max_value=None, negative_slope=0, threshold=0))

    model.add(
        MaxPooling2D((2, 2),
                     strides=(2, 2),
                     padding="valid",
                     data_format="channels_last"))

    model.add(Flatten())

    model.add(Dense(10, activation = None, use_bias = True, \
                   kernel_initializer = RandomNormal(mean = 0.0, stddev = 0.05, seed = int(time()) ) \
                   ))

    model.add(Softmax(axis=1))

    optimizer = Adam(learning_rate=0.0001)
    model.compile(loss="categorical_crossentropy", \
                    optimizer=optimizer, metrics=["accuracy"])
    return model
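Hypothetical usage of the model above (dataset and hyperparameters are assumptions); the labels must be one-hot encoded because the network ends in a Softmax and is compiled with categorical_crossentropy:

from tensorflow.keras.datasets import mnist
from tensorflow.keras.utils import to_categorical

(x_train, y_train), _ = mnist.load_data()
x_train = x_train[..., None].astype("float32") / 255.0
y_train = to_categorical(y_train, 10)

model = create_model()
model.fit(x_train, y_train, batch_size=64, epochs=3)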
Example #21
def generator_model():
    """
    Create and return a generator model
    :return: generator model
    """

    Generator = Sequential(name='Generator')

    # Fully Connected layer --> 512 activation maps of 2x2
    Generator.add(
        Dense(units=512 * 2 * 2,
              input_shape=GENERATOR_INPUT,
              kernel_initializer=RandomNormal(stddev=GAUSS_SD)))
    Generator.add(Reshape((2, 2, 512)))
    Generator.add(BatchNormalization(momentum=MOMENTUM))
    Generator.add(LeakyReLU(ALPHA))

    # Upsampling : 2x2x512 --> 4x4x256
    Generator.add(
        Conv2DTranspose(filters=256,
                        kernel_size=(5, 5),
                        strides=2,
                        padding='same',
                        kernel_initializer=RandomNormal(stddev=GAUSS_SD)))
    Generator.add(BatchNormalization(momentum=MOMENTUM))
    Generator.add(LeakyReLU(ALPHA))

    # Upsampling : 4x4x256 --> 8x8x256
    Generator.add(
        Conv2DTranspose(filters=256,
                        kernel_size=(5, 5),
                        strides=2,
                        padding='same',
                        kernel_initializer=RandomNormal(stddev=GAUSS_SD)))
    Generator.add(BatchNormalization(momentum=MOMENTUM))
    Generator.add(LeakyReLU(ALPHA))

    # Upsampling : 8x8x256 --> 16x16x128
    Generator.add(
        Conv2DTranspose(filters=128,
                        kernel_size=(5, 5),
                        strides=2,
                        padding='same',
                        kernel_initializer=RandomNormal(stddev=GAUSS_SD)))
    Generator.add(BatchNormalization(momentum=MOMENTUM))
    Generator.add(LeakyReLU(ALPHA))

    # Upsampling : 16x16x128 --> 32x32x3
    Generator.add(
        Conv2DTranspose(filters=3,
                        kernel_size=(5, 5),
                        strides=2,
                        padding='same',
                        kernel_initializer=RandomNormal(stddev=GAUSS_SD),
                        activation='tanh'))

    return Generator
Example #22
def _inverted_res_block(inputs, expansion, stride, alpha, filters, block_id):
    in_channels = backend.int_shape(inputs)[-1]
    pointwise_conv_filters = int(filters * alpha)
    pointwise_filters = _make_divisible(pointwise_conv_filters, 8)

    x = inputs
    prefix = 'block_{}_'.format(block_id)
    # part 1: channel expansion
    if block_id:
        # Expand
        x = Conv2D(expansion * in_channels,
                   kernel_initializer=RandomNormal(stddev=0.02),
                   kernel_size=1,
                   padding='same',
                   use_bias=False,
                   activation=None,
                   name=prefix + 'expand')(x)
        x = BatchNormalization(epsilon=1e-3,
                               momentum=0.999,
                               name=prefix + 'expand_BN')(x)
        x = Activation(relu6, name=prefix + 'expand_relu')(x)
    else:
        prefix = 'expanded_conv_'

    if stride == 2:
        x = ZeroPadding2D(padding=correct_pad(x, 3), name=prefix + 'pad')(x)

    # part 2: depthwise separable convolution
    x = DepthwiseConv2D(kernel_size=3,
                        depthwise_initializer=RandomNormal(stddev=0.02),
                        strides=stride,
                        activation=None,
                        use_bias=False,
                        padding='same' if stride == 1 else 'valid',
                        name=prefix + 'depthwise')(x)
    x = BatchNormalization(epsilon=1e-3,
                           momentum=0.999,
                           name=prefix + 'depthwise_BN')(x)

    x = Activation(relu6, name=prefix + 'depthwise_relu')(x)

    # part 3: project (compress) the features; no ReLU here so the features are not destroyed
    x = Conv2D(pointwise_filters,
               kernel_initializer=RandomNormal(stddev=0.02),
               kernel_size=1,
               padding='same',
               use_bias=False,
               activation=None,
               name=prefix + 'project')(x)

    x = BatchNormalization(epsilon=1e-3,
                           momentum=0.999,
                           name=prefix + 'project_BN')(x)

    if in_channels == pointwise_filters and stride == 1:
        return Add(name=prefix + 'add')([inputs, x])
    return x
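_make_divisible and correct_pad are used above but not defined in this snippet; the sketches below follow the keras_applications MobileNetV2 helpers (channels_last layout is assumed, and backend refers to the Keras backend already imported by the snippet):

def _make_divisible(v, divisor, min_value=None):
    # Round v to the nearest multiple of divisor without dropping below 90% of v
    if min_value is None:
        min_value = divisor
    new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
    if new_v < 0.9 * v:
        new_v += divisor
    return new_v

def correct_pad(inputs, kernel_size):
    # Asymmetric zero padding so a stride-2 'valid' convolution lines up
    # with 'same'-style alignment
    input_size = backend.int_shape(inputs)[1:3]
    if isinstance(kernel_size, int):
        kernel_size = (kernel_size, kernel_size)
    if input_size[0] is None:
        adjust = (1, 1)
    else:
        adjust = (1 - input_size[0] % 2, 1 - input_size[1] % 2)
    correct = (kernel_size[0] // 2, kernel_size[1] // 2)
    return ((correct[0] - adjust[0], correct[0]),
            (correct[1] - adjust[1], correct[1]))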
Example #23
    def build(self, input_shape):

        self.routing_logits = self.add_weight(shape=[1, self.k_max, self.max_len],
                                              initializer=RandomNormal(stddev=self.init_std),
                                              trainable=False, name="B", dtype=tf.float32)
        self.bilinear_mapping_matrix = self.add_weight(shape=[self.input_units, self.out_units],
                                                       initializer=RandomNormal(stddev=self.init_std),
                                                       name="S", dtype=tf.float32)
        super(CapsuleLayer, self).build(input_shape)
Example #24
    def __init__(self,
                 num_blocks,
                 num_classes,
                 num_depth=4,
                 num_features=256,
                 use_separable_conv=False,
                 expand_ratio=4.,
                 use_squeeze_excite=False,
                 squeeze_ratio=16.,
                 groups=16,
                 **kwargs):
        self.num_blocks = num_blocks
        self.num_classes = num_classes
        self.num_depth = num_depth
        self.num_features = num_features
        self.use_separable_conv = use_separable_conv
        self.expand_ratio = expand_ratio
        self.use_squeeze_excite = use_squeeze_excite
        self.squeeze_ratio = squeeze_ratio
        self.groups = groups
        super().__init__(**kwargs)
        self.blocks = []
        for idx in range(self.num_blocks):
            block = []
            for i in range(self.num_depth):
                if self.use_squeeze_excite:
                    layer = SqueezeExcite(self.squeeze_ratio)
                    block.append(layer)

                if self.use_separable_conv:
                    layer = MobileSeparableConv2D(num_features, (3, 3),
                                                  expand_ratio=expand_ratio)
                else:
                    layer = Conv2D(
                        num_features, (3, 3),
                        activation='relu',
                        padding='same',
                        kernel_initializer=RandomNormal(stddev=0.01))
                block.append(layer)

                layer = GroupNormalization(self.groups)
                block.append(layer)

            layer = Conv2DTranspose(
                num_features, (2, 2), (2, 2),
                padding='same',
                activation='relu',
                kernel_initializer=RandomNormal(stddev=0.01))
            block.append(layer)
            layer = Conv2D(num_classes, (1, 1),
                           padding='same',
                           activation='sigmoid',
                           kernel_initializer=RandomNormal(stddev=0.01))
            block.append(layer)
            self.blocks.append(block)
Example #25
    def __init__(self,
                 C,
                 layers,
                 steps=4,
                 multiplier=4,
                 stem_multiplier=3,
                 drop_path=0,
                 num_classes=10):
        super().__init__()
        self._C = C
        self._steps = steps
        self._multiplier = multiplier
        self._drop_path = drop_path

        C_curr = stem_multiplier * C
        self.stem = Sequential([
            Conv2d(3, C_curr, 3, bias=False),
            Norm(C_curr, 'def', affine=True),
        ])

        C_prev_prev, C_prev, C_curr = C_curr, C_curr, C
        self.cells = []
        reduction_prev = False
        for i in range(layers):
            if i in [layers // 3, 2 * layers // 3]:
                C_curr *= 2
                reduction = True
            else:
                reduction = False
            cell = Cell(steps, multiplier, C_prev_prev, C_prev, C_curr,
                        reduction, reduction_prev, drop_path)
            reduction_prev = reduction
            self.cells.append(cell)
            C_prev_prev, C_prev = C_prev, multiplier * C_curr

        self.avg_pool = GlobalAvgPool()
        self.classifier = Linear(C_prev, num_classes)

        k = sum(2 + i for i in range(self._steps))
        num_ops = len(get_primitives())
        self.alphas_normal = self.add_weight(
            'alphas_normal', (k, num_ops),
            initializer=RandomNormal(stddev=1e-2),
            trainable=True,
            experimental_autocast=False)
        self.alphas_reduce = self.add_weight(
            'alphas_reduce', (k, num_ops),
            initializer=RandomNormal(stddev=1e-2),
            trainable=True,
            experimental_autocast=False)

        self.tau = self.add_weight('tau', (),
                                   initializer=Constant(1.0),
                                   trainable=False,
                                   experimental_autocast=False)
Example #26
def create_embedding_dict(
    feature_dim_dict,
    embedding_size,
    init_std,
    seed,
    l2_rev_V,
    l2_reg_w,
):
    sparse_embedding = {
        j.name: {
            feat.name:
            Embedding(j.dimension,
                      embedding_size,
                      embeddings_initializer=RandomNormal(mean=0.0,
                                                          stddev=0.0001,
                                                          seed=seed),
                      embeddings_regularizer=l2(l2_rev_V),
                      name='sparse_emb_' + str(j.name) + '_' + str(i) + '-' +
                      feat.name)
            for i, feat in enumerate(feature_dim_dict["sparse"] +
                                     feature_dim_dict['dense'])
        }
        for j in feature_dim_dict["sparse"]
    }

    dense_embedding = {
        j.name: {
            feat.name: Dense(embedding_size,
                             kernel_initializer=RandomNormal(mean=0.0,
                                                             stddev=0.0001,
                                                             seed=seed),
                             use_bias=False,
                             kernel_regularizer=l2(l2_rev_V),
                             name='sparse_emb_' + str(j.name) + '_' + str(i) +
                             '-' + feat.name)
            for i, feat in enumerate(feature_dim_dict["sparse"] +
                                     feature_dim_dict["dense"])
        }
        for j in feature_dim_dict["dense"]
    }

    linear_embedding = {
        feat.name:
        Embedding(feat.dimension,
                  1,
                  embeddings_initializer=RandomNormal(mean=0.0,
                                                      stddev=init_std,
                                                      seed=seed),
                  embeddings_regularizer=l2(l2_reg_w),
                  name='linear_emb_' + str(i) + '-' + feat.name)
        for i, feat in enumerate(feature_dim_dict["sparse"])
    }

    return sparse_embedding, dense_embedding, linear_embedding
Example #27
def build_generator(input_shape=(256, 256, 3), num_blocks=9):
    """Generator network architecture"""
    x0 = layers.Input(input_shape)

    x = ReflectionPadding2D(padding=(3, 3))(x0)
    x = layers.Conv2D(filters=64, kernel_size=7, strides=1, kernel_initializer=RandomNormal(mean=0, stddev=0.02))(x)

    x = InstanceNormalization()(x)
    x = layers.ReLU()(x)

    # downsample
    x = layers.Conv2D(filters=128,
                      kernel_size=3,
                      strides=2,
                      padding='same',
                      kernel_initializer=RandomNormal(mean=0, stddev=0.02))(x)
    x = InstanceNormalization()(x)
    x = layers.ReLU()(x)

    x = layers.Conv2D(filters=256,
                      kernel_size=3,
                      strides=2,
                      padding='same',
                      kernel_initializer=RandomNormal(mean=0, stddev=0.02))(x)
    x = InstanceNormalization()(x)
    x = layers.ReLU()(x)

    # residual
    for _ in range(num_blocks):
        x = _resblock(x)

    # upsample
    x = layers.Conv2DTranspose(filters=128,
                               kernel_size=3,
                               strides=2,
                               padding='same',
                               kernel_initializer=RandomNormal(mean=0, stddev=0.02))(x)
    x = InstanceNormalization()(x)
    x = layers.ReLU()(x)

    x = layers.Conv2DTranspose(filters=64,
                               kernel_size=3,
                               strides=2,
                               padding='same',
                               kernel_initializer=RandomNormal(mean=0, stddev=0.02))(x)
    x = InstanceNormalization()(x)
    x = layers.ReLU()(x)

    # final
    x = ReflectionPadding2D(padding=(3, 3))(x)
    x = layers.Conv2D(filters=3, kernel_size=7, activation='tanh', kernel_initializer=RandomNormal(mean=0,
                                                                                                   stddev=0.02))(x)

    return Model(inputs=x0, outputs=x)
Example #28
def residual_block(feature, dropout=False):
    x = Conv2D(256, kernel_size=3, strides=1, padding='same', kernel_initializer=RandomNormal(
        mean=0.0, stddev=0.02), bias_initializer=Zeros())(feature)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    if dropout:
        x = Dropout(0.5)(x)
    x = Conv2D(256, kernel_size=3, strides=1, padding='same', kernel_initializer=RandomNormal(
        mean=0.0, stddev=0.02), bias_initializer=Zeros())(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    return Add()([feature, x])
Example #29
def wcrn(band, ncla1):
    input1 = Input(shape=(5, 5, band))

    # define network
    conv0x = Conv2D(64,
                    kernel_size=(1, 1),
                    padding='valid',
                    kernel_initializer=RandomNormal(mean=0.0, stddev=0.01))
    conv0 = Conv2D(64,
                   kernel_size=(3, 3),
                   padding='valid',
                   kernel_initializer=RandomNormal(mean=0.0, stddev=0.01))
    bn11 = BatchNormalization(axis=-1,
                              momentum=0.9,
                              epsilon=0.001,
                              center=True,
                              scale=True,
                              beta_initializer='zeros',
                              gamma_initializer='ones',
                              moving_mean_initializer='zeros',
                              moving_variance_initializer='ones')
    conv11 = Conv2D(128,
                    kernel_size=(1, 1),
                    padding='same',
                    kernel_initializer=RandomNormal(mean=0.0, stddev=0.01))
    conv12 = Conv2D(128,
                    kernel_size=(1, 1),
                    padding='same',
                    kernel_initializer=RandomNormal(mean=0.0, stddev=0.01))
    #
    fc1 = Dense(ncla1,
                activation='softmax',
                name='output1',
                kernel_initializer=RandomNormal(mean=0.0, stddev=0.01))

    # x1
    x1 = conv0(input1)
    x1x = conv0x(input1)
    x1 = MaxPooling2D(pool_size=(3, 3))(x1)
    x1x = MaxPooling2D(pool_size=(5, 5))(x1x)
    x1 = concatenate([x1, x1x], axis=-1)
    x11 = bn11(x1)
    x11 = Activation('relu')(x11)
    x11 = conv11(x11)
    x11 = Activation('relu')(x11)
    x11 = conv12(x11)
    x1 = Add()([x1, x11])

    x1 = Flatten()(x1)
    pre1 = fc1(x1)

    model1 = Model(inputs=input1, outputs=pre1)
    return model1
Example #30
def centernet_head(x, num_classes):
    x = Dropout(rate=0.5)(x)
    #-------------------------------#
    #   Decoder
    #-------------------------------#
    num_filters = 256
    # 16, 16, 2048  ->  32, 32, 256 -> 64, 64, 128 -> 128, 128, 64
    for i in range(3):
        # upsample
        x = Conv2DTranspose(num_filters // pow(2, i), (4, 4),
                            strides=2,
                            use_bias=False,
                            padding='same',
                            kernel_initializer='he_normal',
                            kernel_regularizer=l2(5e-4))(x)
        x = BatchNormalization()(x)
        x = Activation('relu')(x)
    # final feature map: 128, 128, 64
    # hm header
    y1 = Conv2D(64,
                3,
                padding='same',
                use_bias=False,
                kernel_initializer=RandomNormal(stddev=0.02))(x)
    y1 = BatchNormalization()(y1)
    y1 = Activation('relu')(y1)
    y1 = Conv2D(num_classes,
                1,
                kernel_initializer=Constant(0),
                bias_initializer=Constant(-2.19),
                activation='sigmoid')(y1)

    # wh header
    y2 = Conv2D(64,
                3,
                padding='same',
                use_bias=False,
                kernel_initializer=RandomNormal(stddev=0.02))(x)
    y2 = BatchNormalization()(y2)
    y2 = Activation('relu')(y2)
    y2 = Conv2D(2, 1, kernel_initializer=RandomNormal(stddev=0.02))(y2)

    # reg header
    y3 = Conv2D(64,
                3,
                padding='same',
                use_bias=False,
                kernel_initializer=RandomNormal(stddev=0.02))(x)
    y3 = BatchNormalization()(y3)
    y3 = Activation('relu')(y3)
    y3 = Conv2D(2, 1, kernel_initializer=RandomNormal(stddev=0.02))(y3)
    return y1, y2, y3
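A hypothetical way to build a complete model from these heads (ResNet50 and the 512x512 input are assumptions; they produce the 16x16x2048 feature map mentioned in the comment above):

from tensorflow.keras.applications import ResNet50
from tensorflow.keras.models import Model

backbone = ResNet50(include_top=False, input_shape=(512, 512, 3))
y1, y2, y3 = centernet_head(backbone.output, num_classes=20)
model = Model(backbone.input, [y1, y2, y3])
model.summary()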