Example 1
def mobilenetv2_yolo_body(inputs, num_anchors, num_classes, alpha=1.0):
    # input: 416 x 416 x 3
    # mobilenetv2.output (out_relu) : 13 x 13 x 1280
    # block_12_project_BN           : 26 x 26 x 96
    # block_5_project_BN            : 52 x 52 x 32
    mobilenetv2 = MobileNetV2(input_tensor=inputs, include_top=False, weights='imagenet')
    x, y1 = make_last_layers_mobilenet(mobilenetv2.output, 17, 512, num_anchors * (num_classes + 5))
    x = Conv2D(256, kernel_size=1, padding='same', use_bias=False, name='block_20_conv')(x)
    x = BatchNormalization(momentum=0.9, name='block_20_BN')(x)
    x = ReLU(6., name='block_20_relu6')(x)
    x = UpSampling2D(2)(x)
    x = Concatenate()([x, MobilenetConv2D(mobilenetv2.get_layer('block_12_project_BN').output, (1, 1), alpha, 384)])

    x, y2 = make_last_layers_mobilenet(x, 21, 256, num_anchors * (num_classes + 5))
    x = Conv2D(128, kernel_size=1, padding='same', use_bias=False, name='block_24_conv')(x)
    x = BatchNormalization(momentum=0.9, name='block_24_BN')(x)
    x = ReLU(6., name='block_24_relu6')(x)
    x = UpSampling2D(2)(x)
    x = Concatenate()([x, MobilenetConv2D(mobilenetv2.get_layer('block_5_project_BN').output, (1, 1), alpha, 128)])
    x, y3 = make_last_layers_mobilenet(x, 25, 128, num_anchors * (num_classes + 5))

    # y1 = Lambda(lambda y: tf.reshape(y, [-1, tf.shape(y)[1],tf.shape(y)[2], num_anchors, num_classes + 5]),name='y1')(y1)
    # y2 = Lambda(lambda y: tf.reshape(y, [-1, tf.shape(y)[1],tf.shape(y)[2], num_anchors, num_classes + 5]),name='y2')(y2)
    # y3 = Lambda(lambda y: tf.reshape(y, [-1, tf.shape(y)[1],tf.shape(y)[2], num_anchors, num_classes + 5]),name='y3')(y3)
    return Model(inputs, [y1, y2, y3])
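
A minimal usage sketch (not part of the original repository): it assumes the helper functions above (make_last_layers_mobilenet, MobilenetConv2D) and the usual tensorflow.keras imports (Input, Model) are in scope; the anchor and class counts are illustrative.

image_input = Input(shape=(416, 416, 3))
yolo_model = mobilenetv2_yolo_body(image_input, num_anchors=3, num_classes=80)
yolo_model.summary()  # y1: 13x13, y2: 26x26, y3: 52x52 prediction scales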
Example 2
    def __init__(self):
        super(TopModel, self).__init__()
        input_filters = 2048  # output of resnet
        self.output_filters = 256
        self.nbJoints = 17
        self.depth_dim = 64

        self.deconv1 = Conv2DTranspose(filters=self.output_filters,
                                       kernel_size=4,
                                       strides=(2, 2),
                                       padding="valid")
        self.relu1 = ReLU()
        self.batchnorm1 = BatchNormalization()

        self.deconv2 = Conv2DTranspose(filters=self.output_filters,
                                       kernel_size=4,
                                       strides=(2, 2),
                                       padding="valid")
        self.relu2 = ReLU()
        self.batchnorm2 = BatchNormalization()

        self.deconv3 = Conv2DTranspose(filters=self.output_filters,
                                       kernel_size=4,
                                       strides=(2, 2),
                                       padding="valid")
        self.relu3 = ReLU()
        self.batchnorm3 = BatchNormalization()

        self.final_conv = Conv2D(filters=self.nbJoints * self.depth_dim,
                                 kernel_size=1,
                                 strides=1,
                                 padding="valid")
Example 3
def inverted_res_block(input_tensor, expansion, stride, alpha, filters):
    in_channels = input_tensor.shape.as_list()[-1]
    filters = r(filters * alpha)
    output_tensor = input_tensor

    output_tensor = Conv2D(expansion * in_channels,
                           kernel_size=(1, 1),
                           use_bias=False)(output_tensor)
    output_tensor = BatchNormalization(
        epsilon=batch_norm_eps, momentum=batch_norm_momentum)(output_tensor)
    output_tensor = ReLU(relu_threshold)(output_tensor)

    output_tensor = ZeroPadding2D()(output_tensor)
    output_tensor = DepthwiseConv2D(kernel_size=(3, 3),
                                    strides=stride,
                                    use_bias=False)(output_tensor)
    output_tensor = BatchNormalization(
        epsilon=batch_norm_eps, momentum=batch_norm_momentum)(output_tensor)
    output_tensor = ReLU(relu_threshold)(output_tensor)

    output_tensor = Conv2D(filters, kernel_size=(1, 1),
                           use_bias=False)(output_tensor)
    output_tensor = BatchNormalization(
        epsilon=batch_norm_eps, momentum=batch_norm_momentum)(output_tensor)

    if in_channels == filters and stride == 1:
        output_tensor = Add()([input_tensor, output_tensor])
    return output_tensor
Example 4
 def define_classification_model(self, x_train, number_of_pigs):
     """Softmax regressor to classify images based on encoding"""
     kernel_init = keras.initializers.he_normal()
     classifier_model = Sequential()
     classifier_model.add(
         Dense(units=32,
               kernel_regularizer=keras.regularizers.l1_l2(l1=1e-5,
                                                           l2=1e-4),
               input_dim=x_train.shape[1],
               kernel_initializer=kernel_init))
     classifier_model.add(BatchNormalization())
     classifier_model.add(ReLU())
     classifier_model.add(Dropout(0.2))
     classifier_model.add(
         Dense(units=32,
               kernel_regularizer=keras.regularizers.l1_l2(l1=1e-5,
                                                           l2=1e-4),
               kernel_initializer=kernel_init))
     classifier_model.add(ReLU())
     classifier_model.add(Dropout(0.2))
     classifier_model.add(
         Dense(units=number_of_pigs, kernel_initializer=kernel_init))
     classifier_model.add(Activation('softmax'))
     optimizer = keras.optimizers.Adam()
     metrics = [
         'accuracy', 'mse', 'categorical_accuracy',
         'top_k_categorical_accuracy'
     ]
     loss = tf.keras.losses.SparseCategoricalCrossentropy()
     classifier_model.compile(loss=loss,
                              optimizer=optimizer,
                              metrics=metrics)
     return classifier_model
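
A hedged usage sketch: `recognizer` stands for the enclosing object, and the encoding length and class count are illustrative values, not taken from the source.

import numpy as np

x_train = np.random.rand(100, 128).astype('float32')  # 100 encodings of length 128 (illustrative)
clf = recognizer.define_classification_model(x_train, number_of_pigs=5)
clf.summary()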
Example 5
def xception_block(input_layer,
                   filter,
                   last_stride,
                   last_rate,
                   name,
                   residual_type='conv',
                   return_skip=False):
    if type(filter) is int:
        filters = [filter, filter, filter]
    else:
        filters = filter

    x = input_layer

    x = SeparableConv2D(filters[0],
                        kernel_size=3,
                        padding='same',
                        use_bias=False,
                        name=name + '_sepconv_1')(x)
    x = BatchNormalization(name=name + '_sepconv_1_bn')(x)
    x = ReLU()(x)

    x = SeparableConv2D(filters[1],
                        kernel_size=3,
                        padding='same',
                        use_bias=False,
                        name=name + '_sepconv_2')(x)
    x = BatchNormalization(name=name + '_sepconv_2_bn')(x)
    x = ReLU()(x)
    skip = x

    x = SeparableConv2D(filters[2],
                        kernel_size=3,
                        strides=last_stride,
                        dilation_rate=last_rate,
                        padding='same',
                        use_bias=False,
                        name=name + '_atrous_sepconv')(x)
    x = BatchNormalization(name=name + '_atrous_sepconv_bn')(x)
    x = ReLU()(x)

    if residual_type == 'conv':
        res = Conv2D(filters=filters[2],
                     kernel_size=1,
                     strides=last_stride,
                     padding='same',
                     use_bias=False,
                     name=name + "_residual")(input_layer)
        res = BatchNormalization(name=name + "_residual_bn")(res)
        # res = ReLU()(res)

        x = add([x, res])

    elif residual_type == 'add':
        x = add([x, input_layer])

    if return_skip:
        return x, skip

    return x
Example 6
 def Generator(self):
     gmodel = tf.keras.models.Sequential(name="Generator")
     gmodel.add(
         Dense(8 * 8 * 2 * self.Ndeconvfilters[0],
               input_shape=(self.noise_vect, ),
               kernel_initializer=tf.keras.initializers.RandomNormal(
                   mean=0.0, stddev=0.02)))
     gmodel.add(BatchNormalization(epsilon=1e-5, momentum=0.9))
     gmodel.add(ReLU())
     gmodel.add(Reshape((8, 8, 2 * self.Ndeconvfilters[0])))
     for lyrIdx in range(4):
         gmodel.add(
             Conv2DTranspose(
                 self.Ndeconvfilters[lyrIdx],
                 5,
                 strides=2,
                 padding='same',
                 kernel_initializer=tf.keras.initializers.RandomNormal(
                     mean=0.0, stddev=0.02)))
         if lyrIdx == 3:
             gmodel.add(
                 Activation('tanh'))  # last layer has tanh activation
         else:
             gmodel.add(BatchNormalization(epsilon=1e-5, momentum=0.9))
             gmodel.add(ReLU())
     gmodel.summary()
     return gmodel
Example 7
    def __init__(self, raanan_architecture=False, sigmoid_activation=True):
        super(Decoder, self).__init__()

        self.input_layer = InputLayer()
        self.fully_connected3 = Dense(512)
        self.fully_connected4 = Dense(7 * 7 * 64)
        self.reshape = Reshape((7, 7, 64))
        self.conv_transpose1 = Conv2DTranspose(32,
                                               3,
                                               padding="same",
                                               strides=2)
        self.conv_transpose2 = Conv2DTranspose(1, 3, padding="same", strides=2)

        self.relu1 = ReLU()
        self.relu2 = ReLU()
        self.relu3 = ReLU()

        self.last_activation = sigmoid if sigmoid_activation else tanh
        if raanan_architecture:
            self.relu1 = LeakyReLU()
            self.relu2 = LeakyReLU()
            self.relu3 = LeakyReLU()

        print("Decoder network created with raanan architecture={}".format(
            raanan_architecture))
Example 8
def MobilenetSeparableConv2D(input, filters,
                             kernel_size,
                             strides=(1, 1),
                             padding='valid',
                             use_bias=True):
    x = DepthwiseConv2D(kernel_size, padding=padding, use_bias=use_bias, strides=strides)(input)
    x = BatchNormalization()(x)
    x = ReLU(6.)(x)
    x = Conv2D(filters, 1, padding='same', use_bias=use_bias, strides=1)(x)
    x = BatchNormalization()(x)
    x = ReLU(6.)(x)
    return x
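
An illustrative application of this block to a feature map; the input shape and filter count are assumptions, and Input/Model are assumed to be imported as in the other examples.

feature_map = Input(shape=(52, 52, 256))       # assumed feature-map shape
head = MobilenetSeparableConv2D(feature_map, filters=128,
                                kernel_size=(3, 3), padding='same')
block_model = Model(feature_map, head)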
Example 9
def _inverted_res_block(inputs, expansion, stride, alpha, filters, block_id):
    in_channels = backend.int_shape(inputs)[-1]
    pointwise_conv_filters = int(filters * alpha)
    pointwise_filters = _make_divisible(pointwise_conv_filters, 8)
    x = inputs
    prefix = 'block_{}_'.format(block_id)

    if block_id:
        # Expand
        x = Conv2D(expansion * in_channels,
                   kernel_size=1,
                   padding='same',
                   use_bias=False,
                   activation=None,
                   name=prefix + 'expand')(x)
        x = BatchNormalization(epsilon=1e-3,
                               momentum=0.999,
                               name=prefix + 'expand_BN')(x)
        x = ReLU(6., name=prefix + 'expand_relu')(x)
    else:
        prefix = 'expanded_conv_'

    # Depthwise
    if stride == 2:
        x = ZeroPadding2D(padding=correct_pad(backend, x, 3),
                          name=prefix + 'pad')(x)
    x = DepthwiseConv2D(kernel_size=3,
                        strides=stride,
                        activation=None,
                        use_bias=False,
                        padding='same' if stride == 1 else 'valid',
                        name=prefix + 'depthwise')(x)
    x = BatchNormalization(epsilon=1e-3,
                           momentum=0.999,
                           name=prefix + 'depthwise_BN')(x)

    x = ReLU(6., name=prefix + 'depthwise_relu')(x)

    # Project
    x = Conv2D(pointwise_filters,
               kernel_size=1,
               padding='same',
               use_bias=False,
               activation=None,
               name=prefix + 'project')(x)
    x = BatchNormalization(epsilon=1e-3,
                           momentum=0.999,
                           name=prefix + 'project_BN')(x)

    if in_channels == pointwise_filters and stride == 1:
        return Add(name=prefix + 'add')([inputs, x])
    return x
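
A sketch of stacking these blocks the way MobileNetV2 does, assuming _make_divisible, correct_pad, and backend are importable from the same module; the input shape and block settings are illustrative.

stem = Input(shape=(112, 112, 32))  # assumed stem output
x = _inverted_res_block(stem, expansion=1, stride=1, alpha=1.0, filters=16, block_id=0)
x = _inverted_res_block(x, expansion=6, stride=2, alpha=1.0, filters=24, block_id=1)
x = _inverted_res_block(x, expansion=6, stride=1, alpha=1.0, filters=24, block_id=2)
backbone_stub = Model(stem, x)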
Example 10
def phi_network_fc(opt):
    """FC layer for encoded error
    :param opt: parser
    :return: keras Model
    """
    model = Sequential(name="phi_fc")
    model.add(Dense(1000))
    model.add(BatchNormalization())
    model.add(ReLU())
    model.add(Dense(1000))
    model.add(BatchNormalization())
    model.add(ReLU())
    model.add(Dense(opt.n_latent, activation='tanh'))
    return model
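
For illustration only: `opt` needs just an `n_latent` attribute here, so a namespace stub is enough (both the latent size and the input dimension below are assumptions).

from types import SimpleNamespace

opt = SimpleNamespace(n_latent=64)        # illustrative latent size
phi = phi_network_fc(opt)
phi.build(input_shape=(None, 1000))       # assumed encoded-error dimension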
Example 11
def MobilenetConv2D(input, kernel, alpha, filters):
    last_block_filters = _make_divisible(filters * alpha, 8)

    x = Conv2D(last_block_filters, kernel, padding='same', use_bias=False)(input)
    x = BatchNormalization()(x)
    x = ReLU(6.)(x)
    return x
Example 12
    def __init__(self, params=None, is_training=False):
        super(Seq2SeqAttn, self).__init__()
        self.is_training = is_training

        self.max_len = params['max_len']
        self.voc_size = params['voc_size']
        self.emb_dim = params['emb_dim']
        self.enc_dim = params['enc_dim']
        self.dec_dim = params['dec_dim']

        # Encoder-Decoder structure
        self.embedding = Embedding(input_dim=self.voc_size,
                                   output_dim=self.emb_dim,
                                   input_length=self.max_len,
                                   name='embedding')
        self.encoder = LSTM(self.enc_dim * 2,
                            return_state=True,
                            return_sequences=True,
                            name='encoder_rnn')
        self.decoder = LSTM(self.dec_dim * 2,
                            return_state=True,
                            return_sequences=True,
                            name='decoder_rnn')
        self.attn = Attention(params=params, is_training=is_training)
        self.dec_relu = ReLU(name='decoder_relu')
        self.dec_drop = Dropout(0.1, name='decoder_drop')
        self.fc = TimeDistributed(Dense(self.voc_size, activation=None),
                                  name='fc')
Example 13
 def build(self, input_shape):
     self.conv_layer = Conv2D(**self.conv_params)
     self.bn_layer = BatchNormalization(scale=False,
                                        beta_initializer='glorot_uniform',
                                        gamma_initializer='glorot_uniform')
     if self.act:
         self.relu_layer = ReLU()
Example 14
    def create_model(self):
        """ DEFINE NEURAL NETWORK """
        # define model as a linear stack of dense layers
        self.model = Sequential()

        # iteratively add hidden layers
        for layer_n in range(1, self.n_layers+1):
            print(layer_n, "hidden layer")
            if layer_n == 1:  # input_shape needs to be specified for the first layer
                self.model.add(Dense(units=self.n_hidden[layer_n], input_shape=(self.n_features,),
                                     kernel_initializer=self.weights_init, bias_initializer=self.bias_init))
            else:
                self.model.add(Dense(units=self.n_hidden[layer_n], kernel_initializer=self.weights_init,
                                     bias_initializer=self.bias_init))

            if self.batch_norm:
                self.model.add(BatchNormalization())  # add batch normalization before activation

            # add the activation layer explicitly
            if self.activation == 'LeakyReLU':
                self.model.add(LeakyReLU(alpha=self.alpha))  # for x < 0, y = alpha*x -> non-zero slope in the negative region

            elif self.activation == 'ReLU':
                self.model.add(ReLU())

            elif self.activation == 'eLU':
                self.model.add(ELU())

        # add output layer; no activation for the output layer
        self.model.add(Dense(units=self.n_outputs, kernel_initializer=self.weights_init,
                             bias_initializer=self.bias_init))
Example 15
def _text_recognition_vertical_model(input_shape, n_vocab):
    roi = Input(shape=input_shape, name="roi_vertical")
    x = roi
    for c in [64, 128, 256]:
        x = SeparableConv2D(c, 3, padding="same")(x)
        # TODO(agatan): if input_shape contains 0, GroupNormalization can generate nan weights.
        # x = GroupNormalization()(x)
        x = ReLU(6.)(x)
        x = SeparableConv2D(c, 3, padding="same")(x)
        # x = GroupNormalization()(x)
        x = ReLU(6.)(x)
        x = MaxPooling2D((1, 2))(x)
    x = Lambda(lambda v: tf.squeeze(v, 2))(x)
    x = Dropout(0.2)(x)
    output = Dense(n_vocab, activation="softmax")(x)
    return Model(roi, output, name="vertical_model")
Example 16
    def __init__(self):
        super(TransformerNet, self).__init__()
        self.conv1 = ConvLayer(32, kernel_size=9, strides=1)
        self.in1 = InstanceNormalization()
        self.conv2 = ConvLayer(64, kernel_size=3, strides=2)
        self.in2 = InstanceNormalization()
        self.conv3 = ConvLayer(128, kernel_size=3, strides=2)
        self.in3 = InstanceNormalization()

        self.res1 = ResidualBlock(128)
        self.res2 = ResidualBlock(128)
        self.res3 = ResidualBlock(128)
        self.res4 = ResidualBlock(128)
        self.res5 = ResidualBlock(128)

        self.deconv1 = UpsampleConvLayer(64,
                                         kernel_size=3,
                                         strides=1,
                                         upsample=2)
        self.in4 = InstanceNormalization()
        self.deconv2 = UpsampleConvLayer(32,
                                         kernel_size=3,
                                         strides=1,
                                         upsample=2)
        self.in5 = InstanceNormalization()
        self.deconv3 = ConvLayer(3, kernel_size=9, strides=1)

        self.relu = ReLU()
Example 17
def mam(x, num_feats, ratio, args, name):
    mam_name = name

    modulation_map_CSI = 0.0
    modulation_map_ICD = 0.0
    modulation_map_CSD = 0.0

    if args.is_CSI or args.is_ICD:
        _, tmp_var = tf.nn.moments(x, axes=[1,2], keepdims=True, name=mam_name+'/m1')
        if args.is_std_norm:
            mean_var, var_var = tf.nn.moments(tmp_var, axes=-1, keepdims=True, name=mam_name+'/m2')
            tmp_var = (tmp_var - mean_var) / tf.sqrt(var_var + 1e-5)

    if args.is_CSI:
        modulation_map_CSI = tmp_var

    if args.is_ICD:
        tmp = Dense(num_feats//ratio, activation=ReLU(), kernel_initializer=tf.keras.initializers.VarianceScaling(), name=mam_name+'/ICD_dense1')(tmp_var)
        modulation_map_ICD = Dense(num_feats, name=mam_name+'/ICD_dense2')(tmp)

    if args.is_CSD:
        init_w = tf.keras.initializers.GlorotUniform()
        init_b = tf.zeros_initializer()

        W = tf.Variable(init_w(shape=(3,3,num_feats,1)))
        b = tf.Variable(init_b(shape=(num_feats)))

        modulation_map_CSD = tf.nn.depthwise_conv2d(x, filter=W, strides=[1,1,1,1], padding='SAME', name=mam_name+'/CSD_up') + b

    modulation_map = tf.sigmoid(modulation_map_CSI+modulation_map_ICD+modulation_map_CSD, name=mam_name+'/sigmoid')

    return modulation_map * x
Example 18
def conv3d_relu_dropout(input_, filters_, kernel_size_, dropout_level):
    output_ = Conv2D(filters=filters_,
                     kernel_size=kernel_size_,
                     padding='same')(input_)
    output_ = ReLU()(output_)
    output_ = Dropout(rate=dropout_level)(output_)
    return output_
Example 19
    def __init__(self):

        super(Generator, self).__init__()

        self.input_layer = InputLayer(dtype=tf.float32)
        self.fully_connected1 = Dense(1024, dtype=tf.float32)
        self.bn1 = BatchNormalization(dtype=tf.float32)
        self.relu1 = ReLU(dtype=tf.float32)
        self.fully_connected2 = Dense(7 * 7 * 128, dtype=tf.float32)
        self.bn2 = BatchNormalization(dtype=tf.float32)
        self.relu2 = ReLU(dtype=tf.float32)
        self.reshape = Reshape((7, 7, 128), dtype=tf.float32)
        self.conv_transpose1 = Conv2DTranspose(64,
                                               4,
                                               padding="same",
                                               strides=2,
                                               dtype=tf.float32)
        self.bn3 = BatchNormalization(dtype=tf.float32)
        self.relu3 = ReLU(dtype=tf.float32)
        self.conv_transpose2 = Conv2DTranspose(1,
                                               4,
                                               padding="same",
                                               strides=2,
                                               activation='tanh',
                                               dtype=tf.float32)
        # self.relu4 = ReLU(dtype=tf.float32)

        # self.input_layer = InputLayer(dtype=tf.float32)
        # self.fully_connected1 = Dense(7 * 7 * 256, use_bias=False)
        # self.bn1 = BatchNormalization()
        # self.relu1 = LeakyReLU()
        #
        # self.reshape = Reshape((7, 7, 256))
        #
        # self.conv_transpose1 = Conv2DTranspose(128, (5, 5), strides=(1, 1), padding='same', use_bias=False)
        # self.bn3 = BatchNormalization()
        # self.relu3 = LeakyReLU()
        #
        # self.conv_transpose2 = Conv2DTranspose(64, (5, 5), strides=(2, 2), padding='same', use_bias=False)
        # self.bn4 = BatchNormalization()
        # self.relu4 = layers.LeakyReLU()
        #
        # model.add(layers.Conv2DTranspose(1, (5, 5), strides=(2, 2), padding='same', use_bias=False, activation='tanh'))
        # assert model.output_shape == (None, 28, 28, 1)

        print("Generator network created")
Example 20
def SeparableConv2D_with_batchnorm(filters, kernel_size, name=None):
    return Sequential([
        DepthwiseConv2D(kernel_size=kernel_size, padding='same'),
        BatchNormalizationV2(epsilon=1e-5, momentum=0.999),
        ReLU(max_value=6.),
        Conv2D(filters=filters, kernel_size=1, padding='valid')
    ], name=name)
Example 21
def _conv_block(inputs, filters, alpha, kernel=(3, 3), strides=(1, 1)):
    """Adds an initial convolution layer (with batch normalization and relu6).

  Arguments:
      inputs: Input tensor of shape `(rows, cols, 3)`
          (with `channels_last` data format) or
          (3, rows, cols) (with `channels_first` data format).
          It should have exactly 3 input channels,
          and width and height should be no smaller than 32.
          E.g. `(224, 224, 3)` would be one valid value.
      filters: Integer, the dimensionality of the output space
          (i.e. the number of output filters in the convolution).
      alpha: controls the width of the network.
          - If `alpha` < 1.0, proportionally decreases the number
              of filters in each layer.
          - If `alpha` > 1.0, proportionally increases the number
              of filters in each layer.
          - If `alpha` = 1, default number of filters from the paper
               are used at each layer.
      kernel: An integer or tuple/list of 2 integers, specifying the
          width and height of the 2D convolution window.
          Can be a single integer to specify the same value for
          all spatial dimensions.
      strides: An integer or tuple/list of 2 integers,
          specifying the strides of the convolution along the width and height.
          Can be a single integer to specify the same value for
          all spatial dimensions.
          Specifying any stride value != 1 is incompatible with specifying
          any `dilation_rate` value != 1.

  Input shape:
      4D tensor with shape:
      `(samples, channels, rows, cols)` if data_format='channels_first'
      or 4D tensor with shape:
      `(samples, rows, cols, channels)` if data_format='channels_last'.

  Output shape:
      4D tensor with shape:
      `(samples, filters, new_rows, new_cols)` if data_format='channels_first'
      or 4D tensor with shape:
      `(samples, new_rows, new_cols, filters)` if data_format='channels_last'.
      `rows` and `cols` values might have changed due to stride.

  Returns:
      Output tensor of block.
  """
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1
    filters = int(filters * alpha)
    x = ZeroPadding2D(padding=(1, 1), name='conv1_pad')(inputs)
    x = Conv2D(filters,
               kernel,
               padding='valid',
               use_bias=False,
               strides=strides,
               name='conv1')(x)
    x = BatchNormalization(axis=channel_axis, name='conv1_bn')(x)
    return ReLU(6, name='conv1_relu')(x)
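
A minimal sketch of the stem this block produces, assuming `from tensorflow.keras import backend as K` plus the Input/Model imports used elsewhere; the input size follows the docstring's example.

img_input = Input(shape=(224, 224, 3))
x = _conv_block(img_input, filters=32, alpha=1.0, strides=(2, 2))
stem_model = Model(img_input, x)   # output shape: (None, 112, 112, 32)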
Example 22
def conv3d_transpose_relu_dropout(input_, filters_, kernel_size_,
                                  dropout_level):
    output_ = Conv2DTranspose(filters=filters_,
                              kernel_size=kernel_size_,
                              padding='same',
                              strides=(2, 2))(input_)
    output_ = ReLU()(output_)
    output_ = Dropout(rate=dropout_level)(output_)
    return output_
Example 23
 def build(self, input_shape):
     self.bn_layer = BatchNormalization(scale=False)
     self.relu_layer = ReLU()
     self.concat_layer = Concatenate()
     self.conv1 = ConvBn(filters=self.f1,
                         kernel_size=1,
                         strides=self.strides,
                         padding='same',
                         name='incep1')
     self.conv3 = [
         ConvBn(filters=self.f3[0],
                kernel_size=1,
                strides=self.strides,
                padding='same',
                name='incep2_1'),
         ConvBn(filters=self.f3[1],
                kernel_size=3,
                padding='same',
                name='incep2_2')
     ]
     self.conv5 = [
         ConvBn(filters=self.f5[0],
                kernel_size=1,
                padding='same',
                strides=self.strides,
                name='incep3_1'),
         ConvBn(filters=self.f5[1],
                kernel_size=3,
                padding='same',
                name='incep3_2'),
         ConvBn(filters=self.f5[2],
                kernel_size=3,
                padding='same',
                name='incep3_3')
     ]
     if self.strides == 2:
         self.pool_layers = [
             MaxPooling2D(pool_size=3,
                          strides=2,
                          padding='same',
                          name='incep4_pool'),
             ConvBn(filters=self.f_pool,
                    kernel_size=1,
                    padding='same',
                    name='incep4_conv')
         ]
     self.conv_out = conv(filters=self.f_out,
                          kernel_size=1,
                          use_bias=False,
                          name='out')
     if self.projection:
         self.proj_conv = conv(filters=self.f_out,
                               kernel_size=1,
                               strides=self.strides,
                               use_bias=False,
                               name='projection')
Example 24
def evaluate_decoder(decoder, z, reshape):
    # Project the latent code and reshape it into a feature map
    output = tf.reshape(decoder.dec_1(z), [-1, *reshape])
    # residual block 1 (identity shortcut)
    h = decoder.dec_res1_conv1(ReLU()(decoder.dec_res1_layernorm1(output)))
    h = decoder.dec_res1_conv2(ReLU()(decoder.dec_res1_layernorm2(h)))
    output = Add()([h, output])
    # residual block 2 (projected shortcut)
    h = decoder.dec_res2_conv1(ReLU()(decoder.dec_res2_layernorm1(output)))
    h = decoder.dec_res2_conv2(ReLU()(decoder.dec_res2_layernorm2(h)))
    output = Add()([h, decoder.dec_res2_shortcut(output)])
    # residual block 3 (projected shortcut)
    h = decoder.dec_res3_conv1(ReLU()(decoder.dec_res3_layernorm1(output)))
    h = decoder.dec_res3_conv2(ReLU()(decoder.dec_res3_layernorm2(h)))
    output = Add()([h, decoder.dec_res3_shortcut(output)])
    # residual block 4 (projected shortcut)
    h = decoder.dec_res4_conv1(ReLU()(decoder.dec_res4_layernorm1(output)))
    h = decoder.dec_res4_conv2(ReLU()(decoder.dec_res4_layernorm2(h)))
    output = Add()([h, decoder.dec_res4_shortcut(output)])

    output = decoder.dec_layernorm(output)
    output = ReLU()(output)
    return decoder.dec_conv(output)
Example 25
def build_G(g_in, name=""):
    g_x = tf.pad(g_in, [[0, 0], [3, 3], [3, 3], [0, 0]], "REFLECT")

    ### c1
    g_x = Conv2D(64, kernel_size=7, strides=1, padding="valid")(g_x)
    g_x = InstanceNorm_kong()(g_x)
    g_x = ReLU()(g_x)
    ### c2
    g_x = Conv2D(64 * 2, kernel_size=3, strides=2, padding="same")(g_x)
    g_x = InstanceNorm_kong()(g_x)
    g_x = ReLU()(g_x)
    ### c3
    g_x = Conv2D(64 * 4, kernel_size=3, strides=2, padding="same")(g_x)
    g_x = InstanceNorm_kong()(g_x)
    g_x = ReLU()(g_x)

    # nine residual blocks at the bottleneck resolution
    for _ in range(9):
        g_x = ResBlock(c_num=64 * 4)(g_x)

    g_x = Conv2DTranspose(64 * 2, kernel_size=3, strides=2,
                          padding="same")(g_x)
    g_x = InstanceNorm_kong()(g_x)
    g_x = ReLU()(g_x)

    g_x = Conv2DTranspose(64, kernel_size=3, strides=2, padding="same")(g_x)
    g_x = InstanceNorm_kong()(g_x)
    g_x = ReLU()(g_x)

    g_x = tf.pad(g_x, [[0, 0], [3, 3], [3, 3], [0, 0]], "REFLECT")
    g_img = Conv2D(3,
                   kernel_size=7,
                   strides=1,
                   padding="valid",
                   activation="tanh")(g_x)
    generator = Model(g_in, g_img, name=name)
    return generator
Example 26
def bottleneck_decoder(tensor,
                       nfilters,
                       upsampling=False,
                       normal=False,
                       name=''):
    y = tensor
    skip = tensor
    if upsampling:
        skip = Conv2D(filters=nfilters,
                      kernel_size=(1, 1),
                      kernel_initializer='he_normal',
                      strides=(1, 1),
                      padding='same',
                      use_bias=False,
                      name=f'1x1_conv_skip_{name}')(skip)
        skip = UpSampling2D(size=(2, 2), name=f'upsample_skip_{name}')(skip)

    y = Conv2D(filters=nfilters // 4,
               kernel_size=(1, 1),
               kernel_initializer='he_normal',
               strides=(1, 1),
               padding='same',
               use_bias=False,
               name=f'1x1_conv_{name}')(y)
    y = BatchNormalization(momentum=0.1, name=f'bn_1x1_{name}')(y)
    y = PReLU(shared_axes=[1, 2], name=f'prelu_1x1_{name}')(y)

    if upsampling:
        y = Conv2DTranspose(filters=nfilters // 4,
                            kernel_size=(3, 3),
                            kernel_initializer='he_normal',
                            strides=(2, 2),
                            padding='same',
                            name=f'3x3_deconv_{name}')(y)
    elif normal:
        y = Conv2D(filters=nfilters // 4,
                   kernel_size=(3, 3),
                   strides=(1, 1),
                   kernel_initializer='he_normal',
                   padding='same',
                   name=f'3x3_conv_{name}')(y)
    y = BatchNormalization(momentum=0.1, name=f'bn_main_{name}')(y)
    y = PReLU(shared_axes=[1, 2], name=f'prelu_{name}')(y)

    y = Conv2D(filters=nfilters,
               kernel_size=(1, 1),
               kernel_initializer='he_normal',
               use_bias=False,
               name=f'final_1x1_{name}')(y)
    y = BatchNormalization(momentum=0.1, name=f'bn_final_{name}')(y)

    y = Add(name=f'add_{name}')([y, skip])
    y = ReLU(name=f'relu_out_{name}')(y)

    return y
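
An illustrative call for one upsampling decoder stage, assuming the layer imports used above; the tensor shape and filter count are assumptions, not values from the source.

stage_in = Input(shape=(32, 32, 128))
stage_out = bottleneck_decoder(stage_in, nfilters=64, upsampling=True, name='dec1')
decoder_stage = Model(stage_in, stage_out)   # spatial size doubles to 64 x 64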
Example 27
def f_network_decoder(opt: 'parser'):
    """Conditional decoder
    :param opt: parser
    :return: keras Model
    """
    model = Sequential(name="f_decoder")
    # layer 4
    model.add(ZeroPadding2D(1))
    model.add(Conv2DTranspose(opt.nfeature, (3, 3), (1, 1), "valid"))
    model.add(BatchNormalization())
    model.add(ReLU())
    # layer 5
    model.add(ZeroPadding2D(1))
    model.add(Conv2DTranspose(opt.nfeature, (3, 3), (1, 1), "valid"))
    model.add(BatchNormalization())
    model.add(ReLU())
    # layer 6
    model.add(ZeroPadding2D(1))
    model.add(Conv2DTranspose(opt.n_out, (4, 4), (2, 2), "valid"))
    return model
Example 28
def g_network_decoder(opt):
    """Deterministic decoder
    :param opt: parser
    :return: keras Model
    """
    k = 4  # poke
    model = Sequential(name="g_decoder")
    # layer 4
    model.add(ZeroPadding2D(1))
    model.add(Conv2DTranspose(opt.nfeature, (3, 3), (1, 1), "valid"))
    model.add(BatchNormalization())
    model.add(ReLU())
    # layer 5
    model.add(ZeroPadding2D(1))
    model.add(Conv2DTranspose(opt.nfeature, (3, 3), (1, 1), "valid"))
    model.add(BatchNormalization())
    model.add(ReLU())
    # layer 6
    model.add(ZeroPadding2D(1))
    model.add(Conv2DTranspose(opt.n_out, (4, 4), (2, 2), "valid"))
    return model
Example 29
 def get_activation(self, x):
     if self.activation_type == 'elu':
         activate = ELU()(x)
     elif self.activation_type == 'relu':
         activate = ReLU()(x)
     elif self.activation_type == 'prelu':
         activate = PReLU()(x)
     elif self.activation_type == 'leakyrelu':
         activate = LeakyReLU()(x)
     else:
         raise ValueError('Undefined ACTIVATION_TYPE!')
     return activate
Example 30
def res_block(x_in, num_filters):
    x = tf.pad(x_in, [[0, 0], [1, 1], [1, 1], [0, 0]], mode='REFLECT')
    x = Conv2D(num_filters, kernel_size=3, padding='valid', use_bias=False)(x)
    x = InstanceNormalization()(x)
    x = ReLU()(x)

    x = tf.pad(x, [[0, 0], [1, 1], [1, 1], [0, 0]], mode='REFLECT')
    x = Conv2D(num_filters, kernel_size=3, padding='valid', use_bias=False)(x)
    x = InstanceNormalization()(x)
    x = Add()([x_in, x])

    return x
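
A small sketch wrapping the block as a standalone model; InstanceNormalization is assumed to come from tensorflow_addons.layers, and the input shape is illustrative.

feat_in = Input(shape=(64, 64, 256))
feat_out = res_block(feat_in, num_filters=256)
res_model = Model(feat_in, feat_out)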