Example #1
def disc_ln(x,
            channel=32,
            is_training=True,
            name='discriminator',
            patch=True,
            reuse=False):
    with tf.compat.v1.variable_scope(name, reuse=reuse):

        # Three downsampling stages: a stride-2 conv then a stride-1 conv,
        # each followed by layer norm and leaky ReLU.
        for idx in range(3):
            x = slim.convolution2d(x,
                                   channel * 2**idx, [3, 3],
                                   stride=2,
                                   activation_fn=None)
            x = tf.contrib.layers.layer_norm(x)
            x = tf.nn.leaky_relu(x)

            x = slim.convolution2d(x,
                                   channel * 2**idx, [3, 3],
                                   activation_fn=None)
            x = tf.contrib.layers.layer_norm(x)
            x = tf.nn.leaky_relu(x)

        if patch:
            # PatchGAN head: one logit per spatial location.
            x = slim.convolution2d(x, 1, [1, 1], activation_fn=None)
        else:
            # Global head: average over spatial dims, then a single logit.
            x = tf.reduce_mean(input_tensor=x, axis=[1, 2])
            x = slim.fully_connected(x, 1, activation_fn=None)

        return x
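
A minimal usage sketch for disc_ln, assuming a TensorFlow 1.x environment
where slim is tf.contrib.slim (the same two imports are assumed by every
example on this page; the 256x256 input size is just for illustration):

import tensorflow as tf
import tensorflow.contrib.slim as slim

x = tf.compat.v1.placeholder(tf.float32, [1, 256, 256, 3])
# Three stride-2 stages reduce 256x256 to 32x32; with patch=True the
# discriminator emits one logit per spatial patch instead of per image.
logits = disc_ln(x, channel=32, patch=True)   # shape [1, 32, 32, 1]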
Example #2
def resblock(inputs, out_channel=32, name='resblock'):

    with tf.compat.v1.variable_scope(name):

        x = slim.convolution2d(inputs, out_channel, [3, 3],
                               activation_fn=None, scope='conv1')
        x = tf.nn.leaky_relu(x)
        x = slim.convolution2d(x, out_channel, [3, 3],
                               activation_fn=None, scope='conv2')

        # Residual add: `inputs` must already have `out_channel` channels.
        return x + inputs
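
A quick sketch of how resblock composes, assuming the imports above; because
of the skip connection the block preserves shape, and its input must already
carry out_channel channels (project with a 1x1 conv first if it does not):

feat = tf.compat.v1.placeholder(tf.float32, [1, 64, 64, 32])
out = resblock(feat, out_channel=32, name='block_0')   # same shape as feat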
Example #3
def unet_generator(inputs,
                   channel=32,
                   num_blocks=4,
                   name='generator',
                   reuse=False):
    with tf.compat.v1.variable_scope(name, reuse=reuse):

        x0 = slim.convolution2d(inputs, channel, [7, 7], activation_fn=None)
        x0 = tf.nn.leaky_relu(x0)

        x1 = slim.convolution2d(x0,
                                channel, [3, 3],
                                stride=2,
                                activation_fn=None)
        x1 = tf.nn.leaky_relu(x1)
        x1 = slim.convolution2d(x1, channel * 2, [3, 3], activation_fn=None)
        x1 = tf.nn.leaky_relu(x1)

        x2 = slim.convolution2d(x1,
                                channel * 2, [3, 3],
                                stride=2,
                                activation_fn=None)
        x2 = tf.nn.leaky_relu(x2)
        x2 = slim.convolution2d(x2, channel * 4, [3, 3], activation_fn=None)
        x2 = tf.nn.leaky_relu(x2)

        for idx in range(num_blocks):
            x2 = resblock(x2,
                          out_channel=channel * 4,
                          name='block_{}'.format(idx))

        x2 = slim.convolution2d(x2, channel * 2, [3, 3], activation_fn=None)
        x2 = tf.nn.leaky_relu(x2)

        # Upsample 2x bilinearly and fuse with the mid-resolution skip x1.
        h1, w1 = tf.shape(input=x2)[1], tf.shape(input=x2)[2]
        x3 = tf.image.resize(x2, (h1 * 2, w1 * 2),
                             method=tf.image.ResizeMethod.BILINEAR)
        x3 = slim.convolution2d(x3 + x1,
                                channel * 2, [3, 3],
                                activation_fn=None)
        x3 = tf.nn.leaky_relu(x3)
        x3 = slim.convolution2d(x3, channel, [3, 3], activation_fn=None)
        x3 = tf.nn.leaky_relu(x3)

        # Upsample 2x again and fuse with the full-resolution skip x0.
        h2, w2 = tf.shape(input=x3)[1], tf.shape(input=x3)[2]
        x4 = tf.image.resize(x3, (h2 * 2, w2 * 2),
                             method=tf.image.ResizeMethod.BILINEAR)
        x4 = slim.convolution2d(x4 + x0, channel, [3, 3], activation_fn=None)
        x4 = tf.nn.leaky_relu(x4)
        x4 = slim.convolution2d(x4, 3, [7, 7], activation_fn=None)

        return x4
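
A usage sketch for unet_generator, assuming resblock from Example #2 is in
scope; input height and width should be multiples of 4 so the two bilinear
upsamples line up with the skip tensors x1 and x0:

img = tf.compat.v1.placeholder(tf.float32, [1, 256, 256, 3])
fake = unet_generator(img, channel=32, num_blocks=4)   # shape [1, 256, 256, 3]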
Example #4
    def convb(self,
              input,
              k_h,
              k_w,
              c_o,
              stride,
              name,
              relu=True,
              set_bias=True,
              set_tanh=False):
        with slim.arg_scope([slim.batch_norm],
                            decay=0.999,
                            fused=common.batchnorm_fused,
                            is_training=self.trainable):
            output = slim.convolution2d(
                input,
                c_o,
                kernel_size=[k_h, k_w],
                stride=stride,
                normalizer_fn=slim.batch_norm,
                weights_regularizer=_l2_regularizer_convb,
                weights_initializer=_init_xavier,
                # weights_initializer=tf.truncated_normal_initializer(stddev=0.01),
                biases_initializer=_init_zero if set_bias else None,
                trainable=self.trainable,
                activation_fn=common.activation_fn if relu else None,
                scope=name)
            if set_tanh:
                # Note: despite the flag name, the extra activation applied
                # here is a sigmoid, not a tanh.
                output = tf.nn.sigmoid(output, name=name + '_extra_acv')
        return output
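
convb relies on module-level helpers (common, _init_xavier, _init_zero,
_l2_regularizer_convb) and a self.trainable attribute defined elsewhere in
its repository. A plausible reconstruction under TF 1.x contrib, offered
purely as an assumption (the actual values may differ):

_init_xavier = tf.contrib.layers.xavier_initializer()
_init_zero = tf.zeros_initializer()
# Hypothetical regularization scale; the original value is not shown here.
_l2_regularizer_convb = tf.contrib.layers.l2_regularizer(0.004)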
Example #5
    def separable_conv(self, input, k_h, k_w, c_o, stride, name, relu=True, set_bias=True):
        with slim.arg_scope([slim.batch_norm], decay=0.999, fused=common.batchnorm_fused, is_training=self.trainable):
            output = slim.separable_convolution2d(input,
                                                  num_outputs=None,
                                                  stride=stride,
                                                  trainable=self.trainable,
                                                  depth_multiplier=1.0,
                                                  kernel_size=[k_h, k_w],
                                                  # activation_fn=common.activation_fn if relu else None,
                                                  activation_fn=None,
                                                  # normalizer_fn=slim.batch_norm,
                                                  weights_initializer=_init_xavier,
                                                  # weights_initializer=_init_norm,
                                                  weights_regularizer=_l2_regularizer_00004,
                                                  biases_initializer=None,
                                                  padding=DEFAULT_PADDING,
                                                  scope=name + '_depthwise')

            output = slim.convolution2d(output,
                                        c_o,
                                        stride=1,
                                        kernel_size=[1, 1],
                                        activation_fn=common.activation_fn if relu else None,
                                        weights_initializer=_init_xavier,
                                        # weights_initializer=_init_norm,
                                        biases_initializer=_init_zero if set_bias else None,
                                        normalizer_fn=slim.batch_norm,
                                        trainable=self.trainable,
                                        weights_regularizer=None,
                                        scope=name + '_pointwise')

        return output
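
The depthwise-then-pointwise split above is the standard separable
convolution factorization; a back-of-the-envelope weight count in plain
Python (bias and batch-norm parameters ignored) shows why it is used:

k_h, k_w, c_in, c_o = 3, 3, 256, 256
standard = k_h * k_w * c_in * c_o            # 589,824 weights for one 3x3 conv
separable = k_h * k_w * c_in + c_in * c_o    # 2,304 depthwise + 65,536 pointwise
print(standard / separable)                  # roughly 8.7x fewer weights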
Example #6
def self_attention(inputs, name='attention', reuse=False):
    with tf.compat.v1.variable_scope(name, reuse=reuse):
        h, w = tf.shape(input=inputs)[1], tf.shape(input=inputs)[2]
        bs, _, _, ch = inputs.get_shape().as_list()
        f = slim.convolution2d(inputs, ch // 8, [1, 1], activation_fn=None)
        g = slim.convolution2d(inputs, ch // 8, [1, 1], activation_fn=None)
        s = slim.convolution2d(inputs, 1, [1, 1], activation_fn=None)
        # Flatten spatial dimensions: [bs, h*w, ch // 8].
        f_flatten = tf.reshape(f, shape=[f.shape[0], -1, f.shape[-1]])
        g_flatten = tf.reshape(g, shape=[g.shape[0], -1, g.shape[-1]])
        # Pairwise attention between all spatial positions: [bs, h*w, h*w].
        beta = tf.matmul(f_flatten, g_flatten, transpose_b=True)
        beta = tf.nn.softmax(beta)

        s_flatten = tf.reshape(s, shape=[s.shape[0], -1, s.shape[-1]])
        att_map = tf.matmul(beta, s_flatten)
        att_map = tf.reshape(att_map, shape=[bs, h, w, 1])
        # Learned scalar gate, initialized to zero so the attention branch
        # is blended in gradually during training.
        gamma = tf.compat.v1.get_variable(
            "gamma", [1], initializer=tf.compat.v1.constant_initializer(0.0))
        output = att_map * gamma + inputs

        return att_map, output
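
A usage sketch, again assuming TF 1.x with slim; the final reshape uses the
static batch size from get_shape(), so the batch dimension must be known at
graph-construction time (a placeholder with batch None would fail here):

feat = tf.compat.v1.placeholder(tf.float32, [4, 32, 32, 128])
att_map, out = self_attention(feat)   # att_map: [4, 32, 32, 1]; out matches feat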
Example #7
def generator(inputs, channel=32, num_blocks=4, name='generator', reuse=False):
    with tf.compat.v1.variable_scope(name, reuse=reuse):

        x = slim.convolution2d(inputs, channel, [7, 7], activation_fn=None)
        x = tf.nn.leaky_relu(x)

        x = slim.convolution2d(x,
                               channel * 2, [3, 3],
                               stride=2,
                               activation_fn=None)
        x = slim.convolution2d(x, channel * 2, [3, 3], activation_fn=None)
        x = tf.nn.leaky_relu(x)

        x = slim.convolution2d(x,
                               channel * 4, [3, 3],
                               stride=2,
                               activation_fn=None)
        x = slim.convolution2d(x, channel * 4, [3, 3], activation_fn=None)
        x = tf.nn.leaky_relu(x)

        for idx in range(num_blocks):
            x = resblock(x,
                         out_channel=channel * 4,
                         name='block_{}'.format(idx))

        # Upsample back to input resolution with transposed convolutions.
        x = slim.conv2d_transpose(x,
                                  channel * 2, [3, 3],
                                  stride=2,
                                  activation_fn=None)
        x = slim.convolution2d(x, channel * 2, [3, 3], activation_fn=None)

        x = tf.nn.leaky_relu(x)

        x = slim.conv2d_transpose(x,
                                  channel, [3, 3],
                                  stride=2,
                                  activation_fn=None)
        x = slim.convolution2d(x, channel, [3, 3], activation_fn=None)
        x = tf.nn.leaky_relu(x)

        x = slim.convolution2d(x, 3, [7, 7], activation_fn=None)
        #x = tf.clip_by_value(x, -0.999999, 0.999999)

        return x
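
A closing usage sketch for generator, which also depends on resblock from
Example #2; two stride-2 convolutions downsample by 4x and two transposed
convolutions restore the input resolution, so height and width should again
be multiples of 4:

img = tf.compat.v1.placeholder(tf.float32, [1, 256, 256, 3])
out = generator(img, channel=32, num_blocks=4)   # shape [1, 256, 256, 3]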