Example #1
def block(x, labels, out_channels, num_classes, is_training, CGN, CGN_groups,
          name):
    with tf.variable_scope(name):
        if CGN:
            norm0 = ops.ConditionalGroupNorm(num_classes,
                                             CGN_groups,
                                             name='cgn_0')
            norm1 = ops.ConditionalGroupNorm(num_classes,
                                             CGN_groups,
                                             name='cgn_1')
        else:
            norm0 = ops.ConditionalBatchNorm(num_classes, name='cbn_0')
            norm1 = ops.ConditionalBatchNorm(num_classes, name='cbn_1')

        x_0 = x
        x = tf.nn.relu(norm0(x, labels, is_training))
        x = usample(x)
        x = ops.snconv2d(x, out_channels, 3, 3, 1, 1, name='snconv1')
        x = tf.nn.relu(norm1(x, labels, is_training))
        x = ops.snconv2d(x, out_channels, 3, 3, 1, 1, name='snconv2')

        x_0 = usample(x_0)
        x_0 = ops.snconv2d(x_0, out_channels, 1, 1, 1, 1, name='snconv3')

        return x_0 + x
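The generator block above calls a `usample` helper that is not included in this excerpt. A minimal sketch of what it plausibly does, assuming nearest-neighbor 2x upsampling of an NHWC tensor (the name and resize method are assumptions, not the original ops code):

import tensorflow as tf

def usample(x):
    # Assumed helper: double the spatial resolution with nearest-neighbor resizing.
    _, h, w, _ = x.shape.as_list()
    return tf.image.resize_nearest_neighbor(x, [h * 2, w * 2])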
Example #2
    def _d_residual_block(self,
                          x,
                          out_ch,
                          idx,
                          is_training,
                          resize=True,
                          is_head=False):
        update_collection = self._get_update_collection(is_training)
        with tf.variable_scope("d_resblock_" + str(idx), reuse=tf.AUTO_REUSE):
            h = x
            if not is_head:
                h = tf.nn.relu(h)
            h = snconv2d(h,
                         out_ch,
                         name='d_resblock_conv_1',
                         update_collection=update_collection)
            h = tf.nn.relu(h)
            h = snconv2d(h,
                         out_ch,
                         name='d_resblock_conv_2',
                         update_collection=update_collection)
            if resize:
                h = slim.avg_pool2d(h, [2, 2])

            # Short cut
            s = x
            if resize:
                s = slim.avg_pool2d(s, [2, 2])
            s = snconv2d(s,
                         out_ch,
                         k_h=1,
                         k_w=1,
                         name='d_resblock_conv_sc',
                         update_collection=update_collection)
            return h + s
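Examples #2, #4, and #12 call `self._get_update_collection(is_training)`, which is not part of these excerpts. A hedged sketch of one common convention in TF1 spectral-normalization code (an assumption about this project, shown as a plain function rather than the original method):

def get_update_collection(is_training):
    # Assumed convention: None lets the spectrally normalized weights update their
    # power-iteration estimate in place during training; 'NO_OPS' freezes it at eval time.
    return None if is_training else 'NO_OPS'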
Example #3
File: generator.py  Project: zhwzhong/ylg
def block(x, labels, out_channels, num_classes, name, training=True):
    """Builds the residual blocks used in the generator.

    Args:
      x: The 4D input tensor.
      labels: The labels of the class we seek to sample from.
      out_channels: Integer number of features in the output layer.
      num_classes: Integer number of classes in the labels.
      name: The variable scope name for the block.
      training: Whether this block is for training or not.
    Returns:
      A `Tensor` representing the output of the operation.
    """
    with tf.compat.v1.variable_scope(name):
        labels_onehot = tf.one_hot(labels, num_classes)
        x_0 = x
        x = tf.nn.relu(
            tfgan.tpu.batch_norm(x, training, labels_onehot, name='cbn_0'))
        x = usample(x)
        x = ops.snconv2d(x, out_channels, 3, 3, 1, 1, training, 'snconv1')
        x = tf.nn.relu(
            tfgan.tpu.batch_norm(x, training, labels_onehot, name='cbn_1'))
        x = ops.snconv2d(x, out_channels, 3, 3, 1, 1, training, 'snconv2')

        x_0 = usample(x_0)
        x_0 = ops.snconv2d(x_0, out_channels, 1, 1, 1, 1, training, 'snconv3')

        return x_0 + x
Example #4
    def _g_residual_block(self, x, y, n_ch, idx, is_training, resize=True):
        update_collection = self._get_update_collection(is_training)
        with tf.variable_scope("g_resblock_" + str(idx), reuse=tf.AUTO_REUSE):
            h = self._cbn(x, y, is_training, scope='g_resblock_cbn_1')
            h = tf.nn.relu(h)
            if resize:
                h = upscale(h, 2)
            h = snconv2d(h,
                         n_ch,
                         name='g_resblock_conv_1',
                         update_collection=update_collection)
            h = self._cbn(h, y, is_training, scope='g_resblock_cbn_2')
            h = tf.nn.relu(h)
            h = snconv2d(h,
                         n_ch,
                         name='g_resblock_conv_2',
                         update_collection=update_collection)

            if resize:
                sc = upscale(x, 2)
            else:
                sc = x
            sc = snconv2d(sc,
                          n_ch,
                          k_h=1,
                          k_w=1,
                          name='g_resblock_conv_sc',
                          update_collection=update_collection)

            return h + sc
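The block above relies on an `upscale(x, 2)` helper that is not shown. A minimal sketch, assuming nearest-neighbor upsampling of an NHWC tensor by an integer factor (the implementation is an assumption):

import tensorflow as tf

def upscale(x, scale):
    # Assumed helper: enlarge both spatial dimensions by `scale` with nearest-neighbor resizing.
    _, h, w, _ = x.shape.as_list()
    return tf.image.resize_nearest_neighbor(x, [h * scale, w * scale])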
Example #5
def block(x, out_channels, name, downsample=True, act=tf.nn.relu):
    """Builds the residual blocks used in the discriminator.

    Args:
      x: The 4D input vector.
      out_channels: Number of features in the output layer.
      name: The variable scope name for the block.
      downsample: If True, downsample the spatial size of the input tensor by
                  a factor of 2 on each side. If False, the spatial size of the
                  input tensor is unchanged.
      act: The activation function used in the block.
    Returns:
      A `Tensor` representing the output of the operation.
    """
    with tf.compat.v1.variable_scope(name):
        input_channels = x.shape.as_list()[-1]
        x_0 = x
        x = act(x)
        x = ops.snconv2d(x, out_channels, 3, 3, 1, 1, name='sn_conv1')
        x = act(x)
        x = ops.snconv2d(x, out_channels, 3, 3, 1, 1, name='sn_conv2')
        if downsample:
            x = dsample(x)
        if downsample or input_channels != out_channels:
            x_0 = ops.snconv2d(x_0, out_channels, 1, 1, 1, 1, name='sn_conv3')
            if downsample:
                x_0 = dsample(x_0)
        return x_0 + x
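The discriminator blocks in Examples #5, #7, and #16 use a `dsample` helper that is not included here. A minimal sketch, assuming it halves the spatial resolution with 2x2 average pooling:

import tensorflow as tf

def dsample(x):
    # Assumed helper: downsample an NHWC tensor by 2x with average pooling.
    return tf.nn.avg_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')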
Example #6
def block_without_condition(x, out_channels, is_training, name):
    with tf.variable_scope(name):
        bn0 = ops.batch_norm(name='bn_0')
        bn1 = ops.batch_norm(name='bn_1')
        # bn = ops.batch_norm()
        x = bn0(x, is_training)
        x = tf.nn.relu(x)
        x = usample(x)
        x = ops.snconv2d(x, out_channels, 3, 3, 1, 1, name='snconv1')
        x = bn1(x, is_training)
        x = tf.nn.relu(x)
        x = ops.snconv2d(x, out_channels, 3, 3, 1, 1, name='snconv2')
        return x
Example #7
def optimized_block(x,
                    out_channels,
                    name,
                    update_collection=None,
                    act=tf.nn.relu):
    """Builds the simplified residual blocks for downsampling.

    Compared with block, optimized_block always downsamples the spatial
    resolution of the input vector by a factor of 4.

    Args:
      x: The 4D input vector.
      out_channels: Number of features in the output layer.
      name: The variable scope name for the block.
      update_collection: The update collections used in the
                         spectral_normed_weight.
      act: The activation function used in the block.
    Returns:
      A `Tensor` representing the output of the operation.
    """
    with tf.variable_scope(name):
        x_0 = x
        x = ops.snconv2d(x,
                         out_channels,
                         3,
                         3,
                         1,
                         1,
                         update_collection=update_collection,
                         name='sn_conv1')
        x = act(x)
        x = ops.snconv2d(x,
                         out_channels,
                         3,
                         3,
                         1,
                         1,
                         update_collection=update_collection,
                         name='sn_conv2')
        x = dsample(x)
        x_0 = dsample(x_0)
        x_0 = ops.snconv2d(x_0,
                           out_channels,
                           1,
                           1,
                           1,
                           1,
                           update_collection=update_collection,
                           name='sn_conv3')
        return x + x_0
Example #8
def block(x, labels, out_channels, num_classes, is_training, name):
    with tf.variable_scope(name):
        bn0 = ops.ConditionalBatchNorm(num_classes, name='cbn_0')
        bn1 = ops.ConditionalBatchNorm(num_classes, name='cbn_1')
        x_0 = x
        x = tf.nn.relu(bn0(x, labels, is_training))
        x = usample(x)
        x = ops.snconv2d(x, out_channels, 3, 3, 1, 1, name='snconv1')
        x = tf.nn.relu(bn1(x, labels, is_training))
        x = ops.snconv2d(x, out_channels, 3, 3, 1, 1, name='snconv2')

        x_0 = usample(x_0)
        x_0 = ops.snconv2d(x_0, out_channels, 1, 1, 1, 1, name='snconv3')

        return x_0 + x
Example #9
def generator(z,trainable=True, reuse=tf.AUTO_REUSE):
    #z = tf.reshape(z,(-1,1,1,n_dim))
    with tf.variable_scope("generator", reuse=reuse):
        ch = 1024
        z = ops.linear(z,ch*4*4,scope='g_h0')
        z = tf.reshape(z,(-1,4,4,ch))#4*4*1024
        print(z)
        
        z = risidual_up_block(z,ch,trainable,scope='deconv0')#8*8*1024
        print(z)
        
        z = risidual_up_block(z,ch//2,trainable,scope='deconv1')#16*16*512
        print(z)
        
        z = risidual_up_block(z,ch//4,trainable,scope='deconv2')#32*32*256
        print(z)
        
        z = risidual_up_block(z,ch//8,trainable,scope='deconv3')#64*64*128
        z = attention(z,z.shape[-1])
        print(z)
        
        z = risidual_up_block(z,ch//16,trainable,scope='deconv4')#128*128*64
        print(z)
        
        z = risidual_up_block(z,ch//32,trainable,scope='deconv5')#256*256*64
        print(z)
        z = tf.layers.batch_normalization(z,training=trainable)
        z = tf.nn.relu(z)
        z = ops.snconv2d(z,channel,3,3,1,1,name='last_layer')
        z = tf.nn.tanh(z)
        print(z)
    return z
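Examples #9, #11, #13, #14, and #15 come from the same project and reference module-level names (`batch_size`, `channel`, `n_dim`, `upsample`, `Concatenation`) that are not part of these excerpts. The definitions below are assumptions for illustration only, so the snippets can be read as self-contained code:

import tensorflow as tf

batch_size = 16   # assumed: samples per step, used by attention() for reshaping
channel = 3       # assumed: output image channels for the generator's last conv
n_dim = 128       # assumed: latent dimension (see the commented-out reshape in generator())

def upsample(x):
    # Assumed helper: nearest-neighbor 2x upsampling of an NHWC tensor.
    _, h, w, _ = x.shape.as_list()
    return tf.image.resize_nearest_neighbor(x, [h * 2, w * 2])

def Concatenation(tensors):
    # Assumed helper: channel-axis concatenation used by risidual_down_block's shortcut.
    return tf.concat(tensors, axis=-1)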
Example #10
def generator_without_condition(zs, gf_dim, is_training=True):
    """Builds the generator graph propagating from z to x.

    Args:
      zs: The list of noise tensors.
      gf_dim: The gf dimension.
      is_training: Whether the graph is built in training mode.
    Returns:
      outputs: The output layer of the generator.

    """

    with tf.variable_scope('model', reuse=tf.AUTO_REUSE):
        # project `z` and reshape
        act0 = ops.snlinear(zs, gf_dim * 16 * 4 * 4, name='g_snh0')
        act0 = tf.reshape(act0, [-1, 4, 4, gf_dim * 16])

        act1 = block_without_condition(act0, gf_dim * 16, is_training, 'g_block1')  # 8 * 8
        act2 = block_without_condition(act1, gf_dim * 8, is_training, 'g_block2')  # 16 * 16
        act3 = block_without_condition(act2, gf_dim * 4, is_training, 'g_block3')  # 32 * 32
        # act3 = non_local.sn_non_local_block_sim(act3, None, name='g_non_local')
        act4 = block_without_condition(act3, gf_dim * 2, is_training, 'g_block4')  # 64 * 64
        act4, attn = non_local.sn_non_local_block_sim(act4, None, name='g_non_local')

        act5 = block_without_condition(act4, gf_dim, is_training, 'g_block5')  # 128 * 128
        bn = ops.batch_norm(name='g_bn')

        act5 = tf.nn.relu(bn(act5, is_training))
        act6 = ops.snconv2d(act5, 3, 3, 3, 1, 1, name='g_snconv_last')
        out = tf.nn.tanh(act6)
        print('Generator without Condition with SA')
        return out, attn
Example #11
def discriminator(x,trainable=True,reuse=tf.AUTO_REUSE):
    
    with tf.variable_scope("discriminator", reuse=reuse):
        ch = 32
        x = ops.snconv2d(x,16,7,7,1,1,name='init_block')
        #x = tf.nn.leaky_relu(x)
        
        x = risidual_down_block(x,ch,'conv0')#128*128*64
        print(x)
        
        x = risidual_down_block(x,ch*2,'conv1')#64*64*128
        print(x)
        
        x = risidual_down_block(x,ch*4,'conv2')#32*32*256
        x = attention(x,x.shape[-1])
        print(x)
        
        x = risidual_down_block(x,ch*8,'conv3') #16*16*512
        print(x)
        
        x = risidual_down_block(x,ch*16,'conv4') #8*8*1024
        print(x)
        
        x = risidual_down_block(x,ch*32,'conv5') #4*4*1024
        print(x)
        
        x = tf.reduce_sum(x, [1, 2])
        print(x)
        x = ops.snlinear(x,1,name='d_sn_linear')
        print(x)
    return x
Example #12
    def forward(self, z, coord, is_training):
        valid_sizes = {4, 8, 16, 32, 64, 128, 256}
        assert (self.micro_patch_size[0] in valid_sizes and self.micro_patch_size[1] in valid_sizes), \
            "I haven't test your micro patch size: {}".format(self.micro_patch_size)

        update_collection = self._get_update_collection(is_training)
        print(" [Build] Generator ; is_training: {}".format(is_training))

        with tf.variable_scope("G_generator", reuse=tf.AUTO_REUSE):
            init_sp = 2
            init_ngf_mult = 16
            cond = tf.concat([z, coord], axis=1)
            h = snlinear(cond,
                         self.ngf_base * init_ngf_mult * init_sp * init_sp,
                         'g_z_fc',
                         update_collection=update_collection)
            h = tf.reshape(
                h, [-1, init_sp, init_sp, self.ngf_base * init_ngf_mult])

            # Stacking residual blocks
            num_resize_layers = int(
                math.log(min(self.micro_patch_size), 2) - 1)
            num_total_layers = num_resize_layers + self.num_extra_layers
            basic_layers = [8, 4, 2]
            if num_total_layers >= len(basic_layers):
                num_replicate_layers = num_total_layers - len(basic_layers)
                ngf_mult_list = basic_layers + [
                    1,
                ] * num_replicate_layers
            else:
                ngf_mult_list = basic_layers[:num_total_layers]
            print("\t ngf_mult_list = {}".format(ngf_mult_list))

            for idx, ngf_mult in enumerate(ngf_mult_list):
                n_ch = self.ngf_base * ngf_mult
                # Standard layers first
                if idx < num_resize_layers:
                    resize, is_extra = True, False
                # Extra layers do not resize spatial size
                else:
                    resize, is_extra = False, True
                h = self._g_residual_block(h,
                                           cond,
                                           n_ch,
                                           idx=idx,
                                           is_training=is_training,
                                           resize=resize)
                print(
                    "\t GResBlock: id={}, out_shape={}, resize={}, is_extra={}"
                    .format(idx, h.shape.as_list(), resize, is_extra))

            h = batch_norm(name="g_last_bn")(h, is_training=is_training)
            h = tf.nn.relu(h)
            h = snconv2d(h,
                         self.c_dim,
                         name='g_last_conv_2',
                         update_collection=update_collection)
            return tf.nn.tanh(h)
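A quick sanity check of the layer-count arithmetic in the loop above, using assumed values rather than the original project's configuration:

import math

micro_patch_size = (32, 32)   # assumed
num_extra_layers = 1          # assumed
basic_layers = [8, 4, 2]

num_resize_layers = int(math.log(min(micro_patch_size), 2) - 1)              # 4
num_total_layers = num_resize_layers + num_extra_layers                      # 5
ngf_mult_list = basic_layers + [1] * (num_total_layers - len(basic_layers))
print(ngf_mult_list)  # [8, 4, 2, 1, 1]: four upsampling blocks plus one extra block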
Example #13
def risidual_down_block(x,ch,scope):
    with tf.variable_scope(scope):
        short_cut = x
        x = tf.nn.relu(x)
        x = ops.snconv2d(x,ch//4,1,1,1,1,name='sn_conv0')
        x = tf.nn.relu(x)
        x = ops.snconv2d(x,ch, 3, 3, 1, 1,name='sn_conv1')
        x = tf.nn.relu(x)
        x = ops.snconv2d(x,ch,3,3,1,1,name='sn_conv2')
        x = tf.nn.relu(x)
        x = tf.nn.avg_pool(x, [1, 2, 2, 1], [1, 2, 2, 1], 'VALID')
        x = ops.snconv2d(x,ch,1,1,1,1,name='sn_conv3')
        
        short_cut = tf.nn.avg_pool(short_cut, [1, 2, 2, 1], [1, 2, 2, 1], 'VALID')
        short_cut2 = ops.snconv2d(short_cut,short_cut.shape[-1],1,1,1,1,name='sc_sn_conv')
        short_cut = Concatenation([short_cut,short_cut2])
        
    return x+short_cut
Example #14
def attention(x, ch, scope='attention', reuse=tf.AUTO_REUSE):
    with tf.variable_scope(scope, reuse=reuse):
        f = ops.snconv2d(x,ch//8,1,1,1,1,name='f_conv')
        g = ops.snconv2d(x,ch//8,1,1,1,1,name='g_conv')
        h = ops.snconv2d(x,ch,1,1,1,1,name='h_conv')
        
        f = tf.reshape(f,(batch_size,-1,f.shape[-1]))
        g = tf.reshape(g,(batch_size,-1,g.shape[-1]))
        h = tf.reshape(h,(batch_size,-1,h.shape[-1]))
        
        s = tf.matmul(g,f,transpose_b=True)
        beta = tf.nn.softmax(s)
        o = tf.matmul(beta,h)
        gamma = tf.get_variable("gamma", [1], initializer=tf.constant_initializer(0.0))
        o = tf.reshape(o,shape=x.shape)
        o = ops.snconv2d(o, ch,1,1,1,1,name='attn_conv')
        x = gamma*o+x
        
    return x
Example #15
def risidual_up_block(x,ch,trainable,scope=''):
    with tf.variable_scope(scope):
        short_cut = x
        
        x = tf.layers.batch_normalization(x,training=trainable)
        x = tf.nn.relu(x)
        x = ops.snconv2d(x,ch//4,1,1,1,1,name='sn_upconv0')
        x = tf.layers.batch_normalization(x,training=trainable)
        x = tf.nn.relu(x)
        x = upsample(x)
        x = ops.snconv2d(x,ch,3,3,1,1,name='sn_upconv1')
        x = tf.layers.batch_normalization(x,training=trainable)
        x = tf.nn.relu(x)
        x = ops.snconv2d(x,ch,3,3,1,1,name='sn_upconv2')
        x = tf.layers.batch_normalization(x,training=trainable)
        x = tf.nn.relu(x)
        x = ops.snconv2d(x,ch,1,1,1,1,name='sn_upconv3')
        
        short_cut = upsample(short_cut)
        short_cut = ops.snconv2d(short_cut,ch,3,3,1,1,name='sn_sh_upconv')
    return x+short_cut
Example #16
def optimized_block(x, out_channels, name, act=tf.nn.relu):
    """Builds optimized residual blocks for downsampling.

    Compared with block, optimized_block always downsamples the spatial resolution
    by a factor of 2 on each side.

    Args:
      x: The 4D input vector.
      out_channels: Number of features in the output layer.
      name: The variable scope name for the block.
      act: The activation function used in the block.
    Returns:
      A `Tensor` representing the output of the operation.
    """
    with tf.compat.v1.variable_scope(name):
        x_0 = x
        x = ops.snconv2d(x, out_channels, 3, 3, 1, 1, name='sn_conv1')
        x = act(x)
        x = ops.snconv2d(x, out_channels, 3, 3, 1, 1, name='sn_conv2')
        x = dsample(x)
        x_0 = dsample(x_0)
        x_0 = ops.snconv2d(x_0, out_channels, 1, 1, 1, 1, name='sn_conv3')
        return x + x_0
Example #17
def conv_block(inputs, num_filters, kernel_size, strides, padding, kernel_init, sn=True, norm_layer=None, activation=None):
    if padding.lower() == 'same':
        pad_size = (kernel_size - 1) // 2
        inputs = tf.pad(inputs, [[0, 0], [pad_size, pad_size], [pad_size, pad_size], [0, 0]], mode='REFLECT')
    else:
        pad_size = 0
    if sn:
        out = snconv2d(inputs, num_filters, kernel_size, stride=strides, kernel_initializer=kernel_init)
    else:
        out = tf.layers.conv2d(inputs, num_filters, kernel_size, strides, padding, kernel_initializer=kernel_init)
    if norm_layer is not None:
        out = norm_layer(out)
    if activation is not None:
        out = activation(out)
    return out
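Example #17 switches between `snconv2d` and a plain `tf.layers.conv2d` depending on the `sn` flag. For readers unfamiliar with what the `snconv2d` helpers in these examples add, here is a minimal, hedged sketch of a spectrally normalized convolution in the positional style of `ops.snconv2d` used above; the power-iteration details, initializers, and names are assumptions, not the original ops implementation:

import tensorflow as tf

def spectral_norm(w):
    # One power iteration to estimate the largest singular value of `w`, then divide by it.
    w_shape = w.shape.as_list()
    w_mat = tf.reshape(w, [-1, w_shape[-1]])
    u = tf.get_variable('u', [1, w_shape[-1]],
                        initializer=tf.random_normal_initializer(), trainable=False)
    v_hat = tf.nn.l2_normalize(tf.matmul(u, tf.transpose(w_mat)))
    u_hat = tf.nn.l2_normalize(tf.matmul(v_hat, w_mat))
    sigma = tf.matmul(tf.matmul(v_hat, w_mat), tf.transpose(u_hat))
    with tf.control_dependencies([u.assign(u_hat)]):
        return tf.reshape(w_mat / sigma, w_shape)

def snconv2d_sketch(x, out_channels, k_h=3, k_w=3, d_h=1, d_w=1, name='snconv2d'):
    # Spectrally normalized conv2d with SAME padding, NHWC input.
    with tf.variable_scope(name):
        w = tf.get_variable('w', [k_h, k_w, x.shape.as_list()[-1], out_channels],
                            initializer=tf.glorot_uniform_initializer())
        b = tf.get_variable('b', [out_channels], initializer=tf.zeros_initializer())
        conv = tf.nn.conv2d(x, spectral_norm(w), strides=[1, d_h, d_w, 1], padding='SAME')
        return tf.nn.bias_add(conv, b)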
Example #18
File: generator.py  Project: zhwzhong/ylg
def generator(zs,
              target_class,
              gf_dim,
              num_classes,
              training=True,
              mode='both'):
    """Builds the generator segment of the graph, going from z -> G(z).

    Args:
      zs: Tensor representing the latent variables.
      target_class: The class from which we seek to sample.
      gf_dim: The gf dimension.
      num_classes: Number of classes in the labels.
      training: Whether in train mode or not. This affects things like batch
        normalization and spectral normalization.

    Returns:
      - The output layer of the generator.
      - The attention map produced by the self-attention block.
    """
    with tf.compat.v1.variable_scope(
            'generator', reuse=tf.compat.v1.AUTO_REUSE) as gen_scope:
        act0 = ops.snlinear(zs,
                            gf_dim * 16 * 4 * 4,
                            training=training,
                            name='g_snh0')
        act0 = tf.reshape(act0, [-1, 4, 4, gf_dim * 16])
        act1 = block(act0, target_class, gf_dim * 16, num_classes, 'g_block1',
                     training)  # 8
        act2 = block(act1, target_class, gf_dim * 8, num_classes, 'g_block2',
                     training)  # 16
        act3 = block(act2, target_class, gf_dim * 4, num_classes, 'g_block3',
                     training)  # 32
        act3, attn_map = ops.sn_attention_block_sim(act3,
                                                    training,
                                                    name='g_ops')  # 32
        act4 = block(act3, target_class, gf_dim * 2, num_classes, 'g_block4',
                     training)  # 64
        act5 = block(act4, target_class, gf_dim, num_classes, 'g_block5',
                     training)  # 128
        act5 = tf.nn.relu(
            tfgan.tpu.batch_norm(act5,
                                 training,
                                 conditional_class_labels=None,
                                 name='g_bn'))
        act6 = ops.snconv2d(act5, 3, 3, 3, 1, 1, training, 'g_snconv_last')
        out = tf.nn.tanh(act6)
    return out, attn_map
Example #19
def generator_test_64(zs,
                      target_class,
                      gf_dim,
                      num_classes,
                      CGN=False,
                      CGN_groups=4,
                      is_training=True):
    """Builds the generator graph propagating from z to x.

    Args:
      zs: The list of noise tensors.
      target_class: The conditional labels in the generation.
      gf_dim: The gf dimension.
      num_classes: Number of classes in the labels.
      CGN: If True, use conditional group normalization instead of
           conditional batch normalization.
      CGN_groups: Number of groups for conditional group normalization.
      is_training: Whether the graph is built in training mode.

    Returns:
      outputs: The output layer of the generator.
    """

    with tf.variable_scope('model', reuse=tf.AUTO_REUSE):
        # project `z` and reshape
        act0 = ops.snlinear(zs, gf_dim * 16 * 4 * 4, name='g_snh0')
        act0 = tf.reshape(act0, [-1, 4, 4, gf_dim * 16])

        act1 = block(act0, target_class, gf_dim * 16, num_classes, is_training,
                     CGN, CGN_groups, 'g_block1')  # 8 * 8
        act2 = block(act1, target_class, gf_dim * 8, num_classes, is_training,
                     CGN, CGN_groups, 'g_block2')  # 16 * 16
        act3 = block(act2, target_class, gf_dim * 4, num_classes, is_training,
                     CGN, CGN_groups, 'g_block3')  # 32 * 32

        act4 = block(act3, target_class, gf_dim * 2, num_classes, is_training,
                     CGN, CGN_groups, 'g_block4')  # 64 * 64
        act4 = non_local.sn_non_local_block_sim(act4, None, name='g_non_local')
        act5 = block(act4, target_class, gf_dim, num_classes, is_training, CGN,
                     CGN_groups, 'g_block5')  # 128 * 128
        bn = ops.batch_norm(name='g_bn')

        act5 = tf.nn.relu(bn(act5, is_training))
        act6 = ops.snconv2d(act5, 3, 3, 3, 1, 1, name='g_snconv_last')
        out = tf.nn.tanh(act6)
        print('GAN test with moving average')
        return out
Example #20
    def __init__(self, z_size, channel, output_size=48):
        super(Generator, self).__init__()
        self.output_size = output_size
        s = 4
        if self.output_size == 48:
            s = 6
        self.s = s
        self.z_size = z_size
        self.fully_connect = snlinear(z_size, s * s * 512)
        self.relu = nn.ReLU()
        self.tanh = nn.Tanh()

        self.conv_res4 = snconv2d(64,
                                  channel,
                                  padding=1,
                                  kernel_size=3,
                                  stride=1)
        self.self_attn = Self_Attn(in_channels=256)

        self.re1 = Residual_G(512, 256, up_sampling=True)
        self.re2 = Residual_G(256, 128, up_sampling=True)
        self.re3 = Residual_G(128, 64, up_sampling=True)
        self.bn = nn.BatchNorm2d(64)
        self.apply(init_weights)
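Example #20 shows only the PyTorch Generator's `__init__`; its `forward` method is not part of the excerpt. The sketch below is an assumption about how the defined modules are likely wired together (placing the self-attention after `re1` is inferred from `Self_Attn(in_channels=256)`), not the original code:

    def forward(self, z):
        h = self.fully_connect(z)             # (N, s * s * 512)
        h = h.view(-1, 512, self.s, self.s)   # (N, 512, s, s)
        h = self.re1(h)                       # 512 -> 256 channels, 2x upsample
        h = self.self_attn(h)                 # self-attention at 256 channels
        h = self.re2(h)                       # 256 -> 128 channels, 2x upsample
        h = self.re3(h)                       # 128 -> 64 channels, 2x upsample
        h = self.relu(self.bn(h))
        h = self.conv_res4(h)                 # 64 -> output channels, 3x3 conv
        return self.tanh(h)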