Code Example #1
    def forward(self, h, is_training):
        print(" [Build] Spatial Predictor ; is_training: {}".format(is_training))
        update_collection = self._get_update_collection(is_training)
        with tf.variable_scope("Q_content_prediction_head", reuse=tf.AUTO_REUSE):
            h = snlinear(h, self.aux_dim, 'fc1', update_collection=update_collection)
            h = batch_norm(name='bn1')(h, is_training=is_training)
            h = lrelu(h)
            h = snlinear(h, self.z_dim, 'fc2', update_collection=update_collection)
            return tf.nn.tanh(h)
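The `snlinear` and `snconv2d` ops used throughout these examples wrap their weights with spectral normalization. A minimal sketch of the underlying power-iteration estimate (an illustration of the technique, not the project's exact `ops` implementation; the `update_collection` argument seen above controls when the persistent `u` estimate is refreshed):

import tensorflow as tf  # TF1-style graph mode assumed

def spectral_norm(w, iteration=1):
    # Flatten the kernel to 2-D: (everything_else, out_channels).
    w_shape = w.shape.as_list()
    w_mat = tf.reshape(w, [-1, w_shape[-1]])
    # Persistent estimate of the leading singular vector.
    u = tf.get_variable("u", [1, w_shape[-1]],
                        initializer=tf.random_normal_initializer(),
                        trainable=False)
    u_hat = u
    for _ in range(iteration):
        # One step of power iteration.
        v_hat = tf.nn.l2_normalize(tf.matmul(u_hat, tf.transpose(w_mat)))
        u_hat = tf.nn.l2_normalize(tf.matmul(v_hat, w_mat))
    # Largest singular value; divide it out of the weight.
    sigma = tf.matmul(tf.matmul(v_hat, w_mat), tf.transpose(u_hat))
    with tf.control_dependencies([u.assign(u_hat)]):
        w_norm = tf.reshape(w_mat / sigma, w_shape)
    return w_norm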
Code Example #2
File: sagan_v2.py Project: fcu-D0441406/dense_net
def discriminator(x, trainable=True, reuse=tf.AUTO_REUSE):

    with tf.variable_scope("discriminator", reuse=reuse):
        ch = 32
        x = ops.snconv2d(x, 16, 7, 7, 1, 1, name='init_block')
        # x = tf.nn.leaky_relu(x)

        x = risidual_down_block(x, ch, 'conv0')       # 128*128*64
        print(x)

        x = risidual_down_block(x, ch * 2, 'conv1')   # 64*64*128
        print(x)

        x = risidual_down_block(x, ch * 4, 'conv2')   # 32*32*256
        x = attention(x, x.shape[-1])
        print(x)

        x = risidual_down_block(x, ch * 8, 'conv3')   # 16*16*512
        print(x)

        x = risidual_down_block(x, ch * 16, 'conv4')  # 8*8*1024
        print(x)

        x = risidual_down_block(x, ch * 32, 'conv5')  # 4*4*1024
        print(x)

        x = tf.reduce_sum(x, [1, 2])
        print(x)
        x = ops.snlinear(x, 1, name='d_sn_linear')
        print(x)
    return x
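`risidual_down_block` (spelling as in the source project) and `attention` are defined elsewhere in sagan_v2.py. A plausible minimal sketch of the downsampling block, assuming the same `ops.snconv2d(x, out_ch, k_h, k_w, d_h, d_w, name=...)` signature used above:

def risidual_down_block(x, out_ch, name):
    with tf.variable_scope(name):
        # Shortcut: 1x1 projection to the new width, then 2x downsample.
        shortcut = ops.snconv2d(x, out_ch, 1, 1, 1, 1, name='shortcut')
        shortcut = tf.nn.avg_pool(shortcut, [1, 2, 2, 1], [1, 2, 2, 1], 'SAME')
        # Main path: two spectrally normalized 3x3 convs, then downsample.
        h = tf.nn.leaky_relu(x)
        h = ops.snconv2d(h, out_ch, 3, 3, 1, 1, name='conv1')
        h = tf.nn.leaky_relu(h)
        h = ops.snconv2d(h, out_ch, 3, 3, 1, 1, name='conv2')
        h = tf.nn.avg_pool(h, [1, 2, 2, 1], [1, 2, 2, 1], 'SAME')
        return h + shortcut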
Code Example #3
def discriminator(image, labels, df_dim, number_classes, act=tf.nn.relu):
    """Builds the discriminator graph.

    Args:
      image: The current batch of images to classify as fake or real.
      labels: The corresponding labels for the images.
      df_dim: The df dimension.
      number_classes: The number of classes in the labels.
      act: The activation function used in the discriminator.
    Returns:
      - A `Tensor` representing the logits of the discriminator.
      - The pre-attention activations and the attention map from the
        self-attention block.
      - A list containing all trainable variables defined by the model.
    """
    with tf.compat.v1.variable_scope(
            'discriminator', reuse=tf.compat.v1.AUTO_REUSE) as dis_scope:
        h0 = optimized_block(
            image, df_dim, 'd_optimized_block1', act=act)  # 64 * 64
        h1_ = block(h0, df_dim * 2, 'd_block2', act=act)  # 32 * 32
        h1, attn_map = ops.sn_attention_block_sim(h1_, name='d_ops')  # 32 * 32
        h2 = block(h1, df_dim * 4, 'd_block3', act=act)  # 16 * 16
        h3 = block(h2, df_dim * 8, 'd_block4', act=act)  # 8 * 8
        h4 = block(h3, df_dim * 16, 'd_block5', act=act)  # 4 * 4
        h5 = block(h4, df_dim * 16, 'd_block6', downsample=False, act=act)
        h5_act = act(h5)
        h6 = tf.reduce_sum(input_tensor=h5_act, axis=[1, 2])
        output = ops.snlinear(h6, 1, name='d_sn_linear')
        h_labels = ops.sn_embedding(labels, number_classes, df_dim * 16,
                                    name='d_embedding')
        output += tf.reduce_sum(input_tensor=h6 *
                                h_labels, axis=1, keepdims=True)
    var_list = tf.compat.v1.get_collection(
        tf.compat.v1.GraphKeys.TRAINABLE_VARIABLES, dis_scope.name)
    return output, h1_, attn_map, var_list
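The last two lines inside the scope implement a projection discriminator (Miyato & Koyama, 2018): an unconditional logit from `snlinear(h6, 1)` plus the inner product between the pooled features `h6` and a spectrally normalized class embedding. This conditions the discriminator on the label without concatenating it to the input image.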
Code Example #4
def generator_without_condition(zs, gf_dim, is_training=True):
    """Builds the generator graph propagating from z to x.

    Args:
      zs: The list of noise tensors.
      gf_dim: The gf dimension.
      is_training: Whether the graph is built for training; affects batch
        normalization.
    Returns:
      - The output layer of the generator.
      - The attention map from the non-local block.
    """

    with tf.variable_scope('model', reuse=tf.AUTO_REUSE):
        # project `z` and reshape
        act0 = ops.snlinear(zs, gf_dim * 16 * 4 * 4, name='g_snh0')
        act0 = tf.reshape(act0, [-1, 4, 4, gf_dim * 16])

        act1 = block_without_condition(act0, gf_dim * 16, is_training, 'g_block1')  # 8 * 8
        act2 = block_without_condition(act1, gf_dim * 8, is_training, 'g_block2')  # 16 * 16
        act3 = block_without_condition(act2, gf_dim * 4, is_training, 'g_block3')  # 32 * 32
        # act3 = non_local.sn_non_local_block_sim(act3, None, name='g_non_local')
        act4 = block_without_condition(act3, gf_dim * 2, is_training, 'g_block4')  # 64 * 64
        act4, attn = non_local.sn_non_local_block_sim(act4, None, name='g_non_local')

        act5 = block_without_condition(act4, gf_dim, is_training, 'g_block5')  # 128 * 128
        bn = ops.batch_norm(name='g_bn')

        act5 = tf.nn.relu(bn(act5, is_training))
        act6 = ops.snconv2d(act5, 3, 3, 3, 1, 1, name='g_snconv_last')
        out = tf.nn.tanh(act6)
        print('Generator without Condition with SA')
        return out, attn
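`block_without_condition` is defined elsewhere in the project. A minimal sketch of a typical SNGAN-style upsampling residual block consistent with the calls above (an assumption, not the project's exact code):

def usample(x):
    # Nearest-neighbor 2x upsampling.
    _, h, w, _ = x.shape.as_list()
    return tf.image.resize_nearest_neighbor(x, [2 * h, 2 * w])

def block_without_condition(x, out_ch, is_training, name):
    with tf.variable_scope(name):
        # Main path: BN -> ReLU -> upsample -> conv, then BN -> ReLU -> conv.
        h = tf.nn.relu(ops.batch_norm(name='bn0')(x, is_training))
        h = usample(h)
        h = ops.snconv2d(h, out_ch, 3, 3, 1, 1, name='conv1')
        h = tf.nn.relu(ops.batch_norm(name='bn1')(h, is_training))
        h = ops.snconv2d(h, out_ch, 3, 3, 1, 1, name='conv2')
        # Shortcut: upsample and project channels with a 1x1 conv.
        sc = usample(x)
        sc = ops.snconv2d(sc, out_ch, 1, 1, 1, 1, name='conv_sc')
        return h + sc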
Code Example #5
File: generator.py Project: yes7rose/COCO-GAN
    def forward(self, z, coord, is_training):
        valid_sizes = {4, 8, 16, 32, 64, 128, 256}
        assert (self.micro_patch_size[0] in valid_sizes and self.micro_patch_size[1] in valid_sizes), \
            "I haven't test your micro patch size: {}".format(self.micro_patch_size)

        update_collection = self._get_update_collection(is_training)
        print(" [Build] Generator ; is_training: {}".format(is_training))

        with tf.variable_scope("G_generator", reuse=tf.AUTO_REUSE):
            init_sp = 2
            init_ngf_mult = 16
            cond = tf.concat([z, coord], axis=1)
            h = snlinear(cond,
                         self.ngf_base * init_ngf_mult * init_sp * init_sp,
                         'g_z_fc',
                         update_collection=update_collection)
            h = tf.reshape(
                h, [-1, init_sp, init_sp, self.ngf_base * init_ngf_mult])

            # Stacking residual blocks
            num_resize_layers = int(
                math.log(min(self.micro_patch_size), 2) - 1)
            num_total_layers = num_resize_layers + self.num_extra_layers
            basic_layers = [8, 4, 2]
            if num_total_layers >= len(basic_layers):
                num_replicate_layers = num_total_layers - len(basic_layers)
                ngf_mult_list = basic_layers + [
                    1,
                ] * num_replicate_layers
            else:
                ngf_mult_list = basic_layers[:num_total_layers]
            print("\t ngf_mult_list = {}".format(ngf_mult_list))

            for idx, ngf_mult in enumerate(ngf_mult_list):
                n_ch = self.ngf_base * ngf_mult
                # Standard layers first
                if idx < num_resize_layers:
                    resize, is_extra = True, False
                # Extra layers do not resize spatial size
                else:
                    resize, is_extra = False, True
                h = self._g_residual_block(h,
                                           cond,
                                           n_ch,
                                           idx=idx,
                                           is_training=is_training,
                                           resize=resize)
                print(
                    "\t GResBlock: id={}, out_shape={}, resize={}, is_extra={}"
                    .format(idx, h.shape.as_list(), resize, is_extra))

            h = batch_norm(name="g_last_bn")(h, is_training=is_training)
            h = tf.nn.relu(h)
            h = snconv2d(h,
                         self.c_dim,
                         name='g_last_conv_2',
                         update_collection=update_collection)
            return tf.nn.tanh(h)
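As a concrete check of the layer arithmetic (an illustrative calculation, not output from the source): with micro_patch_size = (64, 64) and num_extra_layers = 0, num_resize_layers = log2(64) - 1 = 5, so ngf_mult_list becomes [8, 4, 2, 1, 1] and the five resizing blocks grow the initial 2x2 activation through 4, 8, 16, and 32 up to 64.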
Code Example #6
    def __init__(self, ssup, channel, featOnly=False):
        super(Discriminator, self).__init__()
        self.ssup = ssup
        self.featOnly = featOnly
        self.lrelu = nn.LeakyReLU()
        self.relu = nn.ReLU()
        self.softmax = nn.Softmax(dim=1)
        self.self_attn = Self_Attn(in_channels=64)
        self.re1 = Residual_D(channel, 64, down_sampling=True, is_start=True)
        self.re2 = Residual_D(64, 128, down_sampling=True)
        self.re3 = Residual_D(128, 256, down_sampling=True)
        self.re4 = Residual_D(256, 512, down_sampling=True)
        self.re5 = Residual_D(512, 1024)

        self.fully_connect_gan2 = snlinear(1024, 1)
        self.fully_connect_rot2 = snlinear(1024, 4)
        self.sigmoid = nn.Sigmoid()
        self.apply(init_weights)
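The listing shows only __init__. A plausible forward pass, assuming the usual self-supervised-GAN layout with attention after the first 64-channel block (a sketch under those assumptions, not the project's exact code):

    def forward(self, x):
        h = self.re1(x)               # -> 64 ch, spatial /2
        h = self.self_attn(h)         # self-attention at 64 channels
        h = self.re2(h)               # -> 128 ch, /2
        h = self.re3(h)               # -> 256 ch, /2
        h = self.re4(h)               # -> 512 ch, /2
        h = self.re5(h)               # -> 1024 ch (no downsampling flag)
        h = self.relu(h)
        h = torch.sum(h, dim=(2, 3))  # global sum pooling -> (N, 1024)
        if self.featOnly:
            return h
        gan_logits = self.fully_connect_gan2(h)
        if self.ssup:
            rot_logits = self.fully_connect_rot2(h)
            return gan_logits, rot_logits, self.softmax(rot_logits)
        return gan_logits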
Code Example #7
def discriminator_test(image,
                       labels,
                       df_dim,
                       number_classes,
                       update_collection=None,
                       act=tf.nn.relu):
    """Builds the discriminator graph.

  Args:
    image: The current batch of images to classify as fake or real.
    labels: The corresponding labels for the images.
    df_dim: The df dimension.
    number_classes: The number of classes in the labels.
    update_collection: The update collections used in the
                       spectral_normed_weight.
    act: The activation function used in the discriminator.
    scope: Optional scope for `variable_op_scope`.
  Returns:
    A `Tensor` representing the logits of the discriminator.
  """
    with tf.variable_scope('model', reuse=tf.AUTO_REUSE):
        h0 = optimized_block(image,
                             df_dim,
                             'd_optimized_block1',
                             update_collection,
                             act=act)  # 64 * 64
        h1 = block(h0, df_dim * 2, 'd_block2', update_collection,
                   act=act)  # 32 * 32
        h1 = non_local.sn_non_local_block_sim(h1,
                                              update_collection,
                                              name='d_non_local')  # 32 * 32
        h2 = block(h1, df_dim * 4, 'd_block3', update_collection,
                   act=act)  # 16 * 16
        h3 = block(h2, df_dim * 8, 'd_block4', update_collection,
                   act=act)  # 8 * 8
        h4 = block(h3, df_dim * 16, 'd_block5', update_collection,
                   act=act)  # 4 * 4
        h5 = block(h4,
                   df_dim * 16,
                   'd_block6',
                   update_collection,
                   False,
                   act=act)
        h5_act = act(h5)
        h6 = tf.reduce_sum(h5_act, [1, 2])
        output = ops.snlinear(h6,
                              1,
                              update_collection=update_collection,
                              name='d_sn_linear')
        h_labels = ops.sn_embedding(labels,
                                    number_classes,
                                    df_dim * 16,
                                    update_collection=update_collection,
                                    name='d_embedding')
        output += tf.reduce_sum(h6 * h_labels, axis=1, keepdims=True)
        print('Discriminator Test Structure')
        return output
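Compared with the training-graph builders above, this `_test` variant threads `update_collection` into every spectrally normalized op. In these codebases that argument typically lets the caller gather the power-iteration update ops into a named collection (or suppress them) instead of running them inline, so the evaluation graph can reuse the training-time singular-value estimates without mutating them.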
Code Example #8
def discriminator(image, labels, df_dim, number_classes, act=tf.nn.relu):
    """Builds the discriminator graph.

    Args:
      image: The current batch of images to classify as fake or real.
      labels: The corresponding labels for the images.
      df_dim: The df dimension.
      number_classes: The number of classes in the labels.
      act: The activation function used in the discriminator.
    Returns:
      - A `Tensor` representing the logits of the discriminator.
      - A list containing all trainable variables defined by the model.
    """
    with tf.compat.v1.variable_scope(
            'discriminator', reuse=tf.compat.v1.AUTO_REUSE) as dis_scope:
        h0 = optimized_block(
            image, df_dim, 'd_optimized_block1', act=act)  # 64 * 64
        h1 = block(h0, df_dim * 2, 'd_block2', act=act)    # 32 * 32

        if flags.FLAGS.D_module == 'hamburger':
            if flags.FLAGS.D_version == 'v1':
                print('Add V1 to D!')
                hamburger = ops.sn_hamburger_v1
            else:
                print('Add V2 to D!')
                hamburger = ops.sn_hamburger_v2
            h1 = hamburger(h1,
                           name='d_ops',
                           use_bn=False,
                           ham_type=flags.FLAGS.D_ham_type,
                           S=flags.FLAGS.D_s,
                           D=flags.FLAGS.D_d,
                           R=flags.FLAGS.D_r,
                           steps=flags.FLAGS.D_K)
        else:
            print('No context module in D!')

        h2 = block(h1, df_dim * 4, 'd_block3', act=act)   # 16 * 16
        h3 = block(h2, df_dim * 8, 'd_block4', act=act)   # 8 * 8
        h4 = block(h3, df_dim * 16, 'd_block5', act=act)  # 4 * 4
        h5 = block(h4, df_dim * 16, 'd_block6', downsample=False, act=act)
        h5_act = act(h5)
        h6 = tf.reduce_sum(input_tensor=h5_act, axis=[1, 2])
        output = ops.snlinear(h6, 1, name='d_sn_linear')
        h_labels = ops.sn_embedding(labels, number_classes, df_dim * 16,
                                    name='d_embedding')
        output += tf.reduce_sum(input_tensor=h6 *
                                h_labels, axis=1, keepdims=True)
    var_list = tf.compat.v1.get_collection(
        tf.compat.v1.GraphKeys.TRAINABLE_VARIABLES, dis_scope.name)
    return output, var_list
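This variant optionally swaps the self-attention block for a "hamburger" context module (a matrix-decomposition-based global context block, per "Is Attention Better Than Matrix Decomposition?", ICLR 2021), selected at build time via flags.FLAGS.D_module.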
Code Example #9
File: generator.py Project: zhwzhong/ylg
def generator(zs,
              target_class,
              gf_dim,
              num_classes,
              training=True,
              mode='both'):
    """Builds the generator segment of the graph, going from z -> G(z).

    Args:
      zs: Tensor representing the latent variables.
      target_class: The class from which we seek to sample.
      gf_dim: The gf dimension.
      num_classes: Number of classes in the labels.
      training: Whether in train mode or not. This affects things like batch
        normalization and spectral normalization.

    Returns:
      - The output layer of the generator.
      - A list containing all trainable varaibles defined by the model.
    """
    with tf.compat.v1.variable_scope(
            'generator', reuse=tf.compat.v1.AUTO_REUSE) as gen_scope:
        act0 = ops.snlinear(zs,
                            gf_dim * 16 * 4 * 4,
                            training=training,
                            name='g_snh0')
        act0 = tf.reshape(act0, [-1, 4, 4, gf_dim * 16])
        act1 = block(act0, target_class, gf_dim * 16, num_classes, 'g_block1',
                     training)  # 8
        act2 = block(act1, target_class, gf_dim * 8, num_classes, 'g_block2',
                     training)  # 16
        act3 = block(act2, target_class, gf_dim * 4, num_classes, 'g_block3',
                     training)  # 32
        act3, attn_map = ops.sn_attention_block_sim(act3,
                                                    training,
                                                    name='g_ops')  # 32
        act4 = block(act3, target_class, gf_dim * 2, num_classes, 'g_block4',
                     training)  # 64
        act5 = block(act4, target_class, gf_dim, num_classes, 'g_block5',
                     training)  # 128
        act5 = tf.nn.relu(
            tfgan.tpu.batch_norm(act5,
                                 training,
                                 conditional_class_labels=None,
                                 name='g_bn'))
        act6 = ops.snconv2d(act5, 3, 3, 3, 1, 1, training, 'g_snconv_last')
        out = tf.nn.tanh(act6)
    return out, attn_map
Code Example #10
def discriminator_without_condition(image,
                                    df_dim,
                                    update_collection=None,
                                    act=tf.nn.relu):
    """Builds the discriminator graph.

    Args:
      image: The current batch of images to classify as fake or real.
      df_dim: The df dimension.
      update_collection: The update collections used in the
                         spectral_normed_weight.
      act: The activation function used in the discriminator.
    Returns:
      - A `Tensor` representing the logits of the discriminator.
      - The attention map from the non-local block.
    """
    with tf.variable_scope('model', reuse=tf.AUTO_REUSE):
        h0 = optimized_block(image,
                             df_dim,
                             'd_optimized_block1',
                             update_collection,
                             act=act)  # 64 * 64
        h1 = block(h0, df_dim * 2, 'd_block2', update_collection,
                   act=act)  # 32 * 32
        h1, attn = non_local.sn_non_local_block_sim(
            h1, update_collection, name='d_non_local')  # 32 * 32
        h2 = block(h1, df_dim * 4, 'd_block3', update_collection,
                   act=act)  # 16 * 16
        h3 = block(h2, df_dim * 8, 'd_block4', update_collection,
                   act=act)  # 8 * 8
        h4 = block(h3, df_dim * 16, 'd_block5', update_collection,
                   act=act)  # 4 * 4
        h5 = block(h4,
                   df_dim * 16,
                   'd_block6',
                   update_collection,
                   False,
                   act=act)
        h5_act = act(h5)
        h6 = tf.reduce_sum(h5_act, [1, 2])
        output = ops.snlinear(h6,
                              1,
                              update_collection=update_collection,
                              name='d_sn_linear')

        print('Discriminator without Condition with SA')

        return output, attn
Code Example #11
def generator_test_64(zs,
                      target_class,
                      gf_dim,
                      num_classes,
                      CGN=False,
                      CGN_groups=4,
                      is_training=True):
    """Builds the generator graph propagating from z to x.

  Args:
    zs: The list of noise tensors.
    target_class: The conditional labels in the generation.
    gf_dim: The gf dimension.
    num_classes: Number of classes in the labels.
    scope: Optional scope for `variable_op_scope`.

  Returns:
    outputs: The output layer of the generator.
  """

    with tf.variable_scope('model', reuse=tf.AUTO_REUSE):
        # project `z` and reshape
        act0 = ops.snlinear(zs, gf_dim * 16 * 4 * 4, name='g_snh0')
        act0 = tf.reshape(act0, [-1, 4, 4, gf_dim * 16])

        act1 = block(act0, target_class, gf_dim * 16, num_classes, is_training,
                     CGN, CGN_groups, 'g_block1')  # 8 * 8
        act2 = block(act1, target_class, gf_dim * 8, num_classes, is_training,
                     CGN, CGN_groups, 'g_block2')  # 16 * 16
        act3 = block(act2, target_class, gf_dim * 4, num_classes, is_training,
                     CGN, CGN_groups, 'g_block3')  # 32 * 32

        act4 = block(act3, target_class, gf_dim * 2, num_classes, is_training,
                     CGN, CGN_groups, 'g_block4')  # 64 * 64
        act4 = non_local.sn_non_local_block_sim(act4, None, name='g_non_local')
        act5 = block(act4, target_class, gf_dim, num_classes, is_training, CGN,
                     CGN_groups, 'g_block5')  # 128 * 128
        bn = ops.batch_norm(name='g_bn')

        act5 = tf.nn.relu(bn(act5, is_training))
        act6 = ops.snconv2d(act5, 3, 3, 3, 1, 1, name='g_snconv_last')
        out = tf.nn.tanh(act6)
        print('GAN test with moving average')
        return out
Code Example #12
    def __init__(self, z_size, channel, output_size=48):
        super(Generator, self).__init__()
        self.output_size = output_size
        s = 4
        if self.output_size == 48:
            s = 6
        self.s = s
        self.z_size = z_size
        self.fully_connect = snlinear(z_size, s * s * 512)
        self.relu = nn.ReLU()
        self.tanh = nn.Tanh()

        self.conv_res4 = snconv2d(64,
                                  channel,
                                  padding=1,
                                  kernel_size=3,
                                  stride=1)
        self.self_attn = Self_Attn(in_channels=256)

        self.re1 = Residual_G(512, 256, up_sampling=True)
        self.re2 = Residual_G(256, 128, up_sampling=True)
        self.re3 = Residual_G(128, 64, up_sampling=True)
        self.bn = nn.BatchNorm2d(64)
        self.apply(init_weights)
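Again only __init__ is shown. A plausible forward pass consistent with these layers (a sketch; with s = 6 the three upsampling blocks give 6 -> 12 -> 24 -> 48, matching output_size = 48):

    def forward(self, z):
        h = self.fully_connect(z)            # (N, s*s*512)
        h = h.view(-1, 512, self.s, self.s)
        h = self.re1(h)                      # -> 256 ch, 2x upsample
        h = self.self_attn(h)                # attention at 256 channels
        h = self.re2(h)                      # -> 128 ch, 2x
        h = self.re3(h)                      # -> 64 ch, 2x
        h = self.relu(self.bn(h))
        h = self.conv_res4(h)                # 3x3 conv down to `channel` maps
        return self.tanh(h)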
Code Example #13
File: discriminator.py Project: vishal3477/COCO-GAN
    def forward(self, x, y=None, is_training=True):
        valid_sizes = {8, 16, 32, 64, 128, 256, 512}
        assert (self.macro_patch_size[0] in valid_sizes and self.macro_patch_size[1] in valid_sizes), \
            "I haven't test your macro patch size: {}".format(self.macro_patch_size)

        update_collection = self._get_update_collection(is_training)
        print(" [Build] Discriminator ; is_training: {}".format(is_training))

        with tf.variable_scope("D_discriminator", reuse=tf.AUTO_REUSE):

            num_resize_layers = int(
                math.log(min(self.macro_patch_size), 2) - 1)
            num_total_layers = num_resize_layers + self.num_extra_layers
            basic_layers = [2, 4, 8, 8]
            if num_total_layers > len(basic_layers):
                num_replicate_layers = num_total_layers - len(basic_layers)
                ndf_mult_list = [
                    1,
                ] * num_replicate_layers + basic_layers
            else:
                ndf_mult_list = basic_layers[-num_total_layers:]
                ndf_mult_list[0] = 1
            print("\t ndf_mult_list = {}".format(ndf_mult_list))

            # Stack extra layers without resize first
            h = x
            for idx, ndf_mult in enumerate(ndf_mult_list):
                n_ch = self.ndf_base * ndf_mult
                # Head is fixed and goes first
                if idx == 0:
                    is_head, resize, is_extra = True, True, False
                # Extra layers before standard layers
                elif idx <= self.num_extra_layers:
                    is_head, resize, is_extra = False, False, True
                # Last standard layer has no resize
                elif idx == len(ndf_mult_list) - 1:
                    is_head, resize, is_extra = False, False, False
                # Standard layers
                else:
                    is_head, resize, is_extra = False, True, False

                h = self._d_residual_block(h,
                                           n_ch,
                                           idx=idx,
                                           is_training=is_training,
                                           resize=resize,
                                           is_head=is_head)
                print(
                    "\t DResBlock: id={}, out_shape={}, resize={}, is_extra={}"
                    .format(idx, h.shape.as_list(), resize, is_extra))

            h = tf.nn.relu(h)
            h = tf.reduce_sum(h, axis=[1, 2])  # Global pooling
            last_feature_map = h
            adv_out = snlinear(h,
                               1,
                               'main_steam_out',
                               update_collection=update_collection)

            # Projection Discriminator
            if y is not None:
                h_num_ch = self.ndf_base * ndf_mult_list[-1]
                y_emb = snlinear(y,
                                 h_num_ch,
                                 'y_emb',
                                 update_collection=update_collection)
                proj_out = tf.reduce_sum(y_emb * h, axis=1, keepdims=True)
            else:
                proj_out = 0

            out = adv_out + proj_out

            return out, last_feature_map
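As a concrete check of the mirrored arithmetic here (illustrative, not from the source): with macro_patch_size = (64, 64) and num_extra_layers = 0, num_resize_layers = log2(64) - 1 = 5 > len(basic_layers), so ndf_mult_list = [1, 2, 4, 8, 8]; the first four blocks halve the spatial size from 64x64 down to 4x4 and the final block keeps 4x4 before the global sum pooling.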