import tensorflow as tf

# These snippets target TF 1.x. Project-local modules they assume:
# `ops`/`sn_ops` (spectrally normalized layers), `non_local`
# (self-attention blocks), `resnet_blocks` (residual blocks), and the
# channel-width constants G_DIM / D_DIM.


def resnet_generator(z, labels):
    with tf.variable_scope('generator'):
        # Embed the class label and concatenate it with the noise vector.
        embedding_map = tf.get_variable(
            name='embedding_map',
            shape=[1000, 100],
            initializer=tf.contrib.layers.xavier_initializer())
        label_embedding = tf.nn.embedding_lookup(embedding_map, labels)
        noise_plus_labels = tf.concat([z, label_embedding], 1)
        # Project and reshape to a 4x4 feature map (NCHW).
        linear = ops.linear(noise_plus_labels, G_DIM * 8 * 4 * 4, use_sn=True)
        linear = tf.reshape(linear, [-1, G_DIM * 8, 4, 4])
        res1 = resnet_blocks.class_conditional_generator_block(
            linear, labels, G_DIM * 8, 1000, True, "res1")  # 8x8
        res2 = resnet_blocks.class_conditional_generator_block(
            res1, labels, G_DIM * 4, 1000, True, "res2")  # 16x16
        # Self-attention at 16x16 resolution.
        nl = non_local.sn_non_local_block_sim(res2, None, name='nl')
        res3 = resnet_blocks.class_conditional_generator_block(
            nl, labels, G_DIM * 2, 1000, True, "res3")  # 32x32
        res4 = resnet_blocks.class_conditional_generator_block(
            res3, labels, G_DIM, 1000, True, "res4")  # 64x64
        res4 = tf.layers.batch_normalization(res4, training=True)
        res4 = tf.nn.relu(res4)
        conv = ops.conv2d(res4, 3, 3, 3, 1, 1, name="conv", use_sn=True)
        conv = tf.nn.tanh(conv)
        return conv
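# --- Usage sketch (not from the original source) ---
# A minimal example of driving `resnet_generator`. The batch size and the
# 128-dim noise width are assumptions; the function itself only fixes the
# 100-dim label embedding and the 1000-class vocabulary.
def sample_resnet_generator_images(batch_size=64, z_dim=128):
    z = tf.random_normal([batch_size, z_dim])
    labels = tf.random_uniform([batch_size], minval=0, maxval=1000,
                               dtype=tf.int32)
    # Output is NCHW with values in [-1, 1] from the final tanh.
    return resnet_generator(z, labels)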
# Variant of `discriminator_test` that shares variables via an explicit
# `reuse_vars` flag rather than AUTO_REUSE scoping (cf. the docstringed
# version further below).
def discriminator_test(image, labels, df_dim, number_classes,
                       reuse_vars=False,
                       update_collection=tf.GraphKeys.UPDATE_OPS,
                       act=tf.nn.relu):
    if reuse_vars:
        tf.get_variable_scope().reuse_variables()
    h0 = OptimizedBlock(image, df_dim, 'd_optimized_block1',
                        update_collection, act=act)  # 64 * 64
    h1 = Block(h0, df_dim * 2, 'd_block2', update_collection, act=act)  # 32 * 32
    h1 = non_local.sn_non_local_block_sim(h1, update_collection,
                                          name='d_non_local')  # 32 * 32
    h2 = Block(h1, df_dim * 4, 'd_block3', update_collection, act=act)  # 16 * 16
    h3 = Block(h2, df_dim * 8, 'd_block4', update_collection, act=act)  # 8 * 8
    h4 = Block(h3, df_dim * 16, 'd_block5', update_collection, act=act)  # 4 * 4
    h5 = Block(h4, df_dim * 16, 'd_block6', update_collection, False, act=act)
    h5_act = act(h5)
    # Global sum pooling over the spatial dimensions.
    h6 = tf.reduce_sum(h5_act, [1, 2])
    output = ops.snlinear(h6, 1, update_collection=update_collection,
                          name='d_sn_linear')
    # Projection conditioning: inner product between the pooled features
    # and a spectrally normalized label embedding.
    h_labels = ops.sn_embedding(labels, number_classes, df_dim * 16,
                                update_collection=update_collection,
                                name='d_embedding')
    output += tf.reduce_sum(h6 * h_labels, axis=1, keepdims=True)
    return output
def resnet_discriminator(x, labels, reuse=False, use_sn=True):
    with tf.variable_scope('discriminator', reuse=reuse):
        res1 = resnet_blocks.discriminator_residual_block(
            x, D_DIM, True, "res1", use_sn=use_sn, reuse=reuse)  # 32x32
        res2 = resnet_blocks.discriminator_residual_block(
            res1, D_DIM * 2, True, "res2", use_sn=use_sn, reuse=reuse)  # 16x16
        # Self-attention at 16x16 resolution.
        nl = non_local.sn_non_local_block_sim(res2, None, name="nl")
        res3 = resnet_blocks.discriminator_residual_block(
            nl, D_DIM * 4, True, "res3", use_sn=use_sn, reuse=reuse)  # 8x8
        res4 = resnet_blocks.discriminator_residual_block(
            res3, D_DIM * 8, True, "res4", use_sn=use_sn, reuse=reuse)  # 4x4
        res5 = resnet_blocks.discriminator_residual_block(
            res4, D_DIM * 8, False, "res5", use_sn=use_sn, reuse=reuse)  # 4x4
        res5 = tf.nn.relu(res5)
        # Global sum pooling over the spatial dimensions (NCHW).
        res5_channels = tf.reduce_sum(res5, [2, 3])
        f1_logit = ops.linear(res5_channels, 1, scope="f1", use_sn=use_sn)
        # Projection conditioning: inner product between the pooled
        # features and a learned label embedding.
        embedding_map = tf.get_variable(
            name='embedding_map',
            shape=[1000, D_DIM * 8],
            initializer=tf.contrib.layers.xavier_initializer())
        label_embedding = tf.nn.embedding_lookup(embedding_map, labels)
        f1_logit += tf.reduce_sum(res5_channels * label_embedding,
                                  axis=1, keepdims=True)
        f1 = tf.nn.sigmoid(f1_logit)
        return f1, f1_logit, None
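# --- Loss-wiring sketch (not from the original source) ---
# One plausible way to combine `resnet_generator` and `resnet_discriminator`
# into a non-saturating GAN loss. Sigmoid cross-entropy is suggested by the
# discriminator returning both probabilities and logits; the hinge loss used
# with the other discriminators below would also apply to the raw logits.
def resnet_gan_losses(real_images, z, labels):
    fake_images = resnet_generator(z, labels)
    _, real_logit, _ = resnet_discriminator(real_images, labels, reuse=False)
    # Second discriminator call shares weights with the first.
    _, fake_logit, _ = resnet_discriminator(fake_images, labels, reuse=True)
    d_loss = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(
            logits=real_logit, labels=tf.ones_like(real_logit)) +
        tf.nn.sigmoid_cross_entropy_with_logits(
            logits=fake_logit, labels=tf.zeros_like(fake_logit)))
    g_loss = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(
            logits=fake_logit, labels=tf.ones_like(fake_logit)))
    return d_loss, g_loss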
def generator_without_condition(zs, gf_dim, is_training=True):
    """Builds the unconditional generator graph propagating from z to x.

    Args:
        zs: The list of noise tensors.
        gf_dim: The gf dimension.
        is_training: Whether the graph is built for training.

    Returns:
        outputs: The output layer of the generator.
        attn: The self-attention map from the non-local block.
    """
    with tf.variable_scope('model', reuse=tf.AUTO_REUSE):
        # Project `z` and reshape to a 4x4 feature map.
        act0 = ops.snlinear(zs, gf_dim * 16 * 4 * 4, name='g_snh0')
        act0 = tf.reshape(act0, [-1, 4, 4, gf_dim * 16])
        act1 = block_without_condition(act0, gf_dim * 16, is_training,
                                       'g_block1')  # 8 * 8
        act2 = block_without_condition(act1, gf_dim * 8, is_training,
                                       'g_block2')  # 16 * 16
        act3 = block_without_condition(act2, gf_dim * 4, is_training,
                                       'g_block3')  # 32 * 32
        # Alternative attention placement at 32x32 (disabled):
        # act3 = non_local.sn_non_local_block_sim(act3, None, name='g_non_local')
        act4 = block_without_condition(act3, gf_dim * 2, is_training,
                                       'g_block4')  # 64 * 64
        act4, attn = non_local.sn_non_local_block_sim(act4, None,
                                                      name='g_non_local')
        act5 = block_without_condition(act4, gf_dim, is_training,
                                       'g_block5')  # 128 * 128
        bn = ops.batch_norm(name='g_bn')
        act5 = tf.nn.relu(bn(act5, is_training))
        act6 = ops.snconv2d(act5, 3, 3, 3, 1, 1, name='g_snconv_last')
        out = tf.nn.tanh(act6)
        print('Generator without Condition with SA')
        return out, attn
def discriminator_test(image, labels, df_dim, number_classes,
                       update_collection=None, act=tf.nn.relu):
    """Builds the discriminator graph.

    Args:
        image: The current batch of images to classify as fake or real.
        labels: The corresponding labels for the images.
        df_dim: The df dimension.
        number_classes: The number of classes in the labels.
        update_collection: The update collections used in the
            spectral_normed_weight.
        act: The activation function used in the discriminator.

    Returns:
        A `Tensor` representing the logits of the discriminator.
    """
    with tf.variable_scope('model', reuse=tf.AUTO_REUSE):
        h0 = optimized_block(image, df_dim, 'd_optimized_block1',
                             update_collection, act=act)  # 64 * 64
        h1 = block(h0, df_dim * 2, 'd_block2', update_collection, act=act)  # 32 * 32
        h1 = non_local.sn_non_local_block_sim(h1, update_collection,
                                              name='d_non_local')  # 32 * 32
        h2 = block(h1, df_dim * 4, 'd_block3', update_collection, act=act)  # 16 * 16
        h3 = block(h2, df_dim * 8, 'd_block4', update_collection, act=act)  # 8 * 8
        h4 = block(h3, df_dim * 16, 'd_block5', update_collection, act=act)  # 4 * 4
        h5 = block(h4, df_dim * 16, 'd_block6', update_collection, False, act=act)
        h5_act = act(h5)
        # Global sum pooling over the spatial dimensions.
        h6 = tf.reduce_sum(h5_act, [1, 2])
        output = ops.snlinear(h6, 1, update_collection=update_collection,
                              name='d_sn_linear')
        # Projection conditioning: inner product between the pooled
        # features and a spectrally normalized label embedding.
        h_labels = ops.sn_embedding(labels, number_classes, df_dim * 16,
                                    update_collection=update_collection,
                                    name='d_embedding')
        output += tf.reduce_sum(h6 * h_labels, axis=1, keepdims=True)
        print('Discriminator Test Structure')
        return output
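# --- Hinge-loss sketch (not from the original source) ---
# The spectrally normalized discriminators in this file return unbounded
# logits, which are commonly paired with the hinge loss (as in SAGAN)
# rather than sigmoid cross-entropy.
def hinge_losses(real_logit, fake_logit):
    d_loss = (tf.reduce_mean(tf.nn.relu(1.0 - real_logit)) +
              tf.reduce_mean(tf.nn.relu(1.0 + fake_logit)))
    g_loss = -tf.reduce_mean(fake_logit)
    return d_loss, g_loss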
def discriminator_without_condition(image, df_dim, update_collection=None,
                                    act=tf.nn.relu):
    """Builds the unconditional discriminator graph.

    Args:
        image: The current batch of images to classify as fake or real.
        df_dim: The df dimension.
        update_collection: The update collections used in the
            spectral_normed_weight.
        act: The activation function used in the discriminator.

    Returns:
        A `Tensor` representing the logits of the discriminator, and the
        self-attention map from the non-local block.
    """
    with tf.variable_scope('model', reuse=tf.AUTO_REUSE):
        h0 = optimized_block(image, df_dim, 'd_optimized_block1',
                             update_collection, act=act)  # 64 * 64
        h1 = block(h0, df_dim * 2, 'd_block2', update_collection, act=act)  # 32 * 32
        h1, attn = non_local.sn_non_local_block_sim(
            h1, update_collection, name='d_non_local')  # 32 * 32
        h2 = block(h1, df_dim * 4, 'd_block3', update_collection, act=act)  # 16 * 16
        h3 = block(h2, df_dim * 8, 'd_block4', update_collection, act=act)  # 8 * 8
        h4 = block(h3, df_dim * 16, 'd_block5', update_collection, act=act)  # 4 * 4
        h5 = block(h4, df_dim * 16, 'd_block6', update_collection, False, act=act)
        h5_act = act(h5)
        # Global sum pooling over the spatial dimensions.
        h6 = tf.reduce_sum(h5_act, [1, 2])
        output = ops.snlinear(h6, 1, update_collection=update_collection,
                              name='d_sn_linear')
        print('Discriminator without Condition with SA')
        return output, attn
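# --- Attention-map sketch (not from the original source) ---
# The unconditional pair above returns its self-attention maps alongside
# the main outputs, so both can be fetched for visualization. All
# dimensions here are illustrative assumptions.
def build_unconditional_pair(batch_size=16, z_dim=128, gf_dim=64, df_dim=64):
    zs = tf.random_normal([batch_size, z_dim])
    fake_images, g_attn = generator_without_condition(zs, gf_dim)
    logits, d_attn = discriminator_without_condition(fake_images, df_dim)
    # e.g. sess.run([fake_images, g_attn, d_attn]) in a TF1 session.
    return fake_images, logits, g_attn, d_attn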
def generator_test_64(zs, target_class, gf_dim, num_classes, CGN=False,
                      CGN_groups=4, is_training=True):
    """Builds the generator graph propagating from z to x.

    Args:
        zs: The list of noise tensors.
        target_class: The conditional labels in the generation.
        gf_dim: The gf dimension.
        num_classes: Number of classes in the labels.
        CGN: Flag forwarded to each `block` call.
        CGN_groups: Group count forwarded to each `block` call.
        is_training: Whether the graph is built for training.

    Returns:
        outputs: The output layer of the generator.
    """
    with tf.variable_scope('model', reuse=tf.AUTO_REUSE):
        # Project `z` and reshape to a 4x4 feature map.
        act0 = ops.snlinear(zs, gf_dim * 16 * 4 * 4, name='g_snh0')
        act0 = tf.reshape(act0, [-1, 4, 4, gf_dim * 16])
        act1 = block(act0, target_class, gf_dim * 16, num_classes,
                     is_training, CGN, CGN_groups, 'g_block1')  # 8 * 8
        act2 = block(act1, target_class, gf_dim * 8, num_classes,
                     is_training, CGN, CGN_groups, 'g_block2')  # 16 * 16
        act3 = block(act2, target_class, gf_dim * 4, num_classes,
                     is_training, CGN, CGN_groups, 'g_block3')  # 32 * 32
        act4 = block(act3, target_class, gf_dim * 2, num_classes,
                     is_training, CGN, CGN_groups, 'g_block4')  # 64 * 64
        act4 = non_local.sn_non_local_block_sim(act4, None, name='g_non_local')
        act5 = block(act4, target_class, gf_dim, num_classes,
                     is_training, CGN, CGN_groups, 'g_block5')  # 128 * 128
        bn = ops.batch_norm(name='g_bn')
        act5 = tf.nn.relu(bn(act5, is_training))
        act6 = ops.snconv2d(act5, 3, 3, 3, 1, 1, name='g_snconv_last')
        out = tf.nn.tanh(act6)
        print('GAN test with moving average')
        return out
def generator_test(zs, target_class, gf_dim, num_classes, reuse_vars=False):
    if reuse_vars:
        tf.get_variable_scope().reuse_variables()
    # Project `z` and reshape to a 4x4 feature map.
    act0 = sn_ops.snlinear(zs, gf_dim * 16 * 4 * 4, name='g_snh0')
    act0 = tf.reshape(act0, [-1, 4, 4, gf_dim * 16])
    act1 = Block(act0, target_class, gf_dim * 16, num_classes, 'g_block1')  # 8 * 8
    act2 = Block(act1, target_class, gf_dim * 8, num_classes, 'g_block2')  # 16 * 16
    act3 = Block(act2, target_class, gf_dim * 4, num_classes, 'g_block3')  # 32 * 32
    # Self-attention at 32x32 resolution.
    act3 = non_local.sn_non_local_block_sim(act3, None, name='g_non_local')
    act4 = Block(act3, target_class, gf_dim * 2, num_classes, 'g_block4')  # 64 * 64
    act5 = Block(act4, target_class, gf_dim, num_classes, 'g_block5')  # 128 * 128
    bn = sn_ops.BatchNorm(name='g_bn')
    act5 = tf.nn.relu(bn(act5))
    act6 = sn_ops.snconv2d(act5, 3, 3, 3, 1, 1, name='g_snconv_last')
    out = tf.nn.tanh(act6)
    return out
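# --- Variable-reuse sketch (not from the original source) ---
# The `reuse_vars` flag is meant for building a second, weight-sharing copy
# of the generator (e.g. an evaluation/sampling graph). The outer
# 'generator' scope name here is an assumption.
def build_train_and_eval_generators(z_train, z_eval, labels, gf_dim,
                                    num_classes):
    with tf.variable_scope('generator'):
        train_images = generator_test(z_train, labels, gf_dim, num_classes)
    with tf.variable_scope('generator'):
        # Second call reuses the variables created by the first call.
        eval_images = generator_test(z_eval, labels, gf_dim, num_classes,
                                     reuse_vars=True)
    return train_images, eval_images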