def generator(self, z, y, is_training=True, reuse=False):
    # Network architecture is exactly the same as in infoGAN (https://arxiv.org/abs/1606.03657)
    # Architecture : FC1024_BR-FC7x7x128_BR-(64)4dc2s_BR-(1)4dc2s_S
    with tf.variable_scope('generator', reuse=reuse):
        # merge noise and condition label
        z = concat([z, y], axis=1)

        net = tf.nn.relu(
            batch_norm(linear(z, 1024, scope='g_fc1'),
                       is_training=is_training, scope='g_bn1'))
        net = tf.nn.relu(
            batch_norm(linear(net, 128 * 7 * 7, scope='g_fc2'),
                       is_training=is_training, scope='g_bn2'))
        net = tf.reshape(net, [self.batch_size, 7, 7, 128])
        # 7x7 -> 14x14
        net = tf.nn.relu(
            batch_norm(deconv2d(net, [self.batch_size, 14, 14, 64],
                                (4, 4), (2, 2), name='g_dc3'),
                       is_training=is_training, scope='g_bn3'))
        # 14x14 -> 28x28; sigmoid maps pixel values into [0, 1]
        out = tf.nn.sigmoid(
            deconv2d(net, [self.batch_size, 28, 28, 1],
                     (4, 4), (2, 2), name='g_dc4'))

        return out
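
# Usage sketch (illustrative, not part of the original file): sampling inputs
# with the shapes generator() expects. Uniform noise in [-1, 1] and one-hot
# condition labels are an assumption here, not something this file pins down;
# the z_dim / y_dim attribute names are likewise assumed.
import numpy as np

def sample_generator_inputs(model):
    """Return a (z, y) pair matching generator()'s expected input shapes."""
    z = np.random.uniform(-1., 1., size=(model.batch_size, model.z_dim)).astype(np.float32)
    labels = np.random.randint(0, model.y_dim, size=model.batch_size)
    y = np.eye(model.y_dim, dtype=np.float32)[labels]  # one-hot condition labels
    return z, y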
def classifier(self, x, is_training=True, reuse=False):
    # Network architecture is exactly the same as in infoGAN (https://arxiv.org/abs/1606.03657)
    # Architecture : (64)4c2s-(128)4c2s_BL-FC1024_BL-FC128_BL-FC12S
    # (the final FC here has self.y_dim outputs followed by a softmax)
    # All layers except the last two are shared with the discriminator, so x is
    # the discriminator's penultimate 1024-d feature rather than a raw image.
    with tf.variable_scope("classifier", reuse=reuse):
        net = lrelu(
            batch_norm(linear(x, 128, scope='c_fc1'),
                       is_training=is_training, scope='c_bn1'))
        out_logit = linear(net, self.y_dim, scope='c_fc2')
        out = tf.nn.softmax(out_logit)

        return out, out_logit
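
# Sketch (illustrative): how classifier() is fed. Its input x is the 1024-d
# shared feature `net` returned by discriminator() below, so the conv stack is
# reused rather than duplicated. Softmax cross-entropy against the one-hot
# condition labels is a typical auxiliary-classifier loss; the helper name and
# wiring here are assumptions, not code from this file.
def classifier_loss_sketch(model, images, y):
    _, _, shared_feat = model.discriminator(images, is_training=True, reuse=True)
    _, code_logit = model.classifier(shared_feat, is_training=True, reuse=False)
    return tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits=code_logit, labels=y))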
def discriminator(self, x, is_training=True, reuse=False):
    # Network architecture is exactly the same as in infoGAN (https://arxiv.org/abs/1606.03657)
    # Architecture : (64)4c2s-(128)4c2s_BL-FC1024_BL-FC1_S
    with tf.variable_scope('discriminator', reuse=reuse):
        # 28x28 -> 14x14 (no batch norm on the first conv layer, following DCGAN practice)
        net = lrelu(conv2d(x, 64, (4, 4), (2, 2), name='d_conv1'))
        # 14x14 -> 7x7
        net = lrelu(
            batch_norm(conv2d(net, 128, (4, 4), (2, 2), name='d_conv2'),
                       is_training=is_training, scope='d_bn2'))
        net = tf.reshape(net, [self.batch_size, -1])
        net = lrelu(
            batch_norm(linear(net, 1024, scope='d_fc3'),
                       is_training=is_training, scope='d_bn3'))
        out_logit = linear(net, 1, scope='d_fc4')
        out = tf.nn.sigmoid(out_logit)

        # also return the shared 1024-d feature `net`, consumed by classifier()
        return out, out_logit, net
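
# Sketch (illustrative): the standard GAN losses these outputs usually feed.
# The losses are built on the raw logits rather than the sigmoid output `out`,
# which is the numerically stable form. The argument names are assumptions.
def gan_loss_sketch(D_real_logits, D_fake_logits):
    d_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
        logits=D_real_logits, labels=tf.ones_like(D_real_logits)))   # real -> 1
    d_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
        logits=D_fake_logits, labels=tf.zeros_like(D_fake_logits)))  # fake -> 0
    d_loss = d_loss_real + d_loss_fake
    # the generator tries to make the discriminator output 1 on fakes
    g_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
        logits=D_fake_logits, labels=tf.ones_like(D_fake_logits)))
    return d_loss, g_loss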