def discriminator(x, args, reuse=False):
    """Adds discriminator nodes to the graph.

    From the input image, successively applies convolutions with
    striding to scale down layer sizes until we get to a single
    output value, representing the discriminator's estimate of fake
    vs real. The single final output acts similarly to a sigmoid
    activation function.

    Args:
        x: Tensor, input.
        args: Argparse structure.
        reuse: Boolean, whether to reuse variables.

    Returns:
        Final output of discriminator pipeline.
    """
    use_bn = False if args.model == 'iwgan' else True
    final_activation = None if args.model in ['wgan', 'iwgan'] else tf.nn.sigmoid
    with arg_scope([conv2d],
                   use_batch_norm=use_bn,
                   activation=lrelu,
                   reuse=reuse):
        x = tf.reshape(x, [-1, 64, 64, 3])
        x = conv2d(x, 3, args.latent_size, 5, 2, name='c1', use_batch_norm=False)
        x = conv2d(x, args.latent_size, args.latent_size * 2, 5, 2, name='c2')
        x = conv2d(x, args.latent_size * 2, args.latent_size * 4, 5, 2, name='c3')
        x = tf.reshape(x, [-1, 4 * 4 * 4 * args.latent_size])
        x = dense(x, 4 * 4 * 4 * args.latent_size, 1,
                  use_batch_norm=False,
                  activation=final_activation,
                  name='fc2',
                  reuse=reuse)
        x = tf.reshape(x, [-1])
    return x
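# Hedged usage sketch (not shown in the source): the discriminator graph is
# built once for real images and once for generated ones, sharing weights via
# reuse=True on the second call. real_images, fake_images, and args are
# assumed to be defined elsewhere.
d_real = discriminator(real_images, args, reuse=False)
d_fake = discriminator(fake_images, args, reuse=True)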
def _discriminator(inputs, reuse):
    """Discriminator built from stride-2 Ck blocks; the final single-filter
    conv yields a grid of per-patch logits rather than one scalar."""
    d = Ck(inputs, 64, reuse, is_norm=False, name='C64')  # [bs, 128, 128, 64]
    d = Ck(d, 128, reuse, is_norm=True, name='C128')      # [bs, 64, 64, 128]
    d = Ck(d, 256, reuse, is_norm=True, name='C256')      # [bs, 32, 32, 256]
    d = Ck(d, 512, reuse, is_norm=True, name='C512')      # [bs, 16, 16, 512]
    d = conv2d(d, 1, 4, stride=1, reuse=reuse, name='last_conv')
    logits = d
    return logits
def Ck(inputs, k, reuse, is_norm=True, name='Ck'):
    """Ck is a 4x4 Convolution-InstanceNorm-LeakyReLU layer with k filters
    and stride 2."""
    with tf.variable_scope(name, reuse=reuse):
        h = conv2d(inputs, k, 4, stride=2, name='conv')
        if is_norm:
            h = instance_norm(h, name='instance_norm')
        h = leakly_relu(h, 0.2, name='leakly_relu')
        return h
def dk(inputs, k, reuse, name='dk'):
    """dk is a 3x3 Convolution-InstanceNorm-ReLU layer with k filters
    and stride 2."""
    with tf.variable_scope(name, reuse=reuse):
        h = conv2d(inputs, k, 3, stride=2, name='conv')
        h = instance_norm(h, name='instance_norm')
        h = tf.nn.relu(h, name='relu')
        return h
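# Hedged usage sketch for the blocks above (assumptions: 256x256x3 inputs, as
# implied by the shape comments in _discriminator, and a least-squares GAN
# loss, which the source does not specify). The discriminator is scored per
# patch, so the loss averages over the whole logit grid.
logits_real = _discriminator(real_images, reuse=False)  # e.g. [bs, 16, 16, 1]
logits_fake = _discriminator(fake_images, reuse=True)
d_loss = (tf.reduce_mean(tf.square(logits_real - 1.0)) +
          tf.reduce_mean(tf.square(logits_fake)))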
def decoder(x, latent_size, reuse=False):
    """Adds decoder nodes to the graph.

    Args:
        x: Tensor, encoded image representation.
        latent_size: Integer, size of latent vector.
        reuse: Boolean, whether to reuse variables.
    """
    with arg_scope([dense, conv2d, deconv2d],
                   reuse=reuse,
                   activation=tf.nn.relu):
        x = dense(x, latent_size, 32 * 4 * 4, name='d1')
        x = tf.reshape(x, [-1, 4, 4, 32])  # un-flatten
        x = conv2d(x, 32, 96, 1, name='c1')
        x = conv2d(x, 96, 256, 1, name='c2')
        x = deconv2d(x, 256, 256, 5, 2, name='dc1')
        x = deconv2d(x, 256, 128, 5, 2, name='dc2')
        x = deconv2d(x, 128, 64, 5, 2, name='dc3')
        x = deconv2d(x, 64, 3, 5, 2, name='dc4', activation=tf.nn.tanh)
    return x
def __call__(self, x, growing_step, reuse=False, *args, **kwargs):
    with tf.variable_scope(self.name) as vs:
        if reuse:
            vs.reuse_variables()
        # Run only the blocks enabled at the current growing step, then
        # project features to an RGB image with a step-specific toRGB conv.
        for block in self.blocks[:growing_step + 1]:
            x = block(inputs=x)
        with tf.variable_scope('toRGB_%d' % growing_step):
            x = conv2d(x, self.channel, activation_='tanh')
        return x
def __call__(self, x, growing_step, reuse=False, *args, **kwargs):
    with tf.variable_scope(self.name) as vs:
        if reuse:
            vs.reuse_variables()
        # Map the RGB input into feature space with a step-specific fromRGB
        # conv, then run the enabled blocks from highest resolution down to
        # the base block before the final dense head.
        with tf.variable_scope('fromRGB_%d' % growing_step):
            f = self.filters[growing_step]
            x = conv2d(x, f, activation_='tanh')
        for block in self.blocks[:growing_step + 1][::-1]:
            x = block(inputs=x)
        x = flatten(x)
        x = self.dense(x)
        return x
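# Hedged usage sketch for the progressive-growing pair above. The generator
# and discriminator instances, real_images, and the latent batch z are
# assumed names not shown in the source; growing_step selects how many
# resolution blocks are active and is increased as training advances to
# higher resolutions.
fake = generator(z, growing_step=2)
d_fake = discriminator(fake, growing_step=2)
d_real = discriminator(real_images, growing_step=2, reuse=True)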
def encoder(x, reuse=False):
    """Adds encoder nodes to the graph.

    Args:
        x: Tensor, input images.
        reuse: Boolean, whether to reuse variables.
    """
    with arg_scope([conv2d], reuse=reuse, activation=lrelu):
        x = conv2d(x, 3, 64, 5, 2, name='c1')
        x = conv2d(x, 64, 128, 5, 2, name='c2')
        x = conv2d(x, 128, 256, 5, 2, name='c3')
        x = conv2d(x, 256, 256, 5, 2, name='c4')
        x = conv2d(x, 256, 96, 1, name='c5')
        x = conv2d(x, 96, 32, 1, name='c6')
    return x
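# Hedged usage sketch: wiring encoder and decoder (both defined above) into an
# autoencoder. Assuming 64x64x3 inputs scaled to [-1, 1], the four stride-2
# convs leave a [bs, 4, 4, 32] code, so latent_size = 4 * 4 * 32 here; the
# source itself does not show this glue.
latent_size = 4 * 4 * 32
h = encoder(images)                       # [bs, 4, 4, 32]
h = tf.reshape(h, [-1, latent_size])      # flatten for the decoder's dense layer
recon = decoder(h, latent_size)           # [bs, 64, 64, 3] in [-1, 1]
loss = tf.reduce_mean(tf.square(recon - images))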