def encode(self, inputs, target_space, hparams, features=None, losses=None):
    """Compress the inputs before delegating to the parent encoder.

    Projects `inputs` to `hparams.hidden_size` with a 3x3 SAME-padded conv
    block (no leading relu), forces `hparams.num_compress_steps` to 2, runs a
    2-D compression, and hands the compressed result to the superclass encode.

    Args:
      inputs: input tensor (image-like; presumably [batch, h, w, channels] —
        TODO confirm against callers).
      target_space: target space id forwarded to the parent encoder.
      hparams: hyperparameters object; `num_compress_steps` is mutated to 2
        in place as a side effect on the shared object.
      features: optional features dict forwarded to the parent encoder.
      losses: optional losses accumulator forwarded to the parent encoder.

    Returns:
      Whatever the superclass `encode` returns for the compressed inputs.
    """
    projected = common_layers.conv_block(
        inputs,
        hparams.hidden_size,
        [((1, 1), (3, 3))],
        first_relu=False,
        padding="SAME",
        force2d=True,
        name="small_image_conv")
    # NOTE: in-place mutation of the shared hparams object.
    hparams.num_compress_steps = 2
    compressed = transformer_vae.compress(
        projected, None, is_2d=True, hparams=hparams, name="convolutions")
    return super(TransformerSketch, self).encode(
        compressed, target_space, hparams, features=features, losses=losses)
def discriminator(x, compress, hparams, name, reuse=None):
    """Discriminator head with a gradient-reversal trick.

    The expression `tf.stop_gradient(2 * x) - x` equals `x` in the forward
    pass but presents a negated gradient to whatever feeds `x`, so training
    the discriminator pushes the upstream network adversarially. The features
    are then either compressed or passed through a residual conv, mean-pooled
    over axis 1, and squashed to a single value in (-1, 1).

    Args:
      x: input activations.
      compress: if true, apply `transformer_vae.compress`; otherwise a
        residual conv stack.
      hparams: hyperparameters forwarded to the transformer_vae helpers.
      name: variable scope name.
      reuse: variable-scope reuse flag.

    Returns:
      A tanh-squashed scalar score per batch element.
    """
    with tf.variable_scope(name, reuse=reuse):
        reversed_x = tf.stop_gradient(2 * x) - x  # Reverse gradient.
        if compress:
            hidden = transformer_vae.compress(
                reversed_x, None, False, hparams, "compress")
        else:
            hidden = transformer_vae.residual_conv(
                reversed_x, 1, 3, hparams, "compress_rc")
        pooled = tf.reduce_mean(hidden, axis=1)
        return tf.tanh(tf.layers.dense(pooled, 1, name="reduce"))
def discriminator(x, compress, hparams, name, reuse=None):
    """Score inputs with a small adversarial (gradient-reversing) head.

    NOTE(review): this definition is byte-identical to another
    `discriminator` in this file; in Python the later definition silently
    shadows the earlier one — confirm whether one copy should be removed.

    Forward value of `tf.stop_gradient(2 * x) - x` is exactly `x`; only the
    gradient is negated. The result is compressed (or residual-convolved),
    mean-pooled over axis 1, and projected to one tanh-bounded output.

    Args:
      x: input activations.
      compress: choose `transformer_vae.compress` vs. residual conv path.
      hparams: hyperparameters for the transformer_vae helpers.
      name: variable scope name.
      reuse: variable-scope reuse flag.

    Returns:
      Per-example scores in (-1, 1).
    """
    with tf.variable_scope(name, reuse=reuse):
        features = tf.stop_gradient(2 * x) - x  # Reverse gradient.
        if not compress:
            features = transformer_vae.residual_conv(
                features, 1, 3, hparams, "compress_rc")
        else:
            features = transformer_vae.compress(
                features, None, False, hparams, "compress")
        summary = tf.reduce_mean(features, axis=1)
        return tf.tanh(tf.layers.dense(summary, 1, name="reduce"))
def encode(self, inputs, target_space, hparams):
    """Add two strided-convolution compression steps on top of encode.

    Projects `inputs` through a 3x3 SAME conv block to
    `hparams.hidden_size`, sets `hparams.num_compress_steps = 2` (an
    in-place side effect on the shared hparams), compresses in 2-D, and
    delegates to the superclass encoder.

    NOTE(review): unlike the other `encode` overload in this file, this
    call to `transformer_vae.compress` omits the second positional
    (conditioning) argument — confirm the helper's signature tolerates
    this, or whether `None` should be passed explicitly.

    Args:
      inputs: input tensor (image-like — TODO confirm shape with callers).
      target_space: target space id forwarded to the parent encoder.
      hparams: hyperparameters; `num_compress_steps` is mutated to 2.

    Returns:
      The superclass `encode` result on the compressed inputs.
    """
    conv_out = common_layers.conv_block(
        inputs,
        hparams.hidden_size,
        [((1, 1), (3, 3))],
        first_relu=False,
        padding="SAME",
        force2d=True,
        name="small_image_conv")
    hparams.num_compress_steps = 2  # In-place hparams mutation.
    squeezed = transformer_vae.compress(
        conv_out, is_2d=True, hparams=hparams, name="convolutions")
    return super(TransformerSketch, self).encode(
        squeezed, target_space, hparams)