Example #1
    def test_four_layers_negative_padding(self):
        batch_size = 2
        input_size = 256

        images = tf.ones((batch_size, input_size, input_size, 3))
        # A negative padding value is not supported and should raise a ValueError.
        with tf.contrib.framework.arg_scope(pix2pix.pix2pix_arg_scope()):
            with self.assertRaises(ValueError):
                pix2pix.pix2pix_discriminator(images,
                                              num_filters=[64, 128, 256, 512],
                                              padding=-1)
Example #2
def discriminator(image_batch, unused_conditioning=None):
    """A thin wrapper around the Pix2Pix discriminator to conform to TFGAN API."""
    with tf.contrib.framework.arg_scope(pix2pix.pix2pix_arg_scope()):
        logits_4d, _ = pix2pix.pix2pix_discriminator(
            image_batch, num_filters=[64, 128, 256, 512])
        logits_4d.shape.assert_has_rank(4)
    # The discriminator logits are 4-D; flatten to 2-D for TFGAN.
    logits_2d = tf.contrib.layers.flatten(logits_4d)

    return logits_2d
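
For context, a wrapper like this is what TFGAN expects as a discriminator_fn. The sketch below is only a hedged illustration of how it might be wired into tf.contrib.gan.gan_model (TF 1.x); the generator function, the noise input, and the real_images batch are hypothetical placeholders, not part of the original example.

# Hedged sketch: plugging the wrapper above into TFGAN (tf.contrib.gan, TF 1.x).
# `generator`, `noise`, and `real_images` are hypothetical stand-ins.
import tensorflow as tf
tfgan = tf.contrib.gan

def generator(noise, unused_conditioning=None):
    # Toy placeholder generator producing 256x256 RGB images from a noise vector.
    net = tf.layers.dense(noise, 256 * 256 * 3, activation=tf.nn.tanh)
    return tf.reshape(net, [-1, 256, 256, 3])

noise = tf.random_normal([2, 64])
real_images = tf.ones([2, 256, 256, 3])  # stand-in for a real input pipeline

gan_model = tfgan.gan_model(
    generator_fn=generator,
    discriminator_fn=discriminator,  # the wrapper defined above
    real_data=real_images,
    generator_inputs=noise)
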
Example #3
    def test_four_layers_no_padding(self):
        batch_size = 2
        input_size = 256

        # Expected spatial size after the discriminator's conv stack with no padding:
        # three stride-2 layers, then two stride-1 layers (last filter layer + logits).
        output_size = self._layer_output_size(input_size, pad=0)
        output_size = self._layer_output_size(output_size, pad=0)
        output_size = self._layer_output_size(output_size, pad=0)
        output_size = self._layer_output_size(output_size, stride=1, pad=0)
        output_size = self._layer_output_size(output_size, stride=1, pad=0)

        images = tf.ones((batch_size, input_size, input_size, 3))
        with tf.contrib.framework.arg_scope(pix2pix.pix2pix_arg_scope()):
            logits, end_points = pix2pix.pix2pix_discriminator(
                images, num_filters=[64, 128, 256, 512], padding=0)
        self.assertListEqual([batch_size, output_size, output_size, 1],
                             logits.shape.as_list())
        self.assertListEqual([batch_size, output_size, output_size, 1],
                             end_points['predictions'].shape.as_list())
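
The _layer_output_size helper used above is not shown in this excerpt. A minimal sketch of what it likely computes, assuming the 4x4 kernels the pix2pix discriminator uses, is the standard convolution output-size formula:

    # Hedged sketch of the test helper referenced above (not part of this excerpt).
    # Assumes 4x4 convolution kernels, as in the pix2pix discriminator.
    def _layer_output_size(self, input_size, kernel_size=4, stride=2, pad=2):
        # Standard convolution output-size formula.
        return (input_size + pad * 2 - kernel_size) // stride + 1
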
Example #4
def discriminator(image_batch, unused_conditioning=None, depth=64):
    """A thin wrapper around the pix2pix discriminator to conform to TFGAN API."""
    logits, _ = pix2pix.pix2pix_discriminator(
        image_batch, num_filters=[depth, 2 * depth, 4 * depth, 8 * depth])
    return tf.layers.flatten(logits)