Example #1
File: networks.py  Project: ALISCIFP/models
def discriminator(image_batch, unused_conditioning=None):
  """A thin wrapper around the Pix2Pix discriminator to conform to TFGAN API."""
  with tf.contrib.framework.arg_scope(pix2pix.pix2pix_arg_scope()):
    logits_4d, _ = pix2pix.pix2pix_discriminator(
        image_batch, num_filters=[64, 128, 256, 512])
    logits_4d.shape.assert_has_rank(4)
  # The discriminator logits are 4D; flatten to 2D for the TFGAN API.
  logits_2d = tf.contrib.layers.flatten(logits_4d)

  return logits_2d
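
All of the examples below share the same TF 1.x preamble, which the excerpts omit. A minimal sketch, assuming the slim pix2pix module from tensorflow/models (the exact import path is an assumption and depends on the project layout):

import tensorflow as tf  # TF 1.x only: tf.contrib was removed in TF 2.x

# Assumed path: in tensorflow/models the module lives under research/slim/nets.
from nets import pix2pix

# Some examples below refer to `layers` directly.
layers = tf.contrib.layers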
Example #2
    def test_four_layers_negative_padding(self):
        batch_size = 2
        input_size = 256

        images = tf.ones((batch_size, input_size, input_size, 3))
        with tf.contrib.framework.arg_scope(pix2pix.pix2pix_arg_scope()):
            # A negative padding amount is invalid, so the discriminator
            # should reject it with a ValueError.
            with self.assertRaises(ValueError):
                pix2pix.pix2pix_discriminator(images,
                                              num_filters=[64, 128, 256, 512],
                                              padding=-1)
Example #3
def pix2pix_D(image_batch, unused_conditioning=None):
    """A thin wrapper around the Pix2Pix discriminator to conform to the TFGAN API."""
    with tf.contrib.framework.arg_scope(pix2pix.pix2pix_arg_scope()):
        logits_4d, _ = pix2pix.pix2pix_discriminator(
            image_batch, num_filters=[64, 128, 256, 512])
        logits_4d.shape.assert_has_rank(4)

    # The discriminator logits are 4D; flatten to 2D for TFGAN.
    net = layers.flatten(logits_4d)
    # net = layers.fully_connected(net, 1024, normalizer_fn=layers.layer_norm)
    net = layers.fully_connected(
        net, 1024, activation_fn=None)  # default: normalizer_fn=None
    return net
Example #4
    def test_output_size_conv2d_transpose(self):
        batch_size = 2
        height, width = 256, 256
        num_outputs = 4

        images = tf.ones((batch_size, height, width, 3))
        with tf.contrib.framework.arg_scope(pix2pix.pix2pix_arg_scope()):
            logits, _ = pix2pix.pix2pix_generator(
                images,
                num_outputs,
                blocks=self._reduced_default_blocks(),
                upsample_method='conv2d_transpose')

        with self.test_session() as session:
            session.run(tf.global_variables_initializer())
            np_outputs = session.run(logits)
            self.assertListEqual([batch_size, height, width, num_outputs],
                                 list(np_outputs.shape))
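
The helper `_reduced_default_blocks` is referenced but not shown in this excerpt. A plausible stand-in (an assumption, not the actual test utility) that keeps the encoder/decoder stack small so the test graph builds quickly:

    def _reduced_default_blocks(self):
        # Hypothetical helper: a short stack of small Blocks. The generator
        # halves and then restores the spatial size once per block, so the
        # output still matches the input size regardless of block count.
        return [pix2pix.Block(2, 0.5), pix2pix.Block(4, 0.5)]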
Example #5
    def test_four_layers_no_padding(self):
        batch_size = 2
        input_size = 256

        output_size = self._layer_output_size(input_size, pad=0)
        output_size = self._layer_output_size(output_size, pad=0)
        output_size = self._layer_output_size(output_size, pad=0)
        output_size = self._layer_output_size(output_size, stride=1, pad=0)
        output_size = self._layer_output_size(output_size, stride=1, pad=0)

        images = tf.ones((batch_size, input_size, input_size, 3))
        with tf.contrib.framework.arg_scope(pix2pix.pix2pix_arg_scope()):
            logits, end_points = pix2pix.pix2pix_discriminator(
                images, num_filters=[64, 128, 256, 512], padding=0)
        self.assertListEqual([batch_size, output_size, output_size, 1],
                             logits.shape.as_list())
        self.assertListEqual([batch_size, output_size, output_size, 1],
                             end_points['predictions'].shape.as_list())
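
Likewise, `_layer_output_size` is not defined in the excerpt. A plausible implementation (an assumption) using standard convolution output arithmetic for the discriminator's 4x4 kernels:

    def _layer_output_size(self, input_size, kernel_size=4, stride=2, pad=2):
        # floor((input + 2 * pad - kernel) / stride) + 1
        return (input_size + 2 * pad - kernel_size) // stride + 1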
Example #6
def pix2pix_G(input_images, is_training=True):
    """A thin wrapper around the Pix2Pix generator; rescales outputs to [0, 1]."""
    blocks = [
        pix2pix.Block(64, 0.5),
        pix2pix.Block(128, 0.5),
        pix2pix.Block(256, 0.5),
        pix2pix.Block(512, 0),
        pix2pix.Block(512, 0),
    ]

    with tf.contrib.framework.arg_scope(pix2pix.pix2pix_arg_scope()):
        output_images, _ = pix2pix.pix2pix_generator(
            input_images,
            num_outputs=1,
            blocks=blocks,
            upsample_method='nn_upsample_conv',
            is_training=is_training)

    return (tf.tanh(output_images) + 1) / 2  # map tanh's [-1, 1] range to [0, 1]
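
For context, a minimal sketch of wiring a generator/discriminator pair such as pix2pix_G and pix2pix_D above into TF-GAN (tf.contrib.gan in TF 1.x). The input tensors and their shapes are placeholder assumptions:

tfgan = tf.contrib.gan

# Placeholder inputs for illustration: 3-channel conditioning images and
# 1-channel targets, matching pix2pix_G's num_outputs=1.
input_images = tf.random_uniform([4, 256, 256, 3])
real_images = tf.random_uniform([4, 256, 256, 1])

gan_model = tfgan.gan_model(
    generator_fn=pix2pix_G,      # called as pix2pix_G(generator_inputs)
    discriminator_fn=pix2pix_D,  # called as pix2pix_D(data, conditioning)
    real_data=real_images,
    generator_inputs=input_images)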
Example #7
    def test_block_number_dictates_number_of_layers(self):
        batch_size = 2
        height, width = 256, 256
        num_outputs = 4

        images = tf.ones((batch_size, height, width, 3))
        blocks = [
            pix2pix.Block(64, 0.5),
            pix2pix.Block(128, 0),
        ]
        with tf.contrib.framework.arg_scope(pix2pix.pix2pix_arg_scope()):
            _, end_points = pix2pix.pix2pix_generator(images, num_outputs,
                                                      blocks)

        num_encoder_layers = 0
        num_decoder_layers = 0
        for end_point in end_points:
            if end_point.startswith('encoder'):
                num_encoder_layers += 1
            elif end_point.startswith('decoder'):
                num_decoder_layers += 1

        self.assertEqual(num_encoder_layers, len(blocks))
        self.assertEqual(num_decoder_layers, len(blocks))
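
As a usage note, the end points can also be inspected directly; a short sketch (the exact key names are inferred from the test above, so treat them as an assumption):

with tf.contrib.framework.arg_scope(pix2pix.pix2pix_arg_scope()):
    _, end_points = pix2pix.pix2pix_generator(
        tf.ones((2, 256, 256, 3)), num_outputs=4,
        blocks=[pix2pix.Block(64, 0.5), pix2pix.Block(128, 0)])
for name in sorted(end_points):
    # With two blocks, expect two 'encoder*' and two 'decoder*' entries.
    print(name, end_points[name].shape.as_list())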