Example #1
  def test_four_layers_negative_padding(self):
    batch_size = 2
    input_size = 256

    images = tf.ones((batch_size, input_size, input_size, 3))
    with tf.contrib.framework.arg_scope(pix2pix.pix2pix_arg_scope()):
      with self.assertRaises(ValueError):
        pix2pix.pix2pix_discriminator(
            images, num_filters=[64, 128, 256, 512], padding=-1)
Example #2
    def test_four_layers_wrong_padding(self):
        batch_size = 2
        input_size = 256

        images = tf.ones((batch_size, input_size, input_size, 3))
        with contrib_framework.arg_scope(pix2pix.pix2pix_arg_scope()):
            with self.assertRaises(TypeError):
                pix2pix.pix2pix_discriminator(images,
                                              num_filters=[64, 128, 256, 512],
                                              padding=1.5)
Example #3
  def test_nonsquare_inputs_raise_exception(self):
    batch_size = 2
    height, width = 240, 320
    num_outputs = 4

    images = tf.ones((batch_size, height, width, 3))

    with self.assertRaises(ValueError):
      with tf.contrib.framework.arg_scope(pix2pix.pix2pix_arg_scope()):
        pix2pix.pix2pix_generator(
            images, num_outputs, upsample_method='nn_upsample_conv')
Example #4
  def test_output_size_conv2d_transpose(self):
    batch_size = 2
    height, width = 256, 256
    num_outputs = 4

    images = tf.ones((batch_size, height, width, 3))
    with tf.contrib.framework.arg_scope(pix2pix.pix2pix_arg_scope()):
      logits, _ = pix2pix.pix2pix_generator(
          images, num_outputs, blocks=self._reduced_default_blocks(),
          upsample_method='conv2d_transpose')

    with self.test_session() as session:
      session.run(tf.global_variables_initializer())
      np_outputs = session.run(logits)
      self.assertListEqual([batch_size, height, width, num_outputs],
                           list(np_outputs.shape))
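
Example #4 above calls a _reduced_default_blocks() helper that is not shown in these snippets. A minimal sketch of what such a test-class helper might look like, assuming the module exposes its default generator blocks through pix2pix._default_generator_blocks() and that a Block's two fields are a filter count and a decoder keep probability (both assumptions, not confirmed by the snippets):

  def _reduced_default_blocks(self):
    # Hypothetical helper: shrink each default block's filter count so the test
    # graph stays small and runs quickly. pix2pix._default_generator_blocks()
    # and the Block field names are assumptions, not taken from the snippets.
    return [pix2pix.Block(b.num_filters // 32, b.decoder_keep_prob)
            for b in pix2pix._default_generator_blocks()]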
Example #5
    def test_four_layers_no_padding(self):
        batch_size = 2
        input_size = 256

        output_size = self._layer_output_size(input_size, pad=0)
        output_size = self._layer_output_size(output_size, pad=0)
        output_size = self._layer_output_size(output_size, pad=0)
        output_size = self._layer_output_size(output_size, stride=1, pad=0)
        output_size = self._layer_output_size(output_size, stride=1, pad=0)

        images = tf.ones((batch_size, input_size, input_size, 3))
        with tf.contrib.framework.arg_scope(pix2pix.pix2pix_arg_scope()):
            logits, end_points = pix2pix.pix2pix_discriminator(
                images, num_filters=[64, 128, 256, 512], padding=0)
        self.assertListEqual([batch_size, output_size, output_size, 1],
                             logits.shape.as_list())
        self.assertListEqual([batch_size, output_size, output_size, 1],
                             end_points['predictions'].shape.as_list())
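
Examples #5 through #7 derive the expected spatial size with a _layer_output_size() helper that the snippets do not include. Assuming the discriminator layers are 4x4 convolutions with stride 2 and an explicit padding of 2 by default (an assumption inferred from the pad and stride overrides used above), the helper would be the usual convolution output-size formula; a hedged sketch:

  def _layer_output_size(self, input_size, kernel_size=4, stride=2, pad=2):
    # Hypothetical helper: standard output-size formula for a convolution with
    # explicit symmetric padding. The default kernel_size/stride/pad values are
    # assumptions chosen to match the overrides seen in the tests above.
    return (input_size + pad * 2 - kernel_size) // stride + 1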
Example #6
  def test_four_layers(self):
    batch_size = 2
    input_size = 256

    output_size = self._layer_output_size(input_size)
    output_size = self._layer_output_size(output_size)
    output_size = self._layer_output_size(output_size)
    output_size = self._layer_output_size(output_size, stride=1)
    output_size = self._layer_output_size(output_size, stride=1)

    images = tf.ones((batch_size, input_size, input_size, 3))
    with tf.contrib.framework.arg_scope(pix2pix.pix2pix_arg_scope()):
      logits, end_points = pix2pix.pix2pix_discriminator(
          images, num_filters=[64, 128, 256, 512])
    self.assertListEqual([batch_size, output_size, output_size, 1],
                         logits.shape.as_list())
    self.assertListEqual([batch_size, output_size, output_size, 1],
                         end_points['predictions'].shape.as_list())
Example #7
    def test_four_layers(self):
        batch_size = 2
        input_size = 256

        output_size = self._layer_output_size(input_size)
        output_size = self._layer_output_size(output_size)
        output_size = self._layer_output_size(output_size)
        output_size = self._layer_output_size(output_size, stride=1)
        output_size = self._layer_output_size(output_size, stride=1)

        images = tf.ones((batch_size, input_size, input_size, 3))
        with slim.arg_scope(pix2pix.pix2pix_arg_scope()):
            logits, end_points = pix2pix.pix2pix_discriminator(
                images, num_filters=[64, 128, 256, 512])
        self.assertListEqual([batch_size, output_size, output_size, 1],
                             logits.shape.as_list())
        self.assertListEqual([batch_size, output_size, output_size, 1],
                             end_points['predictions'].shape.as_list())
Example #8
    def test_block_number_dictates_number_of_layers(self):
        batch_size = 2
        height, width = 256, 256
        num_outputs = 4

        images = tf.ones((batch_size, height, width, 3))
        blocks = [
            pix2pix.Block(64, 0.5),
            pix2pix.Block(128, 0),
        ]
        with tf.contrib.framework.arg_scope(pix2pix.pix2pix_arg_scope()):
            _, end_points = pix2pix.pix2pix_generator(images, num_outputs,
                                                      blocks)

        num_encoder_layers = 0
        num_decoder_layers = 0
        for end_point in end_points:
            if end_point.startswith('encoder'):
                num_encoder_layers += 1
            elif end_point.startswith('decoder'):
                num_decoder_layers += 1

        self.assertEqual(num_encoder_layers, len(blocks))
        self.assertEqual(num_decoder_layers, len(blocks))
Example #9
  def test_block_number_dictates_number_of_layers(self):
    batch_size = 2
    height, width = 256, 256
    num_outputs = 4

    images = tf.ones((batch_size, height, width, 3))
    blocks = [
        pix2pix.Block(64, 0.5),
        pix2pix.Block(128, 0),
    ]
    with tf.contrib.framework.arg_scope(pix2pix.pix2pix_arg_scope()):
      _, end_points = pix2pix.pix2pix_generator(
          images, num_outputs, blocks)

    num_encoder_layers = 0
    num_decoder_layers = 0
    for end_point in end_points:
      if end_point.startswith('encoder'):
        num_encoder_layers += 1
      elif end_point.startswith('decoder'):
        num_decoder_layers += 1

    self.assertEqual(num_encoder_layers, len(blocks))
    self.assertEqual(num_decoder_layers, len(blocks))
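
Examples #8 and #9 build blocks with pix2pix.Block(64, 0.5), i.e. two positional fields. One plausible reading, not confirmed by these snippets, is that Block is a small namedtuple pairing an encoder filter count with a decoder dropout keep probability:

import collections

# Assumed definition of pix2pix.Block; the field names are guesses chosen to be
# consistent with how the tests construct it (filter count first, keep
# probability second).
Block = collections.namedtuple('Block', ['num_filters', 'decoder_keep_prob'])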