Example 1
    def test_upsampling_is_correct(self, shape=(1, 2, 2, 3)):
        input_tensor = tf.random.normal(shape=shape)

        upsampled_tensor = keras_layers.TwoByTwoNearestNeighborUpSampling()(
            input_tensor)

        self.assertAllEqual(upsampled_tensor[:, ::2, ::2, :], input_tensor)
        self.assertAllEqual(upsampled_tensor[:, 1::2, ::2, :], input_tensor)
        self.assertAllEqual(upsampled_tensor[:, ::2, 1::2, :], input_tensor)
        self.assertAllEqual(upsampled_tensor[:, 1::2, 1::2, :], input_tensor)
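The TwoByTwoNearestNeighborUpSampling layer is project-specific. As a minimal sketch, assuming it matches its name, the stock Keras upsampling layer reproduces the behavior that the four strided-slice assertions check:

import tensorflow as tf

x = tf.random.normal((1, 2, 2, 3))
# Nearest-neighbor 2x upsampling: every 2x2 output block holds four copies of
# one input pixel, so all four strided slices recover the original tensor.
upsampled = tf.keras.layers.UpSampling2D(size=2, interpolation='nearest')(x)
assert upsampled.shape == (1, 4, 4, 3)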
Example 2
    def test_upsampling_blur_is_bilinear_upsampling(self, shape=(1, 4, 4, 3)):
        input_tensor = tf.random.normal(shape=shape)

        upsample_tensor = keras_layers.TwoByTwoNearestNeighborUpSampling()(
            input_tensor)
        blurred_tensor = keras_layers.Blur2D()(upsample_tensor)

        height, width = shape[1:3]
        bilinear_interpolated_tensor = tf.image.resize(
            input_tensor, (height * 2, width * 2),
            method=tf.image.ResizeMethod.BILINEAR)
        self.assertAllClose(blurred_tensor[:, 1:-1, 1:-1, :],
                            bilinear_interpolated_tensor[:, 1:-1, 1:-1, :])
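The test relies on a classic identity: 2x nearest-neighbor upsampling followed by a normalized [1, 2, 1] binomial blur equals bilinear interpolation away from the border, which is why both tensors are cropped with [1:-1]. A sketch in plain TensorFlow ops, assuming Blur2D applies this separable filter per channel:

import tensorflow as tf

def blur_2d(x):
    # Separable [1, 2, 1] / 4 binomial filter applied to each channel.
    k1 = tf.constant([1.0, 2.0, 1.0]) / 4.0
    k2 = tf.tensordot(k1, k1, axes=0)  # 3x3 kernel that sums to one.
    kernel = tf.tile(k2[:, :, None, None], [1, 1, x.shape[-1], 1])
    return tf.nn.depthwise_conv2d(
        x, kernel, strides=[1, 1, 1, 1], padding='SAME')

x = tf.random.normal((1, 4, 4, 3))
nearest = tf.keras.layers.UpSampling2D(size=2, interpolation='nearest')(x)
bilinear = tf.image.resize(x, (8, 8), method='bilinear')
# Interior pixels agree up to float rounding; only the one-pixel border
# differs because of the zero padding of the convolution.
delta = blur_2d(nearest)[:, 1:-1, 1:-1] - bilinear[:, 1:-1, 1:-1]
print(tf.reduce_max(tf.abs(delta)))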
Example 3
def _maybe_upsample_and_add_outputs(
        current_level_output,
        previous_level_output: Optional[tf.Tensor] = None,
        use_bilinear_upsampling: bool = True):
    """Upsamples and adds previous and current level or returns current level.

  Args:
    current_level_output: Output of the current level.
    previous_level_output: Output of the previous level. If None
      current_level_output will be returned.
    use_bilinear_upsampling: If true bilinear upsampling is used else nearest
      neighbor.

  Returns:
    The sum of the upsampled previous level and the current level.
  """
    if previous_level_output is None:
        return current_level_output
    else:
        upsampled_output = keras_layers.TwoByTwoNearestNeighborUpSampling()(
            previous_level_output)
        if use_bilinear_upsampling:
            upsampled_output = keras_layers.Blur2D()(upsampled_output)
        return current_level_output + upsampled_output
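A hypothetical usage of the helper, assuming the project's keras_layers module is importable; an 8x8 RGB output of the previous level is upsampled and merged into the 16x16 current level:

import tensorflow as tf

previous = tf.random.normal((1, 8, 8, 3))   # Coarser, previous level.
current = tf.random.normal((1, 16, 16, 3))  # Finer, current level.
merged = _maybe_upsample_and_add_outputs(current, previous)
assert merged.shape == (1, 16, 16, 3)
# Without a previous level the current output passes through unchanged.
assert _maybe_upsample_and_add_outputs(current) is current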
Example 4
def create_generator(
        latent_code_dimension: int = 128,
        upsampling_blocks_num_channels: Sequence[int] = (512, 256, 128, 64),
        relu_leakiness: float = 0.2,
        kernel_initializer: Optional[_KerasInitializer] = None,
        use_pixel_normalization: bool = True,
        use_batch_normalization: bool = False,
        generate_intermediate_outputs: bool = False,
        normalize_latent_code: bool = True,
        name: str = 'progressive_gan_generator') -> tf.keras.Model:
    """Creates a Keras model for the generator network architecture.

  This architecture is implemented according to the paper "Progressive growing
  of GANs for Improved Quality, Stability, and Variation"
  https://arxiv.org/abs/1710.10196
  The intermediate outputs are optionally provided for the architecture of
  "MSG-GAN: Multi-Scale Gradient GAN for Stable Image Synthesis"
  https://arxiv.org/abs/1903.06048

  Args:
    latent_code_dimension: The number of dimensions in the latent code.
    upsampling_blocks_num_channels: The number of channels for each upsampling
      block. This argument also determines how many upsampling blocks are added.
    relu_leakiness: Slope of the negative part of the leaky relu.
    kernel_initializer: Initializer of the kernel. If none TruncatedNormal is
      used.
    use_pixel_normalization: If pixel normalization layers should be inserted to
      the network.
    use_batch_normalization: If batch normalization layers should be inserted to
      the network.
    generate_intermediate_outputs: If true the model outputs a list of
      tf.Tensors with increasing resolution starting with the starting_size up
      to the final resolution output.
    normalize_latent_code: If true the latent code is normalized to unit length
      before feeding it to the network.
    name: The name of the Keras model.

  Returns:
     The created generator keras model object.
  """
    if kernel_initializer is None:
        kernel_initializer = tf.keras.initializers.TruncatedNormal(mean=0.0,
                                                                   stddev=1.0)

    input_tensor = tf.keras.Input(shape=(latent_code_dimension, ))
    if normalize_latent_code:
        maybe_normalized_input_tensor = keras_layers.PixelNormalization(
            axis=1)(input_tensor)
    else:
        maybe_normalized_input_tensor = input_tensor

    # The FanInScaled* layers presumably implement the equalized learning
    # rate trick of the progressive GAN paper: weights are initialized from a
    # unit normal and rescaled at runtime by the He constant sqrt(2 / fan_in)
    # times the explicit `multiplier`.
    tensor = keras_layers.FanInScaledDense(
        multiplier=math.sqrt(2.0) / 4.0,
        units=4 * 4 * latent_code_dimension,
        kernel_initializer=kernel_initializer)(maybe_normalized_input_tensor)
    tensor = tf.keras.layers.Reshape(
        target_shape=(4, 4, latent_code_dimension))(tensor)
    tensor = tf.keras.layers.LeakyReLU(alpha=relu_leakiness)(tensor)
    if use_batch_normalization:
        tensor = tf.keras.layers.BatchNormalization()(tensor)
    if use_pixel_normalization:
        tensor = keras_layers.PixelNormalization(axis=3)(tensor)
    tensor = keras_layers.FanInScaledConv2D(
        filters=upsampling_blocks_num_channels[0],
        kernel_size=3,
        strides=1,
        padding='same',
        kernel_initializer=kernel_initializer)(tensor)
    tensor = tf.keras.layers.LeakyReLU(alpha=relu_leakiness)(tensor)
    if use_batch_normalization:
        tensor = tf.keras.layers.BatchNormalization()(tensor)
    if use_pixel_normalization:
        tensor = keras_layers.PixelNormalization(axis=3)(tensor)

    outputs = []
    for index, channels in enumerate(upsampling_blocks_num_channels):
        if generate_intermediate_outputs:
            outputs.append(
                to_rgb(input_tensor=tensor,
                       kernel_initializer=kernel_initializer,
                       name='side_output_%d_conv' % index))
        tensor = keras_layers.TwoByTwoNearestNeighborUpSampling()(tensor)

        for _ in range(2):
            tensor = keras_layers.FanInScaledConv2D(
                filters=channels,
                kernel_size=3,
                strides=1,
                padding='same',
                kernel_initializer=kernel_initializer)(tensor)
            tensor = tf.keras.layers.LeakyReLU(alpha=relu_leakiness)(tensor)
            if use_batch_normalization:
                tensor = tf.keras.layers.BatchNormalization()(tensor)
            if use_pixel_normalization:
                tensor = keras_layers.PixelNormalization(axis=3)(tensor)

    tensor = to_rgb(input_tensor=tensor,
                    kernel_initializer=kernel_initializer,
                    name='final_output')
    if generate_intermediate_outputs:
        outputs.append(tensor)
        return tf.keras.Model(inputs=input_tensor, outputs=outputs, name=name)
    else:
        return tf.keras.Model(inputs=input_tensor, outputs=tensor, name=name)
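A minimal smoke test, assuming the module's dependencies (keras_layers, to_rgb) are available and that to_rgb emits a 3-channel image as its name suggests; the four upsampling blocks grow the initial 4x4 grid to 64x64:

import tensorflow as tf

generator = create_generator(latent_code_dimension=128)
latent_code = tf.random.normal((2, 128))
images = generator(latent_code)
# 4x4 doubled by each of the four upsampling blocks: 4 * 2**4 = 64.
assert images.shape == (2, 64, 64, 3)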
Example 5
def create_synthesis_network(latent_code_dimension: int = 128,
                             upsampling_blocks_num_channels: Sequence[int] = (
                                 512, 256, 128, 64),
                             relu_leakiness: float = 0.2,
                             generate_intermediate_outputs: bool = False,
                             use_bilinear_upsampling: bool = True,
                             name: str = 'synthesis') -> tf.keras.Model:
  """Creates the synthesis network using the functional API.

  The function creates the synthesis network as defined in "A Style-Based
  Generator Architecture for Generative Adversarial Networks"
  https://arxiv.org/abs/1812.04948 using the Keras functional API.

  Args:
    latent_code_dimension: The number of dimensions in the latent code.
    upsampling_blocks_num_channels: The number of channels for each upsampling
      block. This argument also determines how many upsampling blocks are added.
    relu_leakiness: Slope of the negative part of the leaky relu.
    generate_intermediate_outputs: If true the model outputs a list of
      tf.Tensors with increasing resolution starting with the starting_size up
      to the final resolution output.
    use_bilinear_upsampling: If true bilinear upsampling is used.
    name: The name of the Keras model.

  Returns:
    The synthesis network.
  """

  kernel_initializer = tf.keras.initializers.TruncatedNormal(
      mean=0.0, stddev=1.0)

  mapped_latent_code_input = tf.keras.Input(shape=(latent_code_dimension,))

  tensor = keras_layers.LearnedConstant()(mapped_latent_code_input)
  tensor = keras_layers.Noise()(tensor)
  tensor = tf.keras.layers.LeakyReLU(alpha=relu_leakiness)(tensor)
  tensor = apply_style_with_adain(
      mapped_latent_code=mapped_latent_code_input, input_tensor=tensor)
  tensor = keras_layers.FanInScaledConv2D(
      filters=upsampling_blocks_num_channels[0],
      kernel_size=3,
      strides=1,
      padding='same',
      kernel_initializer=kernel_initializer)(
          tensor)
  tensor = keras_layers.Noise()(tensor)
  tensor = tf.keras.layers.LeakyReLU(alpha=relu_leakiness)(tensor)
  tensor = apply_style_with_adain(
      mapped_latent_code=mapped_latent_code_input, input_tensor=tensor)

  outputs = []
  for index, channels in enumerate(upsampling_blocks_num_channels):
    if generate_intermediate_outputs:
      outputs.append(
          architectures_progressive_gan.to_rgb(
              input_tensor=tensor,
              kernel_initializer=kernel_initializer,
              name='side_output_%d_conv' % index))
    tensor = keras_layers.TwoByTwoNearestNeighborUpSampling()(tensor)
    if use_bilinear_upsampling:
      tensor = keras_layers.Blur2D()(tensor)
    for _ in range(2):
      tensor = keras_layers.FanInScaledConv2D(
          filters=channels,
          kernel_size=3,
          strides=1,
          padding='same',
          kernel_initializer=kernel_initializer)(
              tensor)
      tensor = keras_layers.Noise()(tensor)
      tensor = tf.keras.layers.LeakyReLU(alpha=relu_leakiness)(tensor)
      tensor = apply_style_with_adain(
          mapped_latent_code=mapped_latent_code_input, input_tensor=tensor)

  tensor = architectures_progressive_gan.to_rgb(
      input_tensor=tensor,
      kernel_initializer=kernel_initializer,
      name='final_output')
  if generate_intermediate_outputs:
    outputs.append(tensor)
    return tf.keras.Model(
        inputs=mapped_latent_code_input, outputs=outputs, name=name)
  else:
    return tf.keras.Model(
        inputs=mapped_latent_code_input, outputs=tensor, name=name)
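The input of this model is the mapped latent code, i.e. the output of a separate mapping network as described in the StyleGAN paper. A usage sketch, assuming LearnedConstant emits the customary 4x4 learned constant and to_rgb a 3-channel image:

import tensorflow as tf

synthesis = create_synthesis_network(latent_code_dimension=128)
mapped_code = tf.random.normal((2, 128))  # Output of a mapping network.
image = synthesis(mapped_code)
assert image.shape == (2, 64, 64, 3)  # 4x4 grown by four 2x blocks.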
Example 6
def create_synthesis_network(
        latent_code_dimension: int = 128,
        upsampling_blocks_num_channels: Sequence[int] = (512, 256, 128, 64),
        relu_leakiness: float = 0.2,
        use_bilinear_upsampling: bool = True,
        use_noise_inputs: bool = False,
        name: str = 'synthesis') -> tf.keras.Model:
    """Creates the synthesis network using the functional API.

  The function creates the synthesis network as defined in "Analyzing and
  Improving the Image Quality of StyleGAN" https://arxiv.org/pdf/1912.04958.pdf
  using the Keras functional API.

  Args:
    latent_code_dimension: The number of dimensions in the latent code.
    upsampling_blocks_num_channels: The number of channels for each upsampling
      block. This argument also determines how many upsampling blocks are added.
    relu_leakiness: Slope of the negative part of the leaky relu.
    use_bilinear_upsampling: If true bilinear upsampling is used.
    use_noise_inputs: If the model takes noise as input, if false noise is
      sampled randomly.
    name: The name of the Keras model.

  Returns:
    The synthesis network.
  """
    kernel_initializer = tf.keras.initializers.TruncatedNormal(mean=0.0,
                                                               stddev=1.0)
    mapped_latent_code_input = tf.keras.Input(shape=(latent_code_dimension, ))

    if use_noise_inputs:
        noise_inputs = _create_noise_inputs(
            len(upsampling_blocks_num_channels))

    tensor = keras_layers.LearnedConstant()(mapped_latent_code_input)
    tensor = keras_layers.DemodulatedConvolution(
        filters=upsampling_blocks_num_channels[0], kernel_size=3)(
            (tensor, mapped_latent_code_input))
    if use_noise_inputs:
        tensor = keras_layers.Noise()((tensor, noise_inputs[0]))
    else:
        tensor = keras_layers.Noise()(tensor)
    tensor = tf.keras.layers.LeakyReLU(alpha=relu_leakiness)(tensor)

    output = None
    for index, channels in enumerate(upsampling_blocks_num_channels):
        output = _maybe_upsample_and_add_outputs(
            architectures_progressive_gan.to_rgb(
                input_tensor=tensor,
                kernel_initializer=kernel_initializer,
                name='side_output_%d_conv' % index),
            output,
            use_bilinear_upsampling=use_bilinear_upsampling)
        tensor = keras_layers.TwoByTwoNearestNeighborUpSampling()(tensor)
        if use_bilinear_upsampling:
            tensor = keras_layers.Blur2D()(tensor)
        for inner_index in range(2):
            tensor = keras_layers.DemodulatedConvolution(
                filters=channels, kernel_size=3)(
                    (tensor, mapped_latent_code_input))
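            # Noise input 0 feeds the initial block; every upsampling block
            # then consumes two further noise inputs, one per convolution.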
            if use_noise_inputs:
                noise_index = 2 * index + inner_index + 1
                tensor = keras_layers.Noise()(
                    (tensor, noise_inputs[noise_index]))
            else:
                tensor = keras_layers.Noise()(tensor)
            tensor = tf.keras.layers.LeakyReLU(alpha=relu_leakiness)(tensor)

    output = _maybe_upsample_and_add_outputs(
        architectures_progressive_gan.to_rgb(
            input_tensor=tensor, kernel_initializer=kernel_initializer),
        output,
        use_bilinear_upsampling=use_bilinear_upsampling)

    if use_noise_inputs:
        inputs = [mapped_latent_code_input] + noise_inputs
    else:
        inputs = mapped_latent_code_input

    return tf.keras.Model(inputs=inputs, outputs=output, name=name)
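Following the index arithmetic above, a model built with use_noise_inputs=True expects 2 * len(upsampling_blocks_num_channels) + 1 noise tensors next to the mapped latent code; their shapes depend on _create_noise_inputs, which is not shown here. Without noise inputs, usage mirrors the StyleGAN variant:

import tensorflow as tf

synthesis = create_synthesis_network(use_noise_inputs=False)
mapped_code = tf.random.normal((2, 128))
image = synthesis(mapped_code)  # Noise is sampled internally by each Noise layer.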