Example #1
  def test_conv3d(self):
    with tf.Graph().as_default():
      x = 10.0 * tf.random_uniform(shape=(16, 4, 5, 5, 32))

      with arg_scope([glow_ops.actnorm], init=True):
        conv3d = glow_ops.conv(
            "conv3d", x, output_channels=64, apply_actnorm=True)
        conv3d_zeros = glow_ops.conv(
            "conv3d_zeros", x, output_channels=64, apply_actnorm=False,
            conv_init="zeros")

      with tf.Session() as session:
        session.run(tf.global_variables_initializer())

        # Test that with apply_actnorm=True the first minibatch has
        # zero mean and unit variance per channel at every time step.
        conv3d_np, conv3d_zeros_np = session.run([conv3d, conv3d_zeros])
        self.assertEqual(conv3d_np.shape, (16, 4, 5, 5, 64))
        for i in range(4):
          curr_step = conv3d_np[:, i, :, :, :]
          mean = np.mean(curr_step, axis=(0, 1, 2))
          var = np.var(curr_step, axis=(0, 1, 2))
          self.assertTrue(np.allclose(mean, 0.0, atol=1e-5))
          self.assertTrue(np.allclose(var, 1.0, atol=1e-5))

        # With conv_init="zeros", the first forward pass outputs all zeros.
        self.assertTrue(np.allclose(conv3d_zeros_np, 0.0))
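
The mean/variance assertions above follow from actnorm's data-dependent
initialization: on the first minibatch, the per-channel bias and scale are
set from the batch statistics. A minimal NumPy sketch of the idea for an
NHWC tensor (illustrative only, not the glow_ops implementation):

  import numpy as np

  def actnorm_first_batch(x, eps=1e-6):
    # Per-channel statistics over the batch and spatial axes.
    mean = x.mean(axis=(0, 1, 2), keepdims=True)
    std = x.std(axis=(0, 1, 2), keepdims=True)
    # bias = -mean and scale = 1 / std normalize the first minibatch
    # to zero mean and unit variance in every channel.
    return (x - mean) / (std + eps)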
Example #2
  def test_conv2d(self):
    with tf.Graph().as_default():
      x = 10.0 * tf.random_uniform(shape=(16, 5, 5, 32))

      with arg_scope([glow_ops.actnorm], init=True):
        actnorm_conv2d = glow_ops.conv(
            "actnorm_conv2d", x, output_channels=64, apply_actnorm=True)
        actnorm_zeros2d = glow_ops.conv(
            "actnorm_zeros2d", x, output_channels=64, apply_actnorm=False)

      with tf.Session() as session:
        session.run(tf.global_variables_initializer())

        # Test that with apply_actnorm=True the first minibatch has
        # zero mean and unit variance per channel.
        actnorm_np, zeros_np = session.run([actnorm_conv2d, actnorm_zeros2d])
        self.assertEqual(actnorm_np.shape, (16, 5, 5, 64))
        mean = np.mean(actnorm_np, axis=(0, 1, 2))
        var = np.var(actnorm_np, axis=(0, 1, 2))
        self.assertTrue(np.allclose(mean, 0.0, atol=1e-5))
        self.assertTrue(np.allclose(var, 1.0, atol=1e-5))

        # Test the output shape when apply_actnorm is set to False.
        self.assertEqual(zeros_np.shape, (16, 5, 5, 64))
Example #3
  def get_squeeze_prior(self):
    """Model the prior over z_{t} as a function of X_{t-1}.

    Returns:
      dist: instance of tfp.distributions.Normal parameterizing the prior.

    Raises:
      ValueError: if input_height is not equal to input_width, is odd,
        or is smaller than the prior height.
    """
    _, prior_height, _, prior_channels = self.z_top_shape
    _, input_height, input_width, _ = common_layers.shape_list(self.input_frame)

    if input_height != input_width:
      raise ValueError("input height should be equal to input width")
    if input_height % 2 != 0:
      raise ValueError("input height should be even")
    if input_height < prior_height:
      raise ValueError("input should be larger than the prior.")

    # mean, log_scale = NN(X_{t-1}):
    # reduce the spatial dimensions by a factor of "squeeze_factor"
    # via squeezing, then convolve with a stride of 2.
    squeeze_factor = input_height // (2 * prior_height)
    x = glow_ops.squeeze(
        "prior_squeeze", self.input_frame, factor=squeeze_factor, reverse=False)
    mean_and_log_std = glow_ops.conv(
        "prior_conv", x, 2*prior_channels, stride=[2, 2], apply_actnorm=False,
        conv_init="zeros")
    mean, log_scale = tf.split(mean_and_log_std, num_or_size_splits=2, axis=-1)
    return tfp.distributions.Normal(mean, tf.exp(log_scale))
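
Shape bookkeeping: squeezing by squeeze_factor = input_height // (2 * prior_height)
brings the frame to spatial size 2 * prior_height, and the stride-2 convolution
halves that to prior_height, with 2 * prior_channels output channels split evenly
into mean and log_scale. A hedged usage sketch (model and z_top are hypothetical
names, assuming z_top matches the prior's event shape):

  # Score a latent under the learned prior; sum over the spatial and
  # channel axes for a per-example log-likelihood.
  prior = model.get_squeeze_prior()
  log_prob = prior.log_prob(z_top)
  objective = tf.reduce_sum(log_prob, axis=[1, 2, 3])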