  def testKLDivergenceIsZero(self):
    """Tests that KL divergence of identical distributions is zero."""
    with self.session() as sess:
      mean = tf.random.normal((3, 3, 3))
      std = tf.random.normal((3, 3, 3))
      kl_divergence = dynamics.KLDivergence()([mean, std, mean, std])
      result = sess.run([kl_divergence])[0]
    np.testing.assert_array_equal(result, result * 0.0)
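A minimal NumPy cross-check of the zero-KL property, assuming the layer implements the standard closed-form KL between diagonal Gaussians; diag_gaussian_kl below is a hypothetical helper for illustration only, not part of dynamics.

import numpy as np

def diag_gaussian_kl(mean_a, std_a, mean_b, std_b):
  # Hypothetical helper: closed-form KL(N(mean_a, std_a^2) || N(mean_b, std_b^2)),
  # summed over all dimensions.
  var_a, var_b = std_a ** 2, std_b ** 2
  return np.sum(
      np.log(std_b / std_a) + (var_a + (mean_a - mean_b) ** 2) / (2.0 * var_b) - 0.5)

mean = np.random.normal(size=(3, 3, 3))
std = np.abs(np.random.normal(size=(3, 3, 3))) + 1e-3  # Keep stds positive for the closed form.
np.testing.assert_allclose(diag_gaussian_kl(mean, std, mean, std), 0.0, atol=1e-6)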
Example 2
  def testKLDivergenceAnnealing(self):
    """Tests that the KL divergence is annealed linearly over kl_annealing_steps."""
    inputs = tf.keras.Input(1)
    outputs = dynamics.KLDivergence(kl_annealing_steps=4)([
        inputs, inputs,
        tf.keras.layers.Lambda(lambda x: x + 1.0)(inputs), inputs
    ])
    model = tf.keras.Model(inputs=inputs, outputs=outputs)
    model.compile('sgd', 'mse')
    obtained_kl = [model.predict(x=[1], steps=1)]  # Should be zero before fit.
    for _ in range(5):
      model.fit(x=[1], y=[1], epochs=1, steps_per_epoch=1)
      obtained_kl.append(model.predict(x=[1], steps=1))
    obtained_kl = np.array(obtained_kl).ravel()
    np.testing.assert_array_almost_equal(
        obtained_kl, [0, 0.125, 0.25, 0.375, 0.5, 0.5])
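The expected values above follow from the annealing arithmetic; a short sketch of that calculation, assuming the KL weight ramps linearly from 0 to 1 over kl_annealing_steps optimizer steps and that the unweighted KL between N(1, 1) and N(2, 1) implied by the inputs here is 0.5.

import numpy as np

base_kl = 0.5 * (2.0 - 1.0) ** 2  # KL between unit-variance Gaussians with means 1 and 2.
kl_annealing_steps = 4
# Assumed schedule: weight after k optimizer steps is k / kl_annealing_steps, clipped at 1.
# One predict before fitting plus five fits gives six values.
expected = [min(k / kl_annealing_steps, 1.0) * base_kl for k in range(6)]
np.testing.assert_array_almost_equal(expected, [0, 0.125, 0.25, 0.375, 0.5, 0.5])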
Example 3
  def testNonzeroKLDivergence(self):
    """Test that KL divergence layer provides correct result."""
    mu = 2.0
    sigma = 2.0
    n = 3

    # https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence#Multivariate_normal_distributions
    result_np = 0.5 * (sigma ** 2 + mu ** 2 - np.log(sigma ** 2) - 1) * n

    with self.session() as sess:
      kl_divergence = dynamics.KLDivergence()(
          [tf.zeros(n), tf.ones(n), tf.zeros(n) + mu, tf.ones(n) * sigma])
      result_tf = sess.run(kl_divergence)

    np.testing.assert_almost_equal(result_tf, result_np, decimal=4)
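For reference, result_np can also be written in the log(1 / sigma) form of the Wikipedia expression; a small sketch, assuming only NumPy, showing the two forms agree. It verifies the arithmetic only, not the layer's argument ordering.

import numpy as np

mu, sigma, n = 2.0, 2.0, 3
# KL(N(mu, sigma^2) || N(0, 1)) per dimension, summed over n independent dimensions.
result_general = n * (np.log(1.0 / sigma) + (sigma ** 2 + mu ** 2) / 2.0 - 0.5)
result_closed = 0.5 * (sigma ** 2 + mu ** 2 - np.log(sigma ** 2) - 1) * n
np.testing.assert_almost_equal(result_general, result_closed, decimal=6)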