Example #1
 def testHalfCauchyKLDivergence(self):
   shape = (3,)
   regularizer = ed.regularizers.get('half_cauchy_kl_divergence')
   variational_posterior = ed.Independent(
       ed.LogNormal(loc=tf.zeros(shape), scale=1.).distribution,
       reinterpreted_batch_ndims=1)
   kl = regularizer(variational_posterior)
   kl_value = self.evaluate(kl)
   self.assertGreaterEqual(kl_value, 0.)
Example #2
 def testHalfCauchyKLDivergence(self):
   shape = (3,)
   regularizer = ed.regularizers.get('half_cauchy_kl_divergence')
   variational_posterior = ed.Independent(
       ed.LogNormal(loc=tf.zeros(shape), scale=1.).distribution,
       reinterpreted_batch_ndims=1)
   kl = regularizer(variational_posterior)
   # KL uses a single-sample estimate, which is not necessarily >0. We only
   # check shape.
   self.assertEqual(kl.shape, ())
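The comment in this version explains why the assertion differs from Example #1: the regularizer's value is a one-sample Monte Carlo estimate of KL(q || p), which is unbiased but can come out negative on any particular draw. A minimal standalone sketch of such an estimate, written directly against TensorFlow Probability and assuming a HalfCauchy(0., 1.) prior (the prior parameters here are an assumption, not necessarily the defaults used by ed.regularizers), looks like this:

import tensorflow as tf
import tensorflow_probability as tfp

tfd = tfp.distributions

# Variational posterior q: independent LogNormals over a weight of shape (3,).
posterior = tfd.Independent(
    tfd.LogNormal(loc=tf.zeros(3), scale=1.),
    reinterpreted_batch_ndims=1)

# Assumed prior p: independent HalfCauchy(0., 1.) distributions.
prior = tfd.Independent(
    tfd.HalfCauchy(loc=tf.zeros(3), scale=tf.ones(3)),
    reinterpreted_batch_ndims=1)

# One-sample Monte Carlo estimate of KL(q || p) = E_q[log q(w) - log p(w)].
# Its expectation is non-negative, but a single draw can be negative, which is
# why the test above only checks that the result is a scalar.
w = posterior.sample()
kl_estimate = posterior.log_prob(w) - prior.log_prob(w)
print(kl_estimate.shape)  # ()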
Example #3
  def testLogNormalKLDivergence(self):
    shape = (3,)
    regularizer = ed.regularizers.get('log_normal_kl_divergence')
    variational_posterior = ed.Independent(
        ed.LogNormal(loc=tf.zeros(shape), scale=1.).distribution,
        reinterpreted_batch_ndims=1)
    kl = regularizer(variational_posterior)
    self.assertGreaterEqual(kl, 0.)

    dataset_size = 100
    scale_factor = 1. / dataset_size
    regularizer = ed.regularizers.LogNormalKLDivergence(
        scale_factor=scale_factor)
    scaled_kl = regularizer(variational_posterior)
    self.assertEqual(scale_factor * kl, scaled_kl)
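Unlike the HalfCauchy case, the KL term here can be computed in closed form, which is why this test asserts non-negativity directly: because exp() is a fixed invertible transform, KL(LogNormal(m, s) || LogNormal(m0, s0)) equals KL(Normal(m, s) || Normal(m0, s0)). The sketch below illustrates that identity and the scale_factor behavior using plain TensorFlow Probability; the standard LogNormal prior is an assumption and may not match the defaults of ed.regularizers.LogNormalKLDivergence.

import tensorflow as tf
import tensorflow_probability as tfp

tfd = tfp.distributions

# Arbitrary posterior parameters for the underlying Normal (log-space) factors.
loc = tf.constant([0.3, -0.1, 0.2])
scale = tf.constant([0.5, 0.5, 0.5])

# KL(LogNormal(loc, scale) || LogNormal(0., 1.)) reduces to the Normal-Normal
# KL, which TFP evaluates analytically and which is always >= 0.
kl = tf.reduce_sum(
    tfd.kl_divergence(tfd.Normal(loc, scale), tfd.Normal(0., 1.)))

# Scaling by 1/dataset_size, as in the test above, just multiplies the
# analytic KL by a constant.
dataset_size = 100
scaled_kl = kl / dataset_size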