  def testKLBatchBroadcast(self):
    batch_shape = [2]
    event_shape = [3]
    loc_a, scale_a = self._random_loc_and_scale(batch_shape, event_shape)
    # No batch shape.
    loc_b, scale_b = self._random_loc_and_scale([], event_shape)
    mvn_a = tfd.MultivariateNormalLinearOperator(
        loc=loc_a, scale=scale_a, validate_args=True)
    mvn_b = tfd.MultivariateNormalLinearOperator(
        loc=loc_b, scale=scale_b, validate_args=True)

    kl = tfd.kl_divergence(mvn_a, mvn_b)
    self.assertEqual(batch_shape, kl.shape)

    kl_v = self.evaluate(kl)
    expected_kl_0 = self._compute_non_batch_kl(
        loc_a[0, :],
        self.evaluate(scale_a.to_dense())[0, :, :], loc_b,
        self.evaluate(scale_b.to_dense()))
    expected_kl_1 = self._compute_non_batch_kl(
        loc_a[1, :],
        self.evaluate(scale_a.to_dense())[1, :, :], loc_b,
        self.evaluate(scale_b.to_dense()))
    self.assertAllClose(expected_kl_0, kl_v[0])
    self.assertAllClose(expected_kl_1, kl_v[1])
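  # `_compute_non_batch_kl` is not defined in this excerpt. A minimal NumPy
  # sketch of what it is assumed to do, given the call sites above: take NumPy
  # means and dense scale matrices and return the closed-form
  # KL(N(loc_a, scale_a @ scale_a.T) || N(loc_b, scale_b @ scale_b.T)).
  def _compute_non_batch_kl(self, loc_a, scale_a, loc_b, scale_b):
    cov_a = np.matmul(scale_a, scale_a.T)
    cov_b = np.matmul(scale_b, scale_b.T)
    cov_b_inv = np.linalg.inv(cov_b)
    diff = loc_b - loc_a
    k = loc_a.shape[-1]
    # 0.5 * [tr(cov_b^-1 cov_a) + diff^T cov_b^-1 diff - k + ln(|cov_b|/|cov_a|)]
    return 0.5 * (
        np.trace(np.matmul(cov_b_inv, cov_a))
        + np.dot(diff, np.dot(cov_b_inv, diff))
        - k
        + np.log(np.linalg.det(cov_b) / np.linalg.det(cov_a)))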
  def testNamePropertyIsSetByInitArg(self):
    loc = [1., 2.]
    scale = tf.linalg.LinearOperatorIdentity(2)
    mvn = tfd.MultivariateNormalLinearOperator(
        loc, scale, name='Billy', validate_args=True)
    self.assertStartsWith(mvn.name, 'Billy')
  def testVariableLocation(self):
    loc = tf.Variable([1., 1.])
    scale = tf.linalg.LinearOperatorLowerTriangular(
        tf.eye(2), is_non_singular=True)
    d = tfd.MultivariateNormalLinearOperator(loc, scale, validate_args=True)
    self.evaluate(loc.initializer)
    with tf.GradientTape() as tape:
      lp = d.log_prob([0., 0.])
    self.assertIsNotNone(tape.gradient(lp, loc))
  def testMeanAndCovariance(self):
    loc, scale = self._random_loc_and_scale(
        batch_shape=[3, 4], event_shape=[5])
    mvn = tfd.MultivariateNormalLinearOperator(loc, scale, validate_args=True)

    self.assertAllEqual(self.evaluate(mvn.mean()), loc)
    self.assertAllClose(
        self.evaluate(mvn.covariance()),
        self.evaluate(scale.matmul(scale, adjoint_arg=True).to_dense()))
  def testMeanAndCovariance(self):
    loc, scale = self._random_loc_and_scale(
        batch_shape=[3, 4], event_shape=[5])
    mvn = tfd.MultivariateNormalLinearOperator(loc, scale)

    self.assertAllEqual(self.evaluate(mvn.mean()), loc)
    self.assertAllClose(
        self.evaluate(mvn.covariance()),
        np.matmul(
            self.evaluate(scale.to_dense()),
            np.transpose(self.evaluate(scale.to_dense()), [0, 1, 3, 2])))
  def testShapes(self):
    loc = self.rng.rand(3, 5, 2)
    scale = tf.linalg.LinearOperatorLowerTriangular(
        self._random_tril_matrix([3, 5, 2, 2]), is_non_singular=True)

    mvn = tfd.MultivariateNormalLinearOperator(loc, scale, validate_args=True)

    # Shapes known at graph construction time.
    self.assertEqual((2,), tuple(tensorshape_util.as_list(mvn.event_shape)))
    self.assertEqual((3, 5), tuple(tensorshape_util.as_list(mvn.batch_shape)))

    # Shapes known at runtime.
    self.assertEqual((2,), tuple(self.evaluate(mvn.event_shape_tensor())))
    self.assertEqual((3, 5), tuple(self.evaluate(mvn.batch_shape_tensor())))
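  # `self.rng`, `_random_tril_matrix`, and `_random_loc_and_scale` are also not
  # shown in this excerpt. A minimal sketch, assuming `self.rng` is a seeded
  # `np.random.RandomState` and that shifting the diagonal by one is enough to
  # keep the operator non-singular:
  def _random_tril_matrix(self, shape):
    # Random lower-triangular matrix with a strictly positive diagonal.
    mat = np.tril(self.rng.rand(*shape))
    return mat + np.eye(shape[-1], dtype=mat.dtype)

  def _random_loc_and_scale(self, batch_shape, event_shape):
    # Random mean vector plus a non-singular lower-triangular scale operator.
    loc = self.rng.randn(*(list(batch_shape) + list(event_shape)))
    scale = tf.linalg.LinearOperatorLowerTriangular(
        self._random_tril_matrix(list(batch_shape) + list(event_shape) * 2),
        is_non_singular=True)
    return loc, scale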
  def testVariableScaleAssertions(self):
    # We test that changing the scale to be non-invertible raises an exception
    # when validate_args is True. This is really just testing the underlying
    # AffineLinearOperator instance, but we include it to demonstrate that it
    # works as expected.
    loc = tf.constant([1., 1.])
    scale_tensor = tf.Variable(np.eye(2, dtype=np.float32))
    scale = tf.linalg.LinearOperatorLowerTriangular(
        scale_tensor, is_non_singular=True)
    d = tfd.MultivariateNormalLinearOperator(loc, scale, validate_args=True)
    self.evaluate(scale_tensor.initializer)
    with self.assertRaises(Exception):
      with tf.control_dependencies([scale_tensor.assign([[1., 0.], [1., 0.]])]):
        self.evaluate(d.sample())
  def testMeanAndCovarianceAndVarAndStddevLocIsNone(self):
    loc = None

    scale_shape = (2, 4, 4)
    scale = tf.linalg.LinearOperatorLowerTriangular(
        self._random_tril_matrix(scale_shape), is_non_singular=True)

    mvn = tfd.MultivariateNormalLinearOperator(loc, scale, validate_args=True)

    self.assertAllEqual(
        self.evaluate(mvn.mean()), np.zeros((2, 4), dtype=np.float32))

    expected_cov = self.evaluate(
        scale.matmul(scale, adjoint_arg=True).to_dense())
    self.assertAllClose(self.evaluate(mvn.covariance()), expected_cov)

    expected_var = self.evaluate(tf.linalg.diag_part(expected_cov))
    self.assertAllClose(self.evaluate(mvn.variance()), expected_var)
    self.assertAllClose(self.evaluate(mvn.stddev()), expected_var**0.5)
  def testLogPDFScalarBatch(self):
    loc = self.rng.rand(2)
    scale = tf.linalg.LinearOperatorLowerTriangular(
        self._random_tril_matrix([2, 2]), is_non_singular=True)
    mvn = tfd.MultivariateNormalLinearOperator(loc, scale, validate_args=True)
    x = self.rng.rand(2)

    log_pdf = mvn.log_prob(x)
    pdf = mvn.prob(x)

    covariance = self.evaluate(
        tf.matmul(scale.to_dense(), scale.to_dense(), adjoint_b=True))
    scipy_mvn = stats.multivariate_normal(mean=loc, cov=covariance)

    expected_log_pdf = scipy_mvn.logpdf(x)
    expected_pdf = scipy_mvn.pdf(x)
    self.assertEqual((), log_pdf.shape)
    self.assertEqual((), pdf.shape)
    self.assertAllClose(expected_log_pdf, self.evaluate(log_pdf))
    self.assertAllClose(expected_pdf, self.evaluate(pdf))
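  # For reference, the scipy value above is the standard closed-form MVN
  # log-density. A hypothetical NumPy helper (not part of the original test)
  # that computes the same quantity directly from a dense covariance:
  def _np_mvn_log_pdf(self, x, loc, covariance):
    diff = np.asarray(x) - np.asarray(loc)
    _, logdet = np.linalg.slogdet(covariance)
    k = diff.shape[-1]
    # -0.5 * [k ln(2*pi) + ln|cov| + diff^T cov^-1 diff]
    return -0.5 * (k * np.log(2. * np.pi) + logdet
                   + diff.dot(np.linalg.solve(covariance, diff)))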
  def testStatsBroadcastWhenLocIsBiggerThanScale(self):
    batch_shape = (3,)
    event_shape = (4,)
    loc_shape = batch_shape + event_shape
    loc = self.rng.randn(*loc_shape)

    scale_shape = event_shape * 2
    scale = tf.linalg.LinearOperatorLowerTriangular(
        self._random_tril_matrix(scale_shape),
        is_non_singular=True)

    mvn = tfd.MultivariateNormalLinearOperator(loc, scale, validate_args=True)

    self.assertAllEqual(self.evaluate(mvn.mean()), loc)
    cov = mvn.covariance()
    self.assertAllEqual(batch_shape + event_shape * 2, cov.shape)
    self.assertAllClose(
        self.evaluate(cov),
        self.evaluate(scale.matmul(scale, adjoint_arg=True).to_dense()) +
        np.zeros(loc_shape + (1,), dtype=np.float32))

    entropy = mvn.entropy()
    self.assertAllEqual(batch_shape, entropy.shape)
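  # Illustrative only: for an MVN with covariance `scale @ scale^T`, `entropy()`
  # follows the closed form 0.5 * k * (1 + ln(2*pi)) + 0.5 * ln|cov|. A
  # hypothetical NumPy helper (not part of the original test) for checking a
  # single batch member:
  def _np_mvn_entropy(self, covariance):
    k = covariance.shape[-1]
    _, logdet = np.linalg.slogdet(covariance)
    return 0.5 * (k * (1. + np.log(2. * np.pi)) + logdet)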
  def testRaisesIfScaleNotProvided(self):
    loc = self.rng.rand(2)
    with self.assertRaises(ValueError):
      tfd.MultivariateNormalLinearOperator(loc, scale=None, validate_args=True)