Example #1
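All of the snippets below are test methods excerpted from a TensorFlow Probability test class (a `test_util.TestCase` subclass); run in isolation they assume a preamble roughly like the following (a sketch, exact module paths may vary by TFP version):

import math

import tensorflow.compat.v1 as tf1
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp

from tensorflow_probability.python.internal import test_util

tfd = tfp.distributions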
    def testNormalConjugateKnownSigmaNDPosteriorND(self):
        with tf1.Session():
            batch_size = 6
            mu0 = tf.constant([[3.0, -3.0]] * batch_size)
            sigma0 = tf.constant(
                [[math.sqrt(10.0), math.sqrt(15.0)]] * batch_size)
            sigma = tf.constant(
                [[math.sqrt(2.0), math.sqrt(4.0)]] * batch_size)
            x = tf.constant([[-2.5, 2.5, 4.0, 0.0, -1.0, 2.0],
                             [2.5, -2.5, -4.0, 0.0, 1.0, -2.0]],
                            dtype=tf.float32)
            s = tf.reduce_sum(x, axis=[1])
            x = tf.transpose(a=x)  # Transpose to shape (6, 2)
            n = tf.constant([6] * 2)
            prior = tfd.Normal(loc=mu0, scale=sigma0)
            posterior = tfd.normal_conjugates_known_scale_posterior(
                prior=prior, scale=sigma, s=s, n=n)

            # Smoke test
            self.assertTrue(isinstance(posterior, tfd.Normal))

            # Calculate log_pdf under the 2 models
            posterior_log_pdf = posterior.log_prob(x)
            self.assertEqual(posterior_log_pdf.shape, (6, 2))
            self.assertEqual(self.evaluate(posterior_log_pdf).shape, (6, 2))
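For reference, `tfd.normal_conjugates_known_scale_posterior` implements the textbook Normal-Normal conjugate update for an unknown mean with known observation scale, given the sufficient statistics `s` (sum of observations) and `n` (count). A minimal plain-Python sketch of the same update (hypothetical helper, not TFP code):

def known_scale_posterior_params(mu0, sigma0, sigma, s, n):
    # Combine the prior precision with n observations of precision 1 / sigma**2.
    posterior_precision = 1.0 / sigma0**2 + n / sigma**2
    posterior_loc = (mu0 / sigma0**2 + s / sigma**2) / posterior_precision
    posterior_scale = (1.0 / posterior_precision) ** 0.5
    return posterior_loc, posterior_scale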
Example #2
    def testNormalConjugateKnownSigmaPosterior(self):
        with tf1.Session():
            mu0 = tf.constant([3.0])
            sigma0 = tf.constant([math.sqrt(10.0)])
            sigma = tf.constant([math.sqrt(2.0)])
            x = tf.constant([-2.5, 2.5, 4.0, 0.0, -1.0, 2.0])
            s = tf.reduce_sum(x)
            n = tf.size(x)
            prior = tfd.Normal(loc=mu0, scale=sigma0)
            posterior = tfd.normal_conjugates_known_scale_posterior(
                prior=prior, scale=sigma, s=s, n=n)

            # Smoke test
            self.assertTrue(isinstance(posterior, tfd.Normal))
            posterior_log_pdf = self.evaluate(posterior.log_prob(x))
            self.assertEqual(posterior_log_pdf.shape, (6, ))
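Plugging the numbers from this scalar case into the update sketched above: s = 5.0 and n = 6, so the posterior precision is 1/10 + 6/2 = 3.1, giving loc = 2.8 / 3.1 ≈ 0.903 and scale = sqrt(1 / 3.1) ≈ 0.568. A follow-on assertion in the same style (a sketch, not part of the original test) could check this directly:

            self.assertAllClose(self.evaluate(posterior.loc), [2.8 / 3.1])
            self.assertAllClose(self.evaluate(posterior.scale),
                                [math.sqrt(1.0 / 3.1)])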
Example #3
  def testNormalConjugateKnownSigmaPosteriorND(self):
    with tf1.Session():
      batch_size = 6
      mu0 = tf.constant([[3.0, -3.0]] * batch_size)
      sigma0 = tf.constant([[math.sqrt(10.0), math.sqrt(15.0)]] * batch_size)
      sigma = tf.constant([[math.sqrt(2.0)]] * batch_size)
      x = tf.transpose(
          a=tf.constant([[-2.5, 2.5, 4.0, 0.0, -1.0, 2.0]], dtype=tf.float32))
      s = tf.reduce_sum(x)
      n = tf.size(x)
      prior = tfd.Normal(loc=mu0, scale=sigma0)
      posterior = tfd.normal_conjugates_known_scale_posterior(
          prior=prior, scale=sigma, s=s, n=n)

      # Smoke test
      self.assertTrue(isinstance(posterior, tfd.Normal))
      posterior_log_pdf = self.evaluate(posterior.log_prob(x))
      self.assertEqual(posterior_log_pdf.shape, (6, 2))
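Note the shapes in this example: `x` has shape (6, 1) while the posterior's parameters broadcast to a (6, 2) batch, so `log_prob` broadcasts the input across the trailing batch dimension to produce the (6, 2) result being asserted. A small standalone illustration of that broadcasting (assumed values, not from the original test):

      d = tfd.Normal(loc=tf.zeros([6, 2]), scale=tf.ones([6, 2]))
      print(d.log_prob(tf.zeros([6, 1])).shape)  # (6, 2)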
Example #4
    def testMVNConjugateLinearUpdatePreservesStructuredLinops(self):
        strm = test_util.test_seed_stream()
        num_outputs = 4

        prior_scale = tf.linalg.LinearOperatorScaledIdentity(num_outputs, 4.)
        likelihood_scale = tf.linalg.LinearOperatorScaledIdentity(
            num_outputs, 0.2)
        linear_transformation = tf.linalg.LinearOperatorIdentity(num_outputs)
        observation = tf.random.normal([num_outputs], seed=strm())
        posterior_mean, posterior_prec = (tfd.mvn_conjugate_linear_update(
            prior_scale=prior_scale,
            linear_transformation=linear_transformation,
            likelihood_scale=likelihood_scale,
            observation=observation))
        # TODO(davmre): enable next line once internal CI is updated to recent TF.
        # self.assertIsInstance(posterior_prec,
        #                       tf.linalg.LinearOperatorScaledIdentity)

        self._mvn_linear_update_test_helper(
            prior_mean=tf.zeros([num_outputs]),
            prior_scale=prior_scale.to_dense(),
            linear_transformation=linear_transformation.to_dense(),
            likelihood_scale=likelihood_scale.to_dense(),
            observation=observation,
            candidate_posterior_mean=posterior_mean,
            candidate_posterior_prec=posterior_prec.to_dense())

        # Also check the result against the scalar calculation.
        scalar_posterior_dist = tfd.normal_conjugates_known_scale_posterior(
            prior=tfd.Normal(loc=0., scale=prior_scale.diag_part()),
            scale=likelihood_scale.diag_part(),
            s=observation,
            n=1)
        (posterior_mean_, posterior_prec_, scalar_posterior_mean_,
         scalar_posterior_prec_) = self.evaluate(
             (posterior_mean, posterior_prec.to_dense(),
              scalar_posterior_dist.mean(),
              tf.linalg.diag(1. / scalar_posterior_dist.variance())))
        self.assertAllClose(posterior_mean_, scalar_posterior_mean_)
        self.assertAllClose(posterior_prec_, scalar_posterior_prec_)
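Example #4 also calls `self._mvn_linear_update_test_helper`, which is defined elsewhere in the test class; presumably it compares the candidate posterior against the dense linear-Gaussian Bayes update. A rough NumPy sketch of that reference computation (an assumption about the helper, not its actual code):

import numpy as np

def mvn_linear_update_reference(prior_mean, prior_scale, linear_transformation,
                                likelihood_scale, observation):
    # Posterior over x in the model
    #   x ~ N(prior_mean, prior_scale @ prior_scale.T)
    #   observation ~ N(linear_transformation @ x,
    #                   likelihood_scale @ likelihood_scale.T)
    prior_prec = np.linalg.inv(prior_scale @ prior_scale.T)
    obs_prec = np.linalg.inv(likelihood_scale @ likelihood_scale.T)
    A = linear_transformation
    posterior_prec = prior_prec + A.T @ obs_prec @ A
    posterior_mean = np.linalg.solve(
        posterior_prec,
        prior_prec @ prior_mean + A.T @ obs_prec @ observation)
    return posterior_mean, posterior_prec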