Example #1
0
    def testNormalNormalSample(self):
        """Sampler recovers the conjugate normal-normal posterior moments."""
        # Standard normal prior over a 2-dimensional state.
        prior = tfd.Normal(self.dtype([0., 0.]), self.dtype(1.))

        def sample_from_prior(seed):
            return prior.sample(seed=seed)

        # Log-likelihood of a single observation at the mode (0.).
        # The state is 2-dimensional, so we sum over the last axis.
        def log_likelihood(state):
            per_dim = tfd.Normal(state, self.dtype(2.)).log_prob(
                self.dtype(0.))
            return tf.reduce_sum(per_dim, axis=-1)

        slice_kernel = elliptical.EllipticalSliceSampler(
            normal_sampler_fn=sample_from_prior,
            log_likelihood_fn=log_likelihood,
            seed=tfp_test_util.test_seed(),
        )

        @tf.function
        def run_chain():
            return tfp.mcmc.sample_chain(
                num_results=int(3e5),
                current_state=self.dtype(np.random.randn(2)),
                kernel=slice_kernel,
                num_burnin_steps=int(1e4),
                parallel_iterations=1,
                trace_fn=None)

        samples = run_chain()
        mean, variance = self.evaluate(tf.nn.moments(samples, axes=[0]))
        # Exact values from the normal-normal conjugate posterior formula:
        # mean 0, variance 4/5 for each coordinate.
        self.assertAllClose([0., 0.], mean, rtol=5e-2, atol=6e-3)
        self.assertAllClose([4. / 5, 4. / 5], variance, rtol=5e-2)
Example #2
0
    def testSampleChainSeedReproducible(self):
        """Two seeded chains agree: thinning by one matches every other draw."""
        prior = tfd.Normal(5 * [[0., 0.]], 1.)

        def sample_from_prior(seed):
            return prior.sample(seed=seed)

        def log_likelihood(state):
            return tf.reduce_sum(tfd.Normal(state, 2.).log_prob(0.), axis=-1)

        num_results = 10
        seed = tfp_test_util.test_seed()
        initial_state = np.float32(np.random.rand(5, 2))

        # Both chains get an identically-seeded kernel so draws line up.
        def make_kernel():
            return elliptical.EllipticalSliceSampler(
                normal_sampler_fn=sample_from_prior,
                log_likelihood_fn=log_likelihood,
                seed=seed)

        @tf.function
        def dense_chain():
            return tfp.mcmc.sample_chain(
                num_results=2 * num_results,
                num_steps_between_results=0,
                current_state=initial_state,
                kernel=make_kernel(),
                num_burnin_steps=150,
                trace_fn=None,
                parallel_iterations=1)

        @tf.function
        def thinned_chain():
            return tfp.mcmc.sample_chain(
                num_results=num_results,
                num_steps_between_results=1,
                current_state=initial_state,
                kernel=make_kernel(),
                trace_fn=None,
                num_burnin_steps=150,
                parallel_iterations=1)

        dense_, thinned_ = self.evaluate([dense_chain(), thinned_chain()])

        self.assertAllClose(dense_[::2], thinned_, atol=1e-5, rtol=1e-5)
Example #3
0
    def testNormalNormalSampleMultipleDatapoints(self):
        """Posterior moments match the conjugate formula with 10 observations."""
        # Two independent chains, each over a state of shape [3].
        prior_scale = self.dtype(np.exp(np.random.rand(2, 3)))
        likelihood_scale = self.dtype(np.exp(np.random.rand(2, 3)))
        # Ten data points per (chain, state) entry.
        observations = self.dtype(np.random.randn(10, 2, 3))

        # Zero-mean normal prior with randomized per-coordinate scale.
        prior = tfd.Normal(self.dtype(0.), prior_scale)

        def sample_from_prior(seed):
            return prior.sample(seed=seed)

        def log_likelihood(state):
            log_probs = tfd.Normal(state, likelihood_scale).log_prob(
                observations)
            # Sum over the data axis (0) and the state axis (-1), keeping
            # one log-likelihood value per chain.
            return tf.reduce_sum(log_probs, axis=[0, -1])

        slice_kernel = elliptical.EllipticalSliceSampler(
            normal_sampler_fn=sample_from_prior,
            log_likelihood_fn=log_likelihood,
            seed=tfp_test_util.test_seed(),
        )

        @tf.function
        def run_chain():
            return tfp.mcmc.sample_chain(
                num_results=int(3e5),
                current_state=self.dtype(np.random.randn(2, 3)),
                kernel=slice_kernel,
                num_burnin_steps=int(1e4),
                parallel_iterations=1,
                trace_fn=None)

        samples = run_chain()
        mean, variance = self.evaluate(tf.nn.moments(samples, axes=[0]))
        expected_mean, expected_variance = normal_normal_posterior(
            prior_mean=0.,
            prior_stddev=prior_scale,
            likelihood_stddev=likelihood_scale,
            data=observations)
        # Exact values from the normal-normal conjugate posterior formula.
        self.assertAllClose(expected_mean, mean, rtol=2e-2, atol=6e-3)
        self.assertAllClose(expected_variance, variance, rtol=5e-2)