Example 1
    def testCorrectStepSizeTransformedKernel(self):
        scalings = .1
        bijector = tfb.Sigmoid()
        prior = tfd.Beta(.1, .1)
        likelihood = tfd.Beta(5., 5.)
        init_state = [tf.clip_by_value(prior.sample(10000), 1e-5, 1. - 1e-5)]
        make_transform_kernel_fn = gen_make_transform_hmc_kernel_fn(
            [bijector], num_leapfrog_steps=1)

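        # The kernel factory is expected to set the inner HMC step size to
        # `scalings * std(unconstrained init_state)`; the assertions below
        # verify exactly that.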
        kernel = make_transform_kernel_fn(likelihood.log_prob,
                                          init_state,
                                          scalings=scalings)
        step_size, expected_step_size = self.evaluate([
            tf.squeeze(kernel.inner_kernel.step_size),
            scalings * tf.math.reduce_std(bijector.inverse(init_state))
        ])
        self.assertAllGreater(step_size, 0.)
        self.assertAllEqual(step_size, expected_step_size)
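
Both snippets read as methods of a TensorFlow Probability test class, so `tf`, `tfb`, `tfd`, `np`, `tfp`, `functools`, `test_util`, `gen_make_transform_hmc_kernel_fn`, and `simple_heuristic_tuning` are assumed to be imported at module scope. Judging from the assertions above, the factory returned by `gen_make_transform_hmc_kernel_fn` plausibly wraps HMC in a `TransformedTransitionKernel` and derives the step size from the spread of the unconstrained particles. A minimal sketch under that assumption (the function name and the exact step-size rule here are illustrative, not the library's actual implementation):

    import tensorflow as tf
    import tensorflow_probability as tfp


    def sketch_make_transform_hmc_kernel_fn(bijectors, num_leapfrog_steps=1):
        """Sketch of a make_kernel_fn factory matching the test above."""

        def make_kernel_fn(target_log_prob_fn, init_state, scalings):
            # One step size per state part: the tuning scale times the
            # standard deviation of the unconstrained (inverse-transformed)
            # particles.
            step_size = [
                scalings * tf.math.reduce_std(bij.inverse(part))
                for bij, part in zip(bijectors, init_state)
            ]
            hmc = tfp.mcmc.HamiltonianMonteCarlo(
                target_log_prob_fn=target_log_prob_fn,
                step_size=step_size,
                num_leapfrog_steps=num_leapfrog_steps)
            # The test reads the tuned value back via `kernel.inner_kernel`.
            return tfp.mcmc.TransformedTransitionKernel(
                inner_kernel=hmc, bijector=bijectors)

        return make_kernel_fn
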
Example 2
    def testSampleEndtoEndXLA(self):
        """An end-to-end test of sampling using SMC."""
        if tf.executing_eagerly(
        ) or tf.config.experimental_functions_run_eagerly():
            self.skipTest('No need to test XLA under all execution regimes.')

        seed = test_util.test_seed()
        dtype = tf.float32
        # Set up data.
        predictors = np.asarray([
            201., 244., 47., 287., 203., 58., 210., 202., 198., 158., 165.,
            201., 157., 131., 166., 160., 186., 125., 218., 146.
        ])
        obs = np.asarray([
            592., 401., 583., 402., 495., 173., 479., 504., 510., 416., 393.,
            442., 317., 311., 400., 337., 423., 334., 533., 344.
        ])
        y_sigma = np.asarray([
            61., 25., 38., 15., 21., 15., 27., 14., 30., 16., 14., 25., 52.,
            16., 34., 31., 42., 26., 16., 22.
        ])
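        # Standardize: center obs and predictors and rescale everything by
        # two standard deviations, so the regression coefficients live on a
        # comparable unit scale.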
        y_sigma = tf.cast(y_sigma / (2 * obs.std(axis=0)), dtype)
        obs = tf.cast((obs - obs.mean(axis=0)) / (2 * obs.std(axis=0)), dtype)
        predictors = tf.cast((predictors - predictors.mean(axis=0)) /
                             (2 * predictors.std(axis=0)), dtype)

        hyper_mean = tf.cast(0, dtype)
        hyper_scale = tf.cast(2.5, dtype)
        # Generate model prior_log_prob_fn and likelihood_log_prob_fn.
        prior_jd = tfd.JointDistributionSequential([
            tfd.Normal(loc=hyper_mean, scale=hyper_scale),
            tfd.Normal(loc=hyper_mean, scale=hyper_scale),
            tfd.Normal(loc=hyper_mean, scale=hyper_scale),
            tfd.HalfNormal(scale=tf.cast(.5, dtype)),
            tfd.Uniform(low=tf.cast(0, dtype), high=.5),
        ], validate_args=True)

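        # Robust-regression likelihood: each of the 20 observations is either
        # an inlier from the line b0 + b1 * x (noise scale y_sigma) or an
        # outlier from a broad Normal(mu_out, y_sigma + sigma_out), with
        # per-dataset outlier probability `weight`.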
        def likelihood_log_prob_fn(b0, b1, mu_out, sigma_out, weight):
            mixture = tfd.Mixture(
                cat=tfd.Categorical(probs=tf.stack([
                    tf.repeat(1. - weight[..., tf.newaxis], 20, axis=-1),
                    tf.repeat(weight[..., tf.newaxis], 20, axis=-1),
                ], axis=-1)),
                components=[
                    tfd.Normal(loc=b0[..., tf.newaxis] +
                               b1[..., tf.newaxis] * predictors,
                               scale=y_sigma),
                    tfd.Normal(loc=mu_out[..., tf.newaxis],
                               scale=y_sigma + sigma_out[..., tf.newaxis]),
                ])
            return tfd.Independent(mixture, 1).log_prob(obs)

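        # Map each latent to unconstrained space: identity for the
        # real-valued b0, b1 and mu_out, Softplus to keep sigma_out positive,
        # and a Sigmoid squashing the mixture weight into (0, 0.5).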
        unconstraining_bijectors = [
            tfb.Identity(),
            tfb.Identity(),
            tfb.Identity(),
            tfb.Softplus(),
            tfb.Sigmoid(tf.constant(0., dtype), .5),
        ]
        make_transform_hmc_kernel_fn = gen_make_transform_hmc_kernel_fn(
            unconstraining_bijectors, num_leapfrog_steps=5)

        @tf.function(autograph=False, jit_compile=True)
        def run_smc():
            # Ensure we're really in graph mode.
            assert hasattr(tf.constant([]), 'graph')

            return tfp.experimental.mcmc.sample_sequential_monte_carlo(
                prior_jd.log_prob,
                likelihood_log_prob_fn,
                prior_jd.sample([1000, 5], seed=seed),
                make_kernel_fn=make_transform_hmc_kernel_fn,
                tuning_fn=functools.partial(simple_heuristic_tuning,
                                            optimal_accept=.6),
                min_num_steps=5,
                seed=seed)

        n_stage, (b0, b1, mu_out, sigma_out, weight), _ = run_smc()

        (n_stage, b0, b1, mu_out, sigma_out, weight) = self.evaluate(
            (n_stage, b0, b1, mu_out, sigma_out, weight))

        # `assertTrue(n_stage, 10)` would treat 10 as the failure message and
        # only check truthiness; check the expected stage count directly.
        self.assertEqual(n_stage, 10)

        # Compare the SMC posterior with the result from a calibrated HMC.
        self.assertAllClose(tf.reduce_mean(b0), 0.016, atol=0.005, rtol=0.005)
        self.assertAllClose(tf.reduce_mean(b1), 1.245, atol=0.005, rtol=0.035)
        self.assertAllClose(tf.reduce_mean(weight), 0.28, atol=0.03, rtol=0.02)
        self.assertAllClose(tf.reduce_mean(mu_out), 0.13, atol=0.2, rtol=0.2)
        self.assertAllClose(tf.reduce_mean(sigma_out),
                            0.46,
                            atol=0.5,
                            rtol=0.5)

        self.assertAllClose(tf.math.reduce_std(b0),
                            0.031,
                            atol=0.015,
                            rtol=0.3)
        self.assertAllClose(tf.math.reduce_std(b1), 0.068, atol=0.1, rtol=0.1)
        self.assertAllClose(tf.math.reduce_std(weight),
                            0.1,
                            atol=0.1,
                            rtol=0.1)
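
For a standalone run outside the test harness, `tfp.experimental.mcmc.sample_sequential_monte_carlo` can also be called with its defaults (a random-walk Metropolis kernel and heuristic tuning). A minimal toy invocation, assuming the same positional signature as the call above; the distributions, particle count, and seed are illustrative:

    import tensorflow as tf
    import tensorflow_probability as tfp

    tfd = tfp.distributions

    # Temper from a broad prior into a sharp likelihood on a 1-D toy problem.
    prior = tfd.Normal(loc=0., scale=1.)
    likelihood = tfd.Normal(loc=0.5, scale=0.1)

    n_stage, particles, _ = tfp.experimental.mcmc.sample_sequential_monte_carlo(
        prior.log_prob,
        likelihood.log_prob,
        prior.sample(1000, seed=42),
        seed=42)

    # The particle mean should approximate the posterior mean of the
    # prior-times-likelihood target.
    print(int(n_stage), float(tf.reduce_mean(particles)))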