def test_beta_binomial_hmc():
    """Check that HMC on a ConjugateReparam'd Beta-Binomial model recovers
    the analytic posterior moments.

    The conjugate reparameterization folds the Binomial likelihood into the
    Beta prior, so even with ``warmup_steps=0`` the chain should sample from
    (nearly) the exact posterior.
    """
    num_samples = 1000
    total = 10
    counts = dist.Binomial(total, 0.3).sample()
    concentration1 = torch.tensor(0.5)
    concentration0 = torch.tensor(1.5)
    prior = dist.Beta(concentration1, concentration0)
    # Beta(1 + successes, 1 + failures): the likelihood factor handed to
    # ConjugateReparam so it can update the prior in closed form.
    likelihood = dist.Beta(1 + counts, 1 + total - counts)
    # Analytic Beta-Binomial conjugate posterior; ground truth for the asserts.
    posterior = dist.Beta(concentration1 + counts, concentration0 + total - counts)

    def model():
        prob = pyro.sample("prob", prior)
        pyro.sample("counts", dist.Binomial(total, prob), obs=counts)

    reparam_model = poutine.reparam(model, {"prob": ConjugateReparam(likelihood)})

    kernel = HMC(reparam_model)
    # BUG FIX: MCMC.run() returns None, so the original
    # ``samples = MCMC(...).run()`` passed None to Predictive. Samples must
    # be retrieved via MCMC.get_samples() after running the chain.
    mcmc = MCMC(kernel, num_samples, warmup_steps=0)
    mcmc.run()
    samples = mcmc.get_samples()
    pred = Predictive(reparam_model, samples, num_samples=num_samples)
    trace = pred.get_vectorized_trace()
    samples = trace.nodes["prob"]["value"]
    assert_close(samples.mean(), posterior.mean, atol=0.01)
    assert_close(samples.std(), posterior.variance.sqrt(), atol=0.01)
def test_posterior_predictive_svi_auto_diag_normal_guide(return_trace):
    """Fit an AutoDiagonalNormal guide with SVI on a conditioned Binomial
    model, then check that posterior-predictive draws of "obs" have the
    expected mean (1000 trials * p=0.7 = 700 per dimension).

    ``return_trace`` selects between reading the draws out of a vectorized
    trace and reading them from the samples dict.
    """
    true_probs = torch.ones(5) * 0.7
    num_trials = torch.ones(5) * 1000
    num_success = dist.Binomial(num_trials, true_probs).sample()
    conditioned_model = poutine.condition(model, data={"obs": num_success})
    guide = AutoDiagonalNormal(conditioned_model)
    svi = SVI(conditioned_model, guide, optim.Adam({"lr": 0.1}), Trace_ELBO())
    for _ in range(1000):
        svi.step(num_trials)
    predictive = Predictive(model, guide=guide, num_samples=10000, parallel=True)
    if return_trace:
        trace = predictive.get_vectorized_trace(num_trials)
        obs_draws = trace.nodes["obs"]["value"]
    else:
        obs_draws = predictive.get_samples(num_trials)["obs"]
    assert_close(obs_draws.mean(dim=0), torch.ones(5) * 700, rtol=0.05)