def test_klqp_betabernoulli(self):
  with self.test_session() as sess:
    # model
    z = Beta(1., 1., name="z")
    xs = Bernoulli(probs=z, sample_shape=10)
    x_obs = np.asarray([0, 0, 1, 1, 0, 0, 0, 0, 0, 1], dtype=np.int32)

    # inference
    qz_mean = tf.get_variable("qz_mean", initializer=tf.random_normal(()))
    qz_std = tf.nn.softplus(
        tf.get_variable(name="qz_prestd", initializer=tf.random_normal(())))
    qz_unconstrained = ed.models.Normal(
        loc=qz_mean, scale=qz_std, name="z_posterior")

    inference_klqp = ed.inferences.KLqp(
        {z: qz_unconstrained}, data={xs: x_obs})
    inference_klqp.run(n_iter=500, auto_transform=True)

    z_unconstrained = inference_klqp.transformations[z]
    qz_constrained = z_unconstrained.bijector.inverse(
        qz_unconstrained.sample(1000))
    qz_mean, qz_var = sess.run(tf.nn.moments(qz_constrained, 0))

    true_posterior = Beta(np.sum(x_obs) + 1., np.sum(1 - x_obs) + 1.)
    pz_mean, pz_var = sess.run(
        (true_posterior.mean(), true_posterior.variance()))
    self.assertAllClose(qz_mean, pz_mean, rtol=5e-2, atol=5e-2)
    self.assertAllClose(qz_var, pz_var, rtol=1e-2, atol=1e-2)
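A minimal sketch of the change of variables that auto_transform performs here (the exact default bijector is an assumption, and the helper names below are hypothetical): Beta has support (0, 1), so z is mapped to the unconstrained real line with a logit-style bijection, the Normal approximation is fit there, and z_unconstrained.bijector.inverse maps samples back into (0, 1).

import tensorflow as tf

def to_unconstrained(z):
  # logit: (0, 1) -> R; the space in which qz_unconstrained is fit
  return tf.log(z) - tf.log1p(-z)

def to_constrained(y):
  # sigmoid: R -> (0, 1); what bijector.inverse computes in the test above
  return tf.sigmoid(y)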
def test_hmc_betabernoulli(self):
  """Do we correctly handle dependencies of transformed variables?"""
  with self.test_session() as sess:
    # model
    z = Beta(1., 1., name="z")
    xs = Bernoulli(probs=z, sample_shape=10)
    x_obs = np.asarray([0, 0, 1, 1, 0, 0, 0, 0, 0, 1], dtype=np.int32)

    # inference
    qz_samples = tf.Variable(tf.random_uniform(shape=(1000,)))
    qz = ed.models.Empirical(params=qz_samples, name="z_posterior")
    inference_hmc = ed.inferences.HMC({z: qz}, data={xs: x_obs})
    inference_hmc.run(step_size=1.0, n_steps=5, auto_transform=True)

    # check that inferred posterior mean/variance is close to
    # that of the exact Beta posterior
    z_unconstrained = inference_hmc.transformations[z]
    qz_constrained = z_unconstrained.bijector.inverse(qz_samples)
    qz_mean, qz_var = sess.run(tf.nn.moments(qz_constrained, 0))

    true_posterior = Beta(1. + np.sum(x_obs), 1. + np.sum(1 - x_obs))
    pz_mean, pz_var = sess.run(
        (true_posterior.mean(), true_posterior.variance()))
    self.assertAllClose(qz_mean, pz_mean, rtol=5e-2, atol=5e-2)
    self.assertAllClose(qz_var, pz_var, rtol=1e-2, atol=1e-2)
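Both tests compare against the exact posterior available by conjugacy: a Beta(a, b) prior with n Bernoulli observations totalling k successes yields a Beta(a + k, b + n - k) posterior. A standalone sketch of that reference computation, using scipy.stats rather than Edward:

import numpy as np
from scipy.stats import beta

x_obs = np.asarray([0, 0, 1, 1, 0, 0, 0, 0, 0, 1])
a, b = 1., 1.                       # Beta(1, 1) prior, uniform on (0, 1)
k, n = x_obs.sum(), x_obs.size      # 3 successes in 10 trials
posterior = beta(a + k, b + n - k)  # exact posterior: Beta(4, 8)
print(posterior.mean(), posterior.var())  # ~0.3333, ~0.0171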
def test_beta_bernoulli(self):
  with self.test_session() as sess:
    x_data = np.array([0, 1, 0, 0, 0, 0, 0, 0, 0, 1])

    p = Beta(1.0, 1.0)
    x = Bernoulli(probs=p, sample_shape=10)

    qp = Empirical(tf.Variable(tf.zeros(1000)))
    inference = ed.Gibbs({p: qp}, data={x: x_data})
    inference.run()

    true_posterior = Beta(3.0, 9.0)

    val_est, val_true = sess.run([qp.mean(), true_posterior.mean()])
    self.assertAllClose(val_est, val_true, rtol=1e-2, atol=1e-2)

    val_est, val_true = sess.run([qp.variance(), true_posterior.variance()])
    self.assertAllClose(val_est, val_true, rtol=1e-2, atol=1e-2)
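For this one-latent-variable conjugate model, each Gibbs update draws p directly from its complete conditional, which is the exact Beta(3, 9) posterior for this data, so the Empirical moments should match it closely. A minimal NumPy sketch of that draw (an illustration of the math, not Edward's internals):

import numpy as np

x_data = np.array([0, 1, 0, 0, 0, 0, 0, 0, 0, 1])
samples = np.random.beta(1.0 + x_data.sum(),        # alpha: 1 + 2 successes
                         1.0 + (1 - x_data).sum(),  # beta: 1 + 8 failures
                         size=1000)
print(samples.mean(), samples.var())  # close to 0.25 and ~0.0144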
"""A simple coin flipping example. Inspired by Stan's toy example. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import edward as ed import numpy as np import tensorflow as tf from edward.models import Bernoulli, Beta ed.set_seed(42) # DATA x_data = np.array([0, 1, 0, 0, 0, 0, 0, 0, 0, 1]) # MODEL p = Beta(1.0, 1.0) x = Bernoulli(probs=p, sample_shape=10) # INFERENCE qp_a = tf.nn.softplus(tf.Variable(tf.random_normal([]))) qp_b = tf.nn.softplus(tf.Variable(tf.random_normal([]))) qp = Beta(qp_a, qp_b) inference = ed.KLqp({p: qp}, data={x: x_data}) inference.run(n_iter=500) print("Posterior mean of probability: {}".format(qp.mean().eval()))