def test_normalnormal_float32(self):
  """MH on a conjugate normal-normal model, using the prior as the proposal."""
  with self.test_session() as sess:
    x_data = np.array([0.0] * 50, dtype=np.float32)

    mu = Normal(loc=0.0, scale=1.0)
    x = Normal(loc=mu, scale=1.0, sample_shape=50)

    num_draws = 2000
    qmu = Empirical(params=tf.Variable(tf.ones(num_draws)))

    # Exact posterior: N(loc=0.0, scale=sqrt(1/51) ~= 0.140).
    inference = ed.MetropolisHastings({mu: qmu}, {mu: mu}, data={x: x_data})
    inference.run()

    self.assertAllClose(qmu.mean().eval(), 0, rtol=1e-1, atol=1e-1)
    self.assertAllClose(qmu.stddev().eval(), np.sqrt(1 / 51),
                        rtol=1e-1, atol=1e-1)

    # Counters reflect a finished run...
    t_before, accepts_before = sess.run([inference.t, inference.n_accept])
    self.assertEqual(t_before, num_draws)
    self.assertGreater(accepts_before, 0.1)

    # ...and go back to zero after reset.
    sess.run(inference.reset)
    t_after, accepts_after = sess.run([inference.t, inference.n_accept])
    self.assertEqual(t_after, 0)
    self.assertEqual(accepts_after, 0)
def main(_):
  """Sample a correlated 2-D Gaussian with HMC, then plot the draws."""
  ed.set_seed(42)

  # MODEL
  z = MultivariateNormalTriL(
      loc=tf.ones(2),
      scale_tril=tf.cholesky(tf.constant([[1.0, 0.8], [0.8, 1.0]])))

  # INFERENCE
  qz = Empirical(params=tf.get_variable("qz/params", [1000, 2]))
  inference = ed.HMC({z: qz})
  inference.run()

  # CRITICISM
  session = ed.get_session()
  post_mean, post_stddev = session.run([qz.mean(), qz.stddev()])
  print("Inferred posterior mean:")
  print(post_mean)
  print("Inferred posterior stddev:")
  print(post_stddev)

  # Scatter the empirical draws over the target's density contours.
  figure, axes = plt.subplots()
  draws = session.run(qz.params)
  axes.scatter(draws[:, 0], draws[:, 1], marker=".")
  mvn_plot_contours(z, ax=axes)
  plt.show()
def main(_):
  """Beta-Bernoulli inference via MH, with a posterior predictive check."""
  ed.set_seed(42)

  # DATA
  x_data = np.array([0, 1, 0, 0, 0, 0, 0, 0, 0, 1])

  # MODEL
  p = Beta(1.0, 1.0)
  x = Bernoulli(probs=p, sample_shape=10)

  # INFERENCE
  qp = Empirical(params=tf.get_variable(
      "qp/params", [1000],
      initializer=tf.constant_initializer(0.5)))
  proposal_p = Beta(3.0, 9.0)
  inference = ed.MetropolisHastings({p: qp}, {p: proposal_p}, data={x: x_data})
  inference.run()

  # CRITICISM
  # exact posterior has mean 0.25 and std 0.12
  session = ed.get_session()
  post_mean, post_stddev = session.run([qp.mean(), qp.stddev()])
  print("Inferred posterior mean:")
  print(post_mean)
  print("Inferred posterior stddev:")
  print(post_stddev)

  # Posterior predictive check on the sample mean of replicated data.
  x_post = ed.copy(x, {p: qp})
  stat = lambda xs, zs: tf.reduce_mean(tf.cast(xs[x_post], tf.float32))
  tx_rep, tx = ed.ppc(stat, data={x_post: x_data})
  ed.ppc_stat_hist_plot(
      tx[0], tx_rep, stat_name=r'$T \equiv$mean', bins=10)
  plt.show()
def test_normalnormal_run(self):
  """MH with a fixed N(0, 1) proposal on a conjugate normal-normal model."""
  with self.test_session() as sess:
    x_data = np.array([0.0] * 50, dtype=np.float32)

    mu = Normal(loc=0.0, scale=1.0)
    x = Normal(loc=tf.ones(50) * mu, scale=1.0)

    num_samples = 2000
    qmu = Empirical(params=tf.Variable(tf.ones(num_samples)))
    proposal_mu = Normal(loc=0.0, scale=1.0)

    # Exact posterior: N(loc=0.0, scale=sqrt(1/51) ~= 0.140).
    inference = ed.MetropolisHastings({mu: qmu}, {mu: proposal_mu},
                                      data={x: x_data})
    inference.run()

    self.assertAllClose(qmu.mean().eval(), 0, rtol=1e-2, atol=1e-2)
    self.assertAllClose(qmu.stddev().eval(), np.sqrt(1 / 51),
                        rtol=1e-1, atol=1e-1)

    # Iteration/accept counters reflect the finished run...
    t_before, accepts_before = sess.run([inference.t, inference.n_accept])
    self.assertEqual(t_before, num_samples)
    self.assertGreater(accepts_before, 0.1)

    # ...and are zeroed by the reset op.
    sess.run(inference.reset)
    t_after, accepts_after = sess.run([inference.t, inference.n_accept])
    self.assertEqual(t_after, 0)
    self.assertEqual(accepts_after, 0)
def _test_normal_normal(self, default, dtype):
  """Run Gibbs on a conjugate normal-normal model and check the result.

  Args:
    default: bool; if True, let ed.Gibbs construct its own Empirical
      approximation instead of passing one in explicitly.
    dtype: TensorFlow dtype used for the model's parameters.
  """
  with self.test_session() as sess:
    # NOTE(review): the data stays float32 even when dtype is float64 —
    # presumably observed data is cast to the model dtype downstream;
    # confirm against ed.Gibbs' data handling.
    x_data = np.array([0.0] * 50, dtype=np.float32)

    mu = Normal(loc=tf.constant(0.0, dtype=dtype),
                scale=tf.constant(1.0, dtype=dtype))
    x = Normal(loc=mu, scale=tf.constant(1.0, dtype=dtype), sample_shape=50)

    n_samples = 2000
    # analytic solution: N(loc=0.0, scale=sqrt(1/51)=0.140)
    if not default:
      qmu = Empirical(params=tf.Variable(tf.ones(n_samples, dtype=dtype)))
      inference = ed.Gibbs({mu: qmu}, data={x: x_data})
    else:
      inference = ed.Gibbs([mu], data={x: x_data})
      # Gibbs built the Empirical itself; grab it for the checks below.
      qmu = inference.latent_vars[mu]
    inference.run()

    self.assertAllClose(qmu.mean().eval(), 0, rtol=1e-1, atol=1e-1)
    self.assertAllClose(qmu.stddev().eval(), np.sqrt(1 / 51),
                        rtol=1e-1, atol=1e-1)

    old_t, old_n_accept = sess.run([inference.t, inference.n_accept])
    if not default:
      # Explicit Empirical: one iteration per stored sample.
      self.assertEqual(old_t, n_samples)
    else:
      # Default-built approximation: the run takes 1e4 iterations here.
      self.assertEqual(old_t, 1e4)
    # At least one proposal must have been accepted.
    self.assertGreater(old_n_accept, 0.1)

    # Reset must zero both counters.
    sess.run(inference.reset)
    new_t, new_n_accept = sess.run([inference.t, inference.n_accept])
    self.assertEqual(new_t, 0)
    self.assertEqual(new_n_accept, 0)
def _test_normal_normal(self, default, dtype):
  """Run HMC on a conjugate normal-normal model; verify posterior moments,
  the iteration counters, and the reset op.

  Args:
    default: if True, let ed.HMC construct the Empirical approximation.
    dtype: TensorFlow dtype for the model parameters.
  """
  with self.test_session() as sess:
    x_data = np.array([0.0] * 50, dtype=np.float32)

    mu = Normal(loc=tf.constant(0.0, dtype=dtype),
                scale=tf.constant(1.0, dtype=dtype))
    x = Normal(loc=mu, scale=tf.constant(1.0, dtype=dtype), sample_shape=50)

    num_samples = 2000
    # Exact posterior: N(loc=0.0, scale=sqrt(1/51) ~= 0.140).
    if default:
      inference = ed.HMC([mu], data={x: x_data})
      approx_mu = inference.latent_vars[mu]
    else:
      approx_mu = Empirical(
          params=tf.Variable(tf.ones(num_samples, dtype=dtype)))
      inference = ed.HMC({mu: approx_mu}, data={x: x_data})
    inference.run()

    self.assertAllClose(approx_mu.mean().eval(), 0, rtol=1e-1, atol=1e-1)
    self.assertAllClose(approx_mu.stddev().eval(), np.sqrt(1 / 51),
                        rtol=1e-1, atol=1e-1)

    t_before, accepts_before = sess.run([inference.t, inference.n_accept])
    expected_t = 1e4 if default else num_samples
    self.assertEqual(t_before, expected_t)
    self.assertGreater(accepts_before, 0.1)

    # Reset must zero both counters.
    sess.run(inference.reset)
    t_after, accepts_after = sess.run([inference.t, inference.n_accept])
    self.assertEqual(t_after, 0)
    self.assertEqual(accepts_after, 0)
def test_normalnormal_run(self):
  """HMC recovers the analytic posterior of a normal-normal model."""
  with self.test_session():
    observations = np.array([0.0] * 50, dtype=np.float32)

    mu = Normal(loc=0.0, scale=1.0)
    x = Normal(loc=tf.ones(50) * mu, scale=1.0)

    qmu = Empirical(params=tf.Variable(tf.ones(2000)))

    # Exact posterior: N(loc=0.0, scale=sqrt(1/51) ~= 0.140).
    inference = ed.HMC({mu: qmu}, data={x: observations})
    inference.run()

    self.assertAllClose(qmu.mean().eval(), 0, rtol=1e-2, atol=1e-2)
    self.assertAllClose(qmu.stddev().eval(), np.sqrt(1 / 51),
                        rtol=1e-2, atol=1e-2)
def test_data_tensor(self):
  """Gibbs accepts observed data supplied as a TensorFlow tensor."""
  with self.test_session():
    x_data = tf.zeros(50)

    mu = Normal(0.0, 1.0)
    x = Normal(mu, 1.0, sample_shape=50)

    qmu = Empirical(tf.Variable(tf.ones(1000)))

    # Exact posterior: N(mu=0.0, sigma=sqrt(1/51) ~= 0.140).
    inference = ed.Gibbs({mu: qmu}, data={x: x_data})
    inference.run()

    self.assertAllClose(qmu.mean().eval(), 0, rtol=1e-2, atol=1e-2)
    self.assertAllClose(qmu.stddev().eval(), np.sqrt(1 / 51),
                        rtol=1e-2, atol=1e-2)
def main(_):
  """Sample a correlated 2-D Gaussian with SGLD and print its moments."""
  ed.set_seed(42)

  # MODEL
  z = MultivariateNormalTriL(
      loc=tf.ones(2),
      scale_tril=tf.cholesky(tf.constant([[1.0, 0.8], [0.8, 1.0]])))

  # INFERENCE
  approx_z = Empirical(params=tf.get_variable("qz/params", [2000, 2]))
  inference = ed.SGLD({z: approx_z})
  inference.run(step_size=5.0)

  # CRITICISM
  session = ed.get_session()
  post_mean, post_stddev = session.run([approx_z.mean(), approx_z.stddev()])
  print("Inferred posterior mean:")
  print(post_mean)
  print("Inferred posterior stddev:")
  print(post_stddev)
def main(_):
  """SGLD sampling from a 2-D Gaussian with correlation 0.8."""
  ed.set_seed(42)

  # MODEL
  covariance = tf.constant([[1.0, 0.8], [0.8, 1.0]])
  z = MultivariateNormalTriL(loc=tf.ones(2),
                             scale_tril=tf.cholesky(covariance))

  # INFERENCE
  qz = Empirical(params=tf.get_variable("qz/params", [2000, 2]))
  inference = ed.SGLD({z: qz})
  inference.run(step_size=5.0)

  # CRITICISM
  session = ed.get_session()
  posterior_mean, posterior_stddev = session.run([qz.mean(), qz.stddev()])
  print("Inferred posterior mean:")
  print(posterior_mean)
  print("Inferred posterior stddev:")
  print(posterior_stddev)
def test_normalnormal_float64(self):
  """SGHMC on a float64 normal-normal model recovers the posterior."""
  with self.test_session():
    x_data = np.array([0.0] * 50, dtype=np.float64)

    mu = Normal(loc=tf.constant(0.0, dtype=tf.float64),
                scale=tf.constant(1.0, dtype=tf.float64))
    x = Normal(loc=mu, scale=tf.constant(1.0, dtype=tf.float64),
               sample_shape=50)

    qmu = Empirical(params=tf.Variable(tf.ones(5000, dtype=tf.float64)))

    # Exact posterior: N(loc=0.0, scale=sqrt(1/51) ~= 0.140).
    inference = ed.SGHMC({mu: qmu}, data={x: x_data})
    inference.run(step_size=0.025)

    self.assertAllClose(qmu.mean().eval(), 0, rtol=1e-2, atol=1.5e-2)
    self.assertAllClose(qmu.stddev().eval(), np.sqrt(1 / 51),
                        rtol=5e-2, atol=5e-2)
def test_normalnormal_float32(self):
  """MH on a normal-normal model in float32, using the prior as proposal.

  FIX: the body previously built every tensor with dtype=tf.float64,
  contradicting both the test name and the float32 data array; the model,
  the Empirical variable, and the data now all use float32 (the float64
  behavior is covered by the separate float64 variant of this test).
  Also drops the unused `as sess` binding.
  """
  with self.test_session():
    x_data = np.array([0.0] * 50, dtype=np.float32)

    mu = Normal(loc=tf.constant(0.0, dtype=tf.float32),
                scale=tf.constant(1.0, dtype=tf.float32))
    x = Normal(loc=mu, scale=tf.constant(1.0, dtype=tf.float32),
               sample_shape=50)

    n_samples = 2000
    qmu = Empirical(params=tf.Variable(tf.ones(n_samples, dtype=tf.float32)))

    # analytic solution: N(loc=0.0, scale=sqrt(1/51)=0.140)
    inference = ed.MetropolisHastings({mu: qmu}, {mu: mu}, data={x: x_data})
    inference.run()

    self.assertAllClose(qmu.mean().eval(), 0, rtol=1e-1, atol=1e-1)
    self.assertAllClose(qmu.stddev().eval(), np.sqrt(1 / 51),
                        rtol=1e-1, atol=1e-1)
def main(_):
  """HMC on a normal-normal model with known variance; plot diagnostics."""
  ed.set_seed(42)

  # DATA
  observations = np.array([0.0] * 50)

  # MODEL: Normal-Normal with known variance
  mu = Normal(loc=0.0, scale=1.0)
  x = Normal(loc=mu, scale=1.0, sample_shape=50)

  # INFERENCE
  qmu = Empirical(params=tf.get_variable(
      "qmu/params", [1000], initializer=tf.zeros_initializer()))

  # Exact posterior: N(loc=0.0, scale=sqrt(1/51) ~= 0.140).
  inference = ed.HMC({mu: qmu}, data={x: observations})
  inference.run()

  # CRITICISM
  session = ed.get_session()
  post_mean, post_stddev = session.run([qmu.mean(), qmu.stddev()])
  print("Inferred posterior mean:")
  print(post_mean)
  print("Inferred posterior stddev:")
  print(post_stddev)

  # Check convergence with visual diagnostics.
  draws = session.run(qmu.params)

  # Histogram of the posterior draws.
  plt.hist(draws, bins='auto')
  plt.show()

  # Trace plot of the chain.
  plt.plot(draws)
  plt.show()
# FIX: `import edward as ed` was missing, so every `ed.` call below
# (ed.set_seed, ed.MetropolisHastings, ed.get_session) raised NameError.
import edward as ed
import numpy as np
import tensorflow as tf

from edward.models import Bernoulli, Beta, Empirical

ed.set_seed(42)

# DATA
x_data = np.array([0, 1, 0, 0, 0, 0, 0, 0, 0, 1])

# MODEL
p = Beta(1.0, 1.0)
x = Bernoulli(tf.ones(10) * p)

# INFERENCE
qp = Empirical(params=tf.Variable(tf.zeros([1000]) + 0.5))
proposal_p = Beta(3.0, 9.0)
inference = ed.MetropolisHastings({p: qp}, {p: proposal_p}, data={x: x_data})
inference.run()

# CRITICISM
# exact posterior has mean 0.25 and std 0.12
sess = ed.get_session()
mean, stddev = sess.run([qp.mean(), qp.stddev()])
print("Inferred posterior mean:")
print(mean)
print("Inferred posterior stddev:")
print(stddev)
# FIX: `import edward as ed` was missing, so every `ed.` call below
# (ed.set_seed, ed.MetropolisHastings, ed.get_session) raised NameError.
import edward as ed
import numpy as np
import tensorflow as tf

from edward.models import Bernoulli, Beta, Empirical

ed.set_seed(42)

# DATA
x_data = np.array([0, 1, 0, 0, 0, 0, 0, 0, 0, 1])

# MODEL
p = Beta(1.0, 1.0)
x = Bernoulli(probs=p, sample_shape=10)

# INFERENCE
qp = Empirical(params=tf.Variable(tf.zeros([1000]) + 0.5))
proposal_p = Beta(3.0, 9.0)
inference = ed.MetropolisHastings({p: qp}, {p: proposal_p}, data={x: x_data})
inference.run()

# CRITICISM
# exact posterior has mean 0.25 and std 0.12
sess = ed.get_session()
mean, stddev = sess.run([qp.mean(), qp.stddev()])
print("Inferred posterior mean:")
print(mean)
print("Inferred posterior stddev:")
print(stddev)
x = Normal(loc=tf.ones(50) * mu, scale=1.0) # INFERENCE qmu = Empirical(params=tf.Variable(tf.zeros([1000]))) proposal_mu = Normal(loc=0.0, scale=tf.sqrt(1.0 / 51.0)) # analytic solution: N(loc=0.0, scale=\sqrt{1/51}=0.140) inference = ed.MetropolisHastings({mu: qmu}, {mu: proposal_mu}, data={x: x_data}) inference.run() # CRITICISM # Check convergence with visual diagnostics. sess = ed.get_session() mean, stddev = sess.run([qmu.mean(), qmu.stddev()]) print("Inferred posterior mean:") print(mean) print("Inferred posterior stddev:") print(stddev) # Check convergence with visual diagnostics. samples = sess.run(qmu.params) # Plot histogram. plt.hist(samples, bins='auto') plt.show() # Trace plot. plt.plot(samples) plt.show()
"""
# NOTE(review): the `"""` above closes a module docstring that begins
# before this chunk.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import edward as ed
import tensorflow as tf

from edward.models import Empirical, MultivariateNormalTriL

ed.set_seed(42)

# MODEL
z = MultivariateNormalTriL(
    loc=tf.ones(2),
    scale_tril=tf.cholesky(tf.constant([[1.0, 0.8], [0.8, 1.0]])))

# INFERENCE
# Randomly initialized chain of 2000 two-dimensional samples.
qz = Empirical(params=tf.Variable(tf.random_normal([2000, 2])))
inference = ed.SGLD({z: qz})
inference.run(step_size=5.0)

# CRITICISM
sess = ed.get_session()
mean, stddev = sess.run([qz.mean(), qz.stddev()])
print("Inferred posterior mean:")
print(mean)
print("Inferred posterior stddev:")
print(stddev)