def _test_normal_normal(self, default, dtype):
        with self.test_session() as sess:
            x_data = np.array([0.0] * 50, dtype=np.float32)

            mu = Normal(loc=tf.constant(0.0, dtype=dtype),
                        scale=tf.constant(1.0, dtype=dtype))
            x = Normal(loc=mu,
                       scale=tf.constant(1.0, dtype=dtype),
                       sample_shape=50)

            n_samples = 2000
            # analytic solution: N(loc=0.0, scale=\sqrt{1/51}=0.140)
            if not default:
                qmu = Empirical(
                    params=tf.Variable(tf.ones(n_samples, dtype=dtype)))
                inference = ed.MetropolisHastings({mu: qmu}, {mu: mu},
                                                  data={x: x_data})
            else:
                inference = ed.MetropolisHastings([mu], {mu: mu},
                                                  data={x: x_data})
                qmu = inference.latent_vars[mu]
            inference.run()

            self.assertAllClose(qmu.mean().eval(), 0, rtol=1e-1, atol=1e-1)
            self.assertAllClose(qmu.stddev().eval(),
                                np.sqrt(1 / 51),
                                rtol=1e-1,
                                atol=1e-1)

            old_t, old_n_accept = sess.run([inference.t, inference.n_accept])
            if not default:
                self.assertEqual(old_t, n_samples)
            else:
                self.assertEqual(old_t, 1e4)
            self.assertGreater(old_n_accept, 0.1)
            sess.run(inference.reset)
            new_t, new_n_accept = sess.run([inference.t, inference.n_accept])
            self.assertEqual(new_t, 0)
            self.assertEqual(new_n_accept, 0)
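The analytic solution quoted in the comment follows from normal-normal conjugacy: with prior $\mu \sim \mathcal{N}(0, 1)$, likelihood $x_i \sim \mathcal{N}(\mu, 1)$, and $N = 50$ observations all equal to zero, the posterior precision is $1 + N = 51$, so

$$p(\mu \mid x) = \mathcal{N}\left(\frac{\sum_i x_i}{N + 1},\ \frac{1}{N + 1}\right) = \mathcal{N}\left(0,\ \tfrac{1}{51}\right), \qquad \text{scale} = \sqrt{1/51} \approx 0.140.$$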
Example #2
  def test_normalnormal_run(self):
    with self.test_session() as sess:
      x_data = np.array([0.0] * 50, dtype=np.float32)

      mu = Normal(mu=0.0, sigma=1.0)
      x = Normal(mu=tf.ones(50) * mu, sigma=1.0)

      qmu = Empirical(params=tf.Variable(tf.ones(2000)))
      proposal_mu = Normal(mu=0.0, sigma=1.0)

      # analytic solution: N(mu=0.0, sigma=\sqrt{1/51}=0.140)
      inference = ed.MetropolisHastings({mu: qmu},
                                        {mu: proposal_mu},
                                        data={x: x_data})
      inference.run()

      self.assertAllClose(qmu.mean().eval(), 0, rtol=1e-2, atol=1e-2)
      self.assertAllClose(qmu.std().eval(), np.sqrt(1 / 51),
                          rtol=1e-2, atol=1e-2)

  def test_normalnormal_float64(self):
    with self.test_session() as sess:
      x_data = np.array([0.0] * 50, dtype=np.float32)

      mu = Normal(loc=tf.constant(0.0, dtype=tf.float64),
                  scale=tf.constant(1.0, dtype=tf.float64))
      x = Normal(loc=mu,
                 scale=tf.constant(1.0, dtype=tf.float64),
                 sample_shape=50)

      n_samples = 2000
      qmu = Empirical(params=tf.Variable(tf.ones(n_samples, dtype=tf.float64)))

      # analytic solution: N(loc=0.0, scale=\sqrt{1/51}=0.140)
      inference = ed.MetropolisHastings({mu: qmu},
                                        {mu: mu},
                                        data={x: x_data})
      inference.run()

      self.assertAllClose(qmu.mean().eval(), 0, rtol=1e-1, atol=1e-1)
      self.assertAllClose(qmu.stddev().eval(), np.sqrt(1 / 51),
                          rtol=1e-1, atol=1e-1)
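Passing {mu: mu} as proposal_vars makes Edward propose from the model prior itself, i.e. an independence sampler. As a minimal sketch of the accept/reject step these tests exercise, here is the same normal-normal model in plain NumPy (log_joint and metropolis_hastings are illustrative names, not Edward API):

import numpy as np

def log_joint(mu, x_data):
    # log p(mu) + sum_i log p(x_i | mu), dropping additive constants
    return -0.5 * mu ** 2 - 0.5 * np.sum((x_data - mu) ** 2)

def metropolis_hastings(x_data, n_samples=2000, seed=42):
    rng = np.random.RandomState(seed)
    mu, samples, n_accept = 0.0, [], 0
    for _ in range(n_samples):
        proposal = rng.randn()  # independence proposal: draw from the prior N(0, 1)
        # The prior terms cancel against the proposal density, so the
        # acceptance ratio reduces to the likelihood ratio.
        log_ratio = (log_joint(proposal, x_data) - log_joint(mu, x_data)
                     + 0.5 * proposal ** 2 - 0.5 * mu ** 2)
        if np.log(rng.rand()) < log_ratio:
            mu, n_accept = proposal, n_accept + 1
        samples.append(mu)
    return np.array(samples), n_accept

samples, n_accept = metropolis_hastings(np.zeros(50))
print(samples.mean(), samples.std())  # roughly 0.0 and 0.140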
Example #4
def main(_):
    # Data generation (known mean)
    xn_data = np.random.normal(FLAGS.loc, FLAGS.scale, FLAGS.N)
    print("scale: {}".format(FLAGS.scale))

    # Prior definition
    alpha = 0.5
    beta = 0.7

    # Posterior inference
    # Probabilistic model
    ig = InverseGamma(alpha, beta)
    xn = Normal(FLAGS.loc, tf.sqrt(ig), sample_shape=FLAGS.N)

    # Inference
    qig = Empirical(params=tf.get_variable(
        "qig/params", [1000], initializer=tf.constant_initializer(0.5)))
    proposal_ig = InverseGamma(2.0, 2.0)
    inference = ed.MetropolisHastings({ig: qig}, {ig: proposal_ig},
                                      data={xn: xn_data})
    inference.run()

    sess = ed.get_session()
    print("Inferred scale: {}".format(sess.run(tf.sqrt(qig.mean()))))
Example #5
import numpy as np
import tensorflow as tf

import edward as ed
from edward.models import Empirical, Normal

ed.set_seed(42)

# DATA
x_data = np.array([0.0] * 50, dtype=np.float32)

# MODEL: Normal-Normal with known variance
mu = Normal(mu=0.0, sigma=1.0)
x = Normal(mu=tf.ones(50) * mu, sigma=1.0)

# INFERENCE
qmu = Empirical(params=tf.Variable(tf.zeros([1000])))

proposal_mu = Normal(mu=0.0, sigma=tf.sqrt(1.0 / 51.0))

# analytic solution: N(mu=0.0, sigma=\sqrt{1/51}=0.140)
inference = ed.MetropolisHastings({mu: qmu}, {mu: proposal_mu},
                                  data={x: x_data})
inference.run()

# CRITICISM
sess = ed.get_session()
mean, std = sess.run([qmu.mean(), qmu.std()])
print("Inferred posterior mean:")
print(mean)
print("Inferred posterior std:")
print(std)

# Check convergence with visual diagnostics.
samples = sess.run(qmu.params)

# Plot histogram.
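The snippet is truncated at the plotting step; a minimal sketch of the missing histogram, assuming matplotlib is available:

import matplotlib.pyplot as plt

plt.hist(samples, bins=50, density=True)
plt.title("Empirical approximation of p(mu | x)")
plt.show()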
Example #6
import numpy as np
import tensorflow as tf

import edward as ed
from edward.models import Bernoulli, Beta, Empirical

ed.set_seed(42)

# DATA
x_data = np.array([0, 1, 0, 0, 0, 0, 0, 0, 0, 1])

# MODEL
p = Beta(1.0, 1.0)
x = Bernoulli(probs=p, sample_shape=10)

# INFERENCE
qp = Empirical(params=tf.Variable(tf.zeros([1000]) + 0.5))

proposal_p = Beta(3.0, 9.0)

inference = ed.MetropolisHastings({p: qp}, {p: proposal_p}, data={x: x_data})
inference.run()

# CRITICISM
# exact posterior has mean 0.25 and std 0.12
sess = ed.get_session()
mean, stddev = sess.run([qp.mean(), qp.stddev()])
print("Inferred posterior mean:")
print(mean)
print("Inferred posterior stddev:")
print(stddev)
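The quoted exact posterior follows from Beta-Bernoulli conjugacy: with prior Beta(1, 1) and 2 successes in 10 trials, the posterior is Beta(1 + 2, 1 + 8) = Beta(3, 9), giving

$$\mathbb{E}[p \mid x] = \frac{3}{12} = 0.25, \qquad \mathrm{sd}[p \mid x] = \sqrt{\frac{3 \cdot 9}{12^2 \cdot 13}} \approx 0.12.$$

Note the proposal Beta(3.0, 9.0) is exactly this posterior, so the independence sampler accepts every proposal.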
Example #7
import numpy as np
import tensorflow as tf

import edward as ed
from edward.models import InverseGamma, Normal, Empirical

N = 1000

# Data generation (known mean)
mu = 7.0
sigma = 0.7
xn_data = np.random.normal(mu, sigma, N)
print('sigma={}'.format(sigma))

# Prior definition
alpha = tf.Variable(0.5, dtype=tf.float32, trainable=False)
beta = tf.Variable(0.7, dtype=tf.float32, trainable=False)

# Posterior inference
# Probabilistic model
ig = InverseGamma(alpha=alpha, beta=beta)
xn = Normal(mu=mu, sigma=tf.ones([N]) * tf.sqrt(ig))

# Inference
qig = Empirical(params=tf.Variable(tf.zeros(1000) + 0.5))
proposal_ig = InverseGamma(alpha=2.0, beta=2.0)
inference = ed.MetropolisHastings({ig: qig}, {ig: proposal_ig},
                                  data={xn: xn_data})
inference.run()

sess = ed.get_session()
print('Inferred sigma={}'.format(sess.run(tf.sqrt(qig.mean()))))
Example #8
qc = Empirical(params=tf.Variable(tf.zeros([T, N], dtype=tf.int32)))

gpi = Dirichlet(alpha=tf.constant([1.4, 1.6]))
gmu = Normal(mu=tf.constant([[1.0, 1.0], [-1.0, -1.0]]),
             sigma=tf.constant([[0.5, 0.5], [0.5, 0.5]]))
gsigma = InverseGamma(alpha=tf.constant([[1.1, 1.1], [1.1, 1.1]]),
                      beta=tf.constant([[1.0, 1.0], [1.0, 1.0]]))
gc = Categorical(logits=tf.zeros([N, K]))

inference = ed.MetropolisHastings(
    latent_vars={pi: qpi, mu: qmu, sigma: qsigma, c: qc},
    proposal_vars={pi: gpi, mu: gmu, sigma: gsigma, c: gc},
    data={x: x_data})

inference.initialize()

sess = ed.get_session()
tf.global_variables_initializer().run()

for _ in range(T):
    info_dict = inference.update()
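For reference, this manual initialize/update loop is roughly what inference.run() automates (a sketch following Edward's Inference API; finalize() performs cleanup after the last update):

inference.initialize()
tf.global_variables_initializer().run()
for _ in range(inference.n_iter):
    info_dict = inference.update()
    inference.print_progress(info_dict)
inference.finalize()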
Example #9
    def test_monte_carlo(self):
        tf.InteractiveSession()
        ed.set_seed(42)

        # DATA
        X_train = np.zeros([500, 100])
        y_train = np.zeros(500)

        N = X_train.shape[0]  # data points
        D = X_train.shape[1]  # features
        T = 1  # number of MCMC samples

        # MODEL
        W_1 = Normal(mu=tf.zeros([D, 20]), sigma=tf.ones([D, 20]) * 100)
        W_2 = Normal(mu=tf.zeros([20, 15]), sigma=tf.ones([20, 15]) * 100)
        W_3 = Normal(mu=tf.zeros([15, 1]), sigma=tf.ones([15, 1]) * 100)
        b_1 = Normal(mu=tf.zeros(20), sigma=tf.ones(20) * 100)
        b_2 = Normal(mu=tf.zeros(15), sigma=tf.ones(15) * 100)

        x_ph = tf.placeholder(tf.float32, [N, D])
        y = Bernoulli(logits=four_layer_nn(x_ph, W_1, W_2, W_3, b_1, b_2))

        # INFERENCE
        qW_1 = Empirical(params=tf.Variable(tf.random_normal([T, D, 20])))
        qW_2 = Empirical(params=tf.Variable(tf.random_normal([T, 20, 15])))
        qW_3 = Empirical(params=tf.Variable(tf.random_normal([T, 15, 1])))
        qb_1 = Empirical(params=tf.Variable(tf.random_normal([T, 20])))
        qb_2 = Empirical(params=tf.Variable(tf.random_normal([T, 15])))

        # NOTE: ideally these would be three separate test methods, but
        # rebuilding the model in the same TensorFlow graph breaks on
        # re-runs, so all three inferences share one test.
        inference = ed.HMC(
            {W_1: qW_1, b_1: qb_1, W_2: qW_2, b_2: qb_2, W_3: qW_3},
            data={y: y_train, x_ph: X_train})
        inference.run()

        inference = ed.SGLD(
            {W_1: qW_1, b_1: qb_1, W_2: qW_2, b_2: qb_2, W_3: qW_3},
            data={y: y_train, x_ph: X_train})
        inference.run()

        inference = ed.MetropolisHastings(
            {W_1: qW_1, b_1: qb_1, W_2: qW_2, b_2: qb_2, W_3: qW_3},
            {W_1: W_1, b_1: b_1, W_2: W_2, b_2: b_2, W_3: W_3},
            data={y: y_train, x_ph: X_train})
        inference.run()
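The comment about re-running the graph refers to variable-name collisions when the same model is rebuilt in one default graph. A common TF1-era workaround (a sketch, not part of the test; run_in_fresh_graph is a hypothetical helper) is to give each run its own graph:

import tensorflow as tf

def run_in_fresh_graph(build_and_run_fn):
    # Build the model and run inference inside an isolated graph so
    # variables from a previous run cannot collide.
    with tf.Graph().as_default():
        build_and_run_fn()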
Example #10
def main(_):
    ed.set_seed(42)

    # DATA
    x_data = build_toy_dataset(FLAGS.N)

    # MODEL
    pi = Dirichlet(concentration=tf.ones(FLAGS.K))
    mu = Normal(0.0, 1.0, sample_shape=[FLAGS.K, FLAGS.D])
    sigma = InverseGamma(concentration=1.0,
                         rate=1.0,
                         sample_shape=[FLAGS.K, FLAGS.D])
    c = Categorical(logits=tf.log(pi) - tf.log(1.0 - pi), sample_shape=FLAGS.N)
    x = Normal(loc=tf.gather(mu, c), scale=tf.gather(sigma, c))

    # INFERENCE
    qpi = Empirical(params=tf.get_variable(
        "qpi/params", [FLAGS.T, FLAGS.K],
        initializer=tf.constant_initializer(1.0 / FLAGS.K)))
    qmu = Empirical(
        params=tf.get_variable("qmu/params", [FLAGS.T, FLAGS.K, FLAGS.D],
                               initializer=tf.zeros_initializer()))
    qsigma = Empirical(
        params=tf.get_variable("qsigma/params", [FLAGS.T, FLAGS.K, FLAGS.D],
                               initializer=tf.ones_initializer()))
    qc = Empirical(params=tf.get_variable("qc/params", [FLAGS.T, FLAGS.N],
                                          initializer=tf.zeros_initializer(),
                                          dtype=tf.int32))

    gpi = Dirichlet(concentration=tf.constant([1.4, 1.6]))
    gmu = Normal(loc=tf.constant([[1.0, 1.0], [-1.0, -1.0]]),
                 scale=tf.constant([[0.5, 0.5], [0.5, 0.5]]))
    gsigma = InverseGamma(concentration=tf.constant([[1.1, 1.1], [1.1, 1.1]]),
                          rate=tf.constant([[1.0, 1.0], [1.0, 1.0]]))
    gc = Categorical(logits=tf.zeros([FLAGS.N, FLAGS.K]))

    inference = ed.MetropolisHastings(
        latent_vars={pi: qpi, mu: qmu, sigma: qsigma, c: qc},
        proposal_vars={pi: gpi, mu: gmu, sigma: gsigma, c: gc},
        data={x: x_data})

    inference.initialize()

    sess = ed.get_session()
    tf.global_variables_initializer().run()

    for _ in range(inference.n_iter):
        info_dict = inference.update()
        inference.print_progress(info_dict)

        t = info_dict['t']
        if t == 1 or t % inference.n_print == 0:
            qpi_mean, qmu_mean = sess.run([qpi.mean(), qmu.mean()])
            print("")
            print("Inferred membership probabilities:")
            print(qpi_mean)
            print("Inferred cluster means:")
            print(qmu_mean)
Example #11
    "qmu/params", [T, K, d], initializer=tf.zeros_initializer()))
qsigma = ed.models.Empirical(params=tf.get_variable(
    "qsigma/params", [T, K, d], initializer=tf.ones_initializer()))
qz = ed.models.Empirical(params=tf.get_variable(
    "qz/params", [T, N], initializer=tf.zeros_initializer(), dtype=tf.int32))

gp = ed.models.Dirichlet(concentration=tf.ones(K))
gmu = ed.models.Normal(loc=tf.ones([K, d]), scale=tf.ones([K, d]))
gsigma = ed.models.InverseGamma(concentration=tf.ones([K, d]),
                                rate=tf.ones([K, d]))
gz = ed.models.Categorical(logits=tf.zeros([N, K]))

inference = ed.MetropolisHastings(
    latent_vars={p: qp, mu: qmu, sigma: qsigma, z: qz},
    proposal_vars={p: gp, mu: gmu, sigma: gsigma, z: gz},
    data={x: x_train})

inference.run()

# Print the posterior samples of the cluster means.
print(qmu.params.eval())
Example #12
    def _test_linear_regression(self, default, dtype):
        def build_toy_dataset(N, w, noise_std=0.1):
            D = len(w)
            x = np.random.randn(N, D)
            y = np.dot(x, w) + np.random.normal(0, noise_std, size=N)
            return x, y

        with self.test_session() as sess:
            N = 40  # number of data points
            D = 10  # number of features

            w_true = np.random.randn(D)
            X_train, y_train = build_toy_dataset(N, w_true)
            X_test, y_test = build_toy_dataset(N, w_true)

            X = tf.placeholder(dtype, [N, D])
            w = Normal(loc=tf.zeros(D, dtype=dtype),
                       scale=tf.ones(D, dtype=dtype))
            b = Normal(loc=tf.zeros(1, dtype=dtype),
                       scale=tf.ones(1, dtype=dtype))
            y = Normal(loc=ed.dot(X, w) + b,
                       scale=0.1 * tf.ones(N, dtype=dtype))

            proposal_w = Normal(loc=w, scale=0.5 * tf.ones(D, dtype=dtype))
            proposal_b = Normal(loc=b, scale=0.5 * tf.ones(1, dtype=dtype))

            n_samples = 2000
            if not default:
                qw = Empirical(
                    tf.Variable(tf.zeros([n_samples, D], dtype=dtype)))
                qb = Empirical(
                    tf.Variable(tf.zeros([n_samples, 1], dtype=dtype)))
                inference = ed.MetropolisHastings(
                    {w: qw, b: qb}, {w: proposal_w, b: proposal_b},
                    data={X: X_train, y: y_train})
            else:
                inference = ed.MetropolisHastings(
                    [w, b], {w: proposal_w, b: proposal_b},
                    data={X: X_train, y: y_train})
                qw = inference.latent_vars[w]
                qb = inference.latent_vars[b]
            inference.run()

            self.assertAllClose(qw.mean().eval(), w_true, rtol=5e-1, atol=5e-1)
            self.assertAllClose(qb.mean().eval(), [0.0], rtol=5e-1, atol=5e-1)

            old_t, old_n_accept = sess.run([inference.t, inference.n_accept])
            if not default:
                self.assertEqual(old_t, n_samples)
            else:
                self.assertEqual(old_t, 1e4)
            self.assertGreater(old_n_accept, 0.1)
            sess.run(inference.reset)
            new_t, new_n_accept = sess.run([inference.t, inference.n_accept])
            self.assertEqual(new_t, 0)
            self.assertEqual(new_n_accept, 0)
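The proposals Normal(loc=w, scale=0.5 * ...) and Normal(loc=b, scale=0.5 * ...) are symmetric random walks, so the Metropolis-Hastings correction reduces to the plain Metropolis ratio:

$$\alpha(\theta \to \theta') = \min\left(1,\ \frac{p(\theta')\, p(y \mid \theta')}{p(\theta)\, p(y \mid \theta)}\right),$$

since $q(\theta' \mid \theta) = q(\theta \mid \theta')$ cancels from the acceptance probability.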