  def test_hmc_betabernoulli(self):
    """Do we correctly handle dependencies of transformed variables?"""

    with self.test_session() as sess:
      # model
      z = Beta(1., 1., name="z")
      xs = Bernoulli(probs=z, sample_shape=10)
      x_obs = np.asarray([0, 0, 1, 1, 0, 0, 0, 0, 0, 1], dtype=np.int32)

      # inference
      qz_samples = tf.Variable(tf.random_uniform(shape=(1000,)))
      qz = ed.models.Empirical(params=qz_samples, name="z_posterior")
      inference_hmc = ed.inferences.HMC({z: qz}, data={xs: x_obs})
      inference_hmc.run(step_size=1.0, n_steps=5, auto_transform=True)

      # check that inferred posterior mean/variance is close to
      # that of the exact Beta posterior
      z_unconstrained = inference_hmc.transformations[z]
      qz_constrained = z_unconstrained.bijector.inverse(qz_samples)
      qz_mean, qz_var = sess.run(tf.nn.moments(qz_constrained, 0))

      true_posterior = Beta(1. + np.sum(x_obs), 1. + np.sum(1 - x_obs))
      pz_mean, pz_var = sess.run((true_posterior.mean(),
                                  true_posterior.variance()))
      self.assertAllClose(qz_mean, pz_mean, rtol=5e-2, atol=5e-2)
      self.assertAllClose(qz_var, pz_var, rtol=1e-2, atol=1e-2)
  def test_klqp_betabernoulli(self):
    with self.test_session() as sess:
      # model
      z = Beta(1., 1., name="z")
      xs = Bernoulli(probs=z, sample_shape=10)
      x_obs = np.asarray([0, 0, 1, 1, 0, 0, 0, 0, 0, 1], dtype=np.int32)

      # inference
      qz_mean = tf.get_variable("qz_mean",
                                initializer=tf.random_normal(()))
      qz_std = tf.nn.softplus(tf.get_variable(name="qz_prestd",
                                              initializer=tf.random_normal(())))
      qz_unconstrained = ed.models.Normal(
          loc=qz_mean, scale=qz_std, name="z_posterior")

      inference_klqp = ed.inferences.KLqp(
          {z: qz_unconstrained}, data={xs: x_obs})
      inference_klqp.run(n_iter=500, auto_transform=True)

      z_unconstrained = inference_klqp.transformations[z]
      qz_constrained = z_unconstrained.bijector.inverse(
          qz_unconstrained.sample(1000))
      qz_mean, qz_var = sess.run(tf.nn.moments(qz_constrained, 0))

      true_posterior = Beta(np.sum(x_obs) + 1., np.sum(1 - x_obs) + 1.)
      pz_mean, pz_var = sess.run((true_posterior.mean(),
                                  true_posterior.variance()))
      self.assertAllClose(qz_mean, pz_mean, rtol=5e-2, atol=5e-2)
      self.assertAllClose(qz_var, pz_var, rtol=1e-2, atol=1e-2)
Example 5
def main(_):
  ed.set_seed(42)

  # DATA
  x_data = np.array([0, 1, 0, 0, 0, 0, 0, 0, 0, 1])

  # MODEL
  p = Beta(1.0, 1.0)
  x = Bernoulli(probs=p, sample_shape=10)

  # INFERENCE
  qp = Empirical(params=tf.get_variable(
      "qp/params", [1000], initializer=tf.constant_initializer(0.5)))

  proposal_p = Beta(3.0, 9.0)

  inference = ed.MetropolisHastings({p: qp}, {p: proposal_p}, data={x: x_data})
  inference.run()

  # CRITICISM
  # exact posterior has mean 0.25 and std 0.12
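  # (with a Beta(1, 1) prior and 2 heads out of 10 tosses, conjugacy gives the
  #  posterior Beta(3, 9): mean = 3 / 12 = 0.25, std = sqrt(27 / (144 * 13)) ~= 0.12)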
  sess = ed.get_session()
  mean, stddev = sess.run([qp.mean(), qp.stddev()])
  print("Inferred posterior mean:")
  print(mean)
  print("Inferred posterior stddev:")
  print(stddev)

  x_post = ed.copy(x, {p: qp})
  tx_rep, tx = ed.ppc(
      lambda xs, zs: tf.reduce_mean(tf.cast(xs[x_post], tf.float32)),
      data={x_post: x_data})
  ed.ppc_stat_hist_plot(
      tx[0], tx_rep, stat_name=r'$T \equiv$mean', bins=10)
  plt.show()
Example 6
def _test(a, b, n):
    rv = Beta(a=a, b=b)
    rv_sample = rv.sample(n)
    x = rv_sample.eval()
    x_tf = tf.constant(x, dtype=tf.float32)
    a = a.eval()
    b = b.eval()
    assert np.allclose(rv.log_prob(x_tf).eval(), stats.beta.logpdf(x, a, b))
Example 8
def _test(shape, n):
    rv = Beta(shape, alpha=tf.zeros(shape) + 0.5, beta=tf.zeros(shape) + 0.5)
    rv_sample = rv.sample(n)
    x = rv_sample.eval()
    x_tf = tf.constant(x, dtype=tf.float32)
    alpha = rv.alpha.eval()
    beta = rv.beta.eval()
    for idx in range(shape[0]):
        assert np.allclose(rv.log_prob_idx((idx,), x_tf).eval(),
                           stats.beta.logpdf(x[:, idx], alpha[idx], beta[idx]))
Example 10
  def test_01(self):
    with self.test_session():
      x = Beta(1.0, 1.0)
      y = ed.transform(x)
      self.assertIsInstance(y, TransformedDistribution)
      sample = y.sample(10, seed=1).eval()
      self.assertSamplePosNeg(sample)
Example 11
    def run(self, adj_mat, n_iter=1000):
        assert adj_mat.shape[0] == adj_mat.shape[1]
        n_node = adj_mat.shape[0]

        # model
        gamma = Dirichlet(concentration=tf.ones([self.n_cluster]))
        Pi = Beta(concentration0=tf.ones([self.n_cluster, self.n_cluster]),
                  concentration1=tf.ones([self.n_cluster, self.n_cluster]))
        Z = Multinomial(total_count=1., probs=gamma, sample_shape=n_node)
        X = Bernoulli(probs=tf.matmul(Z, tf.matmul(Pi, tf.transpose(Z))))

        # inference (point estimation)
        qgamma = PointMass(params=tf.nn.softmax(
            tf.Variable(tf.random_normal([self.n_cluster]))))
        qPi = PointMass(params=tf.nn.sigmoid(
            tf.Variable(tf.random_normal([self.n_cluster, self.n_cluster]))))
        qZ = PointMass(params=tf.nn.softmax(
            tf.Variable(tf.random_normal([n_node, self.n_cluster]))))

        # map estimation
        inference = ed.MAP({gamma: qgamma, Pi: qPi, Z: qZ}, data={X: adj_mat})
        inference.initialize(n_iter=n_iter)

        tf.global_variables_initializer().run()

        for _ in range(inference.n_iter):
            info_dict = inference.update()
            inference.print_progress(info_dict)
        inference.finalize()
        return qZ.mean().eval().argmax(axis=1)
Example 12
    def __init__(self, M, C, theta_prior, delta_prior, a_prior):

        self.M = M
        self.C = C
        self.theta_prior = theta_prior  # prior of ability
        self.delta_prior = delta_prior  # prior of difficulty
        self.a_prior = a_prior  # prior of discrimination

        if isinstance(a_prior, ed.RandomVariable):
            # variational posterior of discrimination
            self.qa = Normal(loc=tf.Variable(tf.ones([M])),
                             scale=tf.nn.softplus(
                                 tf.Variable(tf.ones([M]) * .5)),
                             name='qa')
        else:
            self.qa = a_prior

        with tf.variable_scope('local'):
            # variational posterior of ability
            if isinstance(self.theta_prior, RandomVariable):
                self.qtheta = TransformedDistribution(
                    distribution=Normal(
                        loc=tf.Variable(tf.random_normal([C])),
                        scale=tf.nn.softplus(tf.Variable(tf.random_normal([C])))),
                    bijector=ds.bijectors.Sigmoid(),
                    sample_shape=[M], name='qtheta')
            else:
                self.qtheta = self.theta_prior
            # variational posterior of difficulty
            self.qdelta = TransformedDistribution(
                distribution=Normal(
                    loc=tf.Variable(tf.random_normal([M])),
                    scale=tf.nn.softplus(tf.Variable(tf.random_normal([M])))),
                bijector=ds.bijectors.Sigmoid(),
                sample_shape=[C], name='qdelta')

        alpha = (tf.transpose(self.qtheta) / self.qdelta)**self.qa

        beta = ((1. - tf.transpose(self.qtheta)) / (1. - self.qdelta))**self.qa

        # observed variable
        self.x = Beta(tf.transpose(alpha), tf.transpose(beta))
Example 13
def main(_):
    ed.set_seed(42)

    # DATA
    x_data = np.array([0, 1, 0, 0, 0, 0, 0, 0, 0, 1])

    # MODEL
    p = Beta(1.0, 1.0)
    x = Bernoulli(probs=p, sample_shape=10)

    # COMPLETE CONDITIONAL
    p_cond = ed.complete_conditional(p)
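    # For a Beta(1, 1) prior with Bernoulli observations, conjugacy gives
    # p(probs | x) = Beta(1 + sum(x), 1 + sum(1 - x)); for this data, Beta(3, 9).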

    sess = ed.get_session()

    print('p(probs | x) type:', p_cond.parameters['name'])
    param_vals = sess.run(
        {
            key: val
            for key, val in six.iteritems(p_cond.parameters)
            if isinstance(val, tf.Tensor)
        }, {x: x_data})
    print('parameters:')
    for key, val in six.iteritems(param_vals):
        print('%s:\t%.3f' % (key, val))
Example 14
def dirichlet_process(alpha, base_cls, sample_n=50, *args, **kwargs):
  """Dirichlet process DP(``alpha``, ``base_cls(*args, **kwargs)``).

  Only works for scalar alpha and scalar base distribution.

  Parameters
  ----------
  alpha : tf.Tensor
    Concentration parameter. Its shape determines the batch shape of the DP.
  base_cls : RandomVariable
    Class of base distribution. Its shape (when instantiated)
    determines the event shape of the DP.
  sample_n : int, optional
    Number of samples for each DP in the batch shape.
  *args, **kwargs : optional
    Arguments passed into ``base_cls``.

  Returns
  -------
  total_sticks : tf.Tensor
    Scalar count of stick-breaking steps taken by the while loop.
  samples : tf.Tensor
    A ``tf.Tensor`` of shape ``[sample_n] + batch_shape + event_shape``,
    where ``sample_n`` is the number of samples for each DP,
    ``batch_shape`` is the number of independent DPs, and
    ``event_shape`` is the shape of the base distribution.
  """
  def cond(k, beta_k, draws, bools):
    # Proceed if at least one bool is True.
    return tf.reduce_any(bools)

  def body(k, beta_k, draws, bools):
    k = k + 1
    beta_k = beta_k * Beta(a=1.0, b=alpha)
    theta_k = base_cls(*args, **kwargs)

    # Assign ongoing samples to the new theta_k.
    indicator = tf.cast(bools, draws.dtype)
    new = indicator * theta_k
    draws = draws * (1.0 - indicator) + new

    flips = tf.cast(Bernoulli(p=beta_k), tf.bool)
    bools = tf.logical_and(flips, tf.equal(draws, theta_k))
    return k, beta_k, draws, bools

  k = 0
  beta_k = Beta(a=tf.ones(sample_n), b=alpha * tf.ones(sample_n))
  theta_k = base_cls(*args, **kwargs)

  # Initialize all samples as theta_k.
  draws = tf.ones(sample_n) * theta_k
  # Flip ``sample_n`` coins, one for each sample.
  flips = tf.cast(Bernoulli(p=beta_k), tf.bool)
  # Get boolean tensor for samples that return heads
  # and are currently equal to theta_k.
  bools = tf.logical_and(flips, tf.equal(draws, theta_k))

  total_sticks, _, samples, _ = tf.while_loop(
      cond, body, loop_vars=[k, beta_k, draws, bools])
  return total_sticks, samples
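A minimal usage sketch for the helper above, assuming the old Edward API used in this snippet; the Normal base distribution and its mu/sigma parameter names are illustrative assumptions, not part of the original code.

from edward.models import Normal

# Hypothetical call: 50 draws from DP(0.5, Normal(0, 1)). Every sess.run
# re-executes the stochastic while loop, so results differ across runs.
n_sticks, dp_draws = dirichlet_process(0.5, Normal, sample_n=50, mu=0.0, sigma=1.0)
with tf.Session() as sess:
  sticks, draws = sess.run([n_sticks, dp_draws])
  print(sticks)     # number of stick-breaking steps the loop took
  print(draws[:5])  # first few of the 50 DP samples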
Example 15
  def test_beta_bernoulli(self):
    with self.test_session() as sess:
      x_data = np.array([0, 1, 0, 0, 0, 0, 0, 0, 0, 1])

      p = Beta(1.0, 1.0)
      x = Bernoulli(probs=p, sample_shape=10)

      qp = Empirical(tf.Variable(tf.zeros(1000)))
      inference = ed.Gibbs({p: qp}, data={x: x_data})
      inference.run()

      true_posterior = Beta(3.0, 9.0)

      val_est, val_true = sess.run([qp.mean(), true_posterior.mean()])
      self.assertAllClose(val_est, val_true, rtol=1e-2, atol=1e-2)

      val_est, val_true = sess.run([qp.variance(), true_posterior.variance()])
      self.assertAllClose(val_est, val_true, rtol=1e-2, atol=1e-2)
Example 16
def main():
    data = ed.Data(np.array([0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1]))
    model = BetaBernoulli()
    variational = Variational()
    variational.add(Beta())

    # mean-field variational inference.
    inference = ed.MFVI(model, variational, data)

    inference.run(n_iter=10000)
Example 17
def dirichlet_process(alpha):
    def cond(k, beta_k):
        flip = Bernoulli(p=beta_k)
        return tf.equal(flip, tf.constant(1))

    def body(k, beta_k):
        beta_k = beta_k * Beta(a=1.0, b=alpha)
        return k + 1, beta_k

    k = tf.constant(0)
    beta_k = Beta(a=1.0, b=alpha)
    stick_num, stick_beta = tf.while_loop(cond, body, loop_vars=[k, beta_k])
    return stick_num
Example 18
  def body(k, beta_k, draws, bools):
    k = k + 1
    beta_k = beta_k * Beta(a=1.0, b=alpha)
    theta_k = base_cls(*args, **kwargs)

    # Assign ongoing samples to the new theta_k.
    indicator = tf.cast(bools, draws.dtype)
    new = indicator * theta_k
    draws = draws * (1.0 - indicator) + new

    flips = tf.cast(Bernoulli(p=beta_k), tf.bool)
    bools = tf.logical_and(flips, tf.equal(draws, theta_k))
    return k, beta_k, draws, bools
Example 19
def dirichlet_process(alpha):
    """Demo of stochastic while loop for stick breaking construction."""
    def cond(k, beta_k):
        # End while loop (return False) when flip is heads.
        flip = Bernoulli(p=beta_k)
        return tf.cast(1 - flip, tf.bool)

    def body(k, beta_k):
        beta_k = Beta(a=1.0, b=alpha)
        return k + 1, beta_k

    k = tf.constant(0)
    beta_k = Beta(a=1.0, b=alpha)
    stick_num, stick_beta = tf.while_loop(cond, body, loop_vars=[k, beta_k])
    return stick_num
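A short sketch of running the demo above (same old Edward API assumed): each evaluation re-executes the stochastic while loop, so the number of sticks varies from run to run.

sticks = dirichlet_process(alpha=5.0)
with tf.Session() as sess:
  # A larger alpha makes the Bernoulli flip land on heads less often, so the
  # loop tends to run longer and return a larger stick count.
  print([sess.run(sticks) for _ in range(5)])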
Example 20
def mmsb(N, K, data):
    # sparsity
    rho = 0.3
    # MODEL
    # probability of belonging to each of K blocks for each node
    gamma = Dirichlet(concentration=tf.ones([K]))
    # block connectivity
    Pi = Beta(concentration0=tf.ones([K, K]), concentration1=tf.ones([K, K]))
    # probability of belonging to each of K blocks for all nodes
    Z = Multinomial(total_count=1.0, probs=gamma, sample_shape=N)
    # adjacency
    X = Bernoulli(probs=(1 - rho) *
                  tf.matmul(Z, tf.matmul(Pi, tf.transpose(Z))))

    # INFERENCE (EM algorithm)
    qgamma = PointMass(
        params=tf.nn.softmax(tf.Variable(tf.random_normal([K]))))
    qPi = PointMass(
        params=tf.nn.sigmoid(tf.Variable(tf.random_normal([K, K]))))
    qZ = PointMass(params=tf.nn.softmax(tf.Variable(tf.random_normal([N, K]))))

    #qgamma = Normal(loc=tf.get_variable("qgamma/loc", [K]),
    #                scale=tf.nn.softplus(
    #                        tf.get_variable("qgamma/scale", [K])))
    #qPi = Normal(loc=tf.get_variable("qPi/loc", [K, K]),
    #                scale=tf.nn.softplus(
    #                        tf.get_variable("qPi/scale", [K, K])))
    #qZ = Normal(loc=tf.get_variable("qZ/loc", [N, K]),
    #                scale=tf.nn.softplus(
    #                        tf.get_variable("qZ/scale", [N, K])))

    #inference = ed.KLqp({gamma: qgamma, Pi: qPi, Z: qZ}, data={X: data})
    inference = ed.MAP({gamma: qgamma, Pi: qPi, Z: qZ}, data={X: data})

    #inference.run()
    n_iter = 6000
    inference.initialize(optimizer=tf.train.AdamOptimizer(learning_rate=0.01),
                         n_iter=n_iter)

    tf.global_variables_initializer().run()

    for _ in range(inference.n_iter):
        info_dict = inference.update()
        inference.print_progress(info_dict)

    inference.finalize()
    print('qgamma after: ', qgamma.mean().eval())
    return qZ.mean().eval(), qPi.eval()
Example 21
def main(_):
    ed.set_seed(42)

    # DATA
    X_data, Z_true = karate("~/data")
    N = X_data.shape[0]  # number of vertices
    K = 2  # number of clusters

    # MODEL
    gamma = Dirichlet(concentration=tf.ones([K]))
    Pi = Beta(concentration0=tf.ones([K, K]), concentration1=tf.ones([K, K]))
    Z = Multinomial(total_count=1.0, probs=gamma, sample_shape=N)
    X = Bernoulli(probs=tf.matmul(Z, tf.matmul(Pi, tf.transpose(Z))))

    # INFERENCE (EM algorithm)
    qgamma = PointMass(tf.nn.softmax(tf.get_variable("qgamma/params", [K])))
    qPi = PointMass(tf.nn.sigmoid(tf.get_variable("qPi/params", [K, K])))
    qZ = PointMass(tf.nn.softmax(tf.get_variable("qZ/params", [N, K])))

    inference = ed.MAP({gamma: qgamma, Pi: qPi, Z: qZ}, data={X: X_data})
    inference.initialize(n_iter=250)

    tf.global_variables_initializer().run()

    for _ in range(inference.n_iter):
        info_dict = inference.update()
        inference.print_progress(info_dict)

    # CRITICISM
    Z_pred = qZ.mean().eval().argmax(axis=1)
    print("Result (label flip can happen):")
    print("Predicted")
    print(Z_pred)
    print("True")
    print(Z_true)
    print("Adjusted Rand Index =", adjusted_rand_score(Z_pred, Z_true))
Example 22
class BetaBernoulli:
    """p(x, p) = Bernoulli(x | p) * Beta(p | 1, 1)"""
    def log_prob(self, xs, zs):
        log_prior = beta.logpdf(zs['p'], a=1.0, b=1.0)
        log_lik = tf.reduce_sum(bernoulli.logpmf(xs['x'], p=zs['p']))
        return log_lik + log_prior

    def sample_likelihood(self, zs):
        """x | p ~ p(x | p)"""
        return {'x': bernoulli.sample(p=tf.ones(10) * zs['p'])}


def T(xs, zs):
    return tf.reduce_mean(tf.cast(xs['x'], tf.float32))


ed.set_seed(42)
data = {'x': np.array([0, 1, 0, 0, 0, 0, 0, 0, 0, 1])}

model = BetaBernoulli()

qp_a = tf.nn.softplus(tf.Variable(tf.random_normal([])))
qp_b = tf.nn.softplus(tf.Variable(tf.random_normal([])))
qp = Beta(a=qp_a, b=qp_b)

inference = ed.MFVI({'p': qp}, data, model)
inference.run(n_iter=200)

print(ed.ppc(T, data, latent_vars={'p': qp}, model_wrapper=model))
Example 23
"""A simple coin flipping example. Inspired by Stan's toy example.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import edward as ed
import numpy as np
import tensorflow as tf

from edward.models import Bernoulli, Beta

ed.set_seed(42)

# DATA
x_data = np.array([0, 1, 0, 0, 0, 0, 0, 0, 0, 1])

# MODEL
p = Beta(1.0, 1.0)
x = Bernoulli(probs=p, sample_shape=10)

# INFERENCE
qp_a = tf.nn.softplus(tf.Variable(tf.random_normal([])))
qp_b = tf.nn.softplus(tf.Variable(tf.random_normal([])))
qp = Beta(qp_a, qp_b)

inference = ed.KLqp({p: qp}, data={x: x_data})
inference.run(n_iter=500)

print("Posterior mean of probability: {}".format(qp.mean().eval()))
Example 24
from __future__ import division
from __future__ import print_function

import edward as ed
import numpy as np
import tensorflow as tf

from edward.models import Bernoulli, Beta, Empirical

ed.set_seed(42)

# DATA
x_data = np.array([0, 1, 0, 0, 0, 0, 0, 0, 0, 1])

# MODEL
p = Beta(1.0, 1.0)
x = Bernoulli(probs=p, sample_shape=10)

# INFERENCE
qp = Empirical(params=tf.Variable(tf.zeros([1000]) + 0.5))

proposal_p = Beta(3.0, 9.0)

inference = ed.MetropolisHastings({p: qp}, {p: proposal_p}, data={x: x_data})
inference.run()

# CRITICISM
# exact posterior has mean 0.25 and std 0.12
sess = ed.get_session()
mean, stddev = sess.run([qp.mean(), qp.stddev()])
print("Inferred posterior mean:")
Example 25
  def body(k, beta_k):
    beta_k = beta_k * Beta(a=tf.constant([1.0]), b=alpha)
    return k + 1, beta_k
Example 26
def _test(shape, a, b, size):
    x = Beta(shape, a, b)
    val_est = tuple(get_dims(x.sample(size=size)))
    val_true = (size, ) + shape
    assert val_est == val_true
Example 27
def _test(shape, a, b, n):
    x = Beta(shape, a, b)
    val_est = tuple(get_dims(x.sample(n)))
    val_true = (n, ) + shape
    assert val_est == val_true
Example 28
    Prior: Beta
    Likelihood: Bernoulli
Variational model
    Likelihood: Mean-field Beta
"""
import edward as ed
from edward.models import Variational, Beta

model_code = """
    data {
      int<lower=0> N;
      int<lower=0,upper=1> y[N];
    }
    parameters {
      real<lower=0,upper=1> theta;
    }
    model {
      theta ~ beta(1.0, 1.0);
      for (n in 1:N)
        y[n] ~ bernoulli(theta);
    }
"""
ed.set_seed(42)
model = ed.StanModel(model_code=model_code)
variational = Variational()
variational.add(Beta())
data = ed.Data(dict(N=10, y=[0, 1, 0, 0, 0, 0, 0, 0, 0, 1]))

inference = ed.MFVI(model, variational, data)
inference.run(n_iter=10000)
Example 29
Variational model
  Likelihood: Mean-field Beta
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import edward as ed
import numpy as np
import tensorflow as tf

from edward.models import Bernoulli, Beta

ed.set_seed(42)

# DATA
x_data = np.array([0, 1, 0, 0, 0, 0, 0, 0, 0, 1])

# MODEL
p = Beta(a=1.0, b=1.0)
x = Bernoulli(p=tf.ones(10) * p)

# INFERENCE
qp_a = tf.nn.softplus(tf.Variable(tf.random_normal([])))
qp_b = tf.nn.softplus(tf.Variable(tf.random_normal([])))
qp = Beta(a=qp_a, b=qp_b)

data = {x: x_data}
inference = ed.MFVI({p: qp}, data)
inference.run(n_iter=500)
Example 30
import tensorflow as tf
import edward as ed
from edward.models import Bernoulli, Beta, Binomial, Empirical, Normal
import matplotlib.pyplot as plt
import seaborn as sns

##Single coin, multiple tosses weight inference

##Model:
theta = Beta(1.0, 1.0, sample_shape=(1, ))
x = Bernoulli(probs=tf.ones(10) * theta)
#x = Binomial(total_count=5, probs=theta) #Sampling not implemented in tf
print(theta.shape)

##Sampling:
# with tf.Session() as sess:
#     for i in range(10):
#         print(x.eval())

##Observations:
#data=tf.ones(10, dtype=tf.int32) #NOT WORKING!
data = [1, 1, 1, 1, 1, 1, 1, 1, 0, 1]

##Infer:

#Variational
#qtheta = Beta(tf.Variable(1.0), tf.Variable(1.0))  #Why need tf.Variable here?
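# (tf.Variable is needed because ed.KLqp fits the variational parameters by
#  gradient descent; plain constants would leave nothing for the optimizer to update.)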
# inference = ed.KLqp({theta: qtheta}, {x: data})
# inference.run(n_samples=5, n_iter=1000)

#MonteCarlo
Example 31
from __future__ import division
from __future__ import print_function

import edward as ed
import numpy as np
import tensorflow as tf

from edward.models import Bernoulli, Beta, Empirical

ed.set_seed(42)

# DATA
x_data = np.array([0, 1, 0, 0, 0, 0, 0, 0, 0, 1])

# MODEL
p = Beta(a=1.0, b=1.0)
x = Bernoulli(p=tf.ones(10) * p)

# INFERENCE
qp = Empirical(params=tf.Variable(tf.zeros([1000]) + 0.5))

proposal_p = Beta(a=3.0, b=9.0)

data = {x: x_data}
inference = ed.MetropolisHastings({p: qp}, {p: proposal_p}, data)
inference.run()

# CRITICISM
# exact posterior has mean 0.25 and std 0.12
sess = ed.get_session()
mean, std = sess.run([qp.mean(), qp.std()])
Example 32
#!/usr/bin/env python
"""A simple coin flipping example. Inspired by Stan's toy example.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import edward as ed
import numpy as np
import tensorflow as tf

from edward.models import Bernoulli, Beta, PointMass

ed.set_seed(42)

# DATA
x_data = np.array([0, 1, 0, 0, 0, 0, 0, 0, 0, 1])

# MODEL
p = Beta(a=1.0, b=1.0)
x = Bernoulli(p=tf.ones(10) * p)

# INFERENCE
qp_params = tf.nn.sigmoid(tf.Variable(tf.random_normal([])))
qp = PointMass(params=qp_params)

data = {x: x_data}
inference = ed.MAP({p: qp}, data)
inference.run(n_iter=50)
Example 33
xtest = pd.read_csv(fpath+'xtest_'+partial_name+'.csv') # read original data

if args.fixed_a:
    partial_save_name = partial_name +'_fixed_am'+str(args.a_prior_mean).replace('.','@')
else:
    partial_save_name = partial_name +'_am'+str(args.a_prior_mean).replace('.','@')+'_as'+str(args.a_prior_std).replace('.','@')



# setup Beta IRT model #

M = irt_data.shape[0] #number of items
C = irt_data.shape[1] #number of classifiers


theta = Beta(tf.ones([C]), tf.ones([C]), sample_shape=[M], name='theta')
delta = Beta(tf.ones([M]), tf.ones([M]), sample_shape=[C], name='delta')
if args.fixed_a:
    a = tf.ones(M) * args.a_prior_mean
else:
    a = Normal(tf.ones(M) * args.a_prior_mean,
               tf.ones([M]) * args.a_prior_std,
               sample_shape=[C], name='a')

model = Beta_IRT(M, C, theta, delta, a)

D = np.float32(irt_data.values)

model.init_inference(data=D,n_iter=niter)
model.fit()

# generate output files #
Example 34
    def cond(theta, sentence):
        return tf.cast(Bernoulli(probs=theta), tf.bool)

    def body(theta, sentence):
        return theta, a + sentence

    #sentence = tf.constant("B")
    sentence = tf.constant(0, dtype=tf.int32)
    #a = tf.constant("A")
    a = tf.constant(1, dtype=tf.int32)
    formula = tf.while_loop(cond, body, loop_vars=[theta, sentence])[1]
    return formula

N=8
theta = Beta(1.0, 1.0)
#formulas = tf.constant("", shape=(N,))+generateFormula(theta)[1]
#formulas = tf.constant(0, shape=(N,))+generateFormula(theta)[1]
#formulas = tf.stack([generateFormula(theta) for i in range(N)])
#formulas = tf.stack([generateFormula(theta), generateFormula(theta),generateFormula(theta), generateFormula(theta),
#generateFormula(theta), generateFormula(theta),generateFormula(theta), generateFormula(theta)])
formulaList=[]
for i in range(N):
    formulaList.append(generateFormula(theta))
formulas = tf.stack(formulaList)


##Sampling:
with tf.Session() as sess:
    for i in range(10):
        print(generateFormula(0.9).eval())
Example 35
  def body(k, beta_k):
    beta_k = beta_k * Beta(a=1.0, b=alpha)
    return k + 1, beta_k
Example 36
import edward as ed
from edward.models import Bernoulli, Beta, Uniform
import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np

# D = np.array([1, 0, 0, 1, 0, 0, 0, 1, 0, 0])
D = np.concatenate([np.zeros(70), np.ones(30)])

p = Uniform(0., 1.)

ed_beta_binomial = Bernoulli(probs=p, sample_shape=len(D))

qp = Beta(concentration1=tf.nn.softplus(tf.get_variable("alpha", [])),
          concentration0=tf.nn.softplus(tf.get_variable("beta", []))
          )

inference = ed.KLqp({p: qp},
                    {ed_beta_binomial: D})

inference.run(n_iter=1000)

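# (Uniform(0, 1) is equivalent to a Beta(1, 1) prior, so with 30 ones in 100
#  draws the exact posterior is Beta(31, 71), mean ~= 0.30; the histogram of
#  qp samples below should concentrate around that value.)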
plt.hist(qp.sample(10000).eval(), bins=200)
plt.show()