Example No. 1
# We want to compute the posterior p(theta | x), where
# theta = [mu_1, ..., mu_3, sigma_1, ..., sigma_3, z_1, ..., z_3].
import edward as ed
import numpy as np
import tensorflow as tf

from edward.models import Categorical, Dirichlet, InverseGamma, Normal

K = 3  # number of mixture components
# N is the number of observations and x the observed data of shape [N]
# (one hypothetical way to build them is sketched after this example).

# Model
pi = Dirichlet(np.ones(K, np.float32))
mu = Normal(0.0, 9.0, sample_shape=[K])
sigma = InverseGamma(1.0, 1.0, sample_shape=[K])

c = Categorical(logits=tf.log(pi) - tf.log(1.0 - pi), sample_shape=N)
ed_x = Normal(loc=tf.gather(mu, c), scale=tf.gather(sigma, c))

# parameters
q_pi = Dirichlet(
    tf.nn.softplus(
        tf.get_variable("qpi", [K],
                        initializer=tf.constant_initializer(1.0 / K))))
q_mu = Normal(loc=tf.get_variable("qmu", [K]), scale=1.0)
q_sigma = Normal(loc=tf.nn.softplus(tf.get_variable("qsigma", [K])), scale=1.0)

inference = ed.KLqp(latent_vars={
    mu: q_mu,
    sigma: q_sigma
}, data={ed_x: x})  # note: this fails if we also include pi: q_pi

inference.run(n_iter=1000)

print(q_pi.value().eval())

print(q_mu.value().eval())
print(q_sigma.value().eval())
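
Example No. 1 assumes an observed data array x of N draws from the three-component mixture. The snippet below is a minimal sketch, with hypothetical mixture weights, means, and scales, of how such data could be simulated with NumPy before the model is built.

import numpy as np

np.random.seed(0)

N = 1000  # number of observations
true_pi = np.array([0.3, 0.5, 0.2])     # hypothetical mixture weights
true_mu = np.array([-3.0, 0.0, 4.0])    # hypothetical component means
true_sigma = np.array([0.5, 1.0, 0.7])  # hypothetical component scales

# Draw a component for each observation, then sample from that component.
z = np.random.choice(3, size=N, p=true_pi)
x = np.random.normal(true_mu[z], true_sigma[z]).astype(np.float32)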
Example No. 2
"""We build a random variable whose size depends on a sample from another
random variable.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import edward as ed
import tensorflow as tf

from edward.models import Exponential, Dirichlet, Gamma

ed.set_seed(42)

# Prior on scalar hyperparameter to Dirichlet.
alpha = Gamma(alpha=1.0, beta=1.0)

# Prior on size of Dirichlet.
n = 1 + tf.cast(Exponential(lam=0.5), tf.int32)

# Build a vector of ones whose size is n; multiply it by alpha.
p = Dirichlet(alpha=tf.ones([n]) * alpha)

sess = ed.get_session()
print(sess.run(p.value()))
# [ 0.01012419  0.02939712  0.05036638  0.51287931  0.31020424  0.0485355
#   0.0384932 ]
print(sess.run(p.value()))
# [ 0.12836078  0.23335715  0.63828212]
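
Each call to sess.run re-samples the Exponential, which is why the two vectors above have different lengths. Below is a minimal sketch (using the same session as above) that fetches n and p in a single run, so both values come from one draw of n and the lengths can be checked against each other.

n_val, p_val = sess.run([n, p.value()])
print(n_val, len(p_val))  # equal, since both come from the same sampled n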