Example #1
import edward as ed
import numpy as np
import tensorflow as tf
from edward.models import InverseGamma, Normal

N = 1000

# Data generation (known mean)
mu = 7.0
sigma = 0.55
xn_data = np.random.normal(mu, sigma, N)
print('sigma={}'.format(sigma))

# Prior definition
alpha = tf.Variable(0.9, dtype=tf.float32, trainable=False)
beta = tf.Variable(0.5, dtype=tf.float32, trainable=False)

# Posterior inference
# Probabilistic model
ig = InverseGamma(alpha, beta)
xn = Normal(mu, tf.ones([N]) * tf.sqrt(ig))

# Variational model
qig = InverseGamma(tf.nn.softplus(tf.Variable(tf.random_normal([]))),
                   tf.nn.softplus(tf.Variable(tf.random_normal([]))))

# Inference
inference = ed.KLqp({ig: qig}, data={xn: xn_data})
inference.run(n_iter=2000, n_samples=150)

sess = ed.get_session()
print('Inferred sigma={}'.format(sess.run(tf.sqrt(qig.mean()))))
Example #2
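The snippet begins after the simulated data has already been generated and scatter-plotted; a minimal sketch of the assumed preamble (the imports, the two-blob toy data, and the scatter call below are assumptions, and MixtureGaussian is presumably defined earlier in the original source):

import edward as ed
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from edward.models import Dirichlet, InverseGamma, Normal

# Toy data: two well-separated 2-D Gaussian blobs.
x_train = np.concatenate([
    np.random.normal([1.0, 1.0], 0.5, size=[250, 2]),
    np.random.normal([-1.0, -1.0], 0.5, size=[250, 2])
]).astype(np.float32)

plt.scatter(x_train[:, 0], x_train[:, 1])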
plt.title("Simulated dataset")
plt.show()

K = 2
D = 2
model = MixtureGaussian(K, D)

qpi_alpha = tf.nn.softplus(tf.Variable(tf.random_normal([K])))
qmu_mu = tf.Variable(tf.random_normal([K * D]))
qmu_sigma = tf.nn.softplus(tf.Variable(tf.random_normal([K * D])))
qsigma_alpha = tf.nn.softplus(tf.Variable(tf.random_normal([K * D])))
qsigma_beta = tf.nn.softplus(tf.Variable(tf.random_normal([K * D])))

qpi = Dirichlet(alpha=qpi_alpha)
qmu = Normal(mu=qmu_mu, sigma=qmu_sigma)
qsigma = InverseGamma(alpha=qsigma_alpha, beta=qsigma_beta)

data = {'x': x_train}
inference = ed.MFVI({'pi': qpi, 'mu': qmu, 'sigma': qsigma}, data, model)
inference.run(n_iter=2500, n_samples=10, n_minibatch=20)

# Average per-cluster and per-data point likelihood over many posterior samples.
log_liks = []
for s in range(100):
    zrep = {
        'pi': qpi.sample(()),
        'mu': qmu.sample(()),
        'sigma': qsigma.sample(())
    }
    log_liks += [model.predict(data, zrep)]
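The snippet ends inside the criticism step. A hedged sketch of how this pattern is usually finished, assuming model.predict returns a [K, N] matrix of per-cluster, per-point log-likelihoods: average over the posterior samples, then assign each point to its most probable cluster.

log_liks = tf.reduce_mean(log_liks, 0)
sess = ed.get_session()
clusters = sess.run(tf.argmax(log_liks, 0))

plt.scatter(x_train[:, 0], x_train[:, 1], c=clusters, cmap='bwr')
plt.title("Predicted cluster assignments")
plt.show()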
Example #3
        k = np.argmax(np.random.multinomial(1, pi))
        x[n, :] = np.random.multivariate_normal(mus[k], np.diag(stds[k]))
        C.append(k)
    return x, np.array(C)


N = 500  # number of data points
K = 3  # number of components
D = 2  # dimensionality of data
ed.set_seed(42)

x_train, C = build_toy_dataset(N)

pi = Dirichlet(tf.ones(K))
mu = Normal(tf.zeros(D), tf.ones(D), sample_shape=K)
sigmasq = InverseGamma(tf.ones(D), tf.ones(D), sample_shape=K)
x = ParamMixture(pi, {
    'loc': mu,
    'scale_diag': tf.sqrt(sigmasq)
},
                 MultivariateNormalDiag,
                 sample_shape=N)
z = x.cat

T = 500  # number of MCMC samples
qpi = Empirical(tf.Variable(tf.ones([T, K]) / K))
qmu = Empirical(tf.Variable(tf.zeros([T, K, D])))
qsigmasq = Empirical(tf.Variable(tf.ones([T, K, D])))
qz = Empirical(tf.Variable(tf.zeros([T, N], dtype=tf.int32)))

inference = ed.Gibbs({pi: qpi, mu: qmu, sigmasq: qsigmasq, z: qz},
                     data={x: x_train})
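The snippet stops right after constructing the Gibbs inference. A hedged sketch of what typically follows in this style of example: run the sampler and read posterior means back from the Empirical approximations.

inference.run()

sess = ed.get_session()
print("Inferred mixture weights:", sess.run(qpi.mean()))
print("Inferred cluster means:", sess.run(qmu.mean()))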
Example #4

def main(_):
    # Generate data
    true_mu = np.array([-1.0, 0.0, 1.0], np.float32) * 10
    true_sigmasq = np.array([1.0**2, 2.0**2, 3.0**2], np.float32)
    true_pi = np.array([0.2, 0.3, 0.5], np.float32)
    N = 10000
    K = len(true_mu)
    true_z = np.random.choice(np.arange(K), size=N, p=true_pi)
    x_data = true_mu[true_z] + np.random.randn(N) * np.sqrt(
        true_sigmasq[true_z])

    # Prior hyperparameters
    pi_alpha = np.ones(K, dtype=np.float32)
    mu_sigma = np.std(true_mu)
    sigmasq_alpha = 1.0
    sigmasq_beta = 2.0

    # Model
    pi = Dirichlet(pi_alpha)
    mu = Normal(0.0, mu_sigma, sample_shape=K)
    sigmasq = InverseGamma(sigmasq_alpha, sigmasq_beta, sample_shape=K)
    x = ParamMixture(pi, {
        'loc': mu,
        'scale': tf.sqrt(sigmasq)
    },
                     Normal,
                     sample_shape=N)
    z = x.cat

    # Conditionals
    mu_cond = ed.complete_conditional(mu)
    sigmasq_cond = ed.complete_conditional(sigmasq)
    pi_cond = ed.complete_conditional(pi)
    z_cond = ed.complete_conditional(z)

    sess = ed.get_session()

    # Initialize randomly
    pi_est, mu_est, sigmasq_est, z_est = sess.run([pi, mu, sigmasq, z])

    print('Initial parameters:')
    print('pi:', pi_est)
    print('mu:', mu_est)
    print('sigmasq:', sigmasq_est)
    print()

    # Gibbs sampler
    cond_dict = {
        pi: pi_est,
        mu: mu_est,
        sigmasq: sigmasq_est,
        z: z_est,
        x: x_data
    }
    t0 = time()
    T = 500
    for t in range(T):
        z_est = sess.run(z_cond, cond_dict)
        cond_dict[z] = z_est
        pi_est, mu_est = sess.run([pi_cond, mu_cond], cond_dict)
        cond_dict[pi] = pi_est
        cond_dict[mu] = mu_est
        sigmasq_est = sess.run(sigmasq_cond, cond_dict)
        cond_dict[sigmasq] = sigmasq_est
    print('took %.3f seconds to run %d iterations' % (time() - t0, T))

    print()
    print('Final sample for parameters:')
    print('pi:', pi_est)
    print('mu:', mu_est)
    print('sigmasq:', sigmasq_est)
    print()

    print()
    print('True parameters:')
    print('pi:', true_pi)
    print('mu:', true_mu)
    print('sigmasq:', true_sigmasq)
    print()

    plt.figure(figsize=[10, 10])
    plt.subplot(2, 1, 1)
    plt.hist(x_data, 50)
    plt.title('Empirical Distribution of $x$')
    plt.xlabel('$x$')
    plt.ylabel('frequency')
    xl = plt.xlim()
    plt.subplot(2, 1, 2)
    plt.hist(sess.run(x, {pi: pi_est, mu: mu_est, sigmasq: sigmasq_est}), 50)
    plt.title("Predictive distribution $p(x \mid \mathrm{inferred }\ "
              "\pi, \mu, \sigma^2)$")
    plt.xlabel('$x$')
    plt.ylabel('frequency')
    plt.xlim(xl)
    plt.show()
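The example defines a main(_) entry point in the TensorFlow 1.x style; a minimal invocation sketch, assuming the script follows the usual tf.app pattern (not shown in the original):

if __name__ == "__main__":
    tf.app.run()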
Example #5
            tf.expand_dims(hpos, axis=1)  # shape=(N, 1, 2)
        )  # shape=(N, M, 2)
    )
    distance_factor = tf.divide(1., euclidean_distance)  # shape=(N, M, 2)
    mean = tf.reduce_sum(distance_factor, axis=(0, ))  # shape=(M, 2)
    return mean


# (x, y) ~ Normal([0.5, 0.5], [0.5, 0.5])
galaxies_pos = Normal(mu=tf.fill([nb_datapoints, nb_features], 0.5),
                      sigma=tf.fill([nb_datapoints, nb_features], POS_STD))

# latent variable z
mu = Normal(mu=tf.fill([nb_components, nb_features], 0.5),
            sigma=tf.fill([nb_components, nb_features], POS_STD))
sigma = InverseGamma(alpha=tf.ones([nb_components, nb_features]),
                     beta=tf.ones([nb_components, nb_features]))
cat = Categorical(logits=tf.zeros([nb_datapoints, nb_components]))
components = [
    MultivariateNormalDiag(mu=calculte_mean_from_distance_factor(
        galaxies_pos, mu[k]),
                           diag_stdev=tf.ones([nb_datapoints, 1]) * sigma[k])
    for k in range(nb_components)
]
x = Mixture(cat=cat, components=components)

# ====== inference ====== #
qmu = Normal(mu=tf.Variable(
    tf.random_normal([nb_components, nb_features], mean=0., stddev=0.5)),
             sigma=tf.nn.softplus(
                 tf.Variable(tf.zeros([nb_components, nb_features]))))
qsigma = InverseGamma(alpha=tf.nn.softplus(
    tf.Variable(tf.random_normal([nb_components, nb_features]))),
                      beta=tf.nn.softplus(
                          tf.Variable(tf.random_normal([nb_components, nb_features]))))
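The snippet cuts off while defining the variational family. Assuming it continues with variational inference as in Examples #1 and #2, the inference call might look like the following; the observed-data tensor x_observed and the iteration counts are placeholders, not from the original.

inference = ed.KLqp({mu: qmu, sigma: qsigma}, data={x: x_observed})
inference.run(n_iter=1000, n_samples=10)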
Example #6

    return x


N = 500  # number of data points
K = 2  # number of components
D = 2  # dimensionality of data
ed.set_seed(42)

# DATA
x_data = build_toy_dataset(N)

# MODEL
pi = Dirichlet(concentration=tf.constant([1.0] * K))
mu = Normal(loc=tf.zeros([K, D]), scale=tf.ones([K, D]))
sigma = InverseGamma(concentration=tf.ones([K, D]), rate=tf.ones([K, D]))
c = Categorical(
    logits=tf.tile(tf.reshape(tf.log(pi) - tf.log(1.0 - pi), [1, K]), [N, 1]))
x = Normal(loc=tf.gather(mu, c), scale=tf.gather(sigma, c))

# INFERENCE
T = 5000
qpi = Empirical(params=tf.Variable(tf.ones([T, K]) / K))
qmu = Empirical(params=tf.Variable(tf.zeros([T, K, D])))
qsigma = Empirical(params=tf.Variable(tf.ones([T, K, D])))
qc = Empirical(params=tf.Variable(tf.zeros([T, N], dtype=tf.int32)))

gpi = Dirichlet(concentration=tf.constant([1.4, 1.6]))
gmu = Normal(loc=tf.constant([[1.0, 1.0], [-1.0, -1.0]]),
             scale=tf.constant([[0.5, 0.5], [0.5, 0.5]]))
gsigma = InverseGamma(concentration=tf.constant([[1.1, 1.1], [1.1, 1.1]]),
                      rate=tf.constant([[1.0, 1.0], [1.0, 1.0]]))
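The snippet stops after the proposal for sigma. Mirroring Example #8, it presumably continues with a Categorical proposal for the assignments and a Metropolis-Hastings inference over all latent variables; this is a sketch, with gc assumed rather than taken from the original.

gc = Categorical(logits=tf.zeros([N, K]))

inference = ed.MetropolisHastings(
    latent_vars={pi: qpi, mu: qmu, sigma: qsigma, c: qc},
    proposal_vars={pi: gpi, mu: gmu, sigma: gsigma, c: gc},
    data={x: x_data})
inference.run()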
Example #7
import edward as ed
import numpy as np
import tensorflow as tf

from edward.models import InverseGamma, Normal, Empirical

N = 1000

# Data generation (known mean)
loc = 7.0
scale = 0.7
xn_data = np.random.normal(loc, scale, N)
print('scale={}'.format(scale))

# Prior definition
alpha = tf.Variable(0.5, trainable=False)
beta = tf.Variable(0.7, trainable=False)

# Posterior inference
# Probabilistic model
ig = InverseGamma(alpha, beta)
xn = Normal(loc, tf.ones([N]) * tf.sqrt(ig))

# Inference
qig = Empirical(params=tf.Variable(tf.zeros(1000) + 0.5))
proposal_ig = InverseGamma(2.0, 2.0)
inference = ed.MetropolisHastings({ig: qig}, {ig: proposal_ig},
                                  data={xn: xn_data})
inference.run()

sess = ed.get_session()
print('Inferred scale={}'.format(sess.run(tf.sqrt(qig.mean()))))
Example #8
def main(_):
    ed.set_seed(42)

    # DATA
    x_data = build_toy_dataset(FLAGS.N)

    # MODEL
    pi = Dirichlet(concentration=tf.ones(FLAGS.K))
    mu = Normal(0.0, 1.0, sample_shape=[FLAGS.K, FLAGS.D])
    sigma = InverseGamma(concentration=1.0,
                         rate=1.0,
                         sample_shape=[FLAGS.K, FLAGS.D])
    c = Categorical(logits=tf.log(pi) - tf.log(1.0 - pi), sample_shape=FLAGS.N)
    x = Normal(loc=tf.gather(mu, c), scale=tf.gather(sigma, c))

    # INFERENCE
    qpi = Empirical(
        params=tf.get_variable("qpi/params", [FLAGS.T, FLAGS.K],
                               initializer=tf.constant_initializer(1.0 /
                                                                   FLAGS.K)))
    qmu = Empirical(
        params=tf.get_variable("qmu/params", [FLAGS.T, FLAGS.K, FLAGS.D],
                               initializer=tf.zeros_initializer()))
    qsigma = Empirical(
        params=tf.get_variable("qsigma/params", [FLAGS.T, FLAGS.K, FLAGS.D],
                               initializer=tf.ones_initializer()))
    qc = Empirical(params=tf.get_variable("qc/params", [FLAGS.T, FLAGS.N],
                                          initializer=tf.zeros_initializer(),
                                          dtype=tf.int32))

    gpi = Dirichlet(concentration=tf.constant([1.4, 1.6]))
    gmu = Normal(loc=tf.constant([[1.0, 1.0], [-1.0, -1.0]]),
                 scale=tf.constant([[0.5, 0.5], [0.5, 0.5]]))
    gsigma = InverseGamma(concentration=tf.constant([[1.1, 1.1], [1.1, 1.1]]),
                          rate=tf.constant([[1.0, 1.0], [1.0, 1.0]]))
    gc = Categorical(logits=tf.zeros([FLAGS.N, FLAGS.K]))

    inference = ed.MetropolisHastings(latent_vars={
        pi: qpi,
        mu: qmu,
        sigma: qsigma,
        c: qc
    },
                                      proposal_vars={
                                          pi: gpi,
                                          mu: gmu,
                                          sigma: gsigma,
                                          c: gc
                                      },
                                      data={x: x_data})

    inference.initialize()

    sess = ed.get_session()
    tf.global_variables_initializer().run()

    for _ in range(inference.n_iter):
        info_dict = inference.update()
        inference.print_progress(info_dict)

        t = info_dict['t']
        if t == 1 or t % inference.n_print == 0:
            qpi_mean, qmu_mean = sess.run([qpi.mean(), qmu.mean()])
            print("")
            print("Inferred membership probabilities:")
            print(qpi_mean)
            print("Inferred cluster means:")
            print(qmu_mean)
Example #9
    def __init__(self, n, xdim, n_mixtures=5, mc_samples=500):
        # Compute the shape dynamically from placeholders
        self.x_ph = tf.placeholder(tf.float32, [None, xdim])
        self.k = k = n_mixtures
        self.batch_size = n
        self.d = d = xdim
        self.sample_size = tf.placeholder(tf.int32, ())

        # Build the priors over membership probabilities and mixture parameters
        with tf.variable_scope("priors"):
            pi = Dirichlet(tf.ones(k))

            mu = Normal(tf.zeros(d), tf.ones(d), sample_shape=k)
            sigmasq = InverseGamma(tf.ones(d), tf.ones(d), sample_shape=k)

        # Build the conditional mixture model
        with tf.variable_scope("likelihood"):
            x = ParamMixture(pi, {'loc': mu, 'scale_diag': tf.sqrt(sigmasq)},
                             MultivariateNormalDiag,
                             sample_shape=n)
            z = x.cat

        # Build approximate posteriors as Empirical samples
        t = mc_samples
        with tf.variable_scope("posteriors_samples"):
            qpi = Empirical(tf.get_variable(
                "qpi/params", [t, k],
                initializer=tf.constant_initializer(1.0 / k)))
            qmu = Empirical(tf.get_variable(
                "qmu/params", [t, k, d],
                initializer=tf.zeros_initializer()))
            qsigmasq = Empirical(tf.get_variable(
                "qsigmasq/params", [t, k, d],
                initializer=tf.ones_initializer()))
            qz = Empirical(tf.get_variable(
                "qz/params", [t, n],
                initializer=tf.zeros_initializer(),
                dtype=tf.int32))

        # Build inference graph using Gibbs and conditionals
        with tf.variable_scope("inference"):
            self.inference = ed.Gibbs({
                pi: qpi,
                mu: qmu,
                sigmasq: qsigmasq,
                z: qz
            }, data={
                x: self.x_ph
            })
            self.inference.initialize()

        # Build predictive posterior graph by taking samples
        n_samples = self.sample_size
        with tf.variable_scope("posterior"):
            mu_smpl = qmu.sample(n_samples)  # shape: [n_samples, k, d]
            sigmasq_smpl = qsigmasq.sample(n_samples)

            x_post = Normal(
                loc=tf.ones((n, 1, 1, 1)) * mu_smpl,
                scale=tf.ones((n, 1, 1, 1)) * tf.sqrt(sigmasq_smpl)
            )
            # NOTE: x_ph has shape [n, d]
            x_broadcasted = tf.tile(
                tf.reshape(self.x_ph, (n, 1, 1, d)),
                (1, n_samples, k, 1)
            )

            x_ll = x_post.log_prob(x_broadcasted)
            x_ll = tf.reduce_sum(x_ll, axis=3)
            x_ll = tf.reduce_mean(x_ll, axis=1)

        self.sample_t_ph = tf.placeholder(tf.int32, ())
        self.eval_ops = {
            'generative_post': x_post,
            'qmu': qmu,
            'qsigmasq': qsigmasq,
            'post_running_mu': tf.reduce_mean(
                qmu.params[:self.sample_t_ph],
                axis=0
            ),
            'post_log_prob': x_ll
        }
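A hedged usage sketch for this class; the class name GMMGibbs and the toy data array are assumptions, since the constructor name is not visible in the snippet.

data = np.random.randn(500, 2).astype(np.float32)
model = GMMGibbs(n=500, xdim=2)

sess = ed.get_session()
tf.global_variables_initializer().run()

for _ in range(model.inference.n_iter):
    info_dict = model.inference.update(feed_dict={model.x_ph: data})
    model.inference.print_progress(info_dict)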
Example #10

  return x


N = 500  # number of data points
K = 2  # number of components
D = 2  # dimensionality of data
ed.set_seed(42)

# DATA
x_data = build_toy_dataset(N)

# MODEL
pi = Dirichlet(alpha=tf.constant([1.0] * K))
mu = Normal(mu=tf.zeros([K, D]), sigma=tf.ones([K, D]))
sigma = InverseGamma(alpha=tf.ones([K, D]), beta=tf.ones([K, D]))
c = Categorical(logits=tf.tile(tf.reshape(ed.logit(pi), [1, K]), [N, 1]))
x = Normal(mu=tf.gather(mu, c), sigma=tf.gather(sigma, c))

# INFERENCE
T = 5000
qpi = Empirical(params=tf.Variable(tf.ones([T, K]) / K))
qmu = Empirical(params=tf.Variable(tf.zeros([T, K, D])))
qsigma = Empirical(params=tf.Variable(tf.ones([T, K, D])))
qc = Empirical(params=tf.Variable(tf.zeros([T, N], dtype=tf.int32)))

gpi = Dirichlet(alpha=tf.constant([1.4, 1.6]))
gmu = Normal(mu=tf.constant([[1.0, 1.0], [-1.0, -1.0]]),
             sigma=tf.constant([[0.5, 0.5], [0.5, 0.5]]))
gsigma = InverseGamma(alpha=tf.constant([[1.1, 1.1], [1.1, 1.1]]),
                      beta=tf.constant([[1.0, 1.0], [1.0, 1.0]]))
Example #11

N = 500  # number of data points
K = 2  # number of components
D = 2  # dimensionality of data
ed.set_seed(42)

# DATA
x_train = build_toy_dataset(N)
plt.scatter(x_train[:, 0], x_train[:, 1])
plt.axis([-3, 3, -3, 3])
plt.title("Simulated dataset")
plt.show()

# MODEL
mu = Normal(mu=tf.zeros([K, D]), sigma=tf.ones([K, D]))
sigma = InverseGamma(alpha=tf.ones([K, D]), beta=tf.ones([K, D]))
cat = Categorical(logits=tf.zeros([N, K]))
components = [
    MultivariateNormalDiag(mu=tf.ones([N, 1]) * tf.gather(mu, k),
                           diag_stdev=tf.ones([N, 1]) * tf.gather(sigma, k))
    for k in range(K)
]
x = Mixture(cat=cat, components=components)

# INFERENCE
qmu = Normal(mu=tf.Variable(tf.random_normal([K, D])),
             sigma=tf.nn.softplus(tf.Variable(tf.zeros([K, D]))))
qsigma = InverseGamma(alpha=tf.nn.softplus(
    tf.Variable(tf.random_normal([K, D]))),
                      beta=tf.nn.softplus(tf.Variable(tf.random_normal([K,
                                                                        D]))))
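The snippet ends while defining the variational family. Assuming it continues with variational inference as in Example #1, the call might look like the following; the iteration settings are placeholders.

inference = ed.KLqp({mu: qmu, sigma: qsigma}, data={x: x_train})
inference.run(n_iter=4000, n_samples=20)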
Example #12
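This snippet starts partway through the data-generation block; the missing preamble presumably matches Example #4, roughly:

import edward as ed
import numpy as np
import tensorflow as tf
from edward.models import Dirichlet, InverseGamma, Normal, ParamMixture

true_mu = np.array([-1.0, 0.0, 1.0], np.float32) * 10
true_sigmasq = np.array([1.0**2, 2.0**2, 3.0**2], np.float32)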
true_pi = np.array([0.2, 0.3, 0.5], np.float32)
N = 10000
K = len(true_mu)
true_z = np.random.choice(np.arange(K), size=N, p=true_pi)
x_data = true_mu[true_z] + np.random.randn(N) * np.sqrt(true_sigmasq[true_z])

# Prior hyperparameters
pi_alpha = np.ones(K, dtype=np.float32)
mu_sigma = np.std(true_mu)
sigmasq_alpha = 1.0
sigmasq_beta = 2.0

# Model
pi = Dirichlet(pi_alpha)
mu = Normal(0.0, mu_sigma, sample_shape=K)
sigmasq = InverseGamma(sigmasq_alpha, sigmasq_beta, sample_shape=K)
x = ParamMixture(pi, {
    'loc': mu,
    'scale': tf.sqrt(sigmasq)
},
                 Normal,
                 sample_shape=N)
z = x.cat

# Conditionals
mu_cond = ed.complete_conditional(mu)
sigmasq_cond = ed.complete_conditional(sigmasq)
pi_cond = ed.complete_conditional(pi)
z_cond = ed.complete_conditional(z)

sess = ed.get_session()
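The snippet ends here; it presumably continues as Example #4 does, initializing the state from the prior and then iterating the complete conditionals in a Gibbs loop, e.g.:

pi_est, mu_est, sigmasq_est, z_est = sess.run([pi, mu, sigmasq, z])
cond_dict = {pi: pi_est, mu: mu_est, sigmasq: sigmasq_est, z: z_est, x: x_data}
for _ in range(500):
    z_est = sess.run(z_cond, cond_dict)
    cond_dict[z] = z_est
    pi_est, mu_est = sess.run([pi_cond, mu_cond], cond_dict)
    cond_dict[pi], cond_dict[mu] = pi_est, mu_est
    sigmasq_est = sess.run(sigmasq_cond, cond_dict)
    cond_dict[sigmasq] = sigmasq_est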