Example #1
# Test fragment: checks that Beta.log_prob agrees with scipy.stats.beta.logpdf
# on values sampled from the same distribution (assumes Beta from edward.models,
# tensorflow as tf, numpy as np and scipy.stats as stats are imported).
def _test(a, b, n):
    rv = Beta(a=a, b=b)
    rv_sample = rv.sample(n)
    x = rv_sample.eval()
    x_tf = tf.constant(x, dtype=tf.float32)
    a = a.eval()
    b = b.eval()
    assert np.allclose(rv.log_prob(x_tf).eval(),
                       stats.beta.logpdf(x, a, b))
# Test fragment: checks log_prob_idx for each component of a batched Beta
# against scipy.stats.beta.logpdf.
def _test(shape, n):
    rv = Beta(shape, alpha=tf.zeros(shape) + 0.5, beta=tf.zeros(shape) + 0.5)
    rv_sample = rv.sample(n)
    x = rv_sample.eval()
    x_tf = tf.constant(x, dtype=tf.float32)
    alpha = rv.alpha.eval()
    beta = rv.beta.eval()
    for idx in range(shape[0]):
        assert np.allclose(
            rv.log_prob_idx((idx,), x_tf).eval(),
            stats.beta.logpdf(x[:, idx], alpha[idx], beta[idx]))
# Test fragment: checks that sampling n draws from a Beta of a given shape
# yields a tensor of dimensions (n,) + shape.
def _test(shape, a, b, n):
    x = Beta(shape, a, b)
    val_est = tuple(get_dims(x.sample(n)))
    val_true = (n,) + shape
    assert val_est == val_true
# Variant of the previous check using the explicit size= keyword of sample().
def _test(shape, a, b, size):
    x = Beta(shape, a, b)
    val_est = tuple(get_dims(x.sample(size=size)))
    val_true = (size, ) + shape
    assert val_est == val_true
import edward as ed
from edward.models import Bernoulli, Beta, Uniform
import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np

# D = np.array([1, 0, 0, 1, 0, 0, 0, 1, 0, 0])
D = np.concatenate([np.zeros(70), np.ones(30)])  # 100 observations: 70 zeros, 30 ones

# Uniform(0, 1) prior on the success probability (equivalent to Beta(1, 1)).
p = Uniform(0., 1.)

# len(D) conditionally independent Bernoulli observations sharing the same p.
ed_beta_binomial = Bernoulli(probs=p, sample_shape=len(D))

# Variational Beta posterior; softplus keeps both concentrations positive.
qp = Beta(concentration1=tf.nn.softplus(tf.get_variable("alpha", [])),
          concentration0=tf.nn.softplus(tf.get_variable("beta", [])))

inference = ed.KLqp({p: qp},
                    {ed_beta_binomial: D})

inference.run(n_iter=1000)

plt.hist(qp.sample(10000).eval(), bins=200)
plt.show()
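
Because the Uniform(0, 1) prior is a Beta(1, 1), the exact posterior after observing 30 ones and 70 zeros is Beta(31, 71). The short sketch below is an addition, not part of the original script; it can be appended after inference has run to compare the fitted variational Beta against that closed form.

from scipy import stats

# Exact conjugate posterior: Beta(1 + 30, 1 + 70) = Beta(31, 71).
exact_mean = stats.beta(31, 71).mean()   # 31 / 102, about 0.304
fitted_mean = qp.mean().eval()           # mean of the fitted variational Beta
print("exact posterior mean:  {:.3f}".format(exact_mean))
print("fitted posterior mean: {:.3f}".format(fitted_mean))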
Example #9
from __future__ import print_function, division, absolute_import

from data import get_value  # project-local helper that evaluates tensors in a session
import tensorflow as tf
import edward as ed
from edward.models import Beta, Bernoulli

theta = Beta(a=1.0, b=1.0)
# 12-dimensional Bernoulli sharing the single Beta-distributed probability theta
x = Bernoulli(p=tf.ones(12) * theta)

# ====== sampling from each marginal variable
theta_sample = theta.sample()
x_sample = x.sample()
print("Marginal theta samples:", get_value(theta_sample))
print("Marginal X samples:", get_value(x_sample))

# ====== sampling from the joint distribution
samples = get_value([x.value(), theta.value()])
print("From joint distribution:")
print("- X:", samples[0])
print("- theta:", samples[1])
import numpy as np
import tensorflow as tf
import edward as ed
from edward.models import Beta, Categorical
from tqdm import tqdm

# Note: lenet_dropout and mini_batch are project-specific helpers assumed to be
# defined or imported elsewhere in the original code base.


class C10BetaDropout(object):
    def __init__(self, epochs, data_size, batch_size):
        self.epochs = epochs
        self.data_size = data_size
        self.batch_size = batch_size

        self.global_step = tf.Variable(initial_value=0,
                                       name='global_step',
                                       trainable=False)
        self.x = tf.placeholder(tf.float32, shape=(None, 32, 32, 3))
        self.y = tf.placeholder(tf.int32, shape=(None, ))

        self.w1 = tf.get_variable(
            'w1', (5, 5, 3, 20),
            dtype=tf.float32,
            initializer=tf.contrib.layers.xavier_initializer())
        self.b1 = tf.get_variable('b1', (20, ),
                                  dtype=tf.float32,
                                  initializer=tf.constant_initializer(0.))
        self.w2 = tf.get_variable(
            'w2', (5, 5, 20, 50),
            dtype=tf.float32,
            initializer=tf.contrib.layers.xavier_initializer())
        self.b2 = tf.get_variable('b2', (50, ),
                                  dtype=tf.float32,
                                  initializer=tf.constant_initializer(0.))
        self.w3 = tf.get_variable(
            'w3', (8 * 8 * 50, 1000),
            dtype=tf.float32,
            initializer=tf.contrib.layers.xavier_initializer())
        self.b3 = tf.get_variable('b3', (1000, ),
                                  dtype=tf.float32,
                                  initializer=tf.constant_initializer(0.))
        self.w4 = tf.get_variable(
            'w4', (1000, 10),
            dtype=tf.float32,
            initializer=tf.contrib.layers.xavier_initializer())
        self.b4 = tf.get_variable('b4', (10, ),
                                  dtype=tf.float32,
                                  initializer=tf.constant_initializer(0.))

        # Prior distribution
        self.d = Beta(20., 20.)

        self.qd = Beta(tf.Variable(20., dtype=tf.float32, name='qd_a'),
                       tf.Variable(20., dtype=tf.float32, name='qd_b'))

        self.nn = lenet_dropout(self.w1, self.b1, self.w2, self.b2, self.w3,
                                self.b3, self.w4, self.b4, self.d, self.x)

        self.categorical = Categorical(self.nn)

        self.inference = ed.KLqp({self.d: self.qd},
                                 data={self.categorical: self.y})

        self.lr = tf.train.exponential_decay(1e-4,
                                             self.global_step,
                                             10000,
                                             0.95,
                                             staircase=True)

        self.optimizer = tf.train.AdamOptimizer(self.lr)

        self.inference.initialize(optimizer=self.optimizer,
                                  global_step=self.global_step)

    def optimize(self,
                 X,
                 Y,
                 epochs,
                 batch_size,
                 X_test=None,
                 Y_test=None,
                 n_samples=10,
                 saver=None):
        print('Optimizing {} training examples'.format(self.data_size))
        losses = []
        qd_a_list = []
        qd_b_list = []
        accuracies = []

        for i in range(1, epochs + 1):
            print('Optimizing for epoch {}'.format(i))
            loss = 0
            steps = None
            for X_batch, Y_batch in mini_batch(batch_size, X, Y, shuffle=True):
                info_dict = self.inference.update(feed_dict={
                    self.x: X_batch,
                    self.y: Y_batch
                })
                loss += info_dict['loss']
                steps = info_dict['t']
            print('Loss: {}   Steps: {}'.format(loss, steps))
            losses.append(loss)

            variables_names = ['qd_a:0', 'qd_b:0']
            sess = ed.get_session()
            qd_a, qd_b = sess.run(variables_names)
            qd_a_list.append(qd_a)
            qd_b_list.append(qd_b)

            if saver is not None:
                sess = ed.get_session()
                saver.save(sess, '../checkpoint/beta_dropout.ckpt')

            if X_test is not None and Y_test is not None:
                acc = self.validate(X_test[:1000], Y_test[:1000], batch_size,
                                    n_samples)
                print('Validation: {}'.format(acc))
                accuracies.append(acc)

        print(qd_a_list)
        print(qd_b_list)

    def validate(self, X_test, Y_test, batch_size, n_samples):
        X = tf.convert_to_tensor(X_test, np.float32)
        probs = []
        for i in tqdm(range(n_samples)):
            prob = self.realize_network(X)
            probs.append(prob.eval())
        acc = 0
        for prob in probs:
            pred = np.argmax(prob, axis=1)
            acc += (pred == Y_test).sum()
        return acc / (len(X_test) * n_samples)

    def predict(self, X, batch_size, n_samples=10):
        probs = np.zeros((len(X), 10), np.float32)
        X = tf.convert_to_tensor(X, np.float32)
        for i in tqdm(range(n_samples)):
            prob = self.realize_network(X).eval()
            probs += prob
        return probs / n_samples

    def realize_network(self, x):
        sd = self.qd.sample()
        return tf.nn.softmax(
            lenet_dropout(self.w1, self.b1, self.w2, self.b2, self.w3, self.b3,
                          self.w4, self.b4, sd, x))
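
A hypothetical smoke test for the class above, assuming the project's lenet_dropout and mini_batch helpers are importable; the random CIFAR-10-shaped arrays are stand-ins, not real data, and only check that the graph builds and one epoch of KLqp updates runs end to end.

import numpy as np
import tensorflow as tf
import edward as ed

# Random stand-in data with CIFAR-10 shapes (32x32x3 images, 10 classes).
X_train = np.random.rand(256, 32, 32, 3).astype(np.float32)
Y_train = np.random.randint(0, 10, size=256)

model = C10BetaDropout(epochs=1, data_size=len(X_train), batch_size=64)
ed.get_session().run(tf.global_variables_initializer())
model.optimize(X_train, Y_train, epochs=1, batch_size=64)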
# Test fragment: checks the dimensions of Beta samples; `n` is expected to be a
# list of sample dimensions (e.g. [1]) so that it concatenates with get_dims(a).
def _test(a, b, n):
    x = Beta(a=a, b=b)
    val_est = get_dims(x.sample(n))
    val_true = n + get_dims(a)
    assert val_est == val_true
Example #12
import tensorflow as tf
import edward as ed
from edward.models import Bernoulli, Beta, Binomial
import matplotlib.pyplot as plt
import seaborn as sns

##Single coin weight inference

##Model:
theta = Beta(1.0, 1.0)
x = Bernoulli(probs=theta)

##Sampling:
with tf.Session() as sess:
    for i in range(10):
        print(x.eval())

##Observations:
data = 1

##Infer:
# Note: the concentration variables are left unconstrained here; a softplus
# transform (as in the earlier example) is the safer parameterization.
qtheta = Beta(tf.Variable(1.0), tf.Variable(1.0))
inference = ed.KLqp({theta: qtheta}, {x: data})
inference.run()

##Results:
qtheta_samples = qtheta.sample(1000).eval()
print(qtheta_samples.mean())
plt.hist(qtheta_samples)
plt.show()
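
For reference, with a Beta(1, 1) prior and a single observed success the exact posterior is Beta(2, 1) with mean 2/3, so the sample mean printed above should land near 0.667. A minimal check that could be appended (an addition, not part of the original script):

from scipy import stats

# Exact conjugate posterior after one success: Beta(1 + 1, 1 + 0) = Beta(2, 1).
print("exact posterior mean:", stats.beta(2, 1).mean())   # about 0.667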