def build_toy_dataset(coeff, n_data=40, n_data_test=20, noise_std=0.1):
    ed.set_seed(0)
    n_dim = len(coeff)
    x = np.random.randn(n_data + n_data_test, n_dim)
    y = np.dot(x, coeff) + norm.rvs(0, noise_std, size=(n_data + n_data_test))
    y = y.reshape((n_data + n_data_test, 1))

    data = np.concatenate((y[:n_data, :], x[:n_data, :]), axis=1)
    data = tf.constant(data, dtype=tf.float32)

    data_test = np.concatenate((y[n_data:, :], x[n_data:, :]), axis=1)
    data_test = tf.constant(data_test, dtype=tf.float32)
    return ed.Data(data), ed.Data(data_test)
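# Usage sketch (an addition, not part of the original snippet): the function
# above assumes NumPy, TensorFlow, Edward, and a scipy-style `norm` with an
# `rvs` method are in scope; the coefficient vector below is hypothetical.
import numpy as np
import tensorflow as tf
import edward as ed
from scipy.stats import norm

coeff = np.array([2.0, -1.0, 0.5])
data_train, data_test = build_toy_dataset(coeff)
# Each row of the wrapped (n, 1 + n_dim) tensor is [y, x_1, ..., x_n_dim].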
def main():
    data = ed.Data(np.array([0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1]))
    model = BetaBernoulli()
    variational = Variational()
    variational.add(Beta())

    # mean-field variational inference.
    inference = ed.MFVI(model, variational, data)

    inference.run(n_iter=10000)
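# The BetaBernoulli class used above is defined elsewhere in the original
# script. A plausible minimal sketch, modeled on the NormalModel class later in
# this listing; the edward.stats imports and the exact logpdf/logpmf signatures
# are assumptions, not taken from the original.
import tensorflow as tf
from edward.stats import bernoulli, beta

class BetaBernoulli:
    """p(x, p) = Bernoulli(x | p) Beta(p | 1, 1)"""
    def __init__(self):
        self.num_vars = 1

    def log_prob(self, xs, zs):
        log_prior = beta.logpdf(zs, 1.0, 1.0)
        log_lik = tf.pack([tf.reduce_sum(bernoulli.logpmf(xs, z))
                           for z in tf.unpack(zs)])
        return log_lik + log_prior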
def build_toy_dataset(n_data=40, noise_std=0.1):
    ed.set_seed(0)
    x = np.concatenate(
        [np.linspace(0, 2, num=n_data // 2),
         np.linspace(6, 8, num=n_data // 2)])
    y = 0.075 * x + norm.rvs(0, noise_std, size=n_data)
    x = (x - 4.0) / 4.0
    x = x.reshape((n_data, 1))
    y = y.reshape((n_data, 1))
    data = np.concatenate((y, x), axis=1)  # n_data x 2
    data = tf.constant(data, dtype=tf.float32)
    return ed.Data(data)
Example #4
def build_toy_dataset(n_data=40, noise_std=0.1):
    ed.set_seed(0)
    D = 1
    x = np.linspace(-3, 3, num=n_data)
    y = np.tanh(x) + norm.rvs(0, noise_std, size=n_data)
    y[y < 0.5] = 0
    y[y >= 0.5] = 1
    x = (x - 4.0) / 4.0
    x = x.reshape((n_data, D))
    y = y.reshape((n_data, 1))
    data = np.concatenate((y, x), axis=1)  # n_data x (D+1)
    data = tf.constant(data, dtype=tf.float32)
    return ed.Data(data)
Example #5
"""
A simple coin flipping example. The model is written in Stan.

Probability model
    Prior: Beta
    Likelihood: Bernoulli
Variational model
    Likelihood: Mean-field Beta
"""
import edward as ed
from edward.models import Variational, Beta

model_code = """
    data {
      int<lower=0> N;
      int<lower=0,upper=1> y[N];
    }
    parameters {
      real<lower=0,upper=1> theta;
    }
    model {
      theta ~ beta(1.0, 1.0);
      for (n in 1:N)
        y[n] ~ bernoulli(theta);
    }
"""
ed.set_seed(42)
model = ed.StanModel(model_code=model_code)
variational = Variational()
variational.add(Beta())
data = ed.Data(dict(N=10, y=[0, 1, 0, 0, 0, 0, 0, 0, 0, 1]))

inference = ed.MFVI(model, variational, data)
inference.run(n_iter=10000)
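# Sanity check (an addition, not part of the original example): the Beta prior
# is conjugate to the Bernoulli likelihood, so the exact posterior is
# Beta(1 + sum(y), 1 + N - sum(y)) = Beta(3, 9) for this data, and its mean can
# be compared against the fitted variational Beta.
from scipy.stats import beta as beta_dist

y_obs = [0, 1, 0, 0, 0, 0, 0, 0, 0, 1]
a_post = 1.0 + sum(y_obs)
b_post = 1.0 + len(y_obs) - sum(y_obs)
print("exact posterior mean: {}".format(beta_dist(a_post, b_post).mean()))  # 0.25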
Example #6
        # Log prior over the mixture proportions, component means, and variances.
        log_prior = dirichlet.logpdf(pi, self.alpha)
        log_prior += tf.reduce_sum(norm.logpdf(mus, 0, np.sqrt(self.c)), 1)
        log_prior += tf.reduce_sum(invgamma.logpdf(sigmas, self.a, self.b), 1)

        # Loop over each mini-batch zs[b,:]
        log_lik = []
        n_minibatch = get_dims(zs[0])[0]
        for s in range(n_minibatch):
            log_lik_z = N*tf.reduce_sum(tf.log(pi), 1)
            for k in range(self.K):
                log_lik_z += tf.reduce_sum(multivariate_normal.logpdf(xs,
                    mus[s, (k*self.D):((k+1)*self.D)],
                    sigmas[s, (k*self.D):((k+1)*self.D)]))

            log_lik += [log_lik_z]

        return log_prior + tf.pack(log_lik)

ed.set_seed(42)
x = np.loadtxt('data/mixture_data.txt', dtype='float32', delimiter=',')
data = ed.Data(tf.constant(x, dtype=tf.float32))

model = MixtureGaussian(K=2, D=2)
variational = Variational()
variational.add(Dirichlet(model.K))
variational.add(Normal(model.K*model.D))
variational.add(InvGamma(model.K*model.D))

inference = ed.MFVI(model, variational, data)
inference.run(n_iter=500, n_minibatch=5, n_data=5)
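# The example reads its observations from data/mixture_data.txt. If that file
# is unavailable, a synthetic stand-in with the same (N, D=2) layout could be
# generated first; this is an addition with made-up cluster locations, not the
# original data.
import os
import numpy as np

np.random.seed(0)
n = 500
synthetic = np.vstack([np.random.randn(n // 2, 2) + 5.0,   # cluster near (+5, +5)
                       np.random.randn(n // 2, 2) - 5.0])  # cluster near (-5, -5)
if not os.path.exists('data'):
    os.makedirs('data')
np.savetxt('data/mixture_data.txt', synthetic, delimiter=',')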
Example #7
def test_ndarray_multiple_samples():
    data_ndarray = ed.Data(np.array(data))
    _test(data_ndarray, 2, _assert_eq_ndarray)
Example #8
def test_ndarray_single_sample():
    data_ndarray = ed.Data(np.array(data))
    _test(data_ndarray, 1, _assert_eq_ndarray)
Example #9
def test_tf_multiple_samples():
    data_tf = ed.Data(tf.constant(data, dtype=tf.float32), shuffled=True)
    _test(data_tf, 2, _assert_eq_tf)
Example #10
def test_tf_single_sample():
    data_tf = ed.Data(tf.constant(data, dtype=tf.float32), shuffled=True)
    _test(data_tf, 1, _assert_eq_tf)
Example #11
        np.sin(0.75 * y_data) * 7.0 + y_data * 0.5 + r_data * 1.0)
    return train_test_split(x_data, y_data, random_state=42)


ed.set_seed(42)
model = MixtureDensityNetwork(10)

X_train, X_test, y_train, y_test = build_toy_dataset()
print("Size of features in training data: {:s}".format(X_train.shape))
print("Size of output in training data: {:s}".format(y_train.shape))
print("Size of features in test data: {:s}".format(X_test.shape))
print("Size of output in test data: {:s}".format(y_test.shape))

X = tf.placeholder(tf.float32, shape=(None, 1))
y = tf.placeholder(tf.float32, shape=(None, 1))
data = ed.Data([X, y])

inference = ed.MAP(model, data)
sess = tf.Session()
K.set_session(sess)
inference.initialize(sess=sess)

NEPOCH = 20
train_loss = np.zeros(NEPOCH)
test_loss = np.zeros(NEPOCH)
for i in range(NEPOCH):
    _, train_loss[i] = sess.run([inference.train, inference.loss],
                                feed_dict={
                                    X: X_train,
                                    y: y_train
                                })
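# Addition (not part of the original fragment): the test_loss array above is
# declared but never filled here; the held-out loss can be evaluated with the
# same placeholders and loss op, for example once after training.
final_test_loss = sess.run(inference.loss, feed_dict={X: X_test, y: y_test})
print("held-out loss after {} epochs: {}".format(NEPOCH, final_test_loss))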
Example #12
# mini-batch,
# q(z_{batch} | x) = prod_{m=1}^{n_data} Normal(z_m | mu, sigma = phi(x))
variational = Variational()
Normal.mapping = mapping
Normal.num_local_vars = model.num_vars
variational.add(Normal(model.num_vars * FLAGS.n_data))

if not os.path.exists(FLAGS.data_directory):
    os.makedirs(FLAGS.data_directory)

mnist = input_data.read_data_sets(FLAGS.data_directory, one_hot=True)

# data uses placeholder in order to build inference's computational
# graph. np.arrays of data are fed in during computation.
x = tf.placeholder(tf.float32, [FLAGS.n_data, 28 * 28])
data = ed.Data(x)

sess = ed.get_session()
inference = ed.MFVI(model, variational, data)
with tf.variable_scope("model") as scope:
    inference.initialize(optimizer="PrettyTensor")
with tf.variable_scope("model", reuse=True) as scope:
    p_rep = model.sample_prior(FLAGS.n_data)

n_epoch = 100
n_iter_per_epoch = 1000
for epoch in range(n_epoch):
    avg_loss = 0.0

    widgets = ["epoch #%d|" % epoch, Percentage(), Bar(), ETA()]
    pbar = ProgressBar(n_iter_per_epoch, widgets=widgets)
Example #13
import edward as ed
import tensorflow as tf

from edward.stats import norm


class NormalModel:
    """
    p(x, z) = Normal(x; z, Sigma) Normal(z; mu, Sigma)
    """
    def __init__(self, mu, Sigma):
        self.mu = mu
        self.Sigma = Sigma
        self.num_vars = 1

    def log_prob(self, xs, zs):
        log_prior = tf.pack([norm.logpdf(z, self.mu, self.Sigma)
                             for z in tf.unpack(zs)])
        log_lik = tf.pack([tf.reduce_sum(norm.logpdf(xs, z, self.Sigma))
                           for z in tf.unpack(zs)])
        return log_lik + log_prior


ed.set_seed(42)
mu = tf.constant(3.0)
Sigma = tf.constant(0.1)
model = NormalModel(mu, Sigma)
data = ed.Data(
    tf.constant((3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0,
                 1, 0, 0, 0, 0, 0, 0, 0, 1),
                dtype=tf.float32))

inference = ed.MAP(model, data)
inference.run(n_iter=200, n_print=50)
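# Sanity check (an addition, not part of the original example): because the
# prior and likelihood share the same scale Sigma, the posterior over z is
# Gaussian with mean (mu + sum(x)) / (N + 1), so the MAP estimate has a closed
# form to compare against.
import numpy as np

x_obs = np.array([3.0] * 20 + [0, 1, 0, 0, 0, 0, 0, 0, 0, 1])
map_closed_form = (3.0 + x_obs.sum()) / (len(x_obs) + 1)
print("closed-form MAP for z: {:.3f}".format(map_closed_form))  # about 2.097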
Example #14
"""
A simple coin flipping example. The model is written in PyMC3.
Inspired by Stan's toy example.

Probability model
    Prior: Beta
    Likelihood: Bernoulli
Variational model
    Likelihood: Mean-field Beta
"""
import edward as ed
import pymc3 as pm
import numpy as np
import theano

from edward.models import PyMC3Model, Variational, Beta

data_shared = theano.shared(np.zeros(1))

with pm.Model() as model:
    beta = pm.Beta('beta', 1, 1, transform=None)
    out = pm.Bernoulli('data', beta, observed=data_shared)

data = ed.Data(np.array([0, 1, 0, 0, 0, 0, 0, 0, 0, 1]))
m = PyMC3Model(model, data_shared)
variational = Variational()
variational.add(Beta())

inference = ed.MFVI(m, variational, data)
inference.run(n_iter=10000)