def main():
    data = ed.Data(np.array([0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1]))
    model = BetaBernoulli()
    variational = Variational()
    variational.add(Beta())

    # mean-field variational inference.
    inference = ed.MFVI(model, variational, data)

    inference.run(n_iter=10000)
Example No. 2
    def __init__(self, model, data=Data(), transform=tf.identity):
        variational = Variational()
        if hasattr(model, 'num_vars'):
            variational.add(PointMass(model.num_vars, transform))
        else:
            variational.add(PointMass(0, transform))

        VariationalInference.__init__(self, model, variational, data)
Example No. 3
    def _test(self, sess, data, n_minibatch, x=None, is_file=False):
        model = NormalModel()
        variational = Variational()
        variational.add(Normal())

        inference = ed.MFVI(model, variational, data)
        inference.initialize(n_minibatch=n_minibatch)

        if x is not None:
            # Placeholder setting.
            # Check data is same as data fed to it.
            feed_dict = {inference.data['x']: x}
            # avoid directly fetching placeholder
            data_id = {
                k: tf.identity(v)
                for k, v in six.iteritems(inference.data)
            }
            val = sess.run(data_id, feed_dict)
            assert np.all(val['x'] == x)
        elif is_file:
            # File reader setting.
            # Check data varies by session run.
            val = sess.run(inference.data)
            val_1 = sess.run(inference.data)
            assert not np.all(val['x'] == val_1['x'])
        elif n_minibatch is None:
            # Preloaded full setting.
            # Check data is full data.
            val = sess.run(inference.data)
            assert np.all(val['x'] == data['x'])
        elif n_minibatch == 1:
            # Preloaded batch setting, with n_minibatch=1.
            # Check data is randomly shuffled.
            assert not np.all([
                sess.run(inference.data)['x'] == data['x'][i]
                for i in range(10)
            ])
        else:
            # Preloaded batch setting.
            # Check data is randomly shuffled.
            val = sess.run(inference.data)
            assert not np.all(val['x'] == data['x'][:n_minibatch])
            # Check data varies by session run.
            val_1 = sess.run(inference.data)
            assert not np.all(val['x'] == val_1['x'])

        inference.finalize()
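
NormalModel is referenced above but its definition is not part of this snippet. A minimal hypothetical stand-in, following the log_prob(xs, zs) convention used by the other models in this listing (the standard-normal prior and unit-variance likelihood here are assumptions for illustration), could look like:

import tensorflow as tf
from edward.stats import norm

class NormalModel:
    """Hypothetical stand-in: a single latent variable z with a standard
    normal prior and a unit-variance normal likelihood over xs['x']."""
    def __init__(self):
        self.n_vars = 1

    def log_prob(self, xs, zs):
        # Prior term per latent sample plus the summed data log-likelihood.
        log_prior = norm.logpdf(zs, 0.0, 1.0)
        log_lik = tf.pack([tf.reduce_sum(norm.logpdf(xs['x'], z, 1.0))
                           for z in tf.unpack(zs)])
        return log_lik + log_prior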
Example No. 4
    def __init__(self, model, data=None, params=None):
        with tf.variable_scope("variational"):
            variational = Variational()
            if hasattr(model, 'n_vars'):
                variational.add(PointMass(model.n_vars, params))
            else:
                variational.add(PointMass(0))

        super(MAP, self).__init__(model, variational, data)
Example No. 5
def _test(data, n_data, x=None, is_file=False):
    sess = ed.get_session()
    model = NormalModel()
    variational = Variational()
    variational.add(Normal())

    inference = ed.MFVI(model, variational, data)
    inference.initialize(n_data=n_data)

    if x is not None:
        # Placeholder setting.
        # Check data is same as data fed to it.
        feed_dict = {inference.data['x']: x}
        # avoid directly fetching placeholder
        data_id = {
            k: tf.identity(v)
            for k, v in six.iteritems(inference.data)
        }
        val = sess.run(data_id, feed_dict)
        assert np.all(val['x'] == x)
    elif is_file:
        # File reader setting.
        # Check data varies by session run.
        val = sess.run(inference.data)
        val_1 = sess.run(inference.data)
        assert not np.all(val['x'] == val_1['x'])
    elif n_data is None:
        # Preloaded full setting.
        # Check data is full data.
        val = sess.run(inference.data)
        assert np.all(val['x'] == data['x'])
    else:
        # Preloaded batch setting.
        # Check data is randomly shuffled.
        val = sess.run(inference.data)
        assert not np.all(val['x'] == data['x'][:n_data])
        # Check data varies by session run.
        val_1 = sess.run(inference.data)
        assert not np.all(val['x'] == val_1['x'])

    inference.finalize()
    sess.close()
    del sess
    tf.reset_default_graph()
Example No. 6
    def _test(self, sess, data, n_minibatch, x=None, is_file=False):
        model = NormalModel()
        variational = Variational()
        variational.add(Normal())

        inference = ed.MFVI(model, variational, data)
        inference.initialize(n_minibatch=n_minibatch)

        if x is not None:
            # Placeholder setting.
            # Check data is same as data fed to it.
            feed_dict = {inference.data['x']: x}
            # avoid directly fetching placeholder
            data_id = {
                k: tf.identity(v)
                for k, v in six.iteritems(inference.data)
            }
            val = sess.run(data_id, feed_dict)
            assert np.all(val['x'] == x)
        elif is_file:
            # File reader setting.
            # Check data varies by session run.
            val = sess.run(inference.data)
            val_1 = sess.run(inference.data)
            assert not np.all(val['x'] == val_1['x'])
        elif n_minibatch is None:
            # Preloaded full setting.
            # Check data is full data.
            val = sess.run(inference.data)
            assert np.all(val['x'] == data['x'])
        elif n_minibatch == 1:
            # Preloaded batch setting, with n_minibatch=1.
            # Check data is randomly shuffled.
            assert not np.all([
                sess.run(inference.data)['x'] == data['x'][i]
                for i in range(10)
            ])
        else:
            # Preloaded batch setting.
            # Check data is randomly shuffled.
            val = sess.run(inference.data)
            assert not np.all(val['x'] == data['x'][:n_minibatch])
            # Check data varies by session run.
            val_1 = sess.run(inference.data)
            assert not np.all(val['x'] == val_1['x'])

        inference.finalize()
Example No. 7
    def __init__(self, model, data=Data(), transform=tf.identity):
        variational = Variational()
        if hasattr(model, 'num_vars'):
            variational.add(PointMass(model.num_vars, transform))
        else:
            variational.add(PointMass(0, transform))

        VariationalInference.__init__(self, model, variational, data)
Example No. 8
    def __init__(self, model, data=Data(), params=None):
        variational = Variational()
        if hasattr(model, 'num_vars'):
            variational.add(PointMass(model.num_vars, params))
        else:
            variational.add(PointMass(0))

        VariationalInference.__init__(self, model, variational, data)
Example No. 9
    stds = [[0.1, 0.1], [0.1, 0.1]]
    x = np.zeros((N, 2), dtype=np.float32)
    for n in range(N):
        k = np.argmax(np.random.multinomial(1, pi))
        x[n, :] = np.random.multivariate_normal(mus[k], np.diag(stds[k]))

    return {'x': x}


ed.set_seed(42)
data = build_toy_dataset(500)
plt.scatter(data['x'][:, 0], data['x'][:, 1])
plt.axis([-3, 3, -3, 3])
plt.title("Simulated dataset")
plt.show()

model = MixtureGaussian(K=2, D=2)
variational = Variational()
variational.add(Dirichlet(model.K))
variational.add(Normal(model.K*model.D))
variational.add(InvGamma(model.K*model.D))

inference = ed.MFVI(model, variational, data)
inference.run(n_iter=4000, n_samples=50, n_minibatch=10)

clusters = np.argmax(ed.evaluate('log_likelihood', model, variational, data), axis=0)
plt.scatter(data['x'][:, 0], data['x'][:, 1], c=clusters, cmap=cm.bwr)
plt.axis([-3, 3, -3, 3])
plt.title("Predicted cluster assignments")
plt.show()
Example No. 10
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import edward as ed
import tensorflow as tf

from edward.models import Variational, Bernoulli
from edward.stats import bernoulli


class BernoulliPosterior:
    """
    p(x, z) = p(z) = p(z | x) = Bernoulli(z; p)
    """
    def __init__(self, p):
        self.p = p

    def log_prob(self, xs, zs):
        return bernoulli.logpmf(zs, self.p)


ed.set_seed(42)
p = tf.constant(0.6)
model = BernoulliPosterior(p)
variational = Variational()
variational.add(Bernoulli())

inference = ed.MFVI(model, variational)
inference.run(n_iter=10000)
Example No. 11
def build_toy_dataset(n_data=40, noise_std=0.1):
    ed.set_seed(0)
    x = np.concatenate([np.linspace(0, 2, num=n_data // 2),
                        np.linspace(6, 8, num=n_data // 2)])
    y = 0.075*x + norm.rvs(0, noise_std, size=n_data)
    x = (x - 4.0) / 4.0
    x = x.reshape((n_data, 1))
    y = y.reshape((n_data, 1))
    data = np.concatenate((y, x), axis=1) # n_data x 2
    data = tf.constant(data, dtype=tf.float32)
    return ed.Data(data)

ed.set_seed(42)
model = LinearModel()
variational = Variational()
variational.add(Normal(model.num_vars))
data = build_toy_dataset()

# Set up figure
fig = plt.figure(figsize=(8,8), facecolor='white')
ax = fig.add_subplot(111, frameon=False)
plt.ion()
plt.show(block=False)

sess = ed.get_session()
inference = ed.MFVI(model, variational, data)
inference.initialize(n_minibatch=5, n_print=5)
for t in range(250):
    loss = inference.update()
    if t % inference.n_print == 0:
Example No. 12
ed.set_seed(42)
model = NormalBernoulli(num_vars=10)

# We use the variational model
# q(z | x) = prod_{n=1}^N q(z_n | x)
#          = prod_{n=1}^N Normal(z_n | loc, scale = phi(x_n))
# It is a distribution of the latent variables z_n for each data
# point x_n. We use neural_network() to globally parameterize the local
# variational factors q(z_n | x).
# We also do data subsampling during inference. Therefore we only need
# to explicitly represent the corresponding variational factors for a
# mini-batch,
# q(z_{batch} | x) = prod_{m=1}^{n_data} Normal(z_m | loc, scale = phi(x))
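
The function neural_network() used below is assumed to be defined earlier in the full example; it is not shown in this snippet. A rough sketch of such an inference network, mapping the mini-batch of flattened images to a flat location vector and a positive scale vector of length model.num_vars * FLAGS.n_data (the single hidden layer and its width are assumptions), might be:

def neural_network(x):
    # Hypothetical inference network; the real one is not shown above.
    W0 = tf.Variable(tf.random_normal([28 * 28, 256], stddev=0.1))
    b0 = tf.Variable(tf.zeros([256]))
    W1 = tf.Variable(tf.random_normal([256, 2 * 10], stddev=0.1))
    b1 = tf.Variable(tf.zeros([2 * 10]))
    hidden = tf.nn.relu(tf.matmul(x, W0) + b0)
    out = tf.matmul(hidden, W1) + b1  # one (loc, scale) pair per latent dimension
    loc = tf.reshape(out[:, :10], [-1])                    # length num_vars * n_data
    scale = tf.nn.softplus(tf.reshape(out[:, 10:], [-1]))  # keep scales positive
    return loc, scale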
x_ph = tf.placeholder(tf.float32, [FLAGS.n_data, 28 * 28])
loc, scale = neural_network(x_ph)
variational = Variational()
variational.add(Normal(model.num_vars * FLAGS.n_data, loc=loc, scale=scale))

if not os.path.exists(FLAGS.data_directory):
    os.makedirs(FLAGS.data_directory)

mnist = input_data.read_data_sets(FLAGS.data_directory, one_hot=True)

# data uses placeholder in order to build inference's computational
# graph. np.arrays of data are fed in during computation.
x = tf.placeholder(tf.float32, [FLAGS.n_data, 28 * 28])
data = ed.Data(x)

sess = ed.get_session()
inference = ed.MFVI(model, variational, data)
with tf.variable_scope("model") as scope:
Example No. 13
Normal.mapping = mapping


class Data:
    def __init__(self, data):
        self.mnist = data

    def sample(self, size):
        x_batch, _ = self.mnist.train.next_batch(size)
        return x_batch


ed.set_seed(42)
model = NormalBernoulli(FLAGS.num_vars)

variational = Variational()
variational.add(Normal(FLAGS.num_vars))

if not os.path.exists(FLAGS.data_directory):
    os.makedirs(FLAGS.data_directory)
mnist = input_data.read_data_sets(FLAGS.data_directory, one_hot=True)
data = Data(mnist)

inference = ed.VAE(model, variational, data)
sess = inference.initialize(n_data=FLAGS.n_data)
with tf.variable_scope("model", reuse=True) as scope:
    p_rep = model.sample_prior([FLAGS.n_data, FLAGS.num_vars])

for epoch in range(FLAGS.n_epoch):
    avg_loss = 0.0
Example No. 14

ed.set_seed(42)
model = NormalBernoulli(n_vars=10)

# Use the variational model
# q(z | x) = prod_{n=1}^N Normal(z_n | loc, scale = neural_network(x_n))
# It is a distribution of the latent variables z_n for each data
# point x_n. We use neural_network() to globally parameterize the local
# variational factors q(z_n | x).
# We also do data subsampling during inference. Therefore we only need
# to explicitly represent the variational factors for a mini-batch,
# q(z_{batch} | x) = prod_{m=1}^{n_data} Normal(z_m | loc, scale = neural_network(x_m))
x_ph = tf.placeholder(tf.float32, [N_MINIBATCH, 28 * 28])
loc, scale = neural_network(x_ph)
variational = Variational()
variational.add(Normal(model.n_vars * N_MINIBATCH, loc=loc, scale=scale))

# MNIST batches are fed at training time.
if not os.path.exists(DATA_DIR):
    os.makedirs(DATA_DIR)

mnist = input_data.read_data_sets(DATA_DIR, one_hot=True)
x = tf.placeholder(tf.float32, [N_MINIBATCH, 28 * 28])
data = {'x': x}

sess = ed.get_session()
inference = ed.MFVI(model, variational, data)
with tf.variable_scope("model") as scope:
    inference.initialize(optimizer="PrettyTensor")
with tf.variable_scope("model", reuse=True) as scope:
Example No. 15
    def __init__(self, model, data=Data(), params=None):
        with tf.variable_scope("variational"):
            variational = Variational()
            variational.add(PointMass(model.num_vars, params))

        VariationalInference.__init__(self, model, variational, data)
Example No. 16
    Posterior: (1-dimensional) Normal
Variational model
    Likelihood: Mean-field Normal
"""
import edward as ed
import tensorflow as tf

from edward.models import Variational, Normal
from edward.stats import norm

class NormalPosterior:
    """
    p(x, z) = p(z) = p(z | x) = Normal(z; mu, std)
    """
    def __init__(self, mu, std):
        self.mu = mu
        self.std = std

    def log_prob(self, xs, zs):
        return norm.logpdf(zs, self.mu, self.std)

ed.set_seed(42)
mu = tf.constant(1.0)
std = tf.constant(1.0)
model = NormalPosterior(mu, std)
variational = Variational()
variational.add(Normal())

inference = ed.MFVI(model, variational)
inference.run(n_iter=10000)
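
Because the target in this example is itself a one-dimensional Normal, the mean-field Normal family contains it exactly, so the variational optimum is the target itself:

\mathrm{KL}\big(q(z) \,\|\, p(z \mid x)\big) \ge 0, \quad \text{with equality iff } q = p,

and since p(z | x) = Normal(z; 1, 1) lies in the family, the fitted variational parameters should converge to loc ≈ 1.0 and scale ≈ 1.0.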
Example No. 17
    return [mean, stddev]

ed.set_seed(42)
model = NormalBernoulli(num_vars=10)

# We use the variational model
# q(z | x) = prod_{n=1}^N q(z_n | x)
#          = prod_{n=1}^N Normal(z_n | mu, sigma = phi(x_n))
# It is a distribution of the latent variables z_n for each data
# point x_n. We use mapping() to globally parameterize the local
# variational factors q(z_n | x).
# We also do data subsampling during inference. Therefore we only need
# to explicitly represent the corresponding variational factors for a
# mini-batch,
# q(z_{batch} | x) = prod_{m=1}^{n_data} Normal(z_m | mu, sigma = phi(x))
variational = Variational()
Normal.mapping = mapping
Normal.num_local_vars = model.num_vars
variational.add(Normal(model.num_vars * FLAGS.n_data))

if not os.path.exists(FLAGS.data_directory):
    os.makedirs(FLAGS.data_directory)

mnist = input_data.read_data_sets(FLAGS.data_directory, one_hot=True)

# data uses placeholder in order to build inference's computational
# graph. np.arrays of data are fed in during computation.
x = tf.placeholder(tf.float32, [FLAGS.n_data, 28 * 28])
data = ed.Data(x)

inference = ed.MFVI(model, variational, data)
Example No. 18

ed.set_seed(42)
model = NormalBernoulli(num_vars=10)

# Use the variational model
# q(z | x) = prod_{n=1}^N Normal(z_n | loc, scale = neural_network(x_n))
# It is a distribution of the latent variables z_n for each data
# point x_n. We use neural_network() to globally parameterize the local
# variational factors q(z_n | x).
# We also do data subsampling during inference. Therefore we only need
# to explicitly represent the variational factors for a mini-batch,
# q(z_{batch} | x) = prod_{m=1}^{n_data} Normal(z_m | loc, scale = neural_network(x_m))
x_ph = tf.placeholder(tf.float32, [N_DATA, 28 * 28])
loc, scale = neural_network(x_ph)
variational = Variational()
variational.add(Normal(model.num_vars * N_DATA, loc=loc, scale=scale))

# MNIST batches are fed at training time.
if not os.path.exists(DATA_DIR):
    os.makedirs(DATA_DIR)

mnist = input_data.read_data_sets(DATA_DIR, one_hot=True)
x = tf.placeholder(tf.float32, [N_DATA, 28 * 28])
data = {'x': x}

sess = ed.get_session()
inference = ed.MFVI(model, variational, data)
with tf.variable_scope("model") as scope:
    inference.initialize(optimizer="PrettyTensor")
with tf.variable_scope("model", reuse=True) as scope:
    ed.set_seed(0)
    D = 1
    x  = np.linspace(-3, 3, num=n_data)
    y = np.tanh(x) + norm.rvs(0, noise_std, size=n_data)
    y[y < 0.5] = 0
    y[y >= 0.5] = 1
    x = (x - 4.0) / 4.0
    x = x.reshape((n_data, D))
    y = y.reshape((n_data, 1))
    data = np.concatenate((y, x), axis=1) # n_data x (D+1)
    data = tf.constant(data, dtype=tf.float32)
    return ed.Data(data)

ed.set_seed(42)
model = HierarchicalLogistic(weight_dim=[1,1])
variational = Variational()
variational.add(Normal(model.num_vars))
data = build_toy_dataset()

# Set up figure
fig = plt.figure(figsize=(8,8), facecolor='white')
ax = fig.add_subplot(111, frameon=False)
plt.ion()
plt.show(block=False)

inference = ed.MFVI(model, variational, data)
inference.initialize(n_print=5)
sess = ed.get_session()
for t in range(600):
    loss = inference.update()
    if t % inference.n_print == 0:
Example No. 20
of 100,000 samples (!).
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import edward as ed
import tensorflow as tf

from edward.models import Variational, Bernoulli
from edward.stats import bernoulli


class BernoulliModel:
  """p(x, z) = p(z) = p(z | x) = Bernoulli(z; p)"""
  def __init__(self, p):
    self.p = p

  def log_prob(self, xs, zs):
    return bernoulli.logpmf(zs, self.p)


ed.set_seed(42)
p = tf.constant(0.6)
model = BernoulliModel(p)
variational = Variational()
variational.add(Bernoulli())

inference = ed.MFVI(model, variational)
inference.run(n_samples=int(1e5))
Example No. 21
        log_prior = dirichlet.logpdf(pi, self.alpha)
        log_prior += tf.reduce_sum(norm.logpdf(mus, 0, np.sqrt(self.c)), 1)
        log_prior += tf.reduce_sum(invgamma.logpdf(sigmas, self.a, self.b), 1)

        # Loop over each mini-batch zs[b,:]
        log_lik = []
        n_minibatch = get_dims(zs)[0]
        for s in range(n_minibatch):
            log_lik_z = N*tf.reduce_sum(tf.log(pi), 1)
            for k in range(self.K):
                log_lik_z += tf.reduce_sum(multivariate_normal.logpdf(xs,
                    mus[s, (k*self.D):((k+1)*self.D)],
                    sigmas[s, (k*self.D):((k+1)*self.D)]))

            log_lik += [log_lik_z]

        return log_prior + tf.pack(log_lik)

ed.set_seed(42)
x = np.loadtxt('data/mixture_data.txt', dtype='float32', delimiter=',')
data = ed.Data(tf.constant(x, dtype=tf.float32))

model = MixtureGaussian(K=2, D=2)
variational = Variational()
variational.add(Dirichlet([1, model.K]))
variational.add(Normal(model.K*model.D))
variational.add(InvGamma(model.K*model.D))

inference = ed.MFVI(model, variational, data)
inference.run(n_iter=500, n_minibatch=5, n_data=5)
Example No. 22
    Prior: Beta
    Likelihood: Bernoulli
Variational model
    Likelihood: Mean-field Beta
"""
import edward as ed
from edward.models import Variational, Beta

model_code = """
    data {
      int<lower=0> N;
      int<lower=0,upper=1> y[N];
    }
    parameters {
      real<lower=0,upper=1> theta;
    }
    model {
      theta ~ beta(1.0, 1.0);
      for (n in 1:N)
        y[n] ~ bernoulli(theta);
    }
"""
ed.set_seed(42)
model = ed.StanModel(model_code=model_code)
variational = Variational()
variational.add(Beta())
data = ed.Data(dict(N=10, y=[0, 1, 0, 0, 0, 0, 0, 0, 0, 1]))

inference = ed.MFVI(model, variational, data)
inference.run(n_iter=10000)
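
This model is conjugate, which gives a closed-form check on the variational fit: with a Beta(1, 1) prior and the ten observations above (two successes, eight failures),

p(\theta \mid y) = \mathrm{Beta}\big(\theta;\ 1 + \textstyle\sum_n y_n,\ 1 + N - \textstyle\sum_n y_n\big) = \mathrm{Beta}(\theta;\ 3,\ 9),

so the fitted mean-field Beta should end up close to Beta(3, 9), with posterior mean 3/12 = 0.25.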
Example No. 23
    def __init__(self):
        self.n_vars = 1

    def log_prob(self, xs, zs):
        log_prior = beta.logpdf(zs, a=1.0, b=1.0)
        log_lik = tf.pack([
            tf.reduce_sum(bernoulli.logpmf(xs['x'], z)) for z in tf.unpack(zs)
        ])
        return log_lik + log_prior

    def sample_likelihood(self, zs, n):
        """x | z ~ p(x | z)"""
        out = []
        for s in range(zs.shape[0]):
            out += [{'x': bernoulli.rvs(zs[s, :], size=n).reshape((n, ))}]

        return out


ed.set_seed(42)
model = BetaBernoulli()
variational = Variational()
variational.add(Beta(model.n_vars))
data = {'x': np.array([0, 1, 0, 0, 0, 0, 0, 0, 0, 1])}

inference = ed.MFVI(model, variational, data)
inference.run(n_iter=200)

T = lambda x, z=None: tf.reduce_mean(tf.cast(x['x'], tf.float32))
print(ed.ppc(model, variational, data, T))
Example No. 24
    p(x, z) = Bernoulli(x | z) * Beta(z | 1, 1)
    """
    def __init__(self):
        self.num_vars = 1

    def log_prob(self, xs, zs):
        log_prior = beta.logpdf(zs, a=1.0, b=1.0)
        log_lik = tf.pack([tf.reduce_sum(bernoulli.logpmf(xs, z))
                           for z in tf.unpack(zs)])
        return log_lik + log_prior

    def sample_likelihood(self, zs, size):
        """x | z ~ p(x | z)"""
        out = np.zeros((zs.shape[0], size))
        for s in range(zs.shape[0]):
            out[s,:] = bernoulli.rvs(zs[s,:], size=size).reshape((size,))

        return out

ed.set_seed(42)
model = BetaBernoulli()
variational = Variational()
variational.add(Beta(model.num_vars))
data = ed.Data(tf.constant((0, 1, 0, 0, 0, 0, 0, 0, 0, 1), dtype=tf.float32))

inference = ed.MFVI(model, variational, data)
inference.run(n_iter=200)

T = lambda y, z=None: tf.reduce_mean(y)
print(ed.ppc(model, variational, data, T))
Example No. 25
        log_prior = dirichlet.logpdf(pi, self.alpha)
        log_prior += tf.reduce_sum(norm.logpdf(mus, 0, np.sqrt(self.c)), 1)
        log_prior += tf.reduce_sum(invgamma.logpdf(sigmas, self.a, self.b), 1)

        # Loop over each mini-batch zs[b,:]
        log_lik = []
        n_minibatch = get_dims(zs[0])[0]
        for s in range(n_minibatch):
            log_lik_z = N*tf.reduce_sum(tf.log(pi), 1)
            for k in range(self.K):
                log_lik_z += tf.reduce_sum(multivariate_normal.logpdf(xs,
                    mus[s, (k*self.D):((k+1)*self.D)],
                    sigmas[s, (k*self.D):((k+1)*self.D)]))

            log_lik += [log_lik_z]

        return log_prior + tf.pack(log_lik)

ed.set_seed(42)
x = np.loadtxt('data/mixture_data.txt', dtype='float32', delimiter=',')
data = ed.Data(tf.constant(x, dtype=tf.float32))

model = MixtureGaussian(K=2, D=2)
variational = Variational()
variational.add(Dirichlet(model.K))
variational.add(Normal(model.K*model.D))
variational.add(InvGamma(model.K*model.D))

inference = ed.MFVI(model, variational, data)
inference.run(n_iter=500, n_minibatch=5, n_data=5)
Example No. 26

ed.set_seed(42)
model = NormalBernoulli(num_vars=10)

# We use the variational model
# q(z | x) = prod_{n=1}^N q(z_n | x)
#          = prod_{n=1}^N Normal(z_n | mu, sigma = phi(x_n))
# It is a distribution of the latent variables z_n for each data
# point x_n. We use mapping() to globally parameterize the local
# variational factors q(z_n | x).
# We also do data subsampling during inference. Therefore we only need
# to explicitly represent the corresponding variational factors for a
# mini-batch,
# q(z_{batch} | x) = prod_{m=1}^{n_data} Normal(z_m | mu, sigma = phi(x))
variational = Variational()
Normal.mapping = mapping
Normal.num_local_vars = model.num_vars
variational.add(Normal(model.num_vars * FLAGS.n_data))

if not os.path.exists(FLAGS.data_directory):
    os.makedirs(FLAGS.data_directory)

mnist = input_data.read_data_sets(FLAGS.data_directory, one_hot=True)

# data uses placeholder in order to build inference's computational
# graph. np.arrays of data are fed in during computation.
x = tf.placeholder(tf.float32, [FLAGS.n_data, 28 * 28])
data = ed.Data(x)

sess = ed.get_session()
Example No. 27
    D = 1
    x = np.linspace(-3, 3, num=n_data)
    y = np.tanh(x) + norm.rvs(0, noise_std, size=n_data)
    y[y < 0.5] = 0
    y[y >= 0.5] = 1
    x = (x - 4.0) / 4.0
    x = x.reshape((n_data, D))
    y = y.reshape((n_data, 1))
    data = np.concatenate((y, x), axis=1)  # n_data x (D+1)
    data = tf.constant(data, dtype=tf.float32)
    return ed.Data(data)


ed.set_seed(42)
model = HierarchicalLogistic(weight_dim=[1, 1])
variational = Variational()
variational.add(Normal(model.num_vars))
data = build_toy_dataset()

# Set up figure
fig = plt.figure(figsize=(8, 8), facecolor='white')
ax = fig.add_subplot(111, frameon=False)
plt.ion()
plt.show(block=False)

inference = ed.MFVI(model, variational, data)
inference.initialize(n_print=5)
sess = ed.get_session()
for t in range(600):
    loss = inference.update()
    if t % inference.n_print == 0:
Example No. 28

ed.set_seed(42)
model = NormalBernoulli(n_vars=10)

# Use the variational model
# q(z | x) = prod_{n=1}^N Normal(z_n | loc, scale = neural_network(x_n))
# It is a distribution of the latent variables z_n for each data
# point x_n. We use neural_network() to globally parameterize the local
# variational factors q(z_n | x).
# We also do data subsampling during inference. Therefore we only need
# to explicitly represent the variational factors for a mini-batch,
# q(z_{batch} | x) = prod_{m=1}^{n_data} Normal(z_m | loc, scale = neural_network(x_m))
x_ph = tf.placeholder(tf.float32, [N_MINIBATCH, 28 * 28])
loc, scale = neural_network(x_ph)
variational = Variational()
variational.add(Normal(model.n_vars * N_MINIBATCH, loc=loc, scale=scale))

# MNIST batches are fed at training time.
if not os.path.exists(DATA_DIR):
    os.makedirs(DATA_DIR)

mnist = input_data.read_data_sets(DATA_DIR, one_hot=True)
x = tf.placeholder(tf.float32, [N_MINIBATCH, 28 * 28])
data = {'x': x}

sess = ed.get_session()
inference = ed.MFVI(model, variational, data)
with tf.variable_scope("model") as scope:
    inference.initialize(optimizer="PrettyTensor")
with tf.variable_scope("model", reuse=True) as scope:

def build_toy_dataset(N=40, noise_std=0.1):
    ed.set_seed(0)
    x = np.concatenate(
        [np.linspace(0, 2, num=N // 2),
         np.linspace(6, 8, num=N // 2)])
    y = 0.075 * x + norm.rvs(0, noise_std, size=N)
    x = (x - 4.0) / 4.0
    x = x.reshape((N, 1))
    return {'x': x, 'y': y}


ed.set_seed(42)
model = LinearModel()
variational = Variational()
variational.add(Normal(model.n_vars))
data = build_toy_dataset()

# Set up figure
fig = plt.figure(figsize=(8, 8), facecolor='white')
ax = fig.add_subplot(111, frameon=False)
plt.ion()
plt.show(block=False)

sess = ed.get_session()
inference = ed.MFVI(model, variational, data)
inference.initialize(n_samples=5, n_print=5)
for t in range(250):
    loss = inference.update()
    if t % inference.n_print == 0:
Example No. 30
import tensorflow as tf

from edward.models import Variational, Normal
from edward.stats import norm


class NormalPosterior:
    """
    p(x, z) = p(z) = p(z | x) = Normal(z; mu, std)
    """
    def __init__(self, mu, std):
        self.mu = mu
        self.std = std

    def log_prob(self, xs, zs):
        return norm.logpdf(zs, self.mu, self.std)


ed.set_seed(42)
mu = tf.constant(1.0)
std = tf.constant(1.0)
model = NormalPosterior(mu, std)
variational = Variational()
variational.add(Normal())

inference = ed.MFVI(model, variational)
inference.initialize()
for t in range(1000):
    loss = inference.update()
    inference.print_progress(t, loss)
Example No. 31
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import edward as ed
import numpy as np
import tensorflow as tf

from edward.models import Variational, Beta
from edward.stats import bernoulli, beta


class BetaBernoulli:
    """p(x, z) = Bernoulli(x | z) * Beta(z | 1, 1)"""
    def log_prob(self, xs, zs):
        log_prior = beta.logpdf(zs, a=1.0, b=1.0)
        log_lik = tf.pack([tf.reduce_sum(bernoulli.logpmf(xs['x'], z))
                           for z in tf.unpack(zs)])
        return log_lik + log_prior


ed.set_seed(42)
model = BetaBernoulli()
variational = Variational()
variational.add(Beta())
data = {'x': np.array([0, 1, 0, 0, 0, 0, 0, 0, 0, 1])}

inference = ed.MFVI(model, variational, data)
inference.run(n_iter=10000)
Example No. 32
    return [mean, stddev]

Normal.mapping = mapping

class Data:
    def __init__(self, data):
        self.mnist = data

    def sample(self, size):
        x_batch, _ = self.mnist.train.next_batch(size)
        return x_batch

ed.set_seed(42)
model = NormalBernoulli(FLAGS.num_vars)

variational = Variational()
variational.add(Normal(FLAGS.num_vars))

if not os.path.exists(FLAGS.data_directory):
    os.makedirs(FLAGS.data_directory)
mnist = input_data.read_data_sets(FLAGS.data_directory, one_hot=True)
data = Data(mnist)

inference = ed.VAE(model, variational, data)
sess = inference.initialize(n_data=FLAGS.n_data)
with tf.variable_scope("model", reuse=True) as scope:
    p_rep = model.sample_prior([FLAGS.n_data, FLAGS.num_vars])

for epoch in range(FLAGS.n_epoch):
    avg_loss = 0.0