Example 1
  def test_latent_vars(self):
    tf.InteractiveSession()
    mu = Normal(mu=0.0, sigma=1.0)
    qmu = Normal(mu=tf.Variable(0.0), sigma=tf.constant(1.0))
    qmu_misshape = Normal(mu=tf.constant([0.0]), sigma=tf.constant([1.0]))

    ed.Inference({mu: qmu})
    ed.Inference({mu: tf.constant(0.0)})
    ed.Inference({tf.constant(0.0): qmu})
    self.assertRaises(TypeError, ed.Inference, {mu: '5'})
    self.assertRaises(TypeError, ed.Inference, {mu: qmu_misshape})
Example 2
  def test_data(self):
    x = Normal(mu=0.0, sigma=1.0)
    qx = Normal(mu=0.0, sigma=1.0)
    qx_misshape = Normal(mu=tf.constant([0.0]), sigma=tf.constant([1.0]))
    x_ph = ed.placeholder(tf.float32)

    ed.Inference()
    ed.Inference(data={x: tf.constant(0.0)})
    ed.Inference(data={x_ph: tf.constant(0.0)})
    ed.Inference(data={x: x_ph})
    ed.Inference(data={x: qx})
    self.assertRaises(TypeError, ed.Inference, data={5: tf.constant(0.0)})
    self.assertRaises(TypeError, ed.Inference, data={x: 'a'})
    self.assertRaises(TypeError, ed.Inference, data={x_ph: x})
    self.assertRaises(TypeError, ed.Inference, data={x: qx_misshape})
Example 3
  def test_model_wrapper(self):
    model = NormalNormal()
    qmu = Normal(mu=tf.Variable(0.0), sigma=tf.constant(1.0))

    ed.Inference({'mu': qmu}, model_wrapper=model)
    self.assertRaises(TypeError, ed.Inference, data={'x': qmu},
                      model_wrapper=model)
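
Examples 3 and 6 pass a NormalNormal model wrapper that is not shown here. A minimal sketch of what it might look like, following Edward's legacy model-wrapper convention of a class exposing log_prob(xs, zs) over data and latent-variable dicts; the edward.stats.norm helper is an assumption from that era of the API:

import tensorflow as tf
from edward.stats import norm

class NormalNormal:
  """p(x, mu) = Normal(x | mu, 1) Normal(mu | 0, 1)."""
  def log_prob(self, xs, zs):
    # Log-prior of the latent mean plus log-likelihood of the observations.
    log_prior = norm.logpdf(zs['mu'], 0.0, 1.0)
    log_lik = tf.reduce_sum(norm.logpdf(xs['x'], zs['mu'], 1.0))
    return log_prior + log_lik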
Example 4
    def test_data(self):
        tf.InteractiveSession()
        x = Normal(mu=0.0, sigma=1.0)
        qx = Normal(mu=0.0, sigma=1.0)
        qx_misshape = Normal(mu=tf.constant([0.0]), sigma=tf.constant([1.0]))
        x_ph = tf.placeholder(tf.float32)

        ed.Inference()
        ed.Inference(data={x: tf.constant(0.0)})
        ed.Inference(data={x_ph: tf.constant(0.0)})
        ed.Inference(data={x: np.float64(0.0)})
        ed.Inference(data={x: np.int64(0)})
        ed.Inference(data={x: 0.0})
        ed.Inference(data={x: 0})
        ed.Inference(data={x: False})  # converted to `int`
        ed.Inference(data={x: x_ph})
        ed.Inference(data={x: qx})
        self.assertRaises(TypeError, ed.Inference, data={5: tf.constant(0.0)})
        self.assertRaises(TypeError, ed.Inference, data={x: tf.zeros(5)})
        self.assertRaises(TypeError, ed.Inference, data={x_ph: x})
        self.assertRaises(TypeError, ed.Inference, data={x: qx_misshape})
Example 5
import edward as ed
import tensorflow as tf

# The first dict maps priors to their posterior approximations; the second maps observed tensors to observed data
# The posteriors qz and qbeta contain learnable parameters
inference = ed.Inference({z: qz, beta: qbeta}, {x: x_train})
# Fit the parameters
inference.run()

# Finer-grained control over training
inference = ed.Inference({z: qz, beta: qbeta}, {x: x_train})
inference.initialize()
tf.global_variables_initializer().run()
for _ in range(inference.n_iter):
    info_dict = inference.update()
    inference.print_progress(info_dict)
inference.finalize()

from edward.models import Normal

# Point estimation: with no latent variables, the free tf.Variable theta is fit directly
theta = tf.Variable(0.0)
x = Normal(loc=tf.ones(10) * theta, scale=1.0)
inference = ed.Inference({}, {x: x_train})

# Conditional inference: posteriors placed in the data dict are held fixed, not learned
inference = ed.Inference({beta: qbeta}, {x: x_train, z: qz})
# Implicit prior: a latent variable absent from both dicts keeps its prior as its posterior
inference = ed.Inference({beta: qbeta}, {x: x_train})

# Base model
# p(x, z, beta) = Normal(x | beta, I) Categorical(z | pi) Normal(beta | 0, I)
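
For reference, a minimal sketch of the variables these snippets assume, mirroring the mixture model that Example 7 below builds in full (the sizes K, D, N are assumptions):

from edward.models import Categorical, Normal

K, D, N = 3, 2, 4
beta = Normal(mu=tf.zeros([K, D]), sigma=tf.ones([K, D]))  # cluster means
z = Categorical(logits=tf.zeros([N, K]))                   # cluster assignments
x = Normal(mu=tf.gather(beta, z), sigma=tf.ones([N, D]))   # observations
qbeta = Normal(mu=tf.Variable(tf.zeros([K, D])),
               sigma=tf.exp(tf.Variable(tf.zeros([K, D]))))
qz = Categorical(logits=tf.Variable(tf.zeros([N, K])))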
Example 6
    def test_model_wrapper(self):
        tf.InteractiveSession()
        model = NormalNormal()
        qmu = Normal(mu=tf.Variable(0.0), sigma=tf.constant(1.0))

        ed.Inference({'mu': qmu}, model_wrapper=model)
Example 7
# MODEL
K = 3
D = 2
N = 4
beta = Normal(mu=tf.zeros([K, D]), sigma=tf.ones([K, D]))
z = Categorical(logits=tf.zeros([N, K]))
x = Normal(mu=tf.gather(beta, z), sigma=tf.ones([N, D]))

# INFERENCE
qbeta = Normal(mu=tf.Variable(tf.zeros([K, D])),
               sigma=tf.exp(tf.Variable(tf.zeros([K, D]))))
qz = Categorical(logits=tf.Variable(tf.zeros([N, K])))

x_train = np.zeros([N, D], dtype=np.float32)  # placeholder data; substitute real observations of shape [N, D]

inference = ed.Inference({z: qz, beta: qbeta}, data={x: x_train})
inference.run()

#
# Supervised learning (regression)
import numpy as np
from scipy.stats import norm

def build_toy_dataset(N, coeff=np.random.randn(10), noise_std=0.1):
  n_dim = len(coeff)
  x = np.random.randn(N, n_dim).astype(np.float32)
  y = np.dot(x, coeff) + norm.rvs(0, noise_std, size=N)
  return x, y

N = 40  # number of data points
D = 10  # number of features

coeff = np.random.randn(D)
X_train, y_train = build_toy_dataset(N, coeff)
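
The fragment stops after generating the data. A minimal sketch of how the regression model and inference might continue, assuming Edward's random-variable API; the names X, w, b, qw, qb are ours, and the concrete algorithm ed.KLqp stands in for the generic ed.Inference used in the notes above:

from edward.models import Normal

# Bayesian linear regression: y = Xw + b + standard-normal noise.
X = tf.placeholder(tf.float32, [N, D])
w = Normal(loc=tf.zeros(D), scale=tf.ones(D))
b = Normal(loc=tf.zeros(1), scale=tf.ones(1))
y = Normal(loc=ed.dot(X, w) + b, scale=tf.ones(N))

# Variational factors with learnable means and positive scales.
qw = Normal(loc=tf.Variable(tf.zeros(D)),
            scale=tf.nn.softplus(tf.Variable(tf.zeros(D))))
qb = Normal(loc=tf.Variable(tf.zeros(1)),
            scale=tf.nn.softplus(tf.Variable(tf.zeros(1))))

inference = ed.KLqp({w: qw, b: qb}, data={X: X_train, y: y_train})
inference.run(n_samples=5, n_iter=250)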
Example 8
import edward as ed
import tensorflow as tf

# Point-estimate evaluation
x_post = ed.copy(x, {z: qz})  # qz is the learned posterior for z; x's dependence on z is replaced by x_post's dependence on qz
# y_post is the posterior predictive of y under the learned parameters; y_train is the real data
ed.evaluate('categorical_accuracy', data={y_post: y_train, x: x_train})
ed.evaluate('mean_absolute_error', data={y_post: y_train, x: x_train})
# Likelihood evaluation
ed.evaluate('log_likelihood', data={x_post: x_train})
# Splitting into training and validation sets
from edward.models import Categorical
qz_test = Categorical(logits=tf.Variable(tf.zeros([N_test, K])))
inference_test = ed.Inference({z: qz_test}, data={x: x_test, beta: qbeta})
inference_test.run()  # model training complete
x_post = ed.copy(x, {z: qz_test, beta: qbeta})
ed.evaluate('log_likelihood', data={x_post: x_valid})  # x_valid is held-out data for measuring performance

# Posterior predictive checks (PPC)
x_post = ed.copy(x, {z: qz})
ed.ppc(lambda xs, zs: tf.reduce_mean(xs[x_post]), data={x_post: x_train})
ed.ppc(lambda xs, zs: tf.reduce_max(zs[z]),
       data={
           y_post: y_train,
           x_ph: x_train
       },
       latent_vars={
           z: qz,
           beta: qbeta
       })  # explicitly specify the posteriors