Example #1
def _test(alpha, n):
  # Draw n samples, then check Edward's log_prob against the NumPy
  # reference dirichlet_logpdf_vec (defined elsewhere in the test module).
  rv = Dirichlet(alpha=alpha)
  rv_sample = rv.sample(n)
  x = rv_sample.eval()
  x_tf = tf.constant(x, dtype=tf.float32)
  alpha = alpha.eval()
  assert np.allclose(rv.log_prob(x_tf).eval(),
                     dirichlet_logpdf_vec(x, alpha), atol=1e-3)
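The helper dirichlet_logpdf_vec is not shown in these snippets. A minimal sketch of what it presumably computes, written against scipy.stats.dirichlet as a stand-in for the original test utility (an assumption, not the actual helper):

import numpy as np
from scipy import stats

def dirichlet_logpdf_vec(x, alpha):
    # Row-wise Dirichlet log-density: x has shape (n, K), alpha has shape (K,).
    # scipy requires each row of x to lie on the simplex (components sum to 1).
    return np.array([stats.dirichlet.logpdf(row, alpha) for row in x])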
Example #2
def _test(alpha, n):
  rv = Dirichlet(alpha=alpha)
  rv_sample = rv.sample(n)
  x = rv_sample.eval()
  x_tf = tf.constant(x, dtype=tf.float32)
  alpha = alpha.eval()
  assert np.allclose(rv.log_prob(x_tf).eval(),
                     dirichlet_logpdf_vec(x, alpha))
Example #3
def _test(shape, n):
    # Check log_prob_idx against the reference density for rank-1 and
    # rank-2 batch shapes; any other rank fails the test outright.
    K = shape[-1]
    rv = Dirichlet(shape, alpha=tf.constant(1.0 / K, shape=shape))
    rv_sample = rv.sample(n)
    x = rv_sample.eval()
    x_tf = tf.constant(x, dtype=tf.float32)
    alpha = rv.alpha.eval()
    if len(shape) == 1:
        assert np.allclose(
            rv.log_prob_idx((), x_tf).eval(),
            dirichlet_logpdf_vec(x[:, :], alpha[:]))
    elif len(shape) == 2:
        for i in range(shape[0]):
            assert np.allclose(
                rv.log_prob_idx((i, ), x_tf).eval(),
                dirichlet_logpdf_vec(x[:, i, :], alpha[i, :]))
    else:
        assert False
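A hypothetical driver for the test above, assuming Edward's TF1 graph-mode API (the .eval() calls require a default session):

import tensorflow as tf

with tf.Session():
    _test((5,), 100)     # a single 5-dimensional Dirichlet
    _test((2, 5), 100)   # a batch of two 5-dimensional Dirichlets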
Example #5
qpi_alpha = tf.nn.softplus(tf.Variable(tf.random_normal([K])))
qmu_mu = tf.Variable(tf.random_normal([K * D]))
qmu_sigma = tf.nn.softplus(tf.Variable(tf.random_normal([K * D])))
qsigma_alpha = tf.nn.softplus(tf.Variable(tf.random_normal([K * D])))
qsigma_beta = tf.nn.softplus(tf.Variable(tf.random_normal([K * D])))

qpi = Dirichlet(alpha=qpi_alpha)
qmu = Normal(mu=qmu_mu, sigma=qmu_sigma)
qsigma = InverseGamma(alpha=qsigma_alpha, beta=qsigma_beta)

data = {'x': x_train}
inference = ed.KLqp({'pi': qpi, 'mu': qmu, 'sigma': qsigma}, data, model)
inference.run(n_iter=2500, n_samples=10, n_minibatch=20)

# Average per-cluster and per-data point likelihood over many posterior samples.
log_liks = []
for s in range(100):
    zrep = {
        'pi': qpi.sample(()),
        'mu': qmu.sample(()),
        'sigma': qsigma.sample(())
    }
    log_liks += [model.predict(data, zrep)]

log_liks = tf.reduce_mean(log_liks, 0)

# Choose the cluster with the highest likelihood for each data point.
clusters = tf.argmax(log_liks, 0).eval()
plt.scatter(x_train[:, 0], x_train[:, 1], c=clusters, cmap=cm.bwr)
plt.axis([-3, 3, -3, 3])
plt.title("Predicted cluster assignments")
plt.show()
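The softplus transforms above keep the unconstrained tf.Variables in the positive domain required for Dirichlet concentrations and InverseGamma shape/scale parameters. A standalone illustration in plain NumPy (for reference only, not part of the example):

import numpy as np

def softplus(v):
    return np.log1p(np.exp(v))   # log(1 + exp(v)) > 0 for every real v

print(softplus(-3.0))  # ~0.0486: even strongly negative inputs map to small positives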
Example #6
qpi_alpha = tf.nn.softplus(tf.Variable(tf.random_normal([K])))
qmu_mu = tf.Variable(tf.random_normal([K * D]))
qmu_sigma = tf.nn.softplus(tf.Variable(tf.random_normal([K * D])))
qsigma_alpha = tf.nn.softplus(tf.Variable(tf.random_normal([K * D])))
qsigma_beta = tf.nn.softplus(tf.Variable(tf.random_normal([K * D])))

qpi = Dirichlet(alpha=qpi_alpha)
qmu = Normal(mu=qmu_mu, sigma=qmu_sigma)
qsigma = InverseGamma(alpha=qsigma_alpha, beta=qsigma_beta)

data = {'x': x_train}
inference = ed.MFVI({'pi': qpi, 'mu': qmu, 'sigma': qsigma}, data, model)
inference.run(n_iter=2500, n_samples=10, n_minibatch=20)

# Average per-cluster and per-data point likelihood over many posterior samples.
log_liks = []
for s in range(100):
  zrep = {'pi': qpi.sample(()),
          'mu': qmu.sample(()),
          'sigma': qsigma.sample(())}
  log_liks += [model.predict(data, zrep)]

log_liks = tf.reduce_mean(log_liks, 0)

# Choose the cluster with the highest likelihood for each data point.
clusters = tf.argmax(log_liks, 0).eval()
plt.scatter(x_train[:, 0], x_train[:, 1], c=clusters, cmap=cm.bwr)
plt.axis([-3, 3, -3, 3])
plt.title("Predicted cluster assignments")
plt.show()
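This is the same pipeline as Example #5, except that it calls ed.MFVI, the name early Edward releases used for mean-field KL(q||p) variational inference. To the best of my knowledge, later Edward versions renamed it, so the equivalent call would be:

inference = ed.KLqp({'pi': qpi, 'mu': qmu, 'sigma': qsigma}, data, model)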
Example #7
def _test(shape, alpha, n):
    x = Dirichlet(shape, alpha)
    val_est = tuple(get_dims(x.sample(n)))
    val_true = (n, ) + shape
    assert val_est == val_true
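A hypothetical invocation, assuming get_dims returns the list of static tensor dimensions (hence the tuple(...) conversion above):

_test((2, 3), tf.constant(0.5, shape=(2, 3)), 5)  # expects sample dims (5, 2, 3)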
Example #8
def _test(alpha, n):
    x = Dirichlet(alpha=alpha)
    val_est = get_dims(x.sample(n))
    val_true = n + get_dims(alpha)  # list concatenation: n is a list here
    assert val_est == val_true
Example #9
def _test(shape, alpha, size):
    x = Dirichlet(shape, alpha)
    val_est = tuple(get_dims(x.sample(size=size)))
    val_true = (size, ) + shape
    assert val_est == val_true
Example #10
def _test(alpha, n):
  x = Dirichlet(alpha=alpha)
  val_est = get_dims(x.sample(n))
  val_true = n + get_dims(alpha)
  assert val_est == val_true
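Here val_true concatenates n with get_dims(alpha), so unlike Example #7 the sample size n must itself be a list. A hypothetical call:

alpha = tf.constant(0.5, shape=[3])
_test(alpha, [5])   # [5] + [3] == [5, 3]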