def _test(p, n):
    # Edward's Bernoulli log_prob should match scipy's bernoulli.logpmf.
    rv = Bernoulli(p=p)
    rv_sample = rv.sample(n)
    x = rv_sample.eval()
    x_tf = tf.constant(x, dtype=tf.float32)
    p = p.eval()
    assert np.allclose(rv.log_prob(x_tf).eval(), stats.bernoulli.logpmf(x, p))
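These `_test` snippets are TF1-era Edward test functions whose import and session preamble is not shown; a plausible harness (an assumption, not the original test file) is:

# Assumed preamble for the _test snippets: an InteractiveSession makes
# the bare .eval() calls work, and scipy is the reference implementation.
# The `sess` name also matches the `with sess.as_default():` block used
# in a later snippet.
import numpy as np
import tensorflow as tf
from scipy import stats
from edward.models import Bernoulli

sess = tf.InteractiveSession()

With that preamble in place, a call like `_test(tf.constant(0.5), 5)` should run end to end.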
Example #2
class ZeroInflatedRV(RandomVariable, Distribution):
    """A zero-inflated random variable.

    `prob_zero` is the probability that a draw is zero (inflated) rather
    than coming from the `underlying` distribution.
    """
    def __init__(self, prob_zero, underlying, *args, **kwargs):
        self.prob_zero = prob_zero
        self.underlying = underlying
        self.bernoulli = Bernoulli(probs=self.prob_zero)  # for sampling
        super(ZeroInflatedRV, self).__init__(
            *args,
            dtype=underlying.dtype,
            validate_args=underlying.validate_args,
            allow_nan_stats=underlying.allow_nan_stats,
            reparameterization_type=underlying.reparameterization_type,
            **kwargs)

    def _log_prob(self, value):
        # Mixture density: at zero, mass comes from both the inflation
        # component and the underlying distribution; elsewhere, only from
        # the underlying distribution, down-weighted by (1 - prob_zero).
        not_zero_lp = self.underlying.log_prob(value)
        return tf.where(
            tf.equal(value, tf.zeros_like(value)),
            tf.log(self.prob_zero +
                   (1. - self.prob_zero) * tf.exp(not_zero_lp)),
            tf.log(1. - self.prob_zero) + not_zero_lp)

    def _sample_n(self, n, seed=None):
        # Draw inflation indicators, then zero out the inflated draws.
        zero = self.bernoulli.sample(n, seed=seed)
        return tf.where(tf.equal(tf.constant(1), zero),
                        tf.zeros_like(zero, dtype=self.dtype),
                        self.underlying.sample(n))
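A minimal usage sketch of the class above (a sketch only: it reuses the surrounding file's Edward/TF imports, the Poisson underlying is purely illustrative, and depending on the TF version the Poisson parameter is `rate` or `lam`):

# Sketch, not a tested example: a zero-inflated Poisson built from
# ZeroInflatedRV; prob_zero=0.3 and rate=2.0 are illustrative values.
from edward.models import Poisson

zip_rv = ZeroInflatedRV(prob_zero=0.3, underlying=Poisson(rate=2.0))
samples = zip_rv.sample(1000)            # ~30% extra zeros beyond Poisson's own
lp_zero = zip_rv.log_prob(tf.zeros([]))  # zero mass mixes both components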
def _test(shape, n):
    # Check Bernoulli's per-index log_prob_idx() against scipy, index by index.
    rv = Bernoulli(shape, p=tf.zeros(shape) + 0.5)
    rv_sample = rv.sample(n)
    x = rv_sample.eval()
    x_tf = tf.constant(x, dtype=tf.float32)
    p = rv.p.eval()
    for idx in range(shape[0]):
        assert np.allclose(
            rv.log_prob_idx((idx, ), x_tf).eval(),
            stats.bernoulli.logpmf(x[:, idx], p[idx]))
def _test(shape, n):
    # Distribution's log_prob() (built internally from Bernoulli's
    # log_prob_idx()) should equal the sum over indices of the
    # per-index scipy log pmfs.
    rv = Bernoulli(shape, p=tf.zeros(shape) + 0.5)
    rv_sample = rv.sample(n)

    x = rv_sample.eval()
    x_tf = tf.constant(x, dtype=tf.float32)
    p = rv.p.eval()
    val_ed = rv.log_prob(x_tf).eval()
    val_true = 0.0
    for idx in range(shape[0]):
        val_true += stats.bernoulli.logpmf(x[:, idx], p[idx])

    assert np.allclose(val_ed, val_true)
def _test(shape, n):
    # Same check as above, run explicitly inside a session context
    # (`sess` is assumed to be created by the surrounding test module).
    rv = Bernoulli(shape, p=tf.zeros(shape) + 0.5)
    rv_sample = rv.sample(n)
    with sess.as_default():
        x = rv_sample.eval()
        x_tf = tf.constant(x, dtype=tf.float32)
        p = rv.p.eval()
        val_ed = rv.log_prob(x_tf).eval()
        val_true = 0.0
        for idx in range(shape[0]):
            val_true += stats.bernoulli.logpmf(x[:, idx], p[idx])

        assert np.allclose(val_ed, val_true)
Example #8
def _test(p, n):
    # sample dims should be `n` (a list of sample dims) plus p's dims
    x = Bernoulli(p=p)
    val_est = get_dims(x.sample(n))
    val_true = n + get_dims(p)
    assert val_est == val_true
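`get_dims` is an Edward utility that is not shown here; a rough stand-in (an assumption, not the actual helper, reusing the `np`/`tf` imports sketched earlier) that matches its use above is:

# Hypothetical stand-in for get_dims: return the static shape of a
# tensor (or the shape of an array-like) as a plain list of ints.
def get_dims(x):
    if isinstance(x, (tf.Tensor, tf.Variable)):
        return x.get_shape().as_list()
    return list(np.shape(x))

Note that `n` is passed as a list here (e.g. `[5]`), so `n + get_dims(p)` is list concatenation.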
def _test(shape, p, n):
    x = Bernoulli(shape, p)
    val_est = tuple(get_dims(x.sample(n)))
    val_true = (n, ) + shape
    assert val_est == val_true
Example #10
from __future__ import print_function, division, absolute_import

from data import get_value
import tensorflow as tf
import edward as ed
from edward.models import Beta, Bernoulli

theta = Beta(a=1.0, b=1.0)
# 12-dimensional Bernoulli sharing the Beta-distributed probability theta
x = Bernoulli(p=tf.ones(12) * theta)

# ====== sampling from each marginal variable
theta_sample = theta.sample()
x_sample = x.sample()
print("Marginal theta samples:", get_value(theta_sample))
print("Marginal X samples:", get_value(x_sample))

# ====== sampling from the joint distribution
samples = get_value([x.value(), theta.value()])
print("From joint distribution:")
print("- X:", samples[0])
print("- theta:", samples[1])
def _test(self, probs, n):
    # Edward's Bernoulli log_prob should agree exactly with TF's.
    rv = Bernoulli(probs)
    dist = ds.Bernoulli(probs)
    x = rv.sample(n).eval()
    self.assertAllEqual(rv.log_prob(x).eval(), dist.log_prob(x).eval())

def _test(self, probs, n):
    # Edward's Bernoulli sample shape should agree with TF's.
    rv = Bernoulli(probs)
    dist = ds.Bernoulli(probs)
    self.assertEqual(rv.sample(n).shape, dist.sample(n).shape)
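These `tf.test.TestCase`-style methods compare Edward's `Bernoulli` against TensorFlow's own distribution through a `ds` alias; their preamble is not shown, but is presumably (an assumption) along these lines:

# Assumed preamble for the TestCase snippets: `ds` aliases TF1's
# contrib distributions module.
import tensorflow as tf
from edward.models import Bernoulli

ds = tf.contrib.distributions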
Example #16
def _test(shape, p, size):
    x = Bernoulli(shape, p)
    val_est = tuple(get_dims(x.sample(size=size)))
    val_true = (size, ) + shape
    assert val_est == val_true