Example #1
 def testNotReparameterized(self):
     p = tf.constant([0.2, 0.6])
     _, grad_p = tfp.math.value_and_gradient(
         lambda x: tfd.Bernoulli(probs=x).sample(100), p)
     self.assertIsNone(grad_p)
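Sampling from a Bernoulli is a discrete operation, so there is no reparameterization gradient and `grad_p` comes back `None`. Gradients do flow through `log_prob`, which is differentiable in `probs`; a minimal sketch (values chosen for illustration):

```python
p = tf.constant([0.2, 0.6])
_, grad_p = tfp.math.value_and_gradient(
    lambda x: tf.reduce_sum(tfd.Bernoulli(probs=x).log_prob([1, 0])), p)
# d/dp log_prob(1) = 1/p and d/dp log_prob(0) = -1/(1-p),
# so grad_p == [5.0, -2.5] rather than None.
```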
Example #2
 def testEntropyNoBatch(self):
     p = 0.2
     dist = tfd.Bernoulli(probs=p)
     self.assertAllClose(self.evaluate(dist.entropy()), entropy(p))
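The `entropy` helper used by these tests is defined elsewhere in the test file; a sketch consistent with the closed-form Bernoulli entropy:

```python
import numpy as np

def entropy(p):
  # H(Bernoulli(p)) = -q*log(q) - p*log(p), with q = 1 - p.
  q = 1. - p
  return -q * np.log(q) - p * np.log(p)
```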
Example #3
 def testEntropyWithBatch(self):
     p = [[0.1, 0.7], [0.2, 0.6]]
     dist = tfd.Bernoulli(probs=p, validate_args=False)
     self.assertAllClose(self.evaluate(dist.entropy()),
                         [[entropy(0.1), entropy(0.7)],
                          [entropy(0.2), entropy(0.6)]])
Example #4
 def testLogProbWithZeroOrOneProbs(self):
     probs = [1., 0.]  # logits = [np.inf, -np.inf]
     dist = tfd.Bernoulli(probs=probs)
     self.assertAllEqual([0., -np.inf], dist.log_prob([1., 1.]))
     self.assertAllEqual([-np.inf, 0.], dist.log_prob([0., 0.]))
     self.assertAllEqual([np.nan, np.nan], dist.log_prob([np.nan, np.nan]))
Example #5
 def testBoundaryConditions(self):
     dist = tfd.Bernoulli(probs=1.0)
     self.assertAllClose(np.nan, self.evaluate(dist.log_prob(0)))
     self.assertAllClose(np.nan, self.evaluate(dist.log_prob(1)))
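The `nan` here (rather than `0` or `-inf`) is plausible if this version evaluates `log_prob` through the logits parameterization: `probs=1.0` maps to `logits=inf`, and `x * logits - softplus(logits)` then hits an indeterminate form for either value of `x`. A NumPy sketch of the effect:

```python
import numpy as np

logits = np.inf
softplus = np.logaddexp(0., logits)  # log(1 + exp(logits)) = inf
print(1 * logits - softplus)         # inf - inf  -> nan
print(0 * logits - softplus)         # 0 * inf    -> nan
```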
Example #6
 def testDocstrSliceExample(self):
     b = tfd.Bernoulli(logits=tf.zeros([3, 5, 7,
                                        9]))  # batch shape [3, 5, 7, 9]
     self.assertAllEqual((3, 5, 7, 9), b.batch_shape)
     b2 = b[:, tf.newaxis, ..., -2:, 1::2]  # batch shape [3, 1, 5, 2, 4]
     self.assertAllEqual((3, 1, 5, 2, 4), b2.batch_shape)
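The slice arithmetic mirrors NumPy: `:` keeps the 3, `tf.newaxis` inserts a 1, `...` covers the 5, `-2:` takes 2 of the 7, and `1::2` takes 4 of the 9. The same expression on a plain array confirms the shape:

```python
import numpy as np

a = np.zeros([3, 5, 7, 9])
assert a[:, np.newaxis, ..., -2:, 1::2].shape == (3, 1, 5, 2, 4)
```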
Example #7
def _left_doubling_increments(batch_shape,
                              max_doublings,
                              step_size,
                              seed=None,
                              name=None):
    """Computes the doubling increments for the left end point.

  The doubling procedure expands an initial interval to find a superset of the
  true slice. At each doubling iteration, the interval width is doubled to
  either the left or the right hand side with equal probability.
  If, initially, the left end point is at `L(0)` and the width of the
  interval is `w(0)`, then the left end point and the width at the
  k-th iteration (denoted L(k) and w(k) respectively) are given by the following
  recursions:

  ```none
  w(k) = 2 * w(k-1)
  L(k) = L(k-1) - w(k-1) * X_k, X_k ~ Bernoulli(0.5)
  or, L(0) - L(k) = w(0) Sum(2^i * X(i+1), 0 <= i < k)
  ```

  This function computes the sequence of `L(0)-L(k)` and `w(k)` for k between 0
  and `max_doublings` independently for each chain.

  Args:
    batch_shape: Positive int32 `tf.Tensor`. The batch shape.
    max_doublings: Scalar positive int32 `tf.Tensor`. The maximum number of
      doublings to consider.
    step_size: A real `tf.Tensor` with shape compatible with [num_chains].
      The size of the initial interval.
    seed: (Optional) positive int. The random seed. If None, no seed is set.
    name: Python `str` name prefixed to Ops created by this function.
      Default value: `None` (i.e., 'left_doubling_increments').

  Returns:
    left_increments: A tensor of shape (max_doublings+1, batch_shape). The
      relative position of the left end point after the doublings.
    widths: A real tensor obtained by broadcasting `step_size` against the
      width multipliers of shape (max_doublings+1, ones_like(batch_shape)).
      The widths of the intervals at each stage of the doubling.
  """
    with tf.name_scope(name, 'left_doubling_increments',
                       [batch_shape, max_doublings, step_size]):

        step_size = tf.convert_to_tensor(step_size)
        dtype = step_size.dtype.base_dtype
        # Output shape of the left increments tensor.
        output_shape = tf.concat(([max_doublings + 1], batch_shape), axis=0)
        # A sample realization of X_k.
        expand_left = distributions.Bernoulli(0.5, dtype=dtype).sample(
            sample_shape=output_shape, seed=seed)

        # Multipliers for the widths of the successive intervals: starts at 1
        # and ends at 2^max_doublings.
        width_multipliers = tf.cast(2**tf.range(0, max_doublings + 1),
                                    dtype=dtype)
        # Output shape of the `widths` tensor.
        widths_shape = tf.concat(
            ([max_doublings + 1], tf.ones_like(batch_shape)), axis=0)
        width_multipliers = tf.reshape(width_multipliers, shape=widths_shape)
        # width_multipliers now has shape [max_doublings + 1, 1, 1, ...] and
        # broadcasts against `step_size`.
        widths = width_multipliers * step_size

        # Take the cumulative sum of the left-side increments in slice width
        # to give the resulting distance from the initial lower bound.
        left_increments = tf.cumsum(widths * expand_left,
                                    exclusive=True,
                                    axis=0)
        return left_increments, widths
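A minimal usage sketch (shapes assumed from the docstring): three chains with unit step size and up to four doublings:

```python
increments, widths = _left_doubling_increments(
    batch_shape=tf.constant([3]), max_doublings=4,
    step_size=tf.constant([1., 1., 1.]), seed=17)
# increments: shape [5, 3], the cumulative left-side expansion per chain
#   (the first row is zero because the cumsum is exclusive).
# widths: step_size broadcast against [1, 2, 4, 8, 16], shape [5, 3] here.
```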
Example #8
 def setUp(self):
     self.dtype = np.float32
     self.model = tfp.glm.Bernoulli()
     self.expected = tfp.glm.CustomExponentialFamily(
         lambda mu: tfd.Bernoulli(probs=mu), tf.nn.sigmoid)
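`tfp.glm.Bernoulli` models a binary response whose mean is the sigmoid of the linear response, which is exactly the `CustomExponentialFamily` spelled out above. A usage sketch (input values chosen for illustration):

```python
model = tfp.glm.Bernoulli()
mean, variance, grad_mean = model(tf.constant([0., 2.]))
# mean == sigmoid([0., 2.]); variance == grad_mean == mean * (1 - mean).
```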
Example #9
 def _as_distribution(self, r):
     return tfd.Bernoulli(logits=r)
Example #10
 def __init__(self, logits):
   super(IndepBern1d, self).__init__(tfd.Bernoulli(logits=logits,
                                                   dtype=tf.float32),
                                     reinterpreted_batch_ndims=1)
   self._parameters = {'logits': logits}
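For context, a usage sketch of this wrapper (shapes assumed): the last axis of `logits` is reinterpreted as the event dimension, so component log-probs are summed over it:

```python
d = IndepBern1d(tf.zeros([4, 3]))
# batch_shape [4], event_shape [3]; samples are float32 because of the
# dtype passed to the underlying Bernoulli.
lp = d.log_prob(tf.ones([4, 3]))  # shape [4]
```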
Example #11
 def exact_kl_bernoulli_bernoulli(probs_p, probs_q):
     p = tfd.Bernoulli(probs=probs_p)
     q = tfd.Bernoulli(probs=probs_q)
     return tfd.kl_divergence(p, q)
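The Bernoulli-to-Bernoulli KL has a simple closed form; a NumPy sketch of the same quantity, useful for cross-checking `tfd.kl_divergence`:

```python
import numpy as np

def exact_kl_numpy(p, q):
  # KL(Bern(p) || Bern(q)) = p*log(p/q) + (1-p)*log((1-p)/(1-q)).
  return p * np.log(p / q) + (1. - p) * np.log((1. - p) / (1. - q))
```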
Example #12
 def testMeanWithInfiniteLogits(self):
     logits = [np.inf, -np.inf]  # probs = [1, 0]
     dist = tfd.Bernoulli(logits=logits, validate_args=True)
     self.assertAllEqual([1., 0.], dist.mean())
Example #13
 def testEntropyWithZeroOneProbs(self):
     probs = [1., 0.]  # logits = [np.inf, -np.inf]
     dist = tfd.Bernoulli(probs=probs)
     self.assertAllEqual([0., 0.], dist.entropy())
Example #14
 def testEntropyWithInfiniteLogits(self):
     logits = [np.inf, -np.inf]  # probs = [1, 0]
     dist = tfd.Bernoulli(logits=logits)
     self.assertAllEqual([0., 0.], dist.entropy())
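Both degenerate cases are deterministic, so the entropy is zero; the closed form relies on the convention 0 * log 0 = 0. A NumPy sketch that applies the convention explicitly:

```python
import numpy as np

def safe_entropy(p):
  # Convention: 0 * log(0) = 0, so degenerate probs give H = 0.
  p = np.asarray(p, dtype=np.float64)
  with np.errstate(divide='ignore', invalid='ignore'):
    h = -p * np.log(p) - (1. - p) * np.log(1. - p)
  return np.where((p == 0.) | (p == 1.), 0., h)

print(safe_entropy([1., 0.]))  # [0. 0.]
```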
Example #15
 def testMean(self):
     p = np.array([[0.2, 0.7], [0.5, 0.4]], dtype=np.float32)
     dist = tfd.Bernoulli(probs=p)
     self.assertAllEqual(self.evaluate(dist.mean()), p)
Example #16
 def _as_distribution(self, r):
     return tfd.Bernoulli(logits=DeferredTensor(r, self._as_logits))
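`DeferredTensor` applies the transform lazily, so the logits track later changes to the underlying value. A minimal sketch of the pattern (the variable and transform are chosen for illustration):

```python
raw = tf.Variable(0.5)
d = tfd.Bernoulli(logits=tfp.util.DeferredTensor(raw, tf.math.log))
raw.assign(2.0)  # d's logits now reflect log(2.0) without rebuilding d.
```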
Example #17
def make_bernoulli(batch_shape, dtype=tf.int32):
    p = np.random.uniform(size=list(batch_shape))
    p = tf.constant(p, dtype=tf.float32)
    return tfd.Bernoulli(probs=p, dtype=dtype)
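A usage sketch of the helper (shape chosen for illustration):

```python
dist = make_bernoulli([2, 3])
# dist.batch_shape == [2, 3]; samples have dtype tf.int32 by default.
```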
Example #18
  def test_ordereddict_sample_log_prob(self):
    build_ordereddict = lambda e, scale, loc, m, x: collections.OrderedDict([  # pylint: disable=g-long-lambda
        ('e', e), ('scale', scale), ('loc', loc), ('m', m), ('x', x)])

    # pylint: disable=bad-whitespace
    model = build_ordereddict(
        e    =          tfd.Independent(tfd.Exponential(rate=[100, 120]), 1),
        scale=lambda e: tfd.Gamma(concentration=e[..., 0], rate=e[..., 1]),
        loc  =          tfd.Normal(loc=0, scale=2.),
        m    =          tfd.Normal,
        x    =lambda m: tfd.Sample(tfd.Bernoulli(logits=m), 12))
    # pylint: enable=bad-whitespace
    d = tfd.JointDistributionNamed(model, validate_args=True)

    self.assertEqual(
        (
            ('e', ()),
            ('scale', ('e',)),
            ('loc', ()),
            ('m', ('loc', 'scale')),
            ('x', ('m',)),
        ),
        d.resolve_graph())

    xs = d.sample(seed=test_util.test_seed())
    self.assertLen(xs, 5)
    # We'll verify the shapes work as intended when we plumb these back into the
    # respective log_probs.

    ds, _ = d.sample_distributions(value=xs, seed=test_util.test_seed())
    self.assertLen(ds, 5)
    values = tuple(ds.values())
    self.assertIsInstance(values[0], tfd.Independent)
    self.assertIsInstance(values[1], tfd.Gamma)
    self.assertIsInstance(values[2], tfd.Normal)
    self.assertIsInstance(values[3], tfd.Normal)
    self.assertIsInstance(values[4], tfd.Sample)

    # Static properties.
    self.assertAllEqual(build_ordereddict(
        e=tf.float32, scale=tf.float32, loc=tf.float32,
        m=tf.float32, x=tf.int32), d.dtype)

    batch_shape_tensor_, event_shape_tensor_ = self.evaluate([
        d.batch_shape_tensor(), d.event_shape_tensor()])

    expected_batch_shape = build_ordereddict(e=[], scale=[], loc=[], m=[], x=[])
    for (expected, actual_tensorshape, actual_shape_tensor_) in zip(
        expected_batch_shape.values(), d.batch_shape.values(),
        batch_shape_tensor_.values()):
      self.assertAllEqual(expected, actual_tensorshape)
      self.assertAllEqual(expected, actual_shape_tensor_)

    expected_event_shape = build_ordereddict(
        e=[2], scale=[], loc=[], m=[], x=[12])
    for (expected, actual_tensorshape, actual_shape_tensor_) in zip(
        expected_event_shape.values(), d.event_shape.values(),
        event_shape_tensor_.values()):
      self.assertAllEqual(expected, actual_tensorshape)
      self.assertAllEqual(expected, actual_shape_tensor_)

    expected_jlp = sum(d.log_prob(x) for d, x in zip(ds.values(), xs.values()))
    actual_jlp = d.log_prob(xs)
    self.assertAllClose(*self.evaluate([expected_jlp, actual_jlp]),
                        atol=0., rtol=1e-4)
Example #19
 def testP(self):
     p = [0.2, 0.4]
     dist = tfd.Bernoulli(probs=p)
     self.assertAllClose(p, self.evaluate(dist.probs))
Example #20
  def test_dict_sample_log_prob(self):
    # pylint: disable=bad-whitespace
    d = tfd.JointDistributionNamed(dict(
        e    =          tfd.Independent(tfd.Exponential(rate=[100, 120]), 1),
        scale=lambda e: tfd.Gamma(concentration=e[..., 0], rate=e[..., 1]),
        loc  =          tfd.Normal(loc=0, scale=2.),
        m    =          tfd.Normal,
        x    =lambda m: tfd.Sample(tfd.Bernoulli(logits=m), 12)),
                                   validate_args=True)
    # pylint: enable=bad-whitespace

    self.assertEqual(
        (
            ('e', ()),
            ('scale', ('e',)),
            ('loc', ()),
            ('m', ('loc', 'scale')),
            ('x', ('m',)),
        ),
        d.resolve_graph())

    xs = d.sample(seed=test_util.test_seed())
    self.assertLen(xs, 5)
    # We'll verify the shapes work as intended when we plumb these back into the
    # respective log_probs.

    ds, _ = d.sample_distributions(value=xs, seed=test_util.test_seed())
    self.assertLen(ds, 5)
    self.assertIsInstance(ds['e'], tfd.Independent)
    self.assertIsInstance(ds['scale'], tfd.Gamma)
    self.assertIsInstance(ds['loc'], tfd.Normal)
    self.assertIsInstance(ds['m'], tfd.Normal)
    self.assertIsInstance(ds['x'], tfd.Sample)

    # Static properties.
    self.assertAllEqual(
        {'e': tf.float32, 'scale': tf.float32, 'loc': tf.float32,
         'm': tf.float32, 'x': tf.int32},
        d.dtype)

    batch_shape_tensor_, event_shape_tensor_ = self.evaluate([
        d.batch_shape_tensor(), d.event_shape_tensor()])

    expected_batch_shape = {
        'e': [], 'scale': [], 'loc': [], 'm': [], 'x': []}
    batch_tensorshape = d.batch_shape
    for k in expected_batch_shape:
      self.assertAllEqual(expected_batch_shape[k], batch_tensorshape[k])
      self.assertAllEqual(expected_batch_shape[k], batch_shape_tensor_[k])

    expected_event_shape = {
        'e': [2], 'scale': [], 'loc': [], 'm': [], 'x': [12]}
    event_tensorshape = d.event_shape
    for k in expected_event_shape:
      self.assertAllEqual(expected_event_shape[k], event_tensorshape[k])
      self.assertAllEqual(expected_event_shape[k], event_shape_tensor_[k])

    expected_jlp = sum(ds[k].log_prob(xs[k]) for k in ds.keys())
    actual_jlp = d.log_prob(xs)
    self.assertAllClose(*self.evaluate([expected_jlp, actual_jlp]),
                        atol=0., rtol=1e-4)
Example #21
 def testFloatMode(self):
     dist = tfd.Bernoulli(probs=.6, dtype=tf.float32, validate_args=True)
     self.assertEqual(np.float32(1), self.evaluate(dist.mode()))
Example #22
 def testLogProbWithInfiniteLogits(self):
     logits = [np.inf, -np.inf]  # probs = [1, 0].
     dist = tfd.Bernoulli(logits=logits)
     self.assertAllEqual([0., -np.inf], dist.log_prob([1., 1.]))
     self.assertAllEqual([-np.inf, 0.], dist.log_prob([0., 0.]))
     self.assertAllEqual([np.nan, np.nan], dist.log_prob([np.nan, np.nan]))
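Unlike testBoundaryConditions above, infinite logits here yield the limiting values rather than `nan`, consistent with `log_prob(1) = -softplus(-logits)` and `log_prob(0) = -softplus(logits)`. A NumPy check of the limits with large finite logits:

```python
import numpy as np

logits = np.array([30., -30.])
log_p1 = -np.logaddexp(0., -logits)  # log sigmoid(logits)  -> ~[0, -30]
log_p0 = -np.logaddexp(0., logits)   # log sigmoid(-logits) -> ~[-30, 0]
print(log_p1, log_p0)
```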