Example #1
    def new(params, event_shape=(), validate_args=False, name=None):
        """Create the distribution instance from a `params` vector."""
        from odin.bay.distributions import ZeroInflated

        with tf.compat.v1.name_scope(name, 'ZeroInflatedPoisson',
                                     [params, event_shape]):
            params = tf.convert_to_tensor(value=params, name='params')
            event_shape = dist_util.expand_to_vector(tf.convert_to_tensor(
                value=event_shape, name='event_shape', dtype=tf.int32),
                                                     tensor_name='event_shape')
            output_shape = tf.concat([
                tf.shape(input=params)[:-1],
                event_shape,
            ],
                                     axis=0)
            (log_rate_params, logits_params) = tf.split(params, 2, axis=-1)
            # `zip_poisson` avoids shadowing the Python builtin `zip`.
            zip_poisson = ZeroInflated(count_distribution=tfd.Poisson(
                log_rate=tf.reshape(log_rate_params, output_shape),
                validate_args=validate_args),
                                       logits=tf.reshape(logits_params, output_shape),
                                       validate_args=validate_args)
            return tfd.Independent(
                zip_poisson,
                reinterpreted_batch_ndims=tf.size(input=event_shape),
                validate_args=validate_args)
Example #2
 def new(params,
         event_shape=(),
         activation=tf.identity,
         validate_args=False,
         name="ZIPoissonLayer"):
     """Create the distribution instance from a `params` vector."""
     params = tf.convert_to_tensor(value=params, name='params')
     event_shape = dist_util.expand_to_vector(
         tf.convert_to_tensor(value=event_shape,
                              name='event_shape',
                              dtype=tf.int32),
         tensor_name='event_shape',
     )
     output_shape = tf.concat(
         [tf.shape(input=params)[:-1], event_shape],
         axis=0,
     )
     (log_rate_params, logits_params) = tf.split(params, 2, axis=-1)
     return tfd.Independent(
         ZeroInflated(count_distribution=tfd.Poisson(
             log_rate=activation(tf.reshape(log_rate_params, output_shape)),
             validate_args=validate_args),
                      logits=tf.reshape(logits_params, output_shape),
                      validate_args=validate_args),
         reinterpreted_batch_ndims=tf.size(input=event_shape),
         name=name,
     )
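Relative to Example #1, this variant only adds an `activation` hook applied to the log-rate before it parameterizes the Poisson. A hedged one-liner showing a hypothetical use, bounding the log-rate for numerical safety:

    dist = new(params, event_shape=(5,),
               activation=lambda t: tf.clip_by_value(t, -10., 10.))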
Example #3
  def test_broadcasting_explicitly_unsupported(self):
    old_batch_shape = [4]
    new_batch_shape = [1, 4, 1]
    rate_ = self.dtype([1, 10, 2, 20])

    rate = tf1.placeholder_with_default(
        rate_, shape=old_batch_shape if self.is_static_shape else None)
    poisson_4 = tfd.Poisson(rate, validate_args=True)
    new_batch_shape_ph = (
        tf.constant(np.int32(new_batch_shape)) if self.is_static_shape else
        tf1.placeholder_with_default(np.int32(new_batch_shape), shape=None))
    poisson_141_reshaped = tfd.BatchReshape(
        poisson_4, new_batch_shape_ph, validate_args=True)

    x_4 = self.dtype([2, 12, 3, 23])
    x_114 = self.dtype([2, 12, 3, 23]).reshape(1, 1, 4)

    if self.is_static_shape or tf.executing_eagerly():
      with self.assertRaisesRegex(NotImplementedError,
                                  'too few batch and event dims'):
        poisson_141_reshaped.log_prob(x_4)
      with self.assertRaisesRegex(NotImplementedError,
                                  'unexpected batch and event shape'):
        poisson_141_reshaped.log_prob(x_114)
      return

    with self.assertRaisesOpError('too few batch and event dims'):
      self.evaluate(poisson_141_reshaped.log_prob(x_4))

    with self.assertRaisesOpError('unexpected batch and event shape'):
      self.evaluate(poisson_141_reshaped.log_prob(x_114))
Example #4
 def test_kahan_precision(self, jit=False):
     maybe_jit = lambda f: f
     if jit:
         self.skip_if_no_xla()
         maybe_jit = tf.function(experimental_compile=True)
     stream = test_util.test_seed_stream()
     n = 20_000
     samps = tfd.Poisson(rate=1.).sample(n, seed=stream())
     log_rate = tfd.Normal(0, .2).sample(seed=stream())
     pois = tfd.Poisson(log_rate=log_rate)
     lp = maybe_jit(
         tfd.Sample(pois, n,
                    experimental_use_kahan_sum=True).log_prob)(samps)
     pois64 = tfd.Poisson(log_rate=tf.cast(log_rate, tf.float64))
     lp64 = tfd.Sample(pois64, n).log_prob(tf.cast(samps, tf.float64))
     # Evaluate together to ensure we use the same samples.
     lp, lp64 = self.evaluate((tf.cast(lp, tf.float64), lp64))
     # Without experimental_use_kahan_sum, this check fails in ~75% of
     # --vary_seed runs on CPU (0-80% on GPU).
     self.assertAllClose(lp64, lp, rtol=0., atol=.01)
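The test relies on `experimental_use_kahan_sum=True`, which accumulates the 20,000 per-sample log-probs with compensated (Kahan) summation to curb float32 rounding error. A plain-Python sketch of the underlying algorithm:

    def kahan_sum(xs):
        # Compensated summation: `c` carries the low-order bits that a
        # naive running sum would discard.
        total, c = 0.0, 0.0
        for x in xs:
            y = x - c            # apply the stored compensation
            t = total + y        # low-order digits of `y` can be lost here
            c = (t - total) - y  # recover exactly what was lost
            total = t
        return total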
Example #5
 def testConstructor(self):
   x = ed.RandomVariable(tfd.Poisson(rate=tf.ones([2, 5])),
                         value=tf.ones([2, 5]))
   x_sample, x_value = self.evaluate([tf.convert_to_tensor(value=x), x.value])
   self.assertAllEqual(x_sample, x_value)
   with self.assertRaises(ValueError):
     _ = ed.RandomVariable(tfd.Bernoulli(probs=0.5),
                           value=tf.zeros([2, 5], dtype=tf.int32))
   x = ed.RandomVariable(FakeDistribution())
   with self.assertRaises(NotImplementedError):
     _ = x.value
Example #6
 def true_log_joint(loc, flip, x):
     log_prob = tf.reduce_sum(
         input_tensor=tfd.Normal(loc=0., scale=1.).log_prob(loc))
     log_prob += tf.reduce_sum(input_tensor=tfd.Bernoulli(
         probs=0.5).log_prob(flip))
     if tf.equal(flip, 1):
         log_prob += tf.reduce_sum(
             input_tensor=tfd.Normal(loc=loc, scale=0.5).log_prob(x))
     else:
         log_prob += tf.reduce_sum(input_tensor=tfd.Poisson(
             rate=tf.nn.softplus(loc)).log_prob(x))
     return log_prob
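A quick eager-mode check of this log joint at concrete, hypothetical values (the `if tf.equal(...)` branch needs a concrete `flip`, so this assumes eager execution); `flip` selects the Normal or the Poisson likelihood for `x`:

    loc = tf.constant(0.3)
    flip = tf.constant(1)          # 1 -> Normal likelihood; 0 -> Poisson
    x = tf.constant([0.1, 0.5])
    true_log_joint(loc, flip, x)   # scalar log joint density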
Example #7
 def new(params, event_shape=(), validate_args=False, name=None):
     """Create the distribution instance from a `params` vector."""
     # The three-argument name_scope and `preferred_dtype` are TF1-era APIs;
     # the compat endpoint and `dtype_hint` keep this runnable under TF2.
     with tf.compat.v1.name_scope(name, 'IndependentPoisson',
                                  [params, event_shape]):
         params = tf.convert_to_tensor(params, name='params')
         event_shape = dist_util.expand_to_vector(tf.convert_to_tensor(
             event_shape, name='event_shape', dtype_hint=tf.int32),
                                                  tensor_name='event_shape')
         output_shape = tf.concat([
             tf.shape(params)[:-1],
             event_shape,
         ],
                                  axis=0)
         return tfd.Independent(
             tfd.Poisson(log_rate=tf.reshape(params, output_shape),
                         validate_args=validate_args),
             reinterpreted_batch_ndims=tf.size(event_shape),
             validate_args=validate_args)
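Here `params` is the flattened log-rate tensor itself, so its last dimension must equal the number of events. A shape sketch under that assumption:

    params = tf.zeros([32, 12])            # log-rates for a (3, 4) event grid
    dist = new(params, event_shape=(3, 4))
    dist.batch_shape  # (32,)
    dist.event_shape  # (3, 4)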
Example #8
 def testRandomTensorSample(self):
     num_samples = tf.cast(tfd.Poisson(rate=5.).sample(), tf.int32)
     _ = ed.RandomVariable(tfd.Normal(loc=0.0, scale=1.0),
                           sample_shape=num_samples)
Example #9
 def _log_prob(self, y, r):
     return tfd.Poisson(rate=tf.nn.softplus(r)).log_prob(y)
Example #10
 def _log_prob(self, y, r):
     return tfd.Poisson(log_rate=r).log_prob(y)
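Examples #9 and #10 differ only in parameterization: `tfd.Poisson(log_rate=r)` is the same distribution as `tfd.Poisson(rate=tf.exp(r))`, but the `log_rate` form skips the exp/log round-trip and is numerically safer when `r` is unconstrained. A quick equivalence check:

    r = tf.constant(-3.0)
    tfd.Poisson(log_rate=r).log_prob(2.)      # equal, up to float error,
    tfd.Poisson(rate=tf.exp(r)).log_prob(2.)  # to this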
Example #11
 def observation_noise_fn(log_intensity):
     """Creates the observation noise distribution."""
     return tfd.Poisson(log_rate=tf.math.log(train_extents) +
                        log_intensity)
Example #12
 def setUp(self):
     super(PoissonSoftplusTest, self).setUp()
     self.dtype = np.float32
     self.model = tfp.glm.PoissonSoftplus()
     self.expected = tfp.glm.CustomExponentialFamily(
         lambda mean: tfd.Poisson(rate=mean), tf.nn.softplus)
Example #13
def poisson(x,
            layer_fn=tf.compat.v1.layers.dense,
            log_rate_fn=lambda x: x,
            name=None):
    """Constructs a trainable `tfd.Poisson` distribution.

  This function creates a Poisson distribution parameterized by log rate.
  Using default args, this function is mathematically equivalent to:

  ```none
  Y = Poisson(log_rate=matmul(W, x) + b)

  where,
    W in R^[d, n]
    b in R^d
  ```

  #### Examples

  This can be used as a [Poisson regression](
  https://en.wikipedia.org/wiki/Poisson_regression) loss.

  ```python
  # This example fits a Poisson regression model by maximum likelihood.
  import numpy as np
  import tensorflow as tf
  import tensorflow_probability as tfp

  # Create fictitious training data.
  dtype = np.float32
  n = 3000    # number of samples
  x_size = 4  # size of single x
  def make_training_data():
    np.random.seed(142)
    x = np.random.randn(n, x_size).astype(dtype)
    w = np.random.randn(x_size).astype(dtype)
    b = np.random.randn(1).astype(dtype)
    true_log_rate = np.tensordot(x, w, axes=[[-1], [-1]]) + b
    y = np.random.poisson(lam=np.exp(true_log_rate)).astype(dtype)
    return y, x
  y, x = make_training_data()

  # Build TF graph for fitting Poisson maximum likelihood estimator.
  poisson = tfp.trainable_distributions.poisson(x)
  loss = -tf.reduce_mean(poisson.log_prob(y))
  train_op = tf.train.AdamOptimizer(learning_rate=2.**-5).minimize(loss)
  mse = tf.reduce_mean(tf.squared_difference(y, poisson.mean()))
  init_op = tf.global_variables_initializer()

  # Run graph 1000 times.
  num_steps = 1000
  loss_ = np.zeros(num_steps)   # Style: `_` to indicate sess.run result.
  mse_ = np.zeros(num_steps)
  with tf.Session() as sess:
    sess.run(init_op)
    for it in range(loss_.size):
      _, loss_[it], mse_[it] = sess.run([train_op, loss, mse])
      if it % 200 == 0 or it == loss_.size - 1:
        print("iteration:{}  loss:{}  mse:{}".format(it, loss_[it], mse_[it]))

  # ==> iteration:0    loss:37.0814208984  mse:6359.41259766
  #     iteration:200  loss:1.42010736465  mse:40.7654914856
  #     iteration:400  loss:1.39027583599  mse:8.77660560608
  #     iteration:600  loss:1.3902695179   mse:8.78443241119
  #     iteration:800  loss:1.39026939869  mse:8.78443622589
  #     iteration:999  loss:1.39026939869  mse:8.78444766998
  ```

  Args:
    x: `Tensor` with floating type. Must have statically defined rank and
      statically known right-most dimension.
    layer_fn: Python `callable` which takes input `x` and `int` scalar `d` and
      returns a transformation of `x` with shape
      `tf.concat([tf.shape(x)[:-1], [1]], axis=0)`.
      Default value: `tf.compat.v1.layers.dense`.
    log_rate_fn: Python `callable` which transforms the `log_rate` parameter.
      Takes a (batch of) length-`dims` vectors and returns a `Tensor` of same
      shape and `dtype`.
      Default value: `lambda x: x`.
    name: A `name_scope` name for operations created by this function.
      Default value: `None` (i.e., "poisson").

  Returns:
    poisson: An instance of `tfd.Poisson`.
  """
    with tf.compat.v1.name_scope(name, 'poisson', [x]):
        x = tf.convert_to_tensor(value=x, name='x')
        log_rate = log_rate_fn(tf.squeeze(layer_fn(x, 1), axis=-1))
        return tfd.Poisson(log_rate=log_rate)
Example #14
 def setUp(self):
     self.dtype = np.float32
     self.model = tfp.glm.PoissonSoftplus()
     self.expected = tfp.glm.CustomExponentialFamily(
         lambda mu: tfd.Poisson(rate=mu), tf.nn.softplus)
Example #15
 def _as_distribution(self, r):
   return tfd.Poisson(rate=tf.nn.softplus(r))
Example #16
 def _as_distribution(self, r):
   return tfd.Poisson(log_rate=r)
Example #17
 def model():
     change_year = yield Root(tfd.Categorical(probs=tf.ones(n) / n))
     for year in range(n):
         post_change_year = tf.cast(year >= change_year, dtype=tf.int32)
         mu = tf.gather([mu1, mu2], post_change_year)
         accidents = yield tfd.Poisson(mu)
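This generator is meant as the body of a `tfd.JointDistributionCoroutine`, where `Root` marks draws with no upstream dependencies. A hedged wiring sketch, assuming `n`, `mu1`, and `mu2` are defined in the enclosing scope:

    Root = tfd.JointDistributionCoroutine.Root
    joint = tfd.JointDistributionCoroutine(model)
    draw = joint.sample()  # tuple: (change_year, then one count per year)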
Example #18
 def _as_distribution(self, r):
     return tfd.Poisson(rate=DeferredTensor(r, tf.math.softplus))
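`DeferredTensor` applies `tf.math.softplus` lazily, so the unconstrained variable `r` stays trainable while the distribution only ever sees a positive rate. A minimal sketch, assuming `tfp.util.DeferredTensor`:

    import tensorflow as tf
    import tensorflow_probability as tfp
    tfd = tfp.distributions

    r = tf.Variable(0.5)
    rate = tfp.util.DeferredTensor(r, tf.math.softplus)
    dist = tfd.Poisson(rate=rate)
    with tf.GradientTape() as tape:
        loss = -dist.log_prob(3.)
    tape.gradient(loss, r)  # gradients flow through the softplus into `r`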