Example #1
    def _log_prob(self, x):
        alpha0 = tf.convert_to_tensor(self.concentration1_numerator)
        beta0 = tf.convert_to_tensor(self.concentration0_numerator)
        alpha1 = tf.convert_to_tensor(self.concentration1_denominator)
        beta1 = tf.convert_to_tensor(self.concentration0_denominator)

        alpha_sum = alpha0 + alpha1

        log_normalization = (tfp_math.lbeta(alpha0, beta0) +
                             tfp_math.lbeta(alpha1, beta1))
        log_normalization = log_normalization - tfp_math.lbeta(
            alpha_sum, tf.where(x > 1., beta0, beta1))

        b = tf.where(x > 1., 1. - beta1, 1. - beta0)
        c = alpha_sum + tf.where(x > 1., beta0, beta1)
        z = tf.where(x > 1., tf.math.reciprocal(x), x)

        # Here, c - a - b = beta0 + beta1 - 1, so the series always converges
        # conditionally.

        log_unnormalized_prob = tf.math.log(
            hypgeo.hyp2f1_small_argument(alpha_sum, b, c, z))
        log_unnormalized_prob = log_unnormalized_prob + tf.math.xlogy(
            tf.where(x > 1., -(alpha1 + 1.), alpha0 - 1.), x)

        return log_unnormalized_prob - log_normalization
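
For context: `tfp_math.lbeta(a, b)` used throughout these examples is the numerically careful log of the beta function. A minimal standalone sketch (assuming `tfp_math` is an alias for `tfp.math`, as it appears to be in these snippets) checks it against the usual lgamma identity:

import tensorflow as tf
import tensorflow_probability as tfp

tfp_math = tfp.math  # assumed alias matching the snippets on this page

a = tf.constant([0.5, 2.0, 7.0])
b = tf.constant([1.5, 3.0, 0.2])
# lbeta(a, b) = lgamma(a) + lgamma(b) - lgamma(a + b)
print(tfp_math.lbeta(a, b))
print(tf.math.lgamma(a) + tf.math.lgamma(b) - tf.math.lgamma(a + b))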
Example #2
  def _log_prob(self, x):
    total_count = tf.convert_to_tensor(self.total_count)
    logits = self._logits_parameter_no_checks()
    log_unnormalized_prob = (total_count * tf.math.log_sigmoid(-logits) +
                             x * tf.math.log_sigmoid(logits))
    log_normalization = (tfp_math.lbeta(1. + x, total_count) +
                         tf.math.log(total_count + x))
    return log_unnormalized_prob - log_normalization
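
Here `lbeta(1. + x, total_count) + log(total_count + x)` equals `-log C(x + total_count - 1, x)`, the negative log binomial coefficient in the negative binomial pmf. A hedged usage sketch, assuming the method belongs to `tfp.distributions.NegativeBinomial` (whose parameter names match):

import tensorflow_probability as tfp

dist = tfp.distributions.NegativeBinomial(total_count=5., logits=0.3)
print(dist.log_prob(2.))  # exercises a _log_prob of this form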
Example #3
    def _log_prob(self, x):
        concentration = tf.convert_to_tensor(self.concentration)
        mixing_concentration = tf.convert_to_tensor(self.mixing_concentration)
        mixing_rate = tf.convert_to_tensor(self.mixing_rate)

        log_normalization = (
            tfp_math.lbeta(concentration, mixing_concentration) -
            mixing_concentration * tf.math.log(mixing_rate))

        log_unnormalized_prob = (tf.math.xlogy(concentration - 1., x) -
                                 (concentration + mixing_concentration) *
                                 tf.math.log(x + mixing_rate))

        return log_unnormalized_prob - log_normalization
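
A usage sketch, assuming the enclosing class is `tfp.distributions.GammaGamma` (its constructor takes exactly these parameter names):

import tensorflow_probability as tfp

dist = tfp.distributions.GammaGamma(
    concentration=2., mixing_concentration=3., mixing_rate=4.)
print(dist.log_prob(1.5))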
Example #4
  def _log_moment(self, n, concentration1=None, concentration0=None):
    """Compute the log of the n'th (uncentered) moment."""
    concentration0 = tf.convert_to_tensor(
        self.concentration0) if concentration0 is None else concentration0
    concentration1 = tf.convert_to_tensor(
        self.concentration1) if concentration1 is None else concentration1
    total_concentration = concentration1 + concentration0
    expanded_concentration1 = tf.broadcast_to(
        concentration1, tf.shape(total_concentration))
    expanded_concentration0 = tf.broadcast_to(
        concentration0, tf.shape(total_concentration))
    beta_arg = 1 + n / expanded_concentration1
    return (tf.math.log(expanded_concentration0) +
            tfp_math.lbeta(beta_arg, expanded_concentration0))
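
The returned value is `log(c0) + lbeta(1 + n / c1, c0)`, the log of the Kumaraswamy moment formula `E[X^n] = c0 * B(1 + n / c1, c0)`. A hedged Monte Carlo check of that formula against `tfp.distributions.Kumaraswamy`:

import tensorflow as tf
import tensorflow_probability as tfp

c1, c0, n = 2., 3., 2.
log_moment = tf.math.log(c0) + tfp.math.lbeta(1. + n / c1, c0)
samples = tfp.distributions.Kumaraswamy(c1, c0).sample(100000, seed=42)
print(tf.exp(log_moment))          # 0.25 for these parameters
print(tf.reduce_mean(samples**n))  # Monte Carlo estimate, close to 0.25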
Example #5
  def testLogBeta(self):
    x = tfp.distributions.HalfCauchy(loc=1., scale=15.).sample(
        10000, test_util.test_seed())
    x = self.evaluate(x)
    y = tfp.distributions.HalfCauchy(loc=1., scale=15.).sample(
        10000, test_util.test_seed())
    y = self.evaluate(y)
    # Why not 1e-8?
    # - Could be because scipy does the reduction loops recommended
    #   by DiDonato and Morris 1988
    # - Could be that tf.math.lgamma is less accurate than scipy
    # - Could be that scipy evaluates in 64 bits internally
    rtol = 1e-6
    self.assertAllClose(
        scipy_special.betaln(x, y), tfp_math.lbeta(x, y),
        atol=0, rtol=rtol)
Example #6
def entropy(df, scale, batch_shape, dtype):
  """Compute entropy of the StudentT distribution.

  Args:
    df: Floating-point `Tensor`. The degrees of freedom of the
      distribution(s). `df` must contain only positive values.
    scale: Floating-point `Tensor`; the scale(s) of the distribution(s). Must
      contain only positive values.
    batch_shape: Integer `Tensor` of the batch shape.
    dtype: Return dtype.

  Returns:
    A `Tensor` of the entropy for a Student's T with these parameters.
  """
  v = tf.ones(batch_shape, dtype=dtype)
  u = v * df
  return (tf.math.log(tf.abs(scale)) + 0.5 * tf.math.log(df) +
          tfp_math.lbeta(u / 2., v / 2.) + 0.5 * (df + 1.) *
          (tf.math.digamma(0.5 * (df + 1.)) - tf.math.digamma(0.5 * df)))
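
A hedged sanity check (again assuming `tfp_math = tfp.math`): for a scalar batch this helper should agree with `StudentT.entropy()`:

import tensorflow as tf
import tensorflow_probability as tfp

tfp_math = tfp.math  # assumed alias

print(entropy(df=3., scale=2., batch_shape=[], dtype=tf.float32))
print(tfp.distributions.StudentT(df=3., loc=0., scale=2.).entropy())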
Example #7
    def _log_prob(self, x):
        concentration = tf.convert_to_tensor(self.concentration)
        mixing_concentration = tf.convert_to_tensor(self.mixing_concentration)
        mixing_rate = tf.convert_to_tensor(self.mixing_rate)

        log_normalization = (
            tfp_math.lbeta(concentration, mixing_concentration) -
            mixing_concentration * tf.math.log(mixing_rate))

        log_unnormalized_prob = (tf.math.xlogy(concentration - 1., x) -
                                 (concentration + mixing_concentration) *
                                 tf.math.log(x + mixing_rate))
        # The formula computes `nan` for `x == +inf`.  However, it shouldn't be too
        # inaccurate for large finite `x`, because `x` only appears as `log(x)`, and
        # `log` is effectively discontinuous at `+inf`.
        log_unnormalized_prob = tf.where(
            x >= np.inf, tf.constant(-np.inf,
                                     dtype=log_unnormalized_prob.dtype),
            log_unnormalized_prob)

        return log_unnormalized_prob - log_normalization
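
With the `tf.where` guard in place, an infinite query yields `-inf` instead of `nan`. A usage sketch, again assuming `tfp.distributions.GammaGamma` as the enclosing class:

import numpy as np
import tensorflow_probability as tfp

dist = tfp.distributions.GammaGamma(
    concentration=2., mixing_concentration=3., mixing_rate=4.)
print(dist.log_prob(np.inf))  # -inf rather than nan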
Example #8
  def testLogBetaDtype(self, dtype):
    x = tf.constant([1., 2.], dtype=dtype)
    y = tf.constant([3., 4.], dtype=dtype)
    result = tfp_math.lbeta(x, y)
    self.assertEqual(result.dtype, dtype)
Example #9
def _log_normalization(counts, total_count):
  return (tfp_math.lbeta(1. + counts, 1. + total_count - counts) +
          tf.math.log(1. + total_count))
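
This relies on the identity `lbeta(1 + k, 1 + n - k) + log(1 + n) = -log C(n, k)`, since `B(k + 1, n - k + 1) * (n + 1) = 1 / C(n, k)`. A quick numerical check against `scipy`:

import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
from scipy import special

n, k = 10., 4.
print(tfp.math.lbeta(1. + k, 1. + n - k) + tf.math.log(1. + n))
print(-np.log(special.comb(10, 4)))  # both approximately -5.347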
Example #10
def _bates_cdf(total_count, low, high, dtype, value):
    """Compute the Bates cdf.

  Internally, the (standard, unnormalized) cdf is computed by the formula

  ```none
  pdf = sum_{k=0}^j (-1)^k (n choose k) (nx - k)^n
  ```

  where
  * `n = total_count`,
  * `x = value` the value to compute the cumulative probability of, and
  * `j = floor(nx)`.

  This is shifted to `[low, high]` and normalized. Since the pdf is symmetric,
  we have `cdf(x) = 1 - cdf(1 - x)` for `x > .5`, hence we only compute the left
  half, which keeps the number of terms lower.

  Computation is batched, using `tf.math.segment_sum()`. For this reason this is
  not compatible with `tf.vectorized_map()`.

  All input parameters should have compatible dtypes and shapes.

  Args:
    total_count: `Tensor` with integer values, as given to the `Bates`
      constructor.
    low: Float `Tensor`, as given to the `Bates` constructor.
    high: Float `Tensor`, as given to the `Bates` constructor.
    dtype: The dtype of the output.
    value: Float `Tensor`. Input value to `cdf()`.
  Returns:
    cdf: Float `Tensor`. See above formula.
  """
    total_count = tf.cast(total_count, dtype)
    low = tf.convert_to_tensor(low)
    high = tf.convert_to_tensor(high)

    # Warn the user if they try to compute a pdf with high `total_count`.  This
    # warning is here instead of `_parameter_control_dependencies()` because
    # nested calls to `_name_and_control_scope` (e.g. `log_survival_function`) can
    # result in multiple warnings being added and multiple tensor
    # conversions. Also `sample()` does not have the same numerical issues.
    with tf.control_dependencies([_stability_limit_tensor(total_count,
                                                          dtype)]):
        # Center and adjust `value` using limits and symmetry.
        value_centered = (value - low) / (high - low)
        value_adj = tf.clip_by_value(value_centered, 0., 1.)
        value_adj = tf.where(value_adj < .5, value_adj, 1. - value_adj)
        value_adj = tf.where(tf.math.is_finite(value_adj), value_adj, low)
        # Flatten to make segments; need to broadcast before flattening.
        shape = ps.broadcast_shape(ps.shape(value_adj), ps.shape(total_count))
        total_count_b = ps.broadcast_to(total_count, shape)
        total_count_x_value_adj_b = total_count * value_adj
        total_count_f = tf.reshape(total_count_b, [-1])
        total_count_x_value_adj_f = tf.reshape(total_count_x_value_adj_b, [-1])
        # Create segmented terms of summation.
        num_terms_f = tf.cast(tf.math.floor(total_count_x_value_adj_f + 1),
                              dtype=tf.int32)
        term_idx_s = tf.cast(_segmented_range(num_terms_f), dtype)  # aka `k`
        total_count_s = tf.repeat(total_count_f, num_terms_f)
        total_count_x_value_adj_s = tf.repeat(total_count_x_value_adj_f,
                                              num_terms_f)
        terms = (tf.cast(-1., dtype)**term_idx_s *
                 (1. / ((total_count_s + 1.) * tf.math.exp(
                     tfp_math.lbeta(total_count_s - term_idx_s + 1.,
                                    term_idx_s + 1.)))) *
                 (total_count_x_value_adj_s - term_idx_s)**total_count_s)
        # Segment sum.
        segment_ids = tf.repeat(tf.range(tf.size(num_terms_f)), num_terms_f)
        cdf_s = tf.math.segment_sum(terms, segment_ids)
        # Reshape back.
        cdf = tf.reshape(cdf_s, shape)
        # Normalize.
        cdf = cdf / tf.math.exp(
            tf.math.lgamma(total_count_b + tf.cast(1., dtype)))
        # cdf symmetry adjustment: cdf(x) = 1 - cdf(1 - x) for x > 0.5
        cdf = tf.where(value_centered > .5, 1. - cdf, cdf)
        # Fix out-of-support queries.
        cdf = tf.where(value_centered < 0., tf.cast(0., dtype), cdf)
        cdf = tf.where(value_centered > 1., tf.cast(1., dtype), cdf)
        cdf = tf.where(tf.math.is_finite(value_centered), cdf, np.nan)
        return cdf
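
A usage sketch through the public API, assuming this helper backs `tfp.distributions.Bates`:

import tensorflow_probability as tfp

dist = tfp.distributions.Bates(total_count=5., low=0., high=1.)
print(dist.cdf(0.3))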
Example #11
  def _log_normalization(self, concentration1, concentration0):
    return tfp_math.lbeta(concentration1, concentration0)
Example #12
def _log_combinations(n, k):
  """Computes log(Gamma(n+1) / (Gamma(k+1) * Gamma(n-k+1))."""
  return -tfp_math.lbeta(k + 1, n - k + 1) - tf.math.log(n + 1)
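
This is the same identity as in Example #9 with the sign flipped: the result is `log C(n, k)` directly. A check against `scipy.special.comb`, assuming TensorFlow and the `tfp_math = tfp.math` alias are in scope as in the other snippets:

import numpy as np
from scipy import special

print(_log_combinations(10., 4.))   # uses the definition above
print(np.log(special.comb(10, 4)))  # both approximately 5.347 (= log 210)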
Example #13
  def _log_prob(self, counts):
    n, c1, c0 = self._params_list_as_tensors()
    return (_log_combinations(n, counts)
            + tfp_math.lbeta(c1 + counts, (n - counts) + c0)
            - tfp_math.lbeta(c1, c0))
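
Combining `_log_combinations` with the two `lbeta` terms gives the Beta-Binomial log pmf. A hedged usage sketch, assuming the enclosing class is `tfp.distributions.BetaBinomial`:

import tensorflow_probability as tfp

dist = tfp.distributions.BetaBinomial(
    total_count=10., concentration1=2., concentration0=3.)
print(dist.log_prob(4.))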