def _log_cdf(self, x):
   loc, scale, low, high = self._loc_scale_low_high()
   std_low, std_high = self._standardized_low_and_high(
       low=low, high=high, loc=loc, scale=scale)
   return (_log_sub_exp(
       special_math.log_ndtr(
           (x - loc) / scale), special_math.log_ndtr(std_low)) -
           self._log_normalizer(std_low=std_low, std_high=std_high))
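For a standalone sanity check of the identity this method implements, log F(x) = log(Phi(z) - Phi(std_low)) - log(Phi(std_high) - Phi(std_low)), here is a minimal NumPy/SciPy sketch; the parameters and the local log_sub_exp helper are illustrative stand-ins for the TFP internals used above:

import numpy as np
from scipy.special import log_ndtr
from scipy.stats import truncnorm

# Illustrative parameters for a normal truncated to [low, high].
loc, scale, low, high = 1.0, 2.0, -1.0, 4.0
std_low, std_high = (low - loc) / scale, (high - loc) / scale

x = np.linspace(-0.5, 3.5, 25)
z = (x - loc) / scale

def log_sub_exp(a, b):
  # log(exp(a) - exp(b)), assuming a >= b elementwise.
  return a + np.log1p(-np.exp(b - a))

log_cdf = (log_sub_exp(log_ndtr(z), log_ndtr(std_low))
           - log_sub_exp(log_ndtr(std_high), log_ndtr(std_low)))

# Cross-check against scipy's reference truncated normal.
np.testing.assert_allclose(
    log_cdf, truncnorm.logcdf(x, std_low, std_high, loc=loc, scale=scale),
    rtol=1e-8)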
Example #2
def _normal_cdf_log_difference(x, y):
    """Computes log(ndtr(x) - ndtr(y)) assuming that x >= y."""
    # When x >= y >= 0, we will return log(ndtr(-y) - ndtr(-x))
    # because ndtr does not have a good precision for large positive x, y.
    is_y_positive = y >= 0
    x_hat = tf.where(is_y_positive, -y, x)
    y_hat = tf.where(is_y_positive, -x, y)
    return _log_sub_exp(special_math.log_ndtr(x_hat),
                        special_math.log_ndtr(y_hat))
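A quick illustration of why the reflection matters (the specific values below are only illustrative): for large positive x and y, ndtr(x) and ndtr(y) both round to 1.0 in float64, so the naive difference vanishes, while the reflected log-space form stays accurate:

import numpy as np
from scipy.special import log_ndtr, ndtr

x, y = 10.0, 9.0  # both large and positive, so ndtr(x) == ndtr(y) == 1.0 in float64

with np.errstate(divide='ignore'):
  naive = np.log(ndtr(x) - ndtr(y))  # -inf: the subtraction cancels to zero

# Reflection: ndtr(x) - ndtr(y) == ndtr(-y) - ndtr(-x), evaluated in log space.
a, b = log_ndtr(-y), log_ndtr(-x)
reflected = a + np.log1p(-np.exp(b - a))  # roughly -43.6

print(naive, reflected)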
Example #3
 def _log_cdf(self, x):
     rate = tf.convert_to_tensor(self.rate)
     x_centralized = x - self.loc
     u = rate * x_centralized
     v = rate * self.scale
     vsquared = tf.square(v)
     return tfp_math.log_sub_exp(
         special_math.log_ndtr(x_centralized / self.scale),
         -u + vsquared / 2. + special_math.log_ndtr((u - vsquared) / v))
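A standalone check of the same log-CDF formula, assuming illustrative parameters and a local log_sub_exp helper in place of tfp_math.log_sub_exp; note that scipy's exponnorm parameterizes the exponential component by K = 1 / (rate * scale):

import numpy as np
from scipy.special import log_ndtr
from scipy.stats import exponnorm

# Illustrative parameters: Normal(loc, scale**2) plus an independent Exponential(rate).
loc, scale, rate = 0.5, 1.3, 2.0
x = np.linspace(-3.0, 6.0, 40)

z = (x - loc) / scale
u = rate * (x - loc)
v = rate * scale

def log_sub_exp(a, b):
  # log(exp(a) - exp(b)), assuming a >= b elementwise.
  return a + np.log1p(-np.exp(b - a))

log_cdf = log_sub_exp(log_ndtr(z), -u + v**2 / 2. + log_ndtr((u - v**2) / v))

np.testing.assert_allclose(
    log_cdf, exponnorm.logcdf(x, K=1. / (rate * scale), loc=loc, scale=scale),
    rtol=1e-9)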
Example #4
 def _log_normalizer(self,
                     loc=None,
                     scale=None,
                     low=None,
                     high=None,
                     std_low=None,
                     std_high=None):
   if std_low is None or std_high is None:
     std_low, std_high = self._standardized_low_and_high(
         loc=loc, scale=scale, low=low, high=high)
   return _log_sub_exp(
       special_math.log_ndtr(std_high), special_math.log_ndtr(std_low))
Example #5
  def _test_grid_log(self, dtype, grid_spec, error_spec):
    if not special:
      return

    grid = _make_grid(dtype, grid_spec)
    actual = self.evaluate(special_math.log_ndtr(grid))

    # Basic tests.
    # isfinite checks for NaN and Inf.
    self.assertTrue(np.isfinite(actual).all())
    # On the grid, -inf < log_cdf(x) < 0.  In this case, we should be able
    # to use a huge grid because we have used tricks to escape numerical
    # difficulties.
    self.assertTrue((actual < 0).all())
    _check_strictly_increasing(actual)

    # Versus scipy.
    expected = special.log_ndtr(grid)
    # Scipy prematurely goes to zero at some places that we don't.  So don't
    # include these in the comparison.
    self.assertAllClose(
        expected.astype(np.float64)[expected < 0],
        actual.astype(np.float64)[expected < 0],
        rtol=error_spec.rtol,
        atol=error_spec.atol)
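The "huge grid" remark is the core motivation for log_ndtr: far in the left tail ndtr underflows to zero, so log(ndtr(x)) becomes -inf while log_ndtr(x) remains finite. A tiny SciPy illustration (value chosen only for demonstration):

import numpy as np
from scipy.special import ndtr, log_ndtr

x = -40.0
with np.errstate(divide='ignore'):
  print(np.log(ndtr(x)))  # -inf: ndtr(-40) underflows to 0 in float64
print(log_ndtr(x))        # about -804.6: still finite and accurate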
Example #6
 def _log_prob(self, x):
   loc = tf.convert_to_tensor(self.loc)
   rate = tf.convert_to_tensor(self.rate)
   scale = tf.convert_to_tensor(self.scale)
   two = dtype_util.as_numpy_dtype(x.dtype)(2.)
   z = (x - loc) / scale
   w = rate * scale
   return (tf.math.log(rate) + w / two * (w - 2 * z) +
           special_math.log_ndtr(z - w))
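The same exponentially modified Gaussian log-density can be checked against scipy.stats.exponnorm (illustrative parameters; scipy's shape parameter is K = 1 / (rate * scale)):

import numpy as np
from scipy.special import log_ndtr
from scipy.stats import exponnorm

loc, scale, rate = 0.5, 1.3, 2.0  # illustrative parameters
x = np.linspace(-3.0, 6.0, 40)

z = (x - loc) / scale
w = rate * scale
log_prob = np.log(rate) + w / 2. * (w - 2. * z) + log_ndtr(z - w)

np.testing.assert_allclose(
    log_prob, exponnorm.logpdf(x, K=1. / (rate * scale), loc=loc, scale=scale),
    rtol=1e-9)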
Example #7
 def _test_grad_finite(self, dtype):
   x = tf.constant([-100., 0., 100.], dtype=dtype)
   fn = special_math.log_ndtr if self._use_log else special_math.ndtr
   # Not having the lambda sanitizer means we'd get an `IndexError` whenever
   # the user-supplied function has default args.
   output, grad_output = value_and_gradient(fn, x)
   # isfinite checks for NaN and Inf.
   output_, grad_output_ = self.evaluate([output, grad_output])
   self.assert_all_true(np.isfinite(output_))
   self.assert_all_true(np.isfinite(grad_output_[0]))
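The gradient stays finite at x = -100 because d/dx log_ndtr(x) = phi(x) / Phi(x) is effectively evaluated in log space; the naive ratio is 0/0 in float64. A small SciPy illustration of that quantity (not the TFP gradient code itself):

import numpy as np
from scipy.special import log_ndtr
from scipy.stats import norm

x = -100.0
with np.errstate(invalid='ignore'):
  naive = norm.pdf(x) / norm.cdf(x)            # nan: 0 / 0 in float64
stable = np.exp(norm.logpdf(x) - log_ndtr(x))  # about 100.01, i.e. roughly |x| + 1/|x|
print(naive, stable)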
Example #8
 def _log_survival_function(self, x):
     return special_math.log_ndtr(-self._z(x))
Example #9
 def _log_cdf(self, x):
     return special_math.log_ndtr(self._z(x))
Example #10
 def _outcome_log_probs(self):
     if self._probits is None:
         p = tf.convert_to_tensor(self._probs)
         return tf.math.log1p(-p), tf.math.log(p)
     s = tf.convert_to_tensor(self._probits)
     return special_math.log_ndtr(-s), special_math.log_ndtr(s)
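In the probit branch, p = Phi(s), so log P(Y=1) = log_ndtr(s) and log P(Y=0) = log_ndtr(-s). A small SciPy check with illustrative probit values:

import numpy as np
from scipy.special import log_ndtr, ndtr

s = np.array([-8.0, -1.0, 0.0, 2.5, 8.0])  # illustrative probits; p = Phi(s)
log_p0, log_p1 = log_ndtr(-s), log_ndtr(s)

# The two outcome probabilities sum to one ...
np.testing.assert_allclose(np.exp(log_p0) + np.exp(log_p1), 1.0)
# ... and the log-space form stays finite where log(1 - ndtr(s)) would not.
with np.errstate(divide='ignore'):
  print(np.log1p(-ndtr(9.0)), log_ndtr(-9.0))  # -inf vs. about -43.6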
Example #11
 def _log_survival_function(self, x):
   return special_math.log_ndtr(-self._z(x))
Example #12
 def _log_cdf(self, x):
   return special_math.log_ndtr(self._z(x))