Example No. 1
 def _cdf(self, x):
   concentration = tf.convert_to_tensor(self.concentration)
   loc = tf.convert_to_tensor(self.loc)
   return (
       special_math.ndtr((tf.math.rsqrt(x / concentration) * (x / loc - 1.))) +
       tf.exp(2. * concentration / loc) *
       special_math.ndtr(-tf.math.rsqrt(x / concentration) * (x / loc + 1)))
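
The formula above appears to be the closed-form inverse Gaussian CDF, with loc as the mean mu and concentration as lambda. A minimal standalone cross-check against SciPy (a sketch; the function name and variables are illustrative):

import numpy as np
from scipy import special, stats

def inverse_gaussian_cdf(x, mu, lam):
    # Same closed form as the snippet above, in plain NumPy/SciPy.
    z = np.sqrt(lam / x)
    return (special.ndtr(z * (x / mu - 1.)) +
            np.exp(2. * lam / mu) * special.ndtr(-z * (x / mu + 1.)))

# SciPy parameterizes invgauss by mu/lam with scale=lam.
x, mu, lam = 2.0, 1.0, 3.0
assert np.isclose(inverse_gaussian_cdf(x, mu, lam),
                  stats.invgauss.cdf(x, mu / lam, scale=lam))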
Example No. 2
 def _cdf(self, x):
     u = self.rate * (x - self.loc)
     v = self.rate * self.scale
     v2 = tf.square(v)
     return (
         special_math.ndtr(u / v) -
          tf.exp(-u + 0.5 * v2 + tf.math.log(special_math.ndtr((u - v2) / v))))
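
This is the exponentially modified Gaussian (EMG) CDF; Example No. 13 below assembles the same subtracted term via special_math.log_ndtr, the more numerically stable route. A SciPy cross-check of the closed form (a sketch; exponnorm's shape is K = 1/(scale * rate)):

import numpy as np
from scipy import special, stats

def emg_cdf(x, loc, scale, rate):
    # Same closed form as the snippet above, subtracted term via log_ndtr.
    u = rate * (x - loc)
    v = rate * scale
    return special.ndtr(u / v) - np.exp(
        -u + 0.5 * v**2 + special.log_ndtr((u - v**2) / v))

x, loc, scale, rate = 1.5, 0.0, 1.0, 2.0
assert np.isclose(emg_cdf(x, loc, scale, rate),
                  stats.exponnorm.cdf(x, 1. / (scale * rate),
                                      loc=loc, scale=scale))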
Example No. 3
 def _quantile(self, p):
     """See https://www.ntrand.com/truncated-normal-distribution/"""
     a = (self.low - self.loc) / self.scale
     b = (self.high - self.loc) / self.scale
     delta = special_math.ndtr(b) - special_math.ndtr(a)
     x = delta * p + special_math.ndtr(a)
     return tf.math.ndtri(x) * self.scale + self.loc
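
Because this is the truncated normal's inverse CDF, feeding it uniform variates performs inverse-transform sampling. A standalone sketch of that use with NumPy/SciPy stand-ins (illustrative names):

import numpy as np
from scipy import special

def truncnorm_quantile(p, loc, scale, low, high):
    # Map p in (0, 1) through the truncated normal's inverse CDF.
    a = (low - loc) / scale
    b = (high - loc) / scale
    delta = special.ndtr(b) - special.ndtr(a)
    return special.ndtri(delta * p + special.ndtr(a)) * scale + loc

rng = np.random.default_rng(0)
samples = truncnorm_quantile(rng.uniform(size=1000), 0., 1., -1., 2.)
assert samples.min() >= -1. and samples.max() <= 2.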
Example No. 4
 def _cdf(self, x):
   loc, scale, low, high = self._loc_scale_low_high()
   std_low, std_high = self._standardized_low_and_high(
       low=low, high=high, loc=loc, scale=scale)
   cdf_in_support = ((special_math.ndtr(
       (x - loc) / scale) - special_math.ndtr(std_low)) /
                     self._normalizer(std_low=std_low, std_high=std_high))
   return tf.clip_by_value(cdf_in_support, 0., 1.)
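
The clip guards against rounding pushing the ratio slightly outside [0, 1] and against x outside the support. In support, the expression matches SciPy's truncated normal (a sketch):

import numpy as np
from scipy import special, stats

loc, scale, low, high = 0.5, 2.0, -1.0, 3.0
a, b = (low - loc) / scale, (high - loc) / scale
x = 1.0
cdf = ((special.ndtr((x - loc) / scale) - special.ndtr(a)) /
       (special.ndtr(b) - special.ndtr(a)))
assert np.isclose(cdf, stats.truncnorm.cdf(x, a, b, loc=loc, scale=scale))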
Example No. 5
def _normal_cdf_difference(x, y):
    """Computes ndtr(x) - ndtr(y) assuming that x >= y."""
    # When x >= y >= 0, we will return ndtr(-y) - ndtr(-x)
    # because ndtr does not have a good precision for large positive x, y.
    is_y_positive = y >= 0
    x_hat = tf.where(is_y_positive, -y, x)
    y_hat = tf.where(is_y_positive, -x, y)
    return special_math.ndtr(x_hat) - special_math.ndtr(y_hat)
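
A tiny numeric illustration of why the reflection helps: for large positive arguments ndtr rounds to 1.0 and the subtraction cancels completely, while the left tail stays well resolved:

from scipy import special

x, y = 12.0, 10.0
naive = special.ndtr(x) - special.ndtr(y)     # 0.0: both values round to 1.0
stable = special.ndtr(-y) - special.ndtr(-x)  # ~7.6e-24, the true mass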
Example No. 6
 def _cdf(self, x):
     with tf.control_dependencies(self._maybe_assert_valid_sample(x)):
         concentration = tf.convert_to_tensor(self.concentration)
         loc = tf.convert_to_tensor(self.loc)
         return (
             special_math.ndtr((concentration / x)**0.5 * (x / loc - 1.)) +
             tf.exp(2. * concentration / loc) *
             special_math.ndtr(-(concentration / x)**0.5 * (x / loc + 1.)))
Example No. 7
 def _normalizer(self,
                 loc=None,
                 scale=None,
                 low=None,
                 high=None,
                 std_low=None,
                 std_high=None):
   if std_low is None or std_high is None:
     std_low, std_high = self._standardized_low_and_high(
         loc=loc, scale=scale, low=low, high=high)
   return special_math.ndtr(std_high) - special_math.ndtr(std_low)
Example No. 8
 def _quantile(self, p):
   # TODO(b/188413116): This implementation is analytically correct, but might
   # not perform well in all cases. See
    # https://en.wikipedia.org/wiki/Truncated_normal_distribution#Generating_values_from_the_truncated_normal_distribution
   # for a discussion on alternatives.
   loc, scale, low, high = self._loc_scale_low_high()
   std_low, std_high = self._standardized_low_and_high(
       low=low, high=high, loc=loc, scale=scale)
   quantile = tf.math.ndtri(
       special_math.ndtr(std_low) + p *
       (special_math.ndtr(std_high) - special_math.ndtr(std_low))) * scale + loc
   return quantile
Example No. 9
    def _cdf(self, x):
        with tf.control_dependencies([
            tf.compat.v1.assert_greater(
                x, tf.cast(0., x.dtype.base_dtype),
                message="x must be positive.")
        ] if self.validate_args else []):
            return (
                special_math.ndtr((self.concentration / x)**0.5 *
                                  (x / self.loc - 1.)) +
                tf.exp(2. * self.concentration / self.loc) *
                special_math.ndtr(-(self.concentration / x)**0.5 *
                                  (x / self.loc + 1.)))
Example No. 10
    def _cdf(self, x):
        with tf.control_dependencies([
            assert_util.assert_greater(
                x, dtype_util.as_numpy_dtype(x.dtype)(0),
                message="x must be positive.")
        ] if self.validate_args else []):
            return (
                special_math.ndtr((self.concentration / x)**0.5 *
                                  (x / self.loc - 1.)) +
                tf.exp(2. * self.concentration / self.loc) *
                special_math.ndtr(-(self.concentration / x)**0.5 *
                                  (x / self.loc + 1.)))
Example No. 11
    def test_chandrupatla_scalar_inverse_gaussian_cdf(self):
        true_x = 3.14159
        u = special_math.ndtr(true_x)

        roots, value_at_roots, _ = tfp.math.find_root_chandrupatla(
            objective_fn=lambda x: special_math.ndtr(x) - u,
            low=-100.,
            high=100.,
            position_tolerance=1e-8)
        self.assertAllClose(value_at_roots, tf.zeros_like(value_at_roots))
        # The normal CDF function is not precise enough to be inverted to a
        # position tolerance of 1e-8 (the objective goes to zero relatively
        # far from the expected point), so check it at a lower tolerance.
        self.assertAllClose(roots, true_x, atol=1e-4)
Example No. 12
  def _test_grid_no_log(self, dtype, grid_spec, error_spec):
    if not special:
      return

    grid = _make_grid(dtype, grid_spec)
    actual = self.evaluate(special_math.ndtr(grid))

    # Basic tests.
    # isfinite checks for NaN and Inf.
    self.assertTrue(np.isfinite(actual).all())
    # On the grid, 0 < cdf(x) < 1.  The grid cannot contain everything due
    # to numerical limitations of cdf.
    self.assertTrue((actual > 0).all())
    self.assertTrue((actual < 1).all())
    _check_strictly_increasing(actual)

    # Versus scipy.
    expected = special.ndtr(grid)
    # Scipy prematurely goes to zero at some places that we don't.  So don't
    # include these in the comparison.
    self.assertAllClose(
        expected.astype(np.float64)[expected > 0],
        actual.astype(np.float64)[expected > 0],
        rtol=error_spec.rtol,
        atol=error_spec.atol)
Example No. 13
 def _cdf(self, x):
     rate = tf.convert_to_tensor(self.rate)
     x_centralized = x - self.loc
     u = rate * x_centralized
     v = rate * self.scale
     vsquared = tf.square(v)
     return special_math.ndtr(x_centralized / self.scale) - tf.exp(
         -u + vsquared / 2. + special_math.log_ndtr((u - vsquared) / v))
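
This is the same EMG CDF as Example No. 2, but with the logarithm folded into special_math.log_ndtr. A short illustration of why that matters in the far tail (SciPy stand-ins):

from scipy import special

z = -40.0
special.ndtr(z)      # 0.0: underflows in float64
special.log_ndtr(z)  # ~-804.6: still finite, safe inside exp(... + log_ndtr)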
Example No. 14
      def grad(dy):
        """Computes a derivative for the min and max parameters.

        This function implements the derivative wrt the truncation bounds, which
        get blocked by the sampler. We use a custom expression for numerical
        stability instead of automatic differentiation on CDF for implicit
        gradients.

        Args:
          dy: output gradients

        Returns:
          The gradients with respect to the lower bound and upper bound.
        """
        # std_samples has an extra dimension (the sample dimension), expand
        # lower and upper so they broadcast along this dimension.
        # See note above regarding parameterized_truncated_normal, the sample
        # dimension is the final dimension.
        lower_broadcast = lower[..., tf.newaxis]
        upper_broadcast = upper[..., tf.newaxis]

        cdf_samples = ((special_math.ndtr(std_samples) -
                        special_math.ndtr(lower_broadcast)) /
                       (special_math.ndtr(upper_broadcast)
                        - special_math.ndtr(lower_broadcast)))

        # tiny, eps are tolerance parameters to ensure we stay away from giving
        # a zero arg to the log CDF expression.

        tiny = np.finfo(self.dtype.as_numpy_dtype).tiny
        eps = np.finfo(self.dtype.as_numpy_dtype).eps
        cdf_samples = tf.clip_by_value(cdf_samples, tiny, 1 - eps)

        du = tf.exp(0.5 * (std_samples**2 - upper_broadcast**2) +
                    tf.math.log(cdf_samples))
        dl = tf.exp(0.5 * (std_samples**2 - lower_broadcast**2) +
                    tf.math.log1p(-cdf_samples))

        # Reduce the gradient across the samples
        grad_u = tf.reduce_sum(input_tensor=dy * du, axis=-1)
        grad_l = tf.reduce_sum(input_tensor=dy * dl, axis=-1)
        return [grad_l, grad_u]
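
For a fixed uniform draw u, the sample is x = F^{-1}(u), and implicitly differentiating (ndtr(x) - ndtr(lower)) / (ndtr(upper) - ndtr(lower)) = u gives dx/d(upper) = pdf(upper) / pdf(x) * u, which is exactly what du assembles in log space. A finite-difference sanity check of that identity (a sketch with SciPy):

import numpy as np
from scipy import stats

low, high, u = -1.0, 2.0, 0.7
x = stats.truncnorm.ppf(u, low, high)
analytic = stats.norm.pdf(high) / stats.norm.pdf(x) * u
h = 1e-6
numeric = (stats.truncnorm.ppf(u, low, high + h) -
           stats.truncnorm.ppf(u, low, high - h)) / (2. * h)
assert np.isclose(analytic, numeric, rtol=1e-4)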
Example No. 15
 def _test_grad_finite(self, dtype):
   x = tf.constant([-100., 0., 100.], dtype=dtype)
   fn = special_math.log_ndtr if self._use_log else special_math.ndtr
   # Not having the lambda sanitizer means we'd get an `IndexError` whenever
   # the user-supplied function has default args.
   output, grad_output = value_and_gradient(fn, x)
   # isfinite checks for NaN and Inf.
   output_, grad_output_ = self.evaluate([output, grad_output])
   self.assert_all_true(np.isfinite(output_))
   self.assert_all_true(np.isfinite(grad_output_[0]))
Example No. 16
 def _survival_function(self, x):
   return special_math.ndtr(-self._z(x))
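
Writing the survival function as ndtr(-z) rather than 1 - ndtr(z) preserves the right tail, which the subtraction would round away:

from scipy import special

z = 10.0
special.ndtr(-z)       # ~7.6e-24: the tail survives
1.0 - special.ndtr(z)  # 0.0: cancellation wipes it out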
Example No. 17
 def _cdf(self, x):
   cdf_in_support = ((special_math.ndtr((x - self.loc) / self.scale) -
                      special_math.ndtr(self._standardized_low)) /
                     self._normalizer)
   return tf.clip_by_value(cdf_in_support, 0., 1.)
Example No. 18
 def _normalizer(self):
   return (special_math.ndtr(self._standardized_high) -
           special_math.ndtr(self._standardized_low))
Example No. 19
 def _forward(self, x):
   return special_math.ndtr(x)
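
The enclosing class is presumably a normal-CDF bijector mapping the real line to (0, 1); a usage sketch, assuming TFP's public tfp.bijectors.NormalCDF:

import tensorflow_probability as tfp

b = tfp.bijectors.NormalCDF()
b.forward([0.0])    # ~[0.5]
b.inverse([0.975])  # ~[1.96]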
Example No. 20
 def _survival_function(self, x):
     return special_math.ndtr(-self._z(x))
Example No. 21
 def _cdf(self, x):
     return special_math.ndtr(self._z(x))
Example No. 22
 def mean(self, name='mean', **kwargs):
     u = self.loc
     s = self.scale
     return s * np.sqrt(2 / np.pi) * tf.math.exp(
         -0.5 * (u / s)**2.) + u * (1. - 2. * special_math.ndtr(-u / s))
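
This is the folded normal mean, E|X| = s*sqrt(2/pi)*exp(-u^2/(2 s^2)) + u*(1 - 2*Phi(-u/s)) for X ~ N(u, s^2). A SciPy cross-check (a sketch; foldnorm takes the shape c = u/s):

import numpy as np
from scipy import special, stats

u, s = 1.3, 0.7
mean = (s * np.sqrt(2. / np.pi) * np.exp(-0.5 * (u / s)**2) +
        u * (1. - 2. * special.ndtr(-u / s)))
assert np.isclose(mean, stats.foldnorm.mean(u / s, scale=s))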
Example No. 23
 def _cdf(self, x):
   return special_math.ndtr(self._z(x))
Example No. 24
 def _probs_parameter_no_checks(self):
     if self._probits is None:
         return tf.identity(self._probs)
     return special_math.ndtr(self._probits)
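
Here ndtr converts probits to probabilities; it is the inverse of ndtri, so the two round-trip:

from scipy import special

p = 0.25
probit = special.ndtri(p)  # ~-0.6745
assert abs(special.ndtr(probit) - p) < 1e-12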
Example No. 25
def norm_cdf(x, approx=False):
    return abrahamowitz_stegun_cdf(x) if approx else special_math.ndtr(x)
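
The abrahamowitz_stegun_cdf helper is not shown. A sketch of the classic Abramowitz & Stegun approximation 26.2.17 that such a helper might implement (the name and exact form here are assumptions; absolute error is below about 7.5e-8):

import numpy as np

def abramowitz_stegun_cdf(x):
    # A&S 26.2.17: ndtr(x) ~ 1 - pdf(x) * poly(t), t = 1 / (1 + p*x), x >= 0.
    t = 1. / (1. + 0.2316419 * np.abs(x))
    poly = t * (0.319381530 + t * (-0.356563782 + t * (1.781477937 +
           t * (-1.821255978 + t * 1.330274429))))
    pdf = np.exp(-0.5 * x * x) / np.sqrt(2. * np.pi)
    upper = 1. - pdf * poly
    return np.where(x >= 0, upper, 1. - upper)  # reflect for negative x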
Example No. 26
 def _probs_parameter_no_checks(self):
     if self._probits is None:
         return tensor_util.identity_as_tensor(self._probs)
     return special_math.ndtr(self._probits)