Example #1
def _cdf(self, x):
    with tf.control_dependencies(self._maybe_assert_valid_sample(x)):
        concentration = tf.convert_to_tensor(self.concentration)
        loc = tf.convert_to_tensor(self.loc)
        # Closed-form inverse Gaussian CDF:
        #   Phi(sqrt(c/x) * (x/loc - 1)) + exp(2c/loc) * Phi(-sqrt(c/x) * (x/loc + 1))
        return (special_math.ndtr((concentration / x)**0.5 * (x / loc - 1.)) +
                tf.exp(2. * concentration / loc) *
                special_math.ndtr(-(concentration / x)**0.5 * (x / loc + 1.)))
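For reference, this is the closed-form inverse Gaussian CDF, F(x) = Phi(sqrt(c/x) * (x/mu - 1)) + exp(2c/mu) * Phi(-sqrt(c/x) * (x/mu + 1)), with shape c = concentration and mean mu = loc. A minimal NumPy/SciPy sketch of the same formula, cross-checked against scipy.stats.invgauss (inverse_gaussian_cdf is a hypothetical helper; scipy.special.ndtr plays the role of special_math.ndtr):

    import numpy as np
    from scipy import stats
    from scipy.special import ndtr  # standard normal CDF, same role as special_math.ndtr

    def inverse_gaussian_cdf(x, concentration, loc):
        # Note: exp(2c/mu) can overflow for very large concentration/loc ratios.
        z = np.sqrt(concentration / x)
        return (ndtr(z * (x / loc - 1.0)) +
                np.exp(2.0 * concentration / loc) * ndtr(-z * (x / loc + 1.0)))

    x = np.array([0.5, 1.0, 2.0])
    print(inverse_gaussian_cdf(x, concentration=3.0, loc=1.0))
    # scipy's invgauss parameterization uses mu = mean/shape and scale = shape:
    print(stats.invgauss.cdf(x, mu=1.0 / 3.0, scale=3.0))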
Example #2
def grad(dy):
    """Computes the derivative with respect to the min and max parameters.

    This implements the derivative wrt the truncation bounds, which get
    blocked by the sampler. We use a custom expression for numerical
    stability instead of automatic differentiation on the CDF for implicit
    gradients.

    Args:
      dy: output gradients

    Returns:
      The gradients wrt the lower bound and the upper bound.
    """
    # std_samples has an extra dimension (the sample dimension); expand
    # lower and upper so they broadcast along this dimension.
    # See note above regarding parameterized_truncated_normal: the sample
    # dimension is the final dimension.
    lower_broadcast = lower[..., tf.newaxis]
    upper_broadcast = upper[..., tf.newaxis]

    cdf_samples = ((special_math.ndtr(std_samples) -
                    special_math.ndtr(lower_broadcast)) /
                   (special_math.ndtr(upper_broadcast) -
                    special_math.ndtr(lower_broadcast)))

    # tiny and eps are tolerance parameters that keep us from passing a
    # zero argument to the log expressions below.
    tiny = np.finfo(dtype_util.as_numpy_dtype(self.dtype)).tiny
    eps = np.finfo(dtype_util.as_numpy_dtype(self.dtype)).eps
    cdf_samples = tf.clip_by_value(cdf_samples, tiny, 1 - eps)

    du = tf.exp(0.5 * (std_samples**2 - upper_broadcast**2) +
                tf.math.log(cdf_samples))
    dl = tf.exp(0.5 * (std_samples**2 - lower_broadcast**2) +
                tf.math.log1p(-cdf_samples))

    # Reduce the gradient across the sample dimension.
    grad_u = tf.reduce_sum(dy * du, axis=-1)
    grad_l = tf.reduce_sum(dy * dl, axis=-1)
    return [grad_l, grad_u]
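The closed forms for du and dl above follow from inverse-CDF sampling. If the sample is z = ndtri(Phi(l) + p * (Phi(u) - Phi(l))) with the uniform draw p held fixed, then dz/du = p * phi(u) / phi(z) = p * exp(0.5 * (z^2 - u^2)) and dz/dl = (1 - p) * exp(0.5 * (z^2 - l^2)), where p is recovered as cdf_samples. A minimal NumPy/SciPy sketch with a finite-difference check (sample_std_truncnorm is a hypothetical helper; scipy.special.ndtr/ndtri stand in for the TFP internals):

    import numpy as np
    from scipy.special import ndtr, ndtri  # Phi and its inverse

    def sample_std_truncnorm(p, lower, upper):
        # Inverse-CDF sampling of a standard normal truncated to [lower, upper].
        return ndtri(ndtr(lower) + p * (ndtr(upper) - ndtr(lower)))

    p, lower, upper = 0.3, -1.0, 2.0
    z = sample_std_truncnorm(p, lower, upper)
    cdf_sample = (ndtr(z) - ndtr(lower)) / (ndtr(upper) - ndtr(lower))  # equals p

    # Closed-form gradients, matching du and dl in the snippet above.
    du = np.exp(0.5 * (z**2 - upper**2)) * cdf_sample
    dl = np.exp(0.5 * (z**2 - lower**2)) * (1.0 - cdf_sample)

    # Finite-difference check of both derivatives.
    h = 1e-6
    print(du, (sample_std_truncnorm(p, lower, upper + h) - z) / h)
    print(dl, (sample_std_truncnorm(p, lower + h, upper) - z) / h)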
Example #3
def _cdf(self, x):
    # Two-sided truncated normal CDF: (Phi(z) - Phi(alpha)) / (Phi(beta) - Phi(alpha)),
    # clipped so that x outside the support maps to 0 or 1.
    cdf_in_support = ((special_math.ndtr((x - self.loc) / self.scale) -
                       special_math.ndtr(self._standardized_low)) /
                      self._normalizer)
    return tf.clip_by_value(cdf_in_support, 0., 1.)
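Example #4 below supplies the matching normalizer. The expression is the standard two-sided truncated normal CDF, which scipy.stats.truncnorm also implements, so a quick cross-check is possible (the parameter values here are arbitrary):

    import numpy as np
    from scipy import stats
    from scipy.special import ndtr

    loc, scale, low, high = 0.5, 2.0, -1.0, 3.0
    alpha, beta = (low - loc) / scale, (high - loc) / scale  # standardized bounds

    x = np.linspace(low, high, 5)
    z = (x - loc) / scale
    cdf = np.clip((ndtr(z) - ndtr(alpha)) / (ndtr(beta) - ndtr(alpha)), 0.0, 1.0)
    print(cdf)
    # scipy.stats.truncnorm takes the standardized bounds directly:
    print(stats.truncnorm.cdf(x, alpha, beta, loc=loc, scale=scale))  # should match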
Example #4
def _normalizer(self):
    # Probability mass of the untruncated normal between the two bounds.
    return (special_math.ndtr(self._standardized_high) -
            special_math.ndtr(self._standardized_low))
Example #5
def _forward(self, x):
    # Maps x in R to Phi(x) in (0, 1).
    return special_math.ndtr(x)
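This looks like the forward pass of a normal-CDF bijector, which maps the real line to (0, 1) via Phi. Assuming it is TFP's tfp.bijectors.NormalCDF (an assumption; only the method body is shown above), a minimal usage sketch:

    import tensorflow_probability as tfp

    b = tfp.bijectors.NormalCDF()
    print(b.forward(0.0))    # Phi(0) = 0.5
    print(b.inverse(0.975))  # ~1.96, the familiar two-sided 95% quantile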
Example #6
def _probs_parameter_no_checks(self):
    if self._probits is None:
        return tf.identity(self._probs)
    # Probit parameterization: probs = Phi(probits).
    return special_math.ndtr(self._probits)
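Here ndtr implements the probit link: probs = Phi(probits), the probit-model analogue of probs = sigmoid(logits). A small round-trip sketch using scipy.special.ndtr/ndtri in place of the TF ops:

    import numpy as np
    from scipy.special import ndtr, ndtri

    probits = np.array([-1.0, 0.0, 1.5])
    probs = ndtr(probits)   # p = Phi(probit)
    print(probs)            # [0.1587, 0.5, 0.9332]
    print(ndtri(probs))     # round-trips back to the probits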
Example #7
def _survival_function(self, x):
    # S(x) = 1 - Phi(z) = Phi(-z); evaluating ndtr(-z) directly stays
    # accurate in the upper tail.
    return special_math.ndtr(-self._z(x))
Example #8
def _cdf(self, x):
    # F(x) = Phi(z), where self._z standardizes x.
    return special_math.ndtr(self._z(x))
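Examples #7 and #8 exploit the symmetry 1 - Phi(z) = Phi(-z): evaluating ndtr(-z) directly avoids the catastrophic cancellation that 1 - ndtr(z) suffers in the upper tail, as a small sketch shows:

    import numpy as np
    from scipy.special import ndtr

    z = 9.0
    print(1.0 - ndtr(z))  # 0.0 -- the true tail mass is lost to cancellation
    print(ndtr(-z))       # ~1.1286e-19, computed accurately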