Example #1
      def grad(dy):
        """Computes a derivative for the min and max parameters.

        This function implements the derivative wrt the truncation bounds, which
        get blocked by the sampler. We use a custom expression for numerical
        stability instead of automatic differentiation on CDF for implicit
        gradients.

        Args:
          dy: output gradients

        Returns:
           The standard normal samples and the gradients wrt the upper
           bound and lower bound.
        """
        cdf_samples = ((special_math.ndtr(std_samples) -
                        special_math.ndtr(lower)) /
                       (special_math.ndtr(upper) - special_math.ndtr(lower)))

        # tiny and eps keep the clipped CDF away from 0 and 1, so both log
        # terms below stay finite.

        tiny = np.finfo(self.dtype.as_numpy_dtype).tiny
        eps = np.finfo(self.dtype.as_numpy_dtype).eps
        cdf_samples = tf.clip_by_value(cdf_samples, tiny, 1 - eps)

        du = tf.exp(0.5 * (std_samples**2 - upper**2) + tf.log(cdf_samples))
        dl = tf.exp(0.5 * (std_samples**2 - lower**2) + tf.log(1 - cdf_samples))

        # Reduce the gradient across the samples
        grad_u = tf.reduce_sum(dy * du, axis=-1)
        grad_l = tf.reduce_sum(dy * dl, axis=-1)
        return [grad_l, grad_u]
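For context, this `grad` closure is the second element returned from a function decorated with `tf.custom_gradient`, so the samples and their hand-written gradient rule travel together. A minimal sketch of that wiring, assuming float32, scalar bounds, and the TF 2.x stateless sampler; the function name, shape, and seed here are illustrative, not from the original source:

import numpy as np
import tensorflow as tf
from tensorflow_probability.python.internal import special_math

@tf.custom_gradient
def truncated_std_normal(lower, upper):
  # The sampler output is non-differentiable, so we attach the explicit
  # gradient rule for the truncation bounds shown above.
  std_samples = tf.random.stateless_parameterized_truncated_normal(
      shape=[1000], seed=[4, 2], minvals=lower, maxvals=upper)

  def grad(dy):
    cdf_samples = ((special_math.ndtr(std_samples) -
                    special_math.ndtr(lower)) /
                   (special_math.ndtr(upper) - special_math.ndtr(lower)))
    tiny = np.finfo(np.float32).tiny
    eps = np.finfo(np.float32).eps
    cdf_samples = tf.clip_by_value(cdf_samples, tiny, 1. - eps)
    du = tf.exp(0.5 * (std_samples**2 - upper**2) +
                tf.math.log(cdf_samples))
    dl = tf.exp(0.5 * (std_samples**2 - lower**2) +
                tf.math.log1p(-cdf_samples))
    # Gradients are returned in input order: (lower, upper).
    return tf.reduce_sum(dy * dl, axis=-1), tf.reduce_sum(dy * du, axis=-1)

  return std_samples, grad

Under a `tf.GradientTape` that watches `lower` and `upper`, gradients then reach the bounds even though the sampler itself has no gradient of its own.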
Example #2
    def _cdf(self, x):
        with tf.control_dependencies([
                tf.assert_greater(x,
                                  tf.cast(0., x.dtype.base_dtype),
                                  message="x must be positive.")
        ] if self.validate_args else []):

            return (special_math.ndtr(
                ((self.concentration / x)**0.5 * (x / self.loc - 1.))) +
                    tf.exp(2. * self.concentration / self.loc) *
                    special_math.ndtr(-(self.concentration / x)**0.5 *
                                      (x / self.loc + 1)))
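This is the closed-form CDF of the inverse Gaussian (Wald) distribution with mean `loc` and shape parameter `concentration`. A standalone NumPy sketch of the same expression, using `scipy.special.ndtr` as a stand-in for `special_math.ndtr`; the function name and test values are illustrative:

import numpy as np
from scipy.special import ndtr

def invgauss_cdf(x, loc, concentration):
  # F(x) = ndtr(sqrt(c/x) * (x/loc - 1))
  #        + exp(2*c/loc) * ndtr(-sqrt(c/x) * (x/loc + 1))
  z = np.sqrt(concentration / x)
  return (ndtr(z * (x / loc - 1.)) +
          np.exp(2. * concentration / loc) * ndtr(-z * (x / loc + 1.)))

# Assuming the usual parameter mapping, this should agree with
# scipy.stats.invgauss(mu=loc/concentration, scale=concentration).cdf(x).
x = np.linspace(0.1, 5., 50)
print(invgauss_cdf(x, loc=1., concentration=2.))

One caveat that applies to the TFP form as well: for very large `2 * concentration / loc` the `exp` factor overflows, even though the full product is mathematically small.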
Example #3
            def grad(dy):
                """Computes a derivative for the min and max parameters.

        This function implements the derivative wrt the truncation bounds, which
        get blocked by the sampler. We use a custom expression for numerical
        stability instead of automatic differentiation on CDF for implicit
        gradients.

        Args:
          dy: output gradients

        Returns:
           The standard normal samples and the gradients wrt the upper
           bound and lower bound.
        """
                # std_samples has an extra dimension (the sample dimension), expand
                # lower and upper so they broadcast along this dimension.
                # See note above regarding parameterized_truncated_normal, the sample
                # dimension is the final dimension.
                lower_broadcast = lower[..., tf.newaxis]
                upper_broadcast = upper[..., tf.newaxis]

                cdf_samples = ((special_math.ndtr(std_samples) -
                                special_math.ndtr(lower_broadcast)) /
                               (special_math.ndtr(upper_broadcast) -
                                special_math.ndtr(lower_broadcast)))

                # tiny and eps keep the clipped CDF away from 0 and 1, so
                # both log terms below stay finite.

                tiny = np.finfo(self.dtype.as_numpy_dtype).tiny
                eps = np.finfo(self.dtype.as_numpy_dtype).eps
                cdf_samples = tf.clip_by_value(cdf_samples, tiny, 1 - eps)

                du = tf.exp(0.5 * (std_samples**2 - upper_broadcast**2) +
                            tf.log(cdf_samples))
                dl = tf.exp(0.5 * (std_samples**2 - lower_broadcast**2) +
                            tf.log1p(-cdf_samples))

                # Reduce the gradient across the samples
                grad_u = tf.reduce_sum(dy * du, axis=-1)
                grad_l = tf.reduce_sum(dy * dl, axis=-1)
                return [grad_l, grad_u]
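The only difference from Example #1 is the explicit broadcasting step: `std_samples` carries a trailing sample dimension, so the bounds get a `tf.newaxis`. A quick shape check, with illustrative batch and sample sizes:

import tensorflow as tf

lower = tf.zeros([3])                      # batch of 3 lower bounds
std_samples = tf.zeros([3, 100])           # 100 samples per batch member
lower_broadcast = lower[..., tf.newaxis]   # shape [3, 1]
# [3, 1] broadcasts against [3, 100], pairing each bound with its samples.
print((std_samples - lower_broadcast).shape)  # (3, 100)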
Example #4
 def _survival_function(self, x):
   return special_math.ndtr(-self._z(x))
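Writing the survival function as `ndtr(-z)` instead of `1. - ndtr(z)` avoids catastrophic cancellation in the upper tail, where `ndtr(z)` rounds to 1. A small demonstration, again with SciPy's `ndtr` as a stand-in:

from scipy.special import ndtr

z = 10.0
print(1.0 - ndtr(z))  # 0.0 -- the tail is lost to rounding in float64
print(ndtr(-z))       # ~7.62e-24 -- the tail probability survives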
Example #5
 def _cdf(self, x):
   return special_math.ndtr(self._z(x))
Example #6
 def _cdf(self, x):
     cdf_in_support = ((special_math.ndtr((x - self.loc) / self.scale) -
                        special_math.ndtr(self._standardized_low)) /
                       self._normalizer)
     return tf.clip_by_value(cdf_in_support, 0., 1.)
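Here `_cdf` renormalizes the parent normal CDF over the truncation interval; `_normalizer` (Example #7 below) supplies the denominator. A NumPy sketch checked against `scipy.stats.truncnorm`, with illustrative parameter values:

import numpy as np
from scipy.special import ndtr
from scipy.stats import truncnorm

loc, scale, low, high = 0.5, 2.0, -1.0, 3.0
a, b = (low - loc) / scale, (high - loc) / scale  # standardized bounds

def truncated_cdf(x):
  # Same expression as `_cdf` above: renormalized parent CDF, clipped so
  # rounding can never push it outside [0, 1].
  cdf_in_support = (ndtr((x - loc) / scale) - ndtr(a)) / (ndtr(b) - ndtr(a))
  return np.clip(cdf_in_support, 0., 1.)

x = np.linspace(low, high, 7)
print(np.allclose(truncated_cdf(x),
                  truncnorm(a, b, loc=loc, scale=scale).cdf(x)))  # True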
Example #7
 def _normalizer(self):
     return (special_math.ndtr(self._standardized_high) -
             special_math.ndtr(self._standardized_low))