def _cdf(self, x):
  """Cumulative distribution function of the inverse Gaussian at `x`.

  Uses the closed-form expression of the inverse-Gaussian CDF in terms
  of the standard-normal CDF (`ndtr`), guarded by the sample-validity
  assertions.
  """
  with tf.control_dependencies(self._maybe_assert_valid_sample(x)):
    conc = tf.convert_to_tensor(self.concentration)
    mu = tf.convert_to_tensor(self.loc)
    # Shared factor sqrt(concentration / x) appears in both ndtr arguments.
    root = (conc / x) ** 0.5
    head = special_math.ndtr(root * (x / mu - 1.))
    tail = tf.exp(2. * conc / mu) * special_math.ndtr(-root * (x / mu + 1))
    return head + tail
def grad(dy):
  """Gradient of the truncated-normal samples wrt the truncation bounds.

  The sampler itself blocks gradients through the bounds, so this
  implements them directly via a numerically stable closed form rather
  than differentiating the CDF automatically.

  Args:
    dy: output gradients

  Returns:
    A list `[grad_lower, grad_upper]` of gradients with respect to the
    lower and upper truncation bounds.
  """
  # std_samples carries a trailing sample dimension (see the note on
  # parameterized_truncated_normal above); give the bounds a matching
  # axis so they broadcast against it.
  lo = lower[..., tf.newaxis]
  hi = upper[..., tf.newaxis]
  numer = special_math.ndtr(std_samples) - special_math.ndtr(lo)
  denom = special_math.ndtr(hi) - special_math.ndtr(lo)
  cdf_samples = numer / denom
  # Keep the CDF strictly inside (0, 1) so the log terms below stay finite.
  finfo = np.finfo(dtype_util.as_numpy_dtype(self.dtype))
  cdf_samples = tf.clip_by_value(cdf_samples, finfo.tiny, 1 - finfo.eps)
  du = tf.exp(0.5 * (std_samples**2 - hi**2) + tf.math.log(cdf_samples))
  dl = tf.exp(0.5 * (std_samples**2 - lo**2) + tf.math.log1p(-cdf_samples))
  # Sum out the sample dimension.
  grad_u = tf.reduce_sum(dy * du, axis=-1)
  grad_l = tf.reduce_sum(dy * dl, axis=-1)
  return [grad_l, grad_u]
def _probs_parameter_no_checks(self):
  """Returns the success probability, without input validation.

  Derives it from the probits parameterization when that is what was
  given; otherwise passes the stored probs through unchanged.
  """
  if self._probits is not None:
    return special_math.ndtr(self._probits)
  return tf.identity(self._probs)
def _forward(self, x):
  """Forward transform: the standard-normal CDF evaluated at `x`."""
  return special_math.ndtr(x)
def _cdf(self, x):
  """CDF of the truncated normal, clamped to [0, 1] outside the support."""
  standardized = (x - self.loc) / self.scale
  mass_below = (special_math.ndtr(standardized)
                - special_math.ndtr(self._standardized_low))
  raw_cdf = mass_below / self._normalizer
  # For x outside the truncation bounds the expression above leaves
  # [0, 1]; clipping pins those regions to exactly 0 and 1.
  return tf.clip_by_value(raw_cdf, 0., 1.)
def _normalizer(self):
  """Standard-normal mass between the standardized truncation bounds."""
  upper_mass = special_math.ndtr(self._standardized_high)
  lower_mass = special_math.ndtr(self._standardized_low)
  return upper_mass - lower_mass
def _survival_function(self, x):
  """Survival function P[X > x], via ndtr of the negated z-score."""
  z = self._z(x)
  return special_math.ndtr(-z)
def _cdf(self, x):
  """CDF P[X <= x], via ndtr of the standardized value."""
  z = self._z(x)
  return special_math.ndtr(z)