def log_prob(self, value):
    """Log of the probability density function.

    Args:
        value (Tensor): Value to be evaluated.

    Returns:
        Tensor: Log probability density evaluated at ``value``.
    """
    return ((paddle.log(value) * (self.concentration - 1.0)).sum(-1) +
            paddle.lgamma(self.concentration.sum(-1)) -
            paddle.lgamma(self.concentration).sum(-1))
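# A minimal usage sketch (assumption: the method above belongs to
# ``paddle.distribution.Dirichlet``, constructed from a concentration tensor).
# It evaluates the log density at a point on the probability simplex.
import paddle

concentration = paddle.to_tensor([1.0, 2.0, 3.0])
dirichlet = paddle.distribution.Dirichlet(concentration)
value = paddle.to_tensor([0.2, 0.3, 0.5])  # positive components summing to 1
print(dirichlet.log_prob(value))  # scalar Tensor with the log density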
def _binomial_logpmf(self, count, value):
    # Log pmf of Binomial(count, p) evaluated at ``value``, computed from the
    # binary logits of ``self.probs`` in a numerically stable way.
    logits = self._probs_to_logits(self.probs, is_binary=True)

    factor_n = paddle.lgamma(count + 1)
    factor_k = paddle.lgamma(value + 1)
    factor_nmk = paddle.lgamma(count - value + 1)

    # Log normalizer: count * log(1 + exp(logits)) - lgamma(count + 1),
    # with softplus written via clip/log1p/abs to avoid overflow.
    norm = (count * _clip_by_zero(logits) +
            count * paddle.log1p(paddle.exp(-paddle.abs(logits))) -
            factor_n)

    return value * logits - factor_k - factor_nmk - norm
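# A small standalone check (illustrative, not part of the module) of the
# numerically stable softplus identity used in ``norm`` above:
# log(1 + exp(x)) == max(x, 0) + log1p(exp(-|x|)), which avoids overflow
# when x is very large.
import paddle

x = paddle.to_tensor([-20.0, -1.0, 0.0, 1.0, 20.0])
naive = paddle.log1p(paddle.exp(x))
stable = paddle.clip(x, min=0.0) + paddle.log1p(paddle.exp(-paddle.abs(x)))
print(paddle.allclose(naive, stable))  # True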
def entropy(self):
    """Entropy of Dirichlet distribution.

    Returns:
        Tensor: Entropy of distribution.
    """
    concentration0 = self.concentration.sum(-1)
    k = self.concentration.shape[-1]
    return (paddle.lgamma(self.concentration).sum(-1) -
            paddle.lgamma(concentration0) -
            (k - concentration0) * paddle.digamma(concentration0) -
            ((self.concentration - 1.0) *
             paddle.digamma(self.concentration)).sum(-1))
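# A hedged cross-check (assuming ``paddle.distribution.Dirichlet`` exposes the
# ``entropy`` method above): recompute the closed-form entropy
#   H = sum_i lgamma(a_i) - lgamma(a0) - (k - a0) * digamma(a0)
#       - sum_i (a_i - 1) * digamma(a_i),   with a0 = sum_i a_i,
# directly with paddle ops and compare it against ``entropy()``.
import paddle

a = paddle.to_tensor([0.5, 1.5, 3.0])
a0 = a.sum(-1)
k = a.shape[-1]
by_hand = (paddle.lgamma(a).sum(-1) - paddle.lgamma(a0) -
           (k - a0) * paddle.digamma(a0) -
           ((a - 1.0) * paddle.digamma(a)).sum(-1))
print(paddle.allclose(by_hand, paddle.distribution.Dirichlet(a).entropy()))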
def entropy(self):
    """Entropy of Multinomial distribution.

    Returns:
        Tensor: Entropy value.
    """
    # H = n * H(categorical) - lgamma(n + 1) + sum_i E[lgamma(X_i + 1)],
    # where X_i ~ Binomial(n, p_i) and the expectation is taken over the
    # binomial marginal pmf on the support 1..n.
    n = paddle.full(
        shape=[1], fill_value=self.total_count, dtype=self.probs.dtype)
    # Counts 1..total_count, shaped to broadcast against ``self.probs``.
    support = paddle.arange(
        self.total_count + 1,
        dtype=self.probs.dtype).reshape(
            (-1, ) + (1, ) * len(self.probs.shape))[1:]

    binomial_pmf = paddle.exp(self._binomial_logpmf(n, support))

    return ((n * self._categorical.entropy() - paddle.lgamma(n + 1)) +
            ((binomial_pmf * paddle.lgamma(support + 1)).sum([0, -1])))
def log_prob(self, value):
    """Log of the probability mass function evaluated at ``value``.

    Args:
        value (Tensor): Value to be evaluated.

    Returns:
        Tensor: Log probability of ``value``.
    """
    if paddle.is_integer(value):
        value = paddle.cast(value, self.probs.dtype)

    logits, value = paddle.broadcast_tensors(
        [paddle.log(self.probs), value])
    # Zero out -inf log-probabilities where the corresponding count is 0,
    # so that 0 * log(0) contributes 0 instead of NaN.
    logits[(value == 0) & (paddle.isinf(logits))] = 0

    return (paddle.lgamma(value.sum(-1) + 1) -
            paddle.lgamma(value + 1).sum(-1) +
            (value * logits).sum(-1))
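# A minimal usage sketch (assumption: the two methods above belong to
# ``paddle.distribution.Multinomial(total_count, probs)``): evaluating the
# log pmf of a count vector and the distribution entropy.
import paddle

probs = paddle.to_tensor([0.2, 0.3, 0.5])
multinomial = paddle.distribution.Multinomial(total_count=10, probs=probs)
counts = paddle.to_tensor([2.0, 3.0, 5.0])  # counts sum to total_count
print(multinomial.log_prob(counts))
print(multinomial.entropy())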
def _log_normalizer(self, x, y):
    # Log normalizer of the Beta distribution in exponential-family form,
    # i.e. the log Beta function: lgamma(x) + lgamma(y) - lgamma(x + y).
    return paddle.lgamma(x) + paddle.lgamma(y) - paddle.lgamma(x + y)
def _log_normalizer(self, x):
    # Log normalizer of the Dirichlet distribution in exponential-family
    # form: sum_i lgamma(x_i) - lgamma(sum_i x_i).
    return x.lgamma().sum(-1) - paddle.lgamma(x.sum(-1))
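# A hedged sketch of why ``_log_normalizer`` exists (assumption: these classes
# derive from an exponential-family base so KL divergence can be derived from
# natural parameters and the log normalizer). If a registered KL rule is
# available, it can be exercised through ``paddle.distribution.kl_divergence``:
import paddle

beta_p = paddle.distribution.Beta(paddle.to_tensor(2.0), paddle.to_tensor(3.0))
beta_q = paddle.distribution.Beta(paddle.to_tensor(4.0), paddle.to_tensor(5.0))
print(paddle.distribution.kl_divergence(beta_p, beta_q))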