def entropy(self, n, p):
    """Entropy of the multinomial distribution.

    Uses the identity
        H = -log(n!) + n*H(p) + sum_i E[log(X_i!)],
    where each expectation over the marginal X_i ~ Binomial(n, p_i)
    is evaluated by summing over the support x = 1..max(n).

    `n` and `p` are assumed to be ndarrays whose last axis of `p`
    indexes the category probabilities; leading axes broadcast.
    """
    support = np.arange(1, np.max(n) + 1)
    # n*H(p) - log(n!), broadcast over any leading batch axes of n/p.
    term1 = n * np.sum(entr(p), axis=-1) - gammaln(n + 1)
    n = n[..., np.newaxis]
    # Append singleton axes to the support so its axis broadcasts
    # against every batch/component axis of n and p.
    extra_axes = max(p.ndim, n.ndim) - support.ndim + 1
    support = support.reshape(support.shape + (1,) * extra_axes)
    # E[log(X_i!)], summed over the support axis and the component axis.
    term2 = np.sum(binom.pmf(support, n, p) * gammaln(support + 1),
                   axis=(-1, -1 - extra_axes))
    return term1 + term2
def entropy(self, n, p):
    """Entropy of the multinomial distribution.

    Parameters are normalized/broadcast by ``self._process_parameters``;
    invalid (n, p) combinations are masked to NaN via ``self._checkresult``.

    Uses the identity
        H = -log(n!) + n*H(p) + sum_i E[log(X_i!)],
    with each expectation over X_i ~ Binomial(n, p_i) evaluated by
    summing over the support x = 1..max(n).
    """
    n, p, npcond = self._process_parameters(n, p)
    support = np.arange(1, np.max(n) + 1)
    # n*H(p) - log(n!), broadcast over any leading batch axes of n/p.
    term1 = n * np.sum(entr(p), axis=-1) - gammaln(n + 1)
    n = n[..., np.newaxis]
    # Append singleton axes to the support so its axis broadcasts
    # against every batch/component axis of n and p.
    extra_axes = max(p.ndim, n.ndim) - support.ndim + 1
    support = support.reshape(support.shape + (1,) * extra_axes)
    # E[log(X_i!)], summed over the support axis and the component axis.
    term2 = np.sum(binom.pmf(support, n, p) * gammaln(support + 1),
                   axis=(-1, -1 - extra_axes))
    return self._checkresult(term1 + term2, npcond, np.nan)
def _entropy(self, p): return entr(p) + entr(1 - p)
def _entropy(self, n, p): k = np.arange(n + 1) vals = self._pmf(k, n, p) return np.sum(entr(vals), axis=0)
def _entropy(self, p): # TODO: use logits and binary_cross_entropy_with_logits for more stable if self.is_logits: p = expit(p) return entr(p) + entr(1 - p)
def _entropy(self, n, p): if self.is_logits: p = expit(p) k = np.arange(n + 1) vals = self._pmf(k, n, p) return np.sum(entr(vals), axis=0)