Example #1
 def log_prob(self, value):
     if self._validate_args:
         self._validate_sample(value)
     y = (value - self.loc) / self.scale
     z = (np.log(self.scale) + 0.5 * np.log(self.df) + 0.5 * np.log(np.pi) +
          gammaln(0.5 * self.df) - gammaln(0.5 * (self.df + 1.)))
     return -0.5 * (self.df + 1.) * np.log1p(y**2. / self.df) - z
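A minimal sanity check of the location-scale Student's t log-density above, assuming SciPy is available (parameter values are illustrative, not from the original source):

import numpy as np
from scipy.special import gammaln
from scipy.stats import t as student_t

loc, scale, df, value = 1.0, 2.0, 5.0, 0.3  # hypothetical parameters
y = (value - loc) / scale
z = (np.log(scale) + 0.5 * np.log(df) + 0.5 * np.log(np.pi) +
     gammaln(0.5 * df) - gammaln(0.5 * (df + 1.)))
# same expression as the method body above, compared against SciPy
assert np.isclose(-0.5 * (df + 1.) * np.log1p(y**2 / df) - z,
                  student_t.logpdf(value, df, loc=loc, scale=scale))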
Example #2
def binomial_lpmf(k, n, p):
    # Credit to https://github.com/pyro-ppl/numpyro/blob/master/numpyro/distributions/discrete.py
    log_factorial_n = gammaln(n + 1)
    log_factorial_k = gammaln(k + 1)
    log_factorial_nmk = gammaln(n - k + 1)
    return (log_factorial_n - log_factorial_k - log_factorial_nmk +
            xlogy(k, p) + xlog1py(n - k, -p))
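A hedged usage sketch for binomial_lpmf, comparing it against scipy.stats.binom.logpmf (assumes gammaln, xlogy, and xlog1py were imported from scipy.special, as the snippet implies):

import numpy as np
from scipy.special import gammaln, xlogy, xlog1py
from scipy.stats import binom

k, n, p = 3, 10, 0.25  # illustrative values
assert np.isclose(binomial_lpmf(k, n, p), binom.logpmf(k, n, p))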
Example #3
def half_students_t(scale, dof):
    """Half-Student's T distribution with support on nogative reals.

    Args:
        scale (float): Scale parameter (positive).
        dof (float): Degrees of freedom parameter (positive).

    Returns:
        Distribution: Half-Student's T distribution object.
    """
    def neg_log_dens(x):
        z = x / scale
        return (dof + 1) * np.log1p(z**2 / dof) / 2

    log_normalizing_constant = (gammaln(dof / 2) - gammaln(
        (dof + 1) / 2) + np.log(np.pi * dof) / 2 + np.log(scale / 2))

    def sample(rng, shape=()):
        return abs(rng.standard_t(df=dof, size=shape) * scale)

    return Distribution(
        neg_log_dens=neg_log_dens,
        log_normalizing_constant=log_normalizing_constant,
        sample=sample,
        support=nonnegative_reals,
    )
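Distribution and nonnegative_reals come from the surrounding (unshown) module, so here is a rough standalone check, assuming SciPy, that the density implied by neg_log_dens and log_normalizing_constant integrates to one:

import numpy as np
from scipy.integrate import quad
from scipy.special import gammaln

scale, dof = 2.0, 4.0  # hypothetical parameters
log_norm = (gammaln(dof / 2) - gammaln((dof + 1) / 2) +
            np.log(np.pi * dof) / 2 + np.log(scale / 2))
pdf = lambda x: np.exp(-(dof + 1) * np.log1p((x / scale)**2 / dof) / 2 - log_norm)
assert np.isclose(quad(pdf, 0, np.inf)[0], 1.0)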
Example #4
 def logpdf(self, k):
     k = jnp.floor(k)
     unnormalized = xlogy(k, self.p) + xlog1py(self.n - k, -self.p)
     binomialcoeffln = gammaln(self.n + 1) - (
         gammaln(k + 1) + gammaln(self.n - k + 1)
     )
     return unnormalized + binomialcoeffln
Example #5
 def log_prob(self, value):
     log_factorial_n = gammaln(self.total_count + 1)
     log_factorial_k = gammaln(value + 1)
     log_factorial_nmk = gammaln(self.total_count - value + 1)
     normalize_term = (self.total_count * np.clip(self.logits, 0) + xlog1py(
         self.total_count, np.exp(-np.abs(self.logits))) - log_factorial_n)
     return value * self.logits - log_factorial_k - log_factorial_nmk - normalize_term
Example #6
 def log_prob(self, value):
     if self._validate_args:
         self._validate_sample(value)
     normalize_term = self.total_count * logsumexp(self.logits, axis=-1) \
         - gammaln(self.total_count + 1)
     return jnp.sum(value * self.logits - gammaln(value + 1),
                    axis=-1) - normalize_term
Example #7
 def log_prob(self, value):
     if self._validate_args:
         self._validate_sample(value)
     normalize_term = (np.sum(gammaln(self.concentration), axis=-1) -
                       gammaln(np.sum(self.concentration, axis=-1)))
     return np.sum(np.log(value) *
                   (self.concentration - 1.), axis=-1) - normalize_term
Example #8
 def log_prob(self, value):
     log_factorial_n = gammaln(self.total_count + 1)
     log_factorial_k = gammaln(value + 1)
     log_factorial_nmk = gammaln(self.total_count - value + 1)
     return (log_factorial_n - log_factorial_k - log_factorial_nmk +
             xlogy(value, self.probs) +
             xlog1py(self.total_count - value, -self.probs))
Example #9
 def log_prob(self, value):
     if self._validate_args:
         self._validate_sample(value)
     log_factorial_n = gammaln(self.total_count + 1)
     log_factorial_k = gammaln(value + 1)
     log_factorial_nmk = gammaln(self.total_count - value + 1)
     return (log_factorial_n - log_factorial_k - log_factorial_nmk +
             xlogy(value, self.probs) + xlog1py(self.total_count - value, -self.probs))
Example #10
 def _pdf(self, x, df):
     #                                gamma((df+1)/2)
     # t.pdf(x, df) = ---------------------------------------------------
     #                sqrt(pi*df) * gamma(df/2) * (1+x**2/df)**((df+1)/2)
     r = np.asarray(df * 1.0)
     Px = np.exp(gammaln((r + 1) / 2) - gammaln(r / 2))
     Px = Px / (np.sqrt(r * np.pi) * (1 + (x**2) / r)**((r + 1) / 2))
     return Px
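A quick illustrative check of _pdf's formula against scipy.stats.t, with the (1 + x**2/df) factor in the denominator as the comment requires:

import numpy as np
from scipy.special import gammaln
from scipy.stats import t as student_t

x, df = 0.7, 3.0  # hypothetical inputs
Px = np.exp(gammaln((df + 1) / 2) - gammaln(df / 2))
Px = Px / (np.sqrt(df * np.pi) * (1 + x**2 / df)**((df + 1) / 2))
assert np.isclose(Px, student_t.pdf(x, df))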
Example #11
 def log_prob(self, value):
     log_factorial_n = gammaln(self.total_count + 1)
     log_factorial_k = gammaln(value + 1)
     log_factorial_nmk = gammaln(self.total_count - value + 1)
     return (log_factorial_n - log_factorial_k - log_factorial_nmk +
             _log_beta(value + self.concentration1,
                       self.total_count - value + self.concentration0) -
             _log_beta(self.concentration0, self.concentration1))
Example #12
 def log_prob(self, value):
     if self._validate_args:
         self._validate_sample(value)
     dtype = get_dtypes(self.probs)[0]
     value = lax.convert_element_type(value, dtype)
     total_count = lax.convert_element_type(self.total_count, dtype)
     return gammaln(total_count + 1) + np.sum(
         xlogy(value, self.probs) - gammaln(value + 1), axis=-1)
Example #13
 def logpmf(self, x, n, p):
     x, n, p = _promote_dtypes(x, n, p)
     if self.is_logits:
         return gammaln(n + 1) + np.sum(x * p - gammaln(x + 1),
                                        axis=-1) - n * logsumexp(p, axis=-1)
     else:
         return gammaln(n + 1) + np.sum(xlogy(x, p) - gammaln(x + 1),
                                        axis=-1)
Example #14
def _kl_dirichlet_dirichlet(p, q):
    # From http://bariskurt.com/kullback-leibler-divergence-between-two-dirichlet-and-beta-distributions/
    sum_p_concentration = p.concentration.sum(-1)
    sum_q_concentration = q.concentration.sum(-1)
    t1 = gammaln(sum_p_concentration) - gammaln(sum_q_concentration)
    t2 = (gammaln(p.concentration) - gammaln(q.concentration)).sum(-1)
    t3 = p.concentration - q.concentration
    t4 = digamma(p.concentration) - digamma(sum_p_concentration)[..., None]
    return t1 - t2 + (t3 * t4).sum(-1)
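An illustrative sanity check; SimpleNamespace stands in for the library's Dirichlet objects, which only need a .concentration array here:

import numpy as np
from types import SimpleNamespace
from scipy.special import gammaln, digamma

p = SimpleNamespace(concentration=np.array([[1.0, 2.0, 3.0]]))
q = SimpleNamespace(concentration=np.array([[2.0, 2.0, 2.0]]))
assert np.allclose(_kl_dirichlet_dirichlet(p, p), 0.0)  # KL(p, p) = 0
assert (_kl_dirichlet_dirichlet(p, q) >= 0.0).all()     # KL is nonnegative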
Example #15
 def log_prob(self, value):
     if self._validate_args:
         self._validate_sample(value)
     concentration = lax.convert_element_type(self.concentration,
                                              value.dtype)
     normalize_term = (np.sum(gammaln(concentration), axis=-1) -
                       gammaln(np.sum(concentration, axis=-1)))
     return np.sum(np.log(value) *
                   (concentration - 1.), axis=-1) - normalize_term
Example #16
def kl_divergence(p, q):
    # From https://en.wikipedia.org/wiki/Gamma_distribution#Kullback%E2%80%93Leibler_divergence
    a, b = p.concentration, p.rate
    alpha, beta = q.concentration, q.rate
    b_ratio = beta / b
    t1 = gammaln(alpha) - gammaln(a)
    t2 = (a - alpha) * digamma(a)
    t3 = alpha * jnp.log(b_ratio)
    t4 = a * (b_ratio - 1)
    return t1 + t2 - t3 + t4
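A sketch of a sanity check for the Gamma-to-Gamma KL above; SimpleNamespace is a stand-in for distribution objects exposing .concentration and .rate:

import jax.numpy as jnp
from jax.scipy.special import gammaln, digamma
from types import SimpleNamespace

p = SimpleNamespace(concentration=jnp.array(2.0), rate=jnp.array(3.0))
q = SimpleNamespace(concentration=jnp.array(1.5), rate=jnp.array(0.5))
assert jnp.isclose(kl_divergence(p, p), 0.0)  # KL(p, p) = 0
assert kl_divergence(p, q) >= 0.0             # KL is nonnegative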
Example #17
 def log_prob(self, value):
     if self._validate_args:
         self._validate_sample(value)
     dtype = get_dtypes(self.logits)[0]
     value = lax.convert_element_type(value, dtype)
     total_count = lax.convert_element_type(self.total_count, dtype)
     normalize_term = total_count * logsumexp(
         self.logits, axis=-1) - gammaln(total_count + 1)
     return np.sum(value * self.logits - gammaln(value + 1),
                   axis=-1) - normalize_term
Example #18
def logpdf(x, alpha):
    args = (np.ones((0, ), lax.dtype(x)), np.ones((1, ), lax.dtype(alpha)))
    to_dtype = lax.dtype(osp_stats.dirichlet.logpdf(*args))
    x, alpha = [lax.convert_element_type(arg, to_dtype) for arg in (x, alpha)]
    one = jnp._constant_like(x, 1)
    normalize_term = jnp.sum(gammaln(alpha), axis=-1) - gammaln(
        jnp.sum(alpha, axis=-1))
    log_probs = lax.sub(jnp.sum(xlogy(lax.sub(alpha, one), x), axis=-1),
                        normalize_term)
    return jnp.where(_is_simplex(x), log_probs, -jnp.inf)
Example #19
def kl_divergence(p, q):
    # From https://arxiv.org/abs/1401.6853 Formula (28)
    a, b = p.concentration, p.scale
    alpha, beta = q.concentration, q.rate
    a_reciprocal = 1 / a
    b_beta = b * beta
    t1 = jnp.log(a) + gammaln(alpha)
    t2 = alpha * (jnp.euler_gamma * a_reciprocal - jnp.log(b_beta))
    t3 = b_beta * jnp.exp(gammaln(a_reciprocal + 1))
    return t1 + t2 + t3 - (jnp.euler_gamma + 1)
Example #20
 def log_prob(self, value):
     if self._validate_args:
         self._validate_sample(value)
     dtype = get_dtypes(self.probs)[0]
     value = lax.convert_element_type(value, dtype)
     total_count = lax.convert_element_type(self.total_count, dtype)
     log_factorial_n = gammaln(total_count + 1)
     log_factorial_k = gammaln(value + 1)
     log_factorial_nmk = gammaln(total_count - value + 1)
     return (log_factorial_n - log_factorial_k - log_factorial_nmk +
             xlogy(value, self.probs) +
             xlog1py(total_count - value, -self.probs))
Example #21
    def entropy(self, n, p):
        x = np.arange(1, np.max(n) + 1)

        term1 = n * np.sum(entr(p), axis=-1) - gammaln(n + 1)

        n = n[..., np.newaxis]
        new_axes_needed = max(p.ndim, n.ndim) - x.ndim + 1
        x.shape += (1, ) * new_axes_needed

        term2 = np.sum(binom.pmf(x, n, p) * gammaln(x + 1),
                       axis=(-1, -1 - new_axes_needed))

        return term1 + term2
Example #22
 def log_prob(self, value):
     if self._validate_args:
         self._validate_sample(value)
     dtype = get_dtypes(self.logits)[0]
     value = lax.convert_element_type(value, dtype)
     total_count = lax.convert_element_type(self.total_count, dtype)
     log_factorial_n = gammaln(total_count + 1)
     log_factorial_k = gammaln(value + 1)
     log_factorial_nmk = gammaln(total_count - value + 1)
     normalize_term = (total_count * np.clip(self.logits, 0) +
                       xlog1py(total_count, np.exp(-np.abs(self.logits))) -
                       log_factorial_n)
     return value * self.logits - log_factorial_k - log_factorial_nmk - normalize_term
Example #23
 def _logpmf(self, x, n, p):
     x, n, p = _promote_dtypes(x, n, p)
     combiln = gammaln(n + 1) - (gammaln(x + 1) + gammaln(n - x + 1))
     if self.is_logits:
         # TODO: move this implementation to PyTorch if it does not run into
         # non-continuity problems.
         # In PyTorch, k * logit - n * log1p(e^logit) overflows when logit is a
         # large positive number. In that case, we can reformulate it as
         # k * logit - n * log1p(e^logit) = k * logit - n * (log1p(e^-logit) + logit)
         #                                = k * logit - n * logit - n * log1p(e^-logit)
         # More context: https://github.com/pytorch/pytorch/pull/15962/
         return combiln + x * p - (n * jnp.clip(p, 0) + xlog1py(n, jnp.exp(-jnp.abs(p))))
     else:
         return combiln + xlogy(x, p) + xlog1py(n - x, -p)
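A small numeric illustration of the reformulation described in the comment above (values are arbitrary; in float64 the naive form degenerates once exp(logit) overflows, near logit ≈ 709):

import numpy as np

k, n, logit = 3.0, 10.0, 800.0  # hypothetical values
with np.errstate(over='ignore'):
    naive = k * logit - n * np.log1p(np.exp(logit))  # exp(800.) -> inf -> -inf
stable = k * logit - (n * np.clip(logit, 0, None) +
                      n * np.log1p(np.exp(-np.abs(logit))))
print(naive, stable)  # -inf -5600.0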
Example #24
    def log_prob(self, data, **kwargs):
        loc, scale, df, dim = \
            self.loc, self.scale, self.df, self.dimension
        assert data.ndim == 2 and data.shape[1] == dim

        # Quadratic term
        tmp = np.linalg.solve(scale, (data - loc).T).T
        lp = -0.5 * (df + dim) * np.log1p(np.sum(tmp**2, axis=1) / df)

        # Normalizer
        lp += spsp.gammaln(0.5 * (df + dim)) - spsp.gammaln(0.5 * df)
        lp += -0.5 * dim * np.log(np.pi) - 0.5 * dim * np.log(df)
        # L_diag = np.reshape(Ls, Ls.shape[:-2] + (-1,))[..., ::D + 1]
        lp += -np.sum(np.log(np.diag(scale)))
        return lp
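A sketch comparing the log-density above with scipy.stats.multivariate_t (SciPy >= 1.6), under the assumption, implied by the -sum(log(diag(scale))) term, that scale is a lower-triangular Cholesky factor of the shape matrix:

import numpy as np
import scipy.special as spsp
from scipy.stats import multivariate_t

df, dim = 4.0, 2
loc = np.zeros(dim)
L = np.array([[1.0, 0.0], [0.5, 1.2]])  # hypothetical Cholesky "scale"
data = np.array([[0.3, -0.7]])

tmp = np.linalg.solve(L, (data - loc).T).T
lp = -0.5 * (df + dim) * np.log1p(np.sum(tmp**2, axis=1) / df)
lp += spsp.gammaln(0.5 * (df + dim)) - spsp.gammaln(0.5 * df)
lp += -0.5 * dim * np.log(np.pi) - 0.5 * dim * np.log(df)
lp += -np.sum(np.log(np.diag(L)))
assert np.allclose(lp, multivariate_t(loc, L @ L.T, df).logpdf(data))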
Example #25
 def compute_log_C(self, alpha):
     """
     Compute the log of the Dirichlet normalization constant.

     Parameters
     ----------
     alpha: np.array(K,)
         Components {α_i} of a Dirichlet distribution.

     Returns
     -------
     float: log C(α) = log Γ(Σ_i α_i) - Σ_i log Γ(α_i)
     """
     return special.gammaln(alpha.sum()) - special.gammaln(alpha).sum()
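A quick illustrative check: for K = 2 the Dirichlet normalizer reduces to the Beta function, so log C(α) should equal -betaln(α_1, α_2):

import numpy as np
from scipy import special

alpha = np.array([2.0, 5.0])  # hypothetical concentrations
log_C = special.gammaln(alpha.sum()) - special.gammaln(alpha).sum()
assert np.isclose(log_C, -special.betaln(alpha[0], alpha[1]))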
Example #26
    def entropy(self, n, p):
        n, p, npcond = self._process_parameters(n, p)

        x = np.arange(1, np.max(n) + 1)

        term1 = n * np.sum(entr(p), axis=-1) - gammaln(n + 1)

        n = n[..., np.newaxis]
        new_axes_needed = max(p.ndim, n.ndim) - x.ndim + 1
        x.shape += (1, ) * new_axes_needed

        term2 = np.sum(binom.pmf(x, n, p) * gammaln(x + 1),
                       axis=(-1, -1 - new_axes_needed))

        return self._checkresult(term1 + term2, npcond, np.nan)
Example #27
def gamma(shape, rate):
    """Gamma distribution with support on positive reals.

    Args:
        shape: Shape parameter.
        rate: Rate (inverse scale) parameter.

    Returns:
        Distribution: Gamma distribution object.
    """
    def neg_log_dens(x):
        return rate * x + (1 - shape) * np.log(x)

    log_normalizing_constant = gammaln(shape) - shape * np.log(rate)

    shape_param = shape

    def sample(rng, shape=()):
        return rng.gamma(shape=shape_param, scale=1 / rate, size=shape)

    return Distribution(
        neg_log_dens=neg_log_dens,
        log_normalizing_constant=log_normalizing_constant,
        sample=sample,
        support=positive_reals,
    )
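A hedged check that neg_log_dens and log_normalizing_constant together reproduce scipy.stats.gamma's log-density (SciPy parameterizes by scale = 1/rate; values are illustrative):

import numpy as np
from scipy.special import gammaln
from scipy.stats import gamma as sp_gamma

shape, rate, x = 2.5, 1.5, 0.8
neg_log_dens = rate * x + (1 - shape) * np.log(x)
log_norm = gammaln(shape) - shape * np.log(rate)
assert np.isclose(-neg_log_dens - log_norm,
                  sp_gamma.logpdf(x, a=shape, scale=1 / rate))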
Example #28
def inverse_gamma(shape, scale):
    """Inverse gamma distribution with support on positive reals.

    Args:
        shape: Shape parameter.
        scale: Scale parameter.

    Returns:
        Distribution: Inverse gamma distribution object.
    """
    def neg_log_dens(x):
        return scale / x + (shape + 1) * np.log(x)

    log_normalizing_constant = gammaln(shape) - shape * np.log(scale)

    shape_param = shape

    def sample(rng, shape=()):
        return 1 / rng.gamma(shape=shape_param, scale=1 / scale, size=shape)

    return Distribution(
        neg_log_dens=neg_log_dens,
        log_normalizing_constant=log_normalizing_constant,
        sample=sample,
        support=positive_reals,
    )
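The analogous check for the inverse gamma case, against scipy.stats.invgamma (illustrative values):

import numpy as np
from scipy.special import gammaln
from scipy.stats import invgamma

shape, scale, x = 3.0, 2.0, 0.9
neg_log_dens = scale / x + (shape + 1) * np.log(x)
log_norm = gammaln(shape) - shape * np.log(scale)
assert np.isclose(-neg_log_dens - log_norm,
                  invgamma.logpdf(x, a=shape, scale=scale))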
Example #29
    def poisson(self, n, lam):
        r"""
        The continuous approximation, using :math:`n! = \Gamma\left(n+1\right)`,
        to the probability mass function of the Poisson distribution evaluated
        at :code:`n` given the parameter :code:`lam`.

        Example:

            >>> import pyhf
            >>> pyhf.set_backend(pyhf.tensor.jax_backend())
            >>> pyhf.tensorlib.poisson(5., 6.)
            DeviceArray(0.16062314, dtype=float64)
            >>> values = pyhf.tensorlib.astensor([5., 9.])
            >>> rates = pyhf.tensorlib.astensor([6., 8.])
            >>> pyhf.tensorlib.poisson(values, rates)
            DeviceArray([0.16062314, 0.12407692], dtype=float64)

        Args:
            n (`tensor` or `float`): The value at which to evaluate the approximation to the Poisson distribution p.m.f.
                                  (the observed number of events)
            lam (`tensor` or `float`): The mean of the Poisson distribution p.m.f.
                                    (the expected number of events)

        Returns:
            JAX ndarray: Value of the continuous approximation to Poisson(n|lam)
        """
        n = np.asarray(n)
        lam = np.asarray(lam)
        return np.exp(n * np.log(lam) - lam - gammaln(n + 1.0))
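For integer n the continuous approximation coincides with the exact Poisson p.m.f.; a quick illustrative comparison against scipy.stats.poisson:

import numpy as np
from scipy.special import gammaln
from scipy.stats import poisson

n, lam = 5.0, 6.0
approx = np.exp(n * np.log(lam) - lam - gammaln(n + 1.0))
assert np.isclose(approx, poisson.pmf(int(n), lam))  # ~0.16062314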
Example #30
 def cond_fn(val):
     _, V, us, k = val
     cond1 = (us >= 0.07) & (V <= vr)
     cond2 = (k < 0) | ((us < 0.013) & (V > us))
     cond3 = ((np.log(V) + np.log(invalpha) - np.log(a / (us * us) + b)) <=
              (-lam + k * loglam - gammaln(k + 1)))
     return (~cond1) & (cond2 | (~cond3))