def d2_log_rho(beta, mk, K, N):
    """
    Second derivative of the logarithm of the Dirichlet-multinomial likelihood.
    """

    return K ** 2 * (mp.psi(1, K * beta) - mp.psi(1, K * beta + N)) - K * mp.psi(1, beta) \
        + sum(mk[n] * mp.psi(1, n + beta) for n in mk)
Example #2
def _dlogrho(beta, mk, K, N):
    """First derivative of the logarithm of the Dirichlet-multinomial likelihood."""
    db = K * (mp.psi(0, K * beta) - mp.psi(0, K * beta + N)) - \
        K * mp.psi(0, beta)
    db += sum(mk[n] * mp.psi(0, n + beta) for n in mk)
    return db
def d_xi(beta, K):
    """
    First derivative of xi(beta).

    xi(beta) is the entropy of the system when no data has been observed.
    d_xi is proportional to the prior over beta used by the NSB estimator.
    """

    return K * mp.psi(1, K * beta + 1.) - mp.psi(1, beta + 1.)
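A minimal usage sketch (an illustration, not from the original source): assuming `import mpmath as mp` and the `d_xi` above, the implied prior weight can be tabulated over beta; `K = 100` is a hypothetical alphabet size.

import mpmath as mp

K = 100  # hypothetical alphabet size
for beta in ('0.01', '0.1', '1.0', '10.0'):
    b = mp.mpf(beta)
    # d_xi decreases from (K - 1) * pi**2 / 6 toward 0 as beta grows.
    print(beta, d_xi(b, K))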
Example #4
def _S2i_nondiag(x1, x2, nxkx, beta, N, kappa):
    # Off-diagonal (x1 != x2) contribution to the second entropy moment.
    psNK2 = psi(0, N + kappa + 2)
    ps1NK2 = psi(1, N + kappa + 2)
    x1beta = x1 + beta
    x2beta = x2 + beta

    s1 = (psi(0, x1beta + 1) - psNK2) * (psi(0, x2beta + 1) - psNK2) - ps1NK2
    s1 = s1 * nxkx[x1] * nxkx[x2] * x1beta * x2beta

    return s1
def H1(beta, mk, K, N):
    """
    Compute the first moment (expectation value) of the entropy H.

    H is the entropy one obtains with a symmetric Dirichlet prior
    with concentration parameter beta and a multinomial likelihood.
    """

    norm = N + beta * K
    return mp.psi(0, norm + 1) - sum(
        mk[n] * (n + beta) * mp.psi(0, n + beta + 1) for n in mk) / norm
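A toy usage sketch (hypothetical numbers; assumes `import mpmath as mp` and the `H1` above). `mk` maps a count n to the number of bins observed n times; depending on the convention, an explicit n = 0 entry is needed so the sum runs over all K bins.

import mpmath as mp

K = 10                                 # assumed alphabet size
mk = {0: 5, 1: 3, 2: 2}                # 5 unseen bins, 3 seen once, 2 seen twice
N = sum(n * c for n, c in mk.items())  # total number of observations: 7
print(H1(mp.mpf('0.5'), mk, K, N))     # posterior mean entropy (nats)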
Example #6
def kurtosis(k, theta):
    """
    Excess kurtosis of the log-gamma distribution.

    k is the shape parameter of the gamma distribution.
    theta is the scale parameter of the log-gamma distribution.
    """
    with mpmath.extradps(5):
        k = mpmath.mpf(k)
        theta = mpmath.mpf(theta)
        # The scale parameter theta cancels out of the excess kurtosis.
        return mpmath.psi(3, k) / mpmath.psi(1, k)**2
Example #7
def skewness(k, theta):
    """
    Skewness of the log-gamma distribution.

    k is the shape parameter of the gamma distribution.
    theta is the scale parameter of the log-gamma distribution.
    """
    with mpmath.extradps(5):
        k = mpmath.mpf(k)
        theta = mpmath.mpf(theta)
        return mpmath.psi(2, k) / mpmath.psi(1, k)**1.5
Example #8
def _H2(beta, mk, K, K1, N):  # Computes 2nd moment of entropy
    norm = N + beta * K
    psi0norm = mp.psi(0, norm + 2)
    psi1norm = mp.psi(1, norm + 2)
    H2 = 0
    for index, k in enumerate(mk, start=1):
        H2 += _H2summand(index, k, mk, beta, psi0norm, psi1norm)
    H2 = H2 / (norm + 1) / norm
    return H2
def get_x_y(omega):
    # Sweep temperatures T in (0.01, 2) and evaluate, at fixed omega, a
    # response built from the imaginary part of a trigamma difference.
    Ts = []
    ys = []

    for T in np.linspace(0.01, 2, 200):
        x = omega * 1j / (2 * np.pi * T)
        Ts.append(T)
        ys.append(-omega / (4 * np.pi * T) *
                  (psi(1, 0.5 + x) - psi(1, 0.5 - x)).imag)

    return np.array(Ts), np.array(ys)
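Since the argument `0.5 + x` is complex, `psi` here cannot be `scipy.special.polygamma`, which is real-only; `mpmath.psi` does accept complex input. A minimal driver (an assumption, not part of the original):

import numpy as np
from mpmath import psi

Ts, ys = get_x_y(omega=1.0)  # sweep T in (0.01, 2) at fixed omega
print(Ts[:3], ys[:3])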
Example #10
def _S2i_diag(x, nxkx, beta, N, kappa):
    # Diagonal (x1 == x2) contribution to the second entropy moment.
    xbeta = x + beta
    Nkappa2 = N + kappa + 2
    psNK2 = psi(0, Nkappa2)
    ps1NK2 = psi(1, Nkappa2)

    s1 = (psi(0, xbeta + 2) - psNK2)**2 + psi(1, xbeta + 2) - ps1NK2
    s1 *= nxkx[x] * xbeta * (xbeta + 1)

    s2 = (psi(0, xbeta + 1) - psNK2)**2 - ps1NK2
    s2 *= nxkx[x] * (nxkx[x] - 1) * xbeta * xbeta

    return s1 + s2
Example #11
def nsb_integrand_variance_term1(beta, nxkx, N, m):
    '''Compute the variance of the NSB estimator. The names term 1 and 2
    refer to eq. 3.34 of Kinney's thesis; the third term, which is simply
    the square of the expectation value of the entropy, is subtracted
    off at the end.'''
    # Note: `psi` takes (order, x), so it is presumably mpmath.psi, and
    # `sp` a numpy-compatible namespace.
    summand1 = sp.array(
        [nxkx[x] * (x + beta + 1) * (x + beta) / ((N + m * beta + 1) * (N + m * beta)) *
         psi(1, x + beta + 1) for x in nxkx])
    summand2 = sp.array(
        [nxkx[x] * (x + beta) / ((N + m * beta + 1) * (N + m * beta)) *
         psi(0, x + beta + 1)**2 for x in nxkx])
    summand3 = sp.array(
        [nxkx[x] * (x + beta) / (N + m * beta) * psi(0, x + beta + 1) for x in nxkx])
    return (
        sp.sum(summand1) - psi(1, N + beta * m + 1) + sp.sum(summand2) -
        1 / (N + m * beta + 1) * sp.sum(summand3)**2)
Example #12
def f88(x):
    # psi (digamma): psi(0, x)
    if abs(x) > 1e100:
        return None
    try:
        return mpmath.psi(0, x)
    except Exception:
        return None
Example #13
def f90(x):
    # psi_1 (trigamma): psi(1, x)
    if abs(x) > 1e100:
        return None
    try:
        return mpmath.psi(1, x)
    except Exception:
        return None
Example #14
def mle(x, k=None, theta=None):
    """
    Gamma distribution maximum likelihood parameter estimation.

    Maximum likelihood estimate for the k (shape) and theta (scale) parameters
    of the gamma distribution.

    x must be a sequence of values.
    """
    meanx = mpmath.fsum(x) / len(x)
    meanlnx = mpmath.fsum(mpmath.log(t) for t in x) / len(x)

    if k is None:
        if theta is None:
            # Solve for k and theta
            s = mpmath.log(meanx) - meanlnx
            k_hat = (3 - s + mpmath.sqrt((s - 3)**2 + 24*s)) / (12*s)
            # XXX This loop implements a "dumb" convergence criterion.
            # It exits early if the old k equals the new k; if that never
            # happens, whatever value k_hat has after the last iteration
            # is returned.
            for _ in range(10):
                oldk = k_hat
                delta = ((mpmath.log(k_hat) - mpmath.psi(0, k_hat) - s) /
                         (1/k_hat - mpmath.psi(1, k_hat)))
                k_hat = k_hat - delta
                if k_hat == oldk:
                    break
            theta_hat = meanx / k_hat
        else:
            # theta is fixed, only solve for k
            theta = mpmath.mpf(theta)
            k_hat = digammainv(meanlnx - mpmath.log(theta))
            theta_hat = theta
    else:
        if theta is None:
            # Solve for theta, k is fixed.
            k_hat = mpmath.mpf(k)
            theta_hat = meanx / k_hat
        else:
            # Both k and theta are fixed.
            k_hat = mpmath.mpf(k)
            theta_hat = mpmath.mpf(theta)

    return k_hat, theta_hat
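A quick sanity check (illustrative only): fitting synthetic gamma draws should roughly recover the true parameters. Only the branch with both parameters free is exercised, so the `digammainv` helper is not needed here.

import numpy as np

rng = np.random.default_rng(42)
data = rng.gamma(shape=2.0, scale=3.0, size=10_000).tolist()

k_hat, theta_hat = mle(data)
print(float(k_hat), float(theta_hat))  # expect values near 2.0 and 3.0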
Example #16
def mean(k, theta):
    """
    Mean of the log-gamma distribution.

    k is the shape parameter of the gamma distribution.
    theta is the scale parameter of the log-gamma distribution.
    """
    with mpmath.extradps(5):
        k = mpmath.mpf(k)
        theta = mpmath.mpf(theta)
        return theta * mpmath.psi(0, k)
Example #17
def var(k, theta):
    """
    Variance of the log-gamma distribution.

    k is the shape parameter of the gamma distribution.
    theta is the scale parameter of the log-gamma distribution.
    """
    with mpmath.extradps(5):
        k = mpmath.mpf(k)
        theta = mpmath.mpf(theta)
        return theta**2 * mpmath.psi(1, k)
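A Monte Carlo cross-check of `mean` and `var` (a sketch, assuming the log-gamma variate is theta * ln(G) with G ~ Gamma(k, 1), consistent with the formulas above):

import numpy as np

k, theta = 2.5, 1.3
rng = np.random.default_rng(0)
samples = theta * np.log(rng.gamma(k, size=200_000))

print(float(mean(k, theta)), samples.mean())  # theta * psi(0, k) vs sample mean
print(float(var(k, theta)), samples.var())    # theta**2 * psi(1, k) vs sample variance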
Example #18
def nll_hess(x, k, theta):
    """
    Hessian of the negative log-likelihood function of the gamma distribution.
    """
    _validate_k_theta(k, theta)
    k = mpmath.mpf(k)
    theta = mpmath.mpf(theta)

    N = len(x)
    sumx = mpmath.fsum(x)
    # sumlnx = mpmath.fsum(mpmath.log(t) for t in x)

    dk2 = -N*mpmath.psi(1, k)
    dkdtheta = -N/theta
    dtheta2 = -2*sumx/theta**3 + N*k/theta**2

    return mpmath.matrix([[-dk2, -dkdtheta], [-dkdtheta, -dtheta2]])
Example #19
def nll_invhess(x, k, theta):
    """
    Inverse of the Hessian of the gamma distribution's negative log-likelihood.
    """
    _validate_k_theta(k, theta)
    k = mpmath.mpf(k)
    theta = mpmath.mpf(theta)

    N = len(x)
    sumx = mpmath.fsum(x)
    # sumlnx = mpmath.fsum(mpmath.log(t) for t in x)

    dk2 = -N*mpmath.psi(1, k)
    dkdtheta = -N/theta
    dtheta2 = -2*sumx/theta**3 + N*k/theta**2

    det = dk2*dtheta2 - dkdtheta**2

    return mpmath.matrix([[-dtheta2/det, dkdtheta/det],
                          [dkdtheta/det, -dk2/det]])
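A consistency check (illustrative; assumes `_validate_k_theta` from the same module is in scope): the product of the Hessian and its claimed inverse should be close to the identity.

x = [0.8, 1.5, 2.2, 3.1, 0.4]  # made-up positive data
k, theta = 2.0, 1.5

H = nll_hess(x, k, theta)
Hinv = nll_invhess(x, k, theta)
print(H * Hinv)  # should be approximately the 2x2 identity matrix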
Example #20
def _dxi(beta, K):  # This is d<H_q|B>/dB
    return K * psi(1, K * beta + 1) - psi(1, beta + 1)
Example #21
def _xi(beta, K):  # This is <H_q|B>
    return psi(0, K * beta + 1) - psi(0, beta + 1)
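A numerical check (illustrative; assumes `from mpmath import psi` plus `import mpmath as mp`): `mp.diff` confirms that `_dxi` is the derivative of `_xi`.

import mpmath as mp

K = 50
beta = mp.mpf('0.3')
numeric = mp.diff(lambda b: _xi(b, K), beta)  # numerical derivative of _xi
print(numeric, _dxi(beta, K))                 # the two values should agree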
Example #22
def nsb_integrand_variance_term2(beta, nxkx, N, m):
    summand1 = sp.array(
        [nxkx[x] * ((x + beta) / (N + m * beta) * psi(0, x + beta + 1)) for x in nxkx])
    return (psi(0, N + beta * m + 1) - sp.sum(summand1))**2
Example #23
def _dxi(beta, K):
    return K * mp.psi(1, K * beta + 1.) - mp.psi(1, beta + 1.)
Example #24
def _d2xi(beta, K):
    return K**2 * mp.psi(2, K * beta + 1) - mp.psi(2, beta + 1)
Example #25
def _d3xi(beta, K):
    return K**3 * mp.psi(3, K * beta + 1) - mp.psi(3, beta + 1)
Example #26
def _h2(beta, K, N, n):  # 2nd moment of Shannon information content
    return ((mp.psi(0, N + beta * K) - mp.psi(0, n + beta))**2
            - mp.psi(1, N + beta * K) + mp.psi(1, n + beta))
Example #27
def _h1(beta, K, N, n):  # mean of Shannon information content
    return mp.psi(0, N + beta * K) - mp.psi(0, n + beta)
Example #28
def _dh(a):
    return mp.psi(1, a + 1)
Example #29
def _J(nk, psi0norm, psi1norm):
    return (mp.psi(0, nk + 2) - psi0norm)**2 - psi1norm + mp.psi(1, nk + 2)
Example #30
def _I(nj, nk, psi0norm, psi1norm):
    return (mp.psi(0, nj + 1) - psi0norm) * (mp.psi(0, nk + 1) -
                                             psi0norm) - psi1norm
Example #31
def _S1i(x, nxkx, beta, N, kappa):
    return nxkx[x] * (x + beta) / (N + kappa) * (psi(0, x + beta + 1) - psi(0, N + kappa + 1))
def d2_xi(beta, K):
    """
    Second derivative of xi(beta) (cf d_xi).
    """

    return K**2 * mp.psi(2, K * beta + 1) - mp.psi(2, beta + 1)
def d3_xi(beta, K):
    """
    Third derivative of xi(beta) (cf d_xi).
    """

    return K**3 * mp.psi(3, K * beta + 1) - mp.psi(3, beta + 1)
Example #34
def _H1(beta, mk, K, K1, N):  # Computes expression from eq. (78)
    norm = N + beta * K
    H1 = mp.psi(0, norm + 1)
    for n in mk:
        H1 -= mk[n] * (n + beta) * mp.psi(0, n + beta + 1) / norm
    return H1
Example #35
def _logvarrho_h_DP(a, mk, K1, N):
    return _logvarrho_DP(a, mk, K1, N) + mp.log(mp.psi(1, a + 1))