Example #1
def compute_e_log_q_dirichlet(x):
    # E_q[log q(theta)] for q(theta) = Dirichlet(x), i.e. the negative entropy
    # of a Dirichlet with parameters x
    a_0 = x.sum()

    K = len(x)

    return log_gamma(a_0) - log_gamma(x).sum() + safe_multiply(
        x - 1, psi(x)).sum() - safe_multiply(a_0 - K, psi(a_0))
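The quantity returned here is the negative entropy of a Dirichlet distribution with parameters x. The helpers log_gamma, psi and safe_multiply are not shown on this page; the sketch below assumes they map to scipy.special.gammaln, scipy.special.psi and a plain element-wise product (which may differ from the original project) and checks the result against scipy's Dirichlet entropy.

import numpy as np
from scipy.special import gammaln as log_gamma, psi
from scipy.stats import dirichlet


def safe_multiply(a, b):
    # Assumed stand-in for the project's safe_multiply: plain element-wise product.
    return np.multiply(a, b)


def compute_e_log_q_dirichlet(x):
    # Same body as Example #1 above.
    a_0 = x.sum()
    K = len(x)
    return log_gamma(a_0) - log_gamma(x).sum() + safe_multiply(
        x - 1, psi(x)).sum() - safe_multiply(a_0 - K, psi(a_0))


x = np.array([2.0, 3.0, 5.0])
print(compute_e_log_q_dirichlet(x))  # E_q[log q(theta)] for q = Dirichlet(x)
print(-dirichlet(x).entropy())       # should match: minus the Dirichlet entropy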
Example #2
def compute_e_log_q_dirichlet(x):
    # for q, we don't use the priors of the Dirichlets (alpha_0, beta_0, gamma_0), just the hidden variables
    # x holds the variational (posterior) Dirichlet parameters
    
    a_0 = x.sum()
       
    K = len(x)
    # I checked, and the following give exactly the same values
    # print ('Val1 ', log_gamma(a_0) - log_gamma(x).sum() + safe_multiply(x - 1, compute_e_log_dirichlet(x)).sum())
    # print ('Val2 ', log_gamma(a_0) - log_gamma(x).sum() + safe_multiply(x - 1, psi(x)).sum() - safe_multiply(a_0 - K, psi(a_0)))
    return log_gamma(a_0) - log_gamma(x).sum() + safe_multiply(x - 1, psi(x)).sum() - safe_multiply(a_0 - K, psi(a_0))
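The equivalence noted in the commented-out checks is an algebraic identity rather than a numerical coincidence: because sum_k (x_k - 1) = a_0 - K,

sum_k (x_k - 1) * psi(x_k) - (a_0 - K) * psi(a_0) = sum_k (x_k - 1) * (psi(x_k) - psi(a_0))

and the right-hand side is the form written in terms of E[log theta_k] = psi(x_k) - psi(a_0), which is presumably what compute_e_log_dirichlet(x) returns.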
Example #3
def compute_e_log_p_dirichlet(posterior, prior):
    # For p we use both the prior and the hidden variable, e.g. gamma_star and gamma_0.
    # This computes E[log Dirichlet(x; alpha)], where alpha = prior and x ~ Dirichlet(posterior).
    # Dirichlet(x; alpha) = gamma(sum(alpha)) / prod(gamma(alpha_i)) * prod x_i^{alpha_i - 1}
    # log Dirichlet(x; alpha) = log_gamma(sum(alpha)) - sum(log_gamma(alpha_i)) + sum((alpha_i - 1) * log(x_i))
    # E[log Dirichlet(x; alpha)] = log_gamma(sum(alpha)) - sum(log_gamma(alpha_i)) + sum((alpha_i - 1) * E[log(x_i)])
    log_p = log_gamma(prior.sum()) - \
            log_gamma(prior).sum() + \
            safe_multiply(prior - 1, compute_e_log_dirichlet(posterior)).sum()
 
    return log_p
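compute_e_log_dirichlet is not shown on this page; in the sketch below it is assumed to return the standard Dirichlet expectation E[log x_i] = psi(posterior_i) - psi(sum(posterior)), and safe_multiply is assumed to be a plain element-wise product. Under those assumptions the function can be exercised like this:

import numpy as np
from scipy.special import gammaln as log_gamma, psi


def safe_multiply(a, b):
    # Assumed stand-in: plain element-wise product.
    return np.multiply(a, b)


def compute_e_log_dirichlet(x):
    # Assumed: E[log x_i] under Dirichlet(x), i.e. psi(x_i) - psi(sum(x)).
    return psi(x) - psi(x.sum())


def compute_e_log_p_dirichlet(posterior, prior):
    # Same body as Example #3 above.
    return log_gamma(prior.sum()) - \
        log_gamma(prior).sum() + \
        safe_multiply(prior - 1, compute_e_log_dirichlet(posterior)).sum()


prior = np.array([2.0, 2.0, 2.0])      # Dirichlet prior pseudo-counts
posterior = np.array([5.0, 2.0, 9.0])  # variational posterior parameters
print(compute_e_log_p_dirichlet(posterior, prior))  # E_q[log p(theta | prior)]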
Example #4
def compute_e_log_q(var_params):
    log_p = 0

    # Dirichlet factor q(pi): log normaliser, log_gamma(sum(pi)) - sum(log_gamma(pi)) ...
    log_p += log_gamma(np.sum(var_params.pi)) - np.sum(log_gamma(
        var_params.pi))

    # ... plus sum_k (pi_k - 1) * E[log pi_k], with E[log pi_k] = psi(pi_k) - psi(sum(pi))
    log_p += np.sum((var_params.pi - 1) *
                    (psi(var_params.pi) - psi(np.sum(var_params.pi))))

    # Negative entropies of the discrete factors q(theta) and q(z); the 1e-6 guards log(0)
    log_p += np.sum(var_params.theta * np.log(var_params.theta + 1e-6))

    log_p += np.sum(var_params.z * np.log(var_params.z + 1e-6))

    return log_p
Example #5
def compute_e_log_p(log_p_data, priors, var_params):
    log_p = 0

    # Log normaliser of the Dirichlet prior on pi
    log_p += log_gamma(np.sum(priors.pi)) - np.sum(log_gamma(priors.pi))

    # Prior pseudo-counts plus expected assignment counts, weighted by
    # E[log pi_k] = psi(pi_k) - psi(sum(pi)) under the variational posterior
    log_p += np.sum((priors.pi + np.sum(var_params.z, axis=0) - 1) *
                    (psi(var_params.pi) - psi(np.sum(var_params.pi))))

    # Expected log prior of theta under the prior probabilities priors.theta
    log_p += np.sum(var_params.theta *
                    np.log(priors.theta)[np.newaxis, np.newaxis, :])

    # Expected data log-likelihood, weighted by the responsibilities z
    log_p += np.sum(var_params.z *
                    compute_log_p_data_theta(log_p_data, var_params.theta))

    return log_p
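Examples #4 and #5 look like the two halves of a mean-field variational bound over a Dirichlet-distributed pi, a discrete theta and responsibilities z. If so, the ELBO would be assembled as E_q[log p] - E_q[log q], i.e. something like compute_e_log_p(log_p_data, priors, var_params) - compute_e_log_q(var_params). That pairing is an inference from the code shown here, not something stated on this page.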
Example #6
    def log_prior(self, value):
        """
        Evaluates and returns the log of this prior when the variable takes the given value.

        value: numerical value of the variable
        """
        return (self._shape_min_one * np.log(value)) - \
               (self.shape * np.log(self.scale)) - \
               (value / self.scale) - log_gamma(self.shape)
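The expression returned above is the log-density of a Gamma distribution with the object's shape and scale, assuming self._shape_min_one holds shape - 1 (that attribute is not shown here). A standalone check against scipy.stats.gamma:

import numpy as np
from scipy.special import gammaln as log_gamma
from scipy.stats import gamma

shape, scale, value = 10.0, 1.2, 7.5
shape_min_one = shape - 1.0  # assumed meaning of self._shape_min_one

log_prior = (shape_min_one * np.log(value)) - \
            (shape * np.log(scale)) - \
            (value / scale) - log_gamma(shape)

print(log_prior)
print(gamma(a=shape, scale=scale).logpdf(value))  # should agree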
Example #7
    def __init__(self, history):
        self._history = np.array(history, dtype=np.float64)
        self.alpha = 10.0
        self.beta = 12.0
        self.m = 10.0
        self._init_cum_hist_with_m = np.cumsum(np.hstack(((self.m,), history)))[:-1]

        history_span = self._history_span(history)
        self._mean, self._sd = self._mean_var_to_mu_sigma(
            np.mean(history_span),
            np.max([np.mean(history_span), 1]))
        self._cache = {}

        # Constants in the log-likelihood
        self._len_history = len(history)
        self._N = np.sum(history)
        self._c_const = np.dot(np.log(self._init_cum_hist_with_m), history)
        self._d_const = np.sum(log_gamma(history + 1))
        self._const = self._c_const - self._d_const
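The cached constants are the parameter-free pieces of the log-likelihood: since gamma(n + 1) = n!, _d_const is sum_i log(h_i!), and _c_const is sum_i h_i * log(m + h_1 + ... + h_{i-1}), i.e. each count weighted by the log of the cumulative history shifted by the initial mass m. Precomputing them means that a helper such as the _loglikelihood_helper shown further down (presumably from the same class, given that it reads self._const and self._N) only has to add the terms that depend on alpha and beta.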
Example #8
    def _loglikelihood_helper(self, alpha, beta, cite_diff_sum, log_diff_sum):
        a = alpha
        b = beta
        N = self._N
        return self._const + log_diff_sum + a * np.log(b) - log_gamma(a) + \
               log_gamma(a + N) - (a + N) * np.log(b + cite_diff_sum)
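The parameter-dependent part of the return value, a * log(b) - log_gamma(a) + log_gamma(a + N) - (a + N) * log(b + S) with S = cite_diff_sum, equals log( gamma(a + N) * b^a / (gamma(a) * (b + S)^(a + N)) ), the log-ratio of Gamma normalising constants before and after the conjugate update a -> a + N, b -> b + S. That is the form that arises when a Gamma(a, b)-distributed rate is integrated out of a Poisson-type likelihood with N total events, which appears to be what this helper evaluates, together with the data-only constant self._const and the log_diff_sum term.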