def __init__(self, data, T, alpha_beta):
    """Set up the stick-breaking variant: a Gaussian prior over psi whose
    mean corresponds to a uniform topic distribution, plus Polya-gamma
    auxiliary variables.

    data : count matrix; rows presumably index documents — TODO confirm.
    T : number of topics.
    alpha_beta : passed through to the base-class constructor.
    """
    # Mean/covariance that make the induced theta uniform over T topics.
    uniform_mu, uniform_sigma = compute_uniform_mean_psi(T)

    # NOTE(review): kappa_0 is 1/10 here but 10 in the logistic-normal
    # sibling __init__ — confirm the asymmetry is intentional.
    self.theta_prior = Gaussian(
        mu=uniform_mu,
        sigma=uniform_sigma,
        mu_0=uniform_mu,
        sigma_0=T * uniform_sigma / 10.,
        nu_0=T / 10.,
        kappa_0=1. / 10)

    self.ppgs = initialize_polya_gamma_samplers()

    # One auxiliary omega per row of data per stick; the stick-breaking
    # construction uses T-1 sticks for T topics.
    self.omega = np.zeros((data.shape[0], T - 1))

    super(StickbreakingCorrelatedLDA, self).__init__(data, T, alpha_beta)
def __init__(self, data, T, alpha_beta):
    """Set up the logistic-normal variant: a standard-normal prior over the
    T-dimensional psi, plus Polya-gamma auxiliary variables.

    data : count matrix; rows presumably index documents — TODO confirm.
    T : number of topics.
    alpha_beta : passed through to the base-class constructor.
    """
    # Zero-mean, identity-covariance prior over psi.
    prior_mu, prior_sigma = np.zeros(T), np.eye(T)

    # NOTE(review): kappa_0 is 10 here but 1/10 in the stick-breaking
    # sibling __init__ — confirm the asymmetry is intentional.
    self.theta_prior = Gaussian(
        mu=prior_mu,
        sigma=prior_sigma,
        mu_0=prior_mu,
        sigma_0=T * prior_sigma / 10.,
        nu_0=T / 10.,
        kappa_0=10.)

    self.ppgs = initialize_polya_gamma_samplers()

    # One auxiliary omega per row of data per topic (all T dimensions,
    # unlike the T-1 sticks of the stick-breaking model).
    self.omega = np.zeros((data.shape[0], T))

    super(LogisticNormalCorrelatedLDA, self).__init__(data, T, alpha_beta)
def monte_carlo_approx(M=100000):
    """Sanity-check the Polya-gamma identity by comparing an analytic
    left-hand side against an M-sample Monte Carlo estimate of the
    right-hand side, printing both on a log scale.

    NOTE(review): reads psi, a, b, ppg, and logsumexp from enclosing
    module scope — confirm they are defined before this is called.

    M : number of Monte Carlo samples (default 100000).
    """
    samplers = initialize_polya_gamma_samplers()

    # Analytic left-hand side: psi*a - b*log(1 + exp(psi)).
    log_lhs = psi * a - b * np.log1p(np.exp(psi))

    # Monte Carlo right-hand side: pgdrawvpar fills omega_draws in place
    # with M Polya-gamma draws.
    omega_draws = np.ones(M)
    ppg.pgdrawvpar(samplers, b * np.ones(M), np.zeros(M), omega_draws)
    log_rhs_samples = (-b * np.log(2)
                       + (a - b / 2.) * psi
                       - 0.5 * omega_draws * psi ** 2)
    # Average in log space: logsumexp(samples) - log(M).
    log_rhs = logsumexp(log_rhs_samples) - np.log(M)

    print("Monte Carlo")
    print("log LHS: ", log_lhs)
    print("log RHS: ", log_rhs)