import numpy as np
from math import exp
from scipy.special import digamma


def normalize_vbd(self):
    """Variational-Bayes normalization under a symmetric Dirichlet prior:
    p(r) = exp(psi(c_r + alpha)) / exp(psi(s + K * alpha)), which discounts
    low counts relative to the plain relative frequency c_r / s."""
    self.prob = {}
    alpha = 1e-5  # symmetric Dirichlet smoothing hyperparameter
    s = sum(self.count.values())
    s += alpha * len(self.count)  # add alpha once per observed outcome type
    for r, c in self.count.items():
        n = c + alpha
        self.prob[r] = exp(digamma(n)) / exp(digamma(s))
    self.count = {}
    self.beginning = False
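# Minimal usage sketch (not from the source): normalize_vbd only needs an
# object with `count`, `prob`, and `beginning` attributes, so a SimpleNamespace
# stands in for the original class here.
from types import SimpleNamespace

d = SimpleNamespace(count={"a": 3, "b": 1}, prob={}, beginning=True)
normalize_vbd(d)
# The digamma discount pushes d.prob["b"] below the raw estimate 1/4
# (roughly 0.16), and the values need not sum to exactly 1.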
def cal_up_llh_grad(sampler, alpha, grad):
    """Gradient of the Dirichlet-multinomial log likelihood of the
    document-topic counts with respect to the hyperparameters alpha[m][k]."""
    for m in range(len(alpha)):
        alpha_m = np.sum(alpha[m])    # total alpha mass for document m
        wc_m = sampler.ndsum[m]       # total word count of document m
        for k in range(len(alpha[m])):
            alpha_mk = alpha[m][k]
            wc_mk = sampler.nd[m][k]  # words in document m assigned to topic k
            grad[m][k] = (digamma(alpha_m) - digamma(wc_m + alpha_m)
                          + digamma(alpha_mk + wc_mk) - digamma(alpha_mk))
    return grad
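# Hedged sketch (not from the source): one way to consume cal_up_llh_grad is a
# plain gradient-ascent step on alpha. The step size `lr`, iteration count, and
# positivity clip are assumptions; the source may use a fixed-point update instead.
def update_alpha(sampler, alpha, n_iter=50, lr=1e-3):
    grad = np.zeros_like(alpha)
    for _ in range(n_iter):
        grad = cal_up_llh_grad(sampler, alpha, grad)
        alpha = np.maximum(alpha + lr * grad, 1e-10)  # keep alpha positive
    return alpha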
def normalize_vbdp(self, alpha, threshold):
    """Variational-Bayes normalization with a base distribution in the prior:
    counts are smoothed by alpha * base_dist(r), and probabilities at or below
    `threshold` are pruned."""
    self.prob = {}
    s = sum(self.count.values())
    s += alpha  # the base distribution sums to 1, so its total mass adds alpha
    for r, c in self.count.items():
        n = c + alpha * base_dist(r)  # base_dist: base measure, defined elsewhere
        p = exp(digamma(n)) / exp(digamma(s))
        if p > threshold:
            self.prob[r] = p
    self.count = {}
    self.beginning = False
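# Hedged sketch (not from the source): `base_dist` is referenced above but not
# defined in this excerpt. A uniform base measure over a known outcome space is
# one simple stand-in; `VOCAB_SIZE` is a hypothetical constant.
VOCAB_SIZE = 1000  # hypothetical size of the outcome space

def base_dist(r):
    return 1.0 / VOCAB_SIZE  # uniform base measure G0(r)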
def cal_down_llh_grad(sampler, beta, grad):
    """Gradient of the Dirichlet-multinomial log likelihood of the topic-word
    counts with respect to beta[v][k]; mirrors cal_up_llh_grad on the word side."""
    beta_sum = beta.sum(axis=0)       # total beta mass per topic k
    for v in range(len(beta)):
        for k in range(sampler.K):
            beta_k = beta_sum[k]
            beta_vk = beta[v][k]
            wc_vk = sampler.nw[v][k]  # count of word v assigned to topic k
            wc_k = sampler.nwsum[k]   # total words assigned to topic k
            grad[v][k] = (digamma(beta_k) - digamma(beta_k + wc_k)
                          + digamma(beta_vk + wc_vk) - digamma(beta_vk))
    return grad
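# Hedged sketch (not from the source): the same assumed gradient-ascent loop as
# update_alpha above, applied to beta.
def update_beta(sampler, beta, n_iter=50, lr=1e-3):
    grad = np.zeros_like(beta)
    for _ in range(n_iter):
        grad = cal_down_llh_grad(sampler, beta, grad)
        beta = np.maximum(beta + lr * grad, 1e-10)  # keep beta positive
    return beta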