def runBatchEP(workQueue, lockingPost):
    """Worker loop: consume EP-LDA jobs from *workQueue* until the sentinel "none".

    Each job is a tuple ``(W, K, docs, alpha, maxiters, thr, useNewton)``.
    The worker snapshots the current shared posterior from *lockingPost*,
    trains EP-LDA from that snapshot, and folds the resulting correction
    back into the shared posterior via ``lockingPost.increment``.

    NOTE(review): a later module-level ``def runBatchEP`` with a different
    signature shadows this one when both live in the same module — confirm
    which definition callers intend to use.
    """
    jobsDone = 0
    while True:
        job = workQueue.get()
        if job == "none":
            # Sentinel received: stop consuming.
            break
        W, K, docs, alpha, maxiters, thr, useNewton = job
        # Snapshot the shared parameter so local training cannot mutate it.
        lam = copy.deepcopy(lockingPost.value())
        # Train EP-LDA starting from the snapshot.
        ep = ep_lda.EP_LDA(W, K, docs, alpha, lam, useNewton)
        lam = ep.train(maxiters, thr)
        # Correction = trained posterior minus the model's prior term.
        ss = (lam - ep._eta)
        # Apply the correction to the shared posterior (lockingPost handles locking).
        lockingPost.increment(ss)
        workQueue.task_done()
        jobsDone += 1
        print("\tdone " + str(jobsDone))
def runBatchEP(W, K, docs, alpha, lam, maxiters, threshold, useNewton):
    """Train a single EP-LDA batch and return the updated lambda.

    Builds an ``ep_lda.EP_LDA`` model from the given corpus parameters and
    the starting posterior *lam*, runs training for up to *maxiters*
    iterations (stopping at *threshold*), and returns the trained lambda.
    """
    model = ep_lda.EP_LDA(W, K, docs, alpha, lam, useNewton)
    return model.train(maxiters, threshold)
def update_lambda(self, docs):
    """Train EP-LDA on *docs* starting from the current posterior.

    Updates ``self._lambda`` in place with the trained value and returns
    the pair ``(self._alpha, self._lambda)``.
    """
    model = ep_lda.EP_LDA(
        self._W, self._K, docs, self._alpha, self._lambda, self._useNewton
    )
    self._lambda = model.train(self._maxiters, self._threshold)
    return (self._alpha, self._lambda)