def printDebug(self):
    """Print summary statistics of the recorded update-approximation calls.

    Requires that debugging was enabled (``self._debug is True``) so that
    ``self._debugUACalls`` was populated. Prints, via ``Pr.prin``, the number
    of recorded calls and a min/max/mean table of the inner-iteration counts,
    the mean absolute final gradients, and the variance parameters recorded
    at each call.
    """
    assert self._debug is True
    from tabulate import tabulate

    calls = self._debugUACalls
    # Iterate directly over the recorded calls instead of indexing by range.
    iters = [c.innerIters for c in calls]
    sig02 = [c.sig02 for c in calls]
    sig12 = [c.sig12 for c in calls]
    sign2 = [c.sign2 for c in calls]
    gradMeans = [NP.mean(abs(c.lastGrad)) for c in calls]

    Pr.prin("*** Update approximation ***")
    Pr.prin("calls: %d" % (len(calls),))
    # BUG FIX: the row labels previously read "sig01"/"sig11"/"sign1",
    # which do not match the sig02/sig12/sign2 values actually shown.
    table = [["", "min", "max", "mean"],
             ["iters", min(iters), max(iters), NP.mean(iters)],
             ["|grad|_{mean}", min(gradMeans), max(gradMeans),
              NP.mean(gradMeans)],
             ["sig02", min(sig02), max(sig02), NP.mean(sig02)],
             ["sig12", min(sig12), max(sig12), NP.mean(sig12)],
             ["sign2", min(sign2), max(sign2), NP.mean(sign2)]]
    Pr.prin(tabulate(table))
def printDebug(self):
    """Print summary statistics of the recorded update-approximation calls.

    Only valid when debugging is enabled (``self._debug is True``), since the
    statistics come from ``self._debugUACalls``. Emits, through ``Pr.prin``,
    the call count and a min/max/mean table over the inner-iteration counts,
    mean absolute final gradients, and recorded variance parameters.
    """
    assert self._debug is True
    from tabulate import tabulate

    calls = self._debugUACalls
    # Direct iteration over the recorded calls (no range(len(...)) indexing).
    iters = [call.innerIters for call in calls]
    sig02 = [call.sig02 for call in calls]
    sig12 = [call.sig12 for call in calls]
    sign2 = [call.sign2 for call in calls]
    gradMeans = [NP.mean(abs(call.lastGrad)) for call in calls]

    Pr.prin("*** Update approximation ***")
    Pr.prin("calls: %d" % (len(calls),))
    # BUG FIX: labels "sig01"/"sig11"/"sign1" did not match the
    # sig02/sig12/sign2 values being tabulated; corrected to match.
    table = [["", "min", "max", "mean"],
             ["iters", min(iters), max(iters), NP.mean(iters)],
             ["|grad|_{mean}", min(gradMeans), max(gradMeans),
              NP.mean(gradMeans)],
             ["sig02", min(sig02), max(sig02), NP.mean(sig02)],
             ["sig12", min(sig12), max(sig12), NP.mean(sig12)],
             ["sign2", min(sign2), max(sign2), NP.mean(sign2)]]
    Pr.prin(tabulate(table))
def _updateApproximation(self):
    '''
    Calculates the Laplace approximation for the posterior.
    It can be defined by two variables: f mode and W at f mode.

    Runs a damped Newton iteration (eq. (3.18) of [1]) to find the mode f
    of log p(F|X,y), warm-starting from the previous solution when its
    size matches, and falling back to a line search when a plain Newton
    step fails to improve the objective. Exits the whole program via
    ``sys.exit`` if the final gradient is not small enough or the
    iteration did not converge within ``maxIter`` steps.
    '''
    if self._updateApproximationCount == 0:
        return
    if self._is_kernel_zero():
        self._updateApproximationCount = 0
        return
    self._updateApproximationBegin()
    # Convergence thresholds for the Newton iteration.
    gradEpsStop = 1e-10  # stop when the mean absolute gradient is this small
    objEpsStop = 1e-8    # stop when the objective change is below this
    gradEpsErr = 1e-3    # accept the result only if |grad|_mean is below this
    self._mean = self._calculateMean()
    m = self._mean
    # Warm-start from the previous dual variable when its size still matches.
    if self._lasta is None or self._lasta.shape[0] != self._N:
        aprev = NP.zeros(self._N)
    else:
        aprev = self._lasta
    fprev = self._rdotK(aprev) + m
    objprev = self._likelihood.log(fprev, self._y) \
        - (fprev - m).dot(aprev) / 2.0
    ii = 0
    line_search = False
    maxIter = 1000
    failed = False
    failedMsg = ''
    while ii < maxIter:
        grad = self._calculateUAGrad(fprev, aprev)
        if NP.mean(abs(grad)) < gradEpsStop:
            a = aprev
            f = fprev
            break
        # The following is just a Newton step (eq. (3.18) [1]) to maximize
        # log(p(F|X,y)) over F
        g = self._likelihood.gradient_log(fprev, self._y)
        W = self._calculateW(fprev)
        b = W * (fprev - m) + g
        a = self._calculateUAa(b, W)
        if line_search:
            (f, a, obj) = self._lineSearch(a, aprev, m)
        else:
            f = self._rdotK(a) + m
            obj = self._likelihood.log(f, self._y) - (f - m).dot(a) / 2.0
        if abs(objprev - obj) < objEpsStop:
            grad = self._calculateUAGrad(f, a)
            break
        if obj > objprev:
            fprev = f
            objprev = obj
            aprev = a
        else:
            # The step did not improve the objective: retry the same step
            # with a line search; if even that fails, keep the previous
            # iterate and stop.
            if line_search:
                grad = self._calculateUAGrad(fprev, aprev)
                a = aprev
                f = fprev
                break
            line_search = True
        ii += 1
    self._lasta = a
    err = NP.mean(abs(grad))
    if err > gradEpsErr:
        failed = True
        failedMsg = 'Gradient not too small in the Laplace update approximation.\n'
        failedMsg = failedMsg + \
            "Problem in the f mode estimation. |grad|_{mean} = %.6f." % (err,)
    if ii >= maxIter:
        failed = True
        failedMsg = 'Laplace update approximation did not converge in less than maxIter.'
    if self._debug:
        self._debugUACalls.append(self.DebugUACall(
            ii, grad, not failed, self.beta,
            self._sig02, self._sig12, self._sign2))
    if failed:
        # BUG FIX: this string literal was broken across a raw physical line
        # break (a syntax error); reconstructed with an explicit \n escape.
        Pr.prin('Laplace update approximation failed.\n'
                'The failure message is the following.')
        Pr.prin(failedMsg)
        sys.exit('Stopping program.')
    self._updateApproximationEnd(f, a)
    self._updateApproximationCount = 0
def _updateApproximation(self):
    '''
    Calculates the Laplace approximation for the posterior.
    It can be defined by two variables: f mode and W at f mode.

    Finds the mode f of log p(F|X,y) by Newton iteration (eq. (3.18) of
    [1]), warm-starting from the previous solution when available and
    switching to a line search if a plain Newton step fails to improve
    the objective. Terminates the program via ``sys.exit`` when the final
    gradient is too large or the iteration hits ``maxIter``.
    '''
    if self._updateApproximationCount == 0:
        return
    if self._is_kernel_zero():
        self._updateApproximationCount = 0
        return
    self._updateApproximationBegin()
    # Convergence thresholds for the Newton iteration.
    gradEpsStop = 1e-10  # stop when the mean absolute gradient is this small
    objEpsStop = 1e-8    # stop when the objective change is below this
    gradEpsErr = 1e-3    # accept the result only if |grad|_mean is below this
    self._mean = self._calculateMean()
    m = self._mean
    # Warm-start from the previous dual variable when its size still matches.
    if self._lasta is None or self._lasta.shape[0] != self._N:
        aprev = NP.zeros(self._N)
    else:
        aprev = self._lasta
    fprev = self._rdotK(aprev) + m
    objprev = self._likelihood.log(fprev, self._y) \
        - (fprev - m).dot(aprev) / 2.0
    ii = 0
    line_search = False
    maxIter = 1000
    failed = False
    failedMsg = ''
    while ii < maxIter:
        grad = self._calculateUAGrad(fprev, aprev)
        if NP.mean(abs(grad)) < gradEpsStop:
            a = aprev
            f = fprev
            break
        # The following is just a Newton step (eq. (3.18) [1]) to maximize
        # log(p(F|X,y)) over F
        g = self._likelihood.gradient_log(fprev, self._y)
        W = self._calculateW(fprev)
        b = W * (fprev - m) + g
        a = self._calculateUAa(b, W)
        if line_search:
            (f, a, obj) = self._lineSearch(a, aprev, m)
        else:
            f = self._rdotK(a) + m
            obj = self._likelihood.log(f, self._y) - (f - m).dot(a) / 2.0
        if abs(objprev - obj) < objEpsStop:
            grad = self._calculateUAGrad(f, a)
            break
        if obj > objprev:
            fprev = f
            objprev = obj
            aprev = a
        else:
            # Newton step did not improve the objective: retry once with a
            # line search; if that also fails, keep the previous iterate.
            if line_search:
                grad = self._calculateUAGrad(fprev, aprev)
                a = aprev
                f = fprev
                break
            line_search = True
        ii += 1
    self._lasta = a
    err = NP.mean(abs(grad))
    if err > gradEpsErr:
        failed = True
        failedMsg = 'Gradient not too small in the Laplace update approximation.\n'
        failedMsg = failedMsg + \
            "Problem in the f mode estimation. |grad|_{mean} = %.6f." % (err,)
    if ii >= maxIter:
        failed = True
        failedMsg = 'Laplace update approximation did not converge in less than maxIter.'
    if self._debug:
        self._debugUACalls.append(self.DebugUACall(
            ii, grad, not failed, self.beta,
            self._sig02, self._sig12, self._sign2))
    if failed:
        # BUG FIX: this string literal spanned a raw physical line break
        # (a syntax error); reconstructed with an explicit \n escape.
        Pr.prin('Laplace update approximation failed.\n'
                'The failure message is the following.')
        Pr.prin(failedMsg)
        sys.exit('Stopping program.')
    self._updateApproximationEnd(f, a)
    self._updateApproximationCount = 0
def _updateApproximation(self):
    '''
    Calculates an EP (expectation propagation) approximation for the
    posterior by iteratively refining the site parameters (ttau, tnu)
    until the marginal means and variances stop changing.

    NOTE(review): the original docstring described this as a Laplace
    approximation, but the body -- and its own "EP did not converge"
    message -- implement expectation propagation.
    '''
    if self._updateApproximationCount == 0:
        return
    if self._is_kernel_zero():
        self._updateApproximationCount = 0
        return
    self._updateApproximationBegin()
    self._mean = self._calculateMean()
    m = self._mean
    # Site parameters: precisions (ttau) and natural means (tnu).
    ttau = NP.zeros(self._N)
    tnu = NP.zeros(self._N)
    # NOTE(review): sig2_ and mu_ are never read or written below.
    sig2_ = NP.zeros(self._N)
    mu_ = NP.zeros(self._N)
    # Start the marginal moments from the prior: diag(K) and the mean.
    prevsig2 = self._dKn()
    prevmu = m.copy()
    converged = False
    outeriter = 1
    iterMax = 1000
    while outeriter <= iterMax and not converged:
        # Cavity parameters: marginal with the site contribution removed.
        tau_ = 1.0 / prevsig2 - ttau
        nu_ = prevmu / prevsig2 - tnu
        tt = tau_**2 + tau_
        stt = NP.sqrt(tt)
        # Moment matching against the Gaussian cdf; assumes self._y holds
        # +/-1 labels (probit-style likelihood) -- TODO confirm against
        # the likelihood used by the callers.
        c = (self._y * nu_) / stt
        # dan: using _cdf and _pdf instead of cdf and pdf I avoid
        # a lot of overhead due to error checking and other things
        nc_hz = NP.exp(ST.norm._logpdf(c) - ST.norm._logcdf(c))
        hmu = nu_ / tau_ + nc_hz * self._y / stt
        hsig2 = 1.0 / tau_ - (nc_hz / tt) * (c + nc_hz)
        tempttau = 1.0 / hsig2 - tau_
        # Update only the sites whose new precision is finite and not
        # negligibly small.
        ok = NP.bitwise_and(~NP.isnan(tempttau), abs(tempttau) > 1e-8)
        ttau[ok] = tempttau[ok]
        tnu[ok] = hmu[ok] / hsig2[ok] - nu_[ok]
        # Recompute the full marginal moments from the updated sites.
        (sig2, mu) = self._calculateSig2Mu(ttau, tnu, m)
        lastMuDiff = abs(mu - prevmu).max()
        lastSig2Diff = abs(sig2 - prevsig2).max()
        if lastMuDiff < 1e-6 and lastSig2Diff < 1e-6:
            converged = True
        prevmu = mu
        prevsig2 = sig2
        outeriter += 1
    if outeriter > iterMax:
        # Non-convergence is reported but not fatal: the last iterate is
        # still handed to _updateApproximationEnd below.
        # NOTE(review): if convergence happens exactly on the iterMax-th
        # pass, outeriter still ends up > iterMax and this reports a
        # spurious non-convergence.
        Pr.prin('EP did not converge. Printing debug information...')
        Pr.prin('beta ' + str(self.beta))
        Pr.prin('sig02 ' + str(self.sig02) + ' sig12 ' + str(self.sig12) +
                ' sign2 ' + str(self.sign2))
        Pr.prin('abs(mu-prevmu).max() ' + str(lastMuDiff))
        Pr.prin('abs(sig2-prevsig2).max() ' + str(lastSig2Diff))
    self._updateApproximationEnd(ttau, tnu, tau_, nu_)
    self._updateApproximationCount = 0