Example #1
def __init__(self, link):
    self._lasta = None
    self._debugUACalls = []
    self._link = link
    if link == "logistic":
        self._likelihood = LogitLikelihood()
    elif link == "erf":
        self._likelihood = ProbitLikelihood()
    else:
        assert False, "Unknown link function."
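
A minimal usage sketch, assuming this constructor belongs to the LaplaceGLMM class shown in Example #2 and that the project's likelihood classes are importable; the constructor simply maps a link-function name to the matching likelihood object:

# Hedged sketch: LaplaceGLMM, LogitLikelihood and ProbitLikelihood come from
# the project these examples were extracted from.
logit_model = LaplaceGLMM("logistic")   # selects LogitLikelihood
probit_model = LaplaceGLMM("erf")       # selects ProbitLikelihood
# Any other name fails the assertion:
#   LaplaceGLMM("probit")  ->  AssertionError: Unknown link function.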
Example #2
# NumPy/SciPy imports inferred from the aliases used below. This example is
# an excerpt: Pr (logging), LogitLikelihood, ProbitLikelihood and helpers
# such as _rdotK and _calculateUAa are defined elsewhere in the project.
import sys

import numpy as NP
import scipy as sp
import scipy.optimize

class LaplaceGLMM(object):
    def __init__(self, link):
        self._lasta = None
        self._debugUACalls = []
        self._link = link
        if link == "logistic":
            self._likelihood = LogitLikelihood()
        elif link == "erf":
            self._likelihood = ProbitLikelihood()
        else:
            assert False, "Unknown link function."

    def _calculateW(self, f):
        W = -self._likelihood.hessian_log(f)
        return W

    def _lineSearch(self, a, aprev, m):
        da = a - aprev

        def fobj(alpha):
            a = aprev + alpha * da
            f = self._rdotK(a) + m
            return -(self._likelihood.log(f, self._y) - (f - m).dot(a) / 2.0)

        (alpha, obj, niter, funcalls) = sp.optimize.brent(
            fobj, brack=(0.0, 1.0), full_output=True, tol=1e-4, maxiter=10)
        obj = -obj
        a = aprev + alpha * da
        f = self._rdotK(a) + m
        return (f, a, obj)

    def _calculateUAGrad(self, f, a):
        grad = self._likelihood.gradient_log(f, self._y) - a
        return grad

    def printDebug(self):
        assert self._debug is True
        from tabulate import tabulate
        calls = self._debugUACalls
        iters = [c.innerIters for c in calls]
        sig02 = [c.sig02 for c in calls]
        sig12 = [c.sig12 for c in calls]
        sign2 = [c.sign2 for c in calls]
        gradMeans = [NP.mean(abs(c.lastGrad)) for c in calls]

        Pr.prin("*** Update approximation ***")
        Pr.prin("calls: %d" % (len(calls),))

        table = [["", "min", "max", "mean"],
                 ["iters", min(iters), max(iters), NP.mean(iters)],
                 ["|grad|_{mean}", min(gradMeans), max(gradMeans), NP.mean(gradMeans)],
                 ["sig02", min(sig02), max(sig02), NP.mean(sig02)],
                 ["sig12", min(sig12), max(sig12), NP.mean(sig12)],
                 ["sign2", min(sign2), max(sign2), NP.mean(sign2)]]
        Pr.prin(tabulate(table))

    def _updateApproximation(self):
        '''
        Computes the Laplace approximation to the posterior.
        The approximation is fully determined by two quantities:
        the mode of f and the value of W at that mode.
        '''
        if self._updateApproximationCount == 0:
            return

        if self._is_kernel_zero():
            self._updateApproximationCount = 0
            return

        self._updateApproximationBegin()

        gradEpsStop = 1e-10
        objEpsStop = 1e-8
        gradEpsErr = 1e-3
        
        self._mean = self._calculateMean()
        m = self._mean

        if self._lasta is None or self._lasta.shape[0] != self._N:
            aprev = NP.zeros(self._N)
        else:
            aprev = self._lasta

        fprev = self._rdotK(aprev) + m
        objprev = self._likelihood.log(fprev, self._y) - (fprev - m).dot(aprev) / 2.0
        ii = 0
        line_search = False
        maxIter = 1000
        failed = False
        failedMsg = ''
        while ii < maxIter:
            grad = self._calculateUAGrad(fprev, aprev)
            if NP.mean(abs(grad)) < gradEpsStop:
                a = aprev
                f = fprev
                break

            # The following is just a Newton step (eq. (3.18) [1]) to maximize
            # log(p(F|X,y)) over F
            g = self._likelihood.gradient_log(fprev, self._y)
            W = self._calculateW(fprev)
            b = W * (fprev - m) + g

            a = self._calculateUAa(b, W)

            if line_search:
                (f, a, obj) = self._lineSearch(a, aprev, m)
            else:
                f = self._rdotK(a) + m
                obj = self._likelihood.log(f, self._y) - (f - m).dot(a) / 2.0

            if abs(objprev - obj) < objEpsStop:
                grad = self._calculateUAGrad(f, a)
                break
            if obj > objprev:
                fprev = f
                objprev = obj
                aprev = a
            else:
                if line_search:
                    grad = self._calculateUAGrad(fprev, aprev)
                    a = aprev
                    f = fprev
                    break
                line_search = True
            ii += 1

        self._lasta = a

        err = NP.mean(abs(grad))
        if err > gradEpsErr:
            failed = True
            failedMsg = 'Gradient is not small enough in the Laplace update approximation.\n'
            failedMsg += "Problem in the f mode estimation. |grad|_{mean} = %.6f." % (err,)

        if ii >= maxIter:
            failed = True
            failedMsg = 'Laplace update approximation did not converge within maxIter iterations.'

        if self._debug:
            self._debugUACalls.append(self.DebugUACall(
                ii, grad, not failed, self.beta, self._sig02, self._sig12, self._sign2))

        if failed:
            Pr.prin('Laplace update approximation failed with the following message.')
            Pr.prin(failedMsg)
            sys.exit('Stopping program.')

        self._updateApproximationEnd(f, a)

        self._updateApproximationCount = 0

    def _predict(self, meanstar, kstar, kstarstar, prob):
        self._updateConstants()
        self._updateApproximation()

        if NP.isscalar(kstarstar):
            return self._predict_each(meanstar, kstar, kstarstar, prob)

        n = len(kstarstar)
        ps = NP.zeros(n)

        for i in range(n):
            ps[i] = self._predict_each(meanstar[i], kstar[i,:], kstarstar[i], prob)

        return ps
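
The Newton step that the comment in _updateApproximation attributes to eq. (3.18) of [1] (likely Rasmussen & Williams, Gaussian Processes for Machine Learning) is hidden inside _calculateUAa, which this example does not show. Below is a minimal sketch of the numerically stable form of that step, assuming a dense kernel matrix K and a diagonal W stored as a 1-D array; newton_step_mode is a hypothetical name, not part of the project:

import numpy as NP

def newton_step_mode(K, f, m, g, W):
    # One Newton update toward the mode of log p(f | X, y), in the stable
    # parametrization f = K a + m.  W holds the diagonal of -hessian_log(f);
    # g is gradient_log(f, y).
    n = K.shape[0]
    b = W * (f - m) + g                      # same b as in _updateApproximation
    sW = NP.sqrt(W)
    B = NP.eye(n) + sW[:, None] * K * sW[None, :]
    L = NP.linalg.cholesky(B)
    # a = b - sqrt(W) B^{-1} sqrt(W) K b, via two triangular solves
    v = NP.linalg.solve(L, sW * K.dot(b))
    a = b - sW * NP.linalg.solve(L.T, v)
    f_new = K.dot(a) + m                     # plays the role of _rdotK(a) + m
    return f_new, a

On convergence the resulting pair (f, a) is exactly what _updateApproximationEnd(f, a) receives above.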