Example #1
    def _predict(self, meanstar, kstar, kstarstar, prob):
        self._updateConstants()
        self._updateApproximation()

        m = self._mean
        tnu = self._tnu
        Lk = self._Lk
        H = self._H
        V = self._V

        Ktnu = self._rdotK(tnu)
        mKtnu = m + Ktnu
        Vkstar = ddot(V, kstar, left=True)
        Hkstar = dot(H, kstar)

        mustar = meanstar + dot(kstar.T, tnu) - dot(Vkstar.T, mKtnu) + dot(Hkstar.T, dot(H, mKtnu))

        if prob is False:
            # return a hard +/-1 label based on the sign of the predictive mean
            if mustar > 0.0:
                return +1.0
            return -1.0

        sig2star = kstarstar - dotd(kstar.T, Vkstar)\
            + dotd(dot(Hkstar.T, H), kstar) + self._sign2

        return LH.probit_sigmoid(mustar/NP.sqrt(1.0 + sig2star))
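The last line of this example is the standard probit predictive probability for GP classification, p(y* = +1) = Phi(mu* / sqrt(1 + sig2*)). Assuming `LH.probit_sigmoid` is the standard normal CDF (the name suggests it, but that is an assumption), the same quantity can be reproduced with SciPy; `mustar` and `sig2star` below are made-up placeholder values, not outputs of this class:

    import numpy as NP
    from scipy.stats import norm

    # hypothetical predictive mean and variance of the latent function at test inputs
    mustar = NP.array([0.3, -1.2, 2.0])
    sig2star = NP.array([0.5, 0.1, 1.5])

    # probit predictive probability, assuming probit_sigmoid is the standard normal CDF
    p = norm.cdf(mustar / NP.sqrt(1.0 + sig2star))
    print(p)  # class-1 probabilities in (0, 1)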
Example #2
    def _predict(self, meanstar, kstar, kstarstar, prob):
        self._updateConstants()
        self._updateApproximation()

        m = self._mean
        tnu = self._tnu
        Lk = self._Lk
        H = self._H
        V = self._V

        Ktnu = self._rdotK(tnu)
        mKtnu = m + Ktnu
        Vkstar = ddot(V, kstar, left=True)
        Hkstar = dot(H, kstar)

        mustar = meanstar + dot(kstar.T, tnu) - dot(Vkstar.T, mKtnu) + dot(
            Hkstar.T, dot(H, mKtnu))

        if prob is False:
            # return a hard +/-1 label based on the sign of the predictive mean
            if mustar > 0.0:
                return +1.0
            return -1.0

        sig2star = kstarstar - dotd(kstar.T, Vkstar)\
            + dotd(dot(Hkstar.T, H), kstar) + self._sign2

        return LH.probit_sigmoid(mustar / NP.sqrt(1.0 + sig2star))
Example #3
 def _updateApproximationEnd(self, ttau, tnu, tau_, nu_):
     self._ttau = ttau
     self._tnu = tnu
     self._tau_ = tau_
     self._nu_ = nu_
     self._V = self._calculateV(ttau, self._sign2)
     self._Lk = self._calculateLk(self._G01, self._V)
     self._H = stl(self._Lk, ddot(self._G01.T, self._V, left=False))
Example #4
    def _rmll_gradient(self,
                       optSig02=True,
                       optSig12=True,
                       optSign2=True,
                       optBeta=True):
        self._updateConstants()
        self._updateApproximation()

        m = self._mean
        H = self._H
        V = self._V
        ttau = self._ttau
        tnu = self._tnu
        G0 = self._G0
        G1 = self._G1

        Smtnu = ttau * m - tnu
        KSmtnu = self._rdotK(Smtnu)

        b = Smtnu - V * KSmtnu + dot(H.T, dot(H, KSmtnu))

        ret = []
        if optSig02:
            r = 0.5*(dot(b, dot(G0, dot(G0.T, b))) - trace2(ddot(V, G0, left=True), G0.T)\
                + trace2(H.T, dot(dot(H, G0), G0.T)))

            ret.append(r)

        if optSig12:
            r = 0.5*(dot(b, dot(G1, dot(G1.T, b))) - trace2(ddot(V, G1, left=True), G1.T)\
                + trace2(H.T, dot(dot(H, G1), G1.T)))

            ret.append(r)

        if optSign2:
            r = 0.5*(dot(b, b) - NP.sum(V)\
                + trace2(H.T, H))

            ret.append(r)

        if optBeta:
            ret += list(-dot(b, self._X))

        return NP.array(ret)
Example #5
    def _rmll_gradient(self, optSig02=True, optSig12=True, optSign2=True, optBeta=True):
        self._updateConstants()
        self._updateApproximation()
        
        m = self._mean
        H = self._H
        V = self._V
        ttau = self._ttau
        tnu = self._tnu
        G0 = self._G0
        G1 = self._G1

        Smtnu = ttau*m - tnu
        KSmtnu = self._rdotK(Smtnu)

        b = Smtnu - V*KSmtnu + dot(H.T, dot(H, KSmtnu))

        ret = []
        if optSig02:
            r = 0.5*(dot(b, dot(G0, dot(G0.T, b))) - trace2(ddot(V, G0, left=True), G0.T)\
                + trace2(H.T, dot(dot(H, G0), G0.T)))
            
            ret.append(r)

        if optSig12:
            r = 0.5*(dot(b, dot(G1, dot(G1.T, b))) - trace2(ddot(V, G1, left=True), G1.T)\
                + trace2(H.T, dot(dot(H, G1), G1.T)))
            
            ret.append(r)

        if optSign2:
            r = 0.5*(dot(b, b) - NP.sum(V)\
                + trace2(H.T, H))
            
            ret.append(r)

        if optBeta:
            ret += list(-dot(b, self._X))

        return NP.array(ret)
Example #6
    def _calculateUAa(self, b, W):
        A = 1.0 + W*self._sign2
        V = W/A
        Lk = self._calculateLk(self._G01, V)

        Gtb = dot(self._G01.T, b)
        GtV = ddot(self._G01.T, V, left=False)
        LtLGtV = stu(Lk.T, stl(Lk, GtV))
        LtLGtVG = dot(LtLGtV, self._G01)
        bn = self._sign2*b
        a = b + dot(dot(GtV.T, LtLGtVG) - GtV.T, Gtb)\
              + dot(GtV.T, dot(LtLGtV, bn)) - V*bn

        return a
Example #7
    def _calculateUAa(self, b, W):
        A = 1.0 + W * self._sign2
        V = W / A
        Lk = self._calculateLk(self._G01, V)

        Gtb = dot(self._G01.T, b)
        GtV = ddot(self._G01.T, V, left=False)
        LtLGtV = stu(Lk.T, stl(Lk, GtV))
        LtLGtVG = dot(LtLGtV, self._G01)
        bn = self._sign2 * b
        a = b + dot(dot(GtV.T, LtLGtVG) - GtV.T, Gtb)\
              + dot(GtV.T, dot(LtLGtV, bn)) - V*bn

        return a
Example #8
    def _calculateSig2Mu(self, ttau, tnu, mean):
        sign2 = self._sign2

        V = self._calculateV(ttau, sign2)
        G01 = self._G01
    
        Lk = self._calculateLk(G01, V)
    
        G01tV = ddot(G01.T, V, left=False)
        H = stl(Lk, G01tV)

        HK = self._ldotK(H)
    
        sig2 = self._dKn() - sign2**2*V - dotd(G01, dot(dot(G01tV, G01), G01.T))\
            - 2.0*sign2*dotd(G01, G01tV) + dotd(HK.T, HK)

        assert NP.all(NP.isfinite(sig2)), 'sig2 should be finite.'

        u = self._mean + self._rdotK(tnu)
    
        mu = u - self._rdotK(V*u) + self._rdotK(H.T.dot(H.dot(u)))
        assert NP.all(NP.isfinite(mu)), 'mu should be finite.'

        return (sig2, mu)
Example #9
    def _calculateSig2Mu(self, ttau, tnu, mean):
        sign2 = self._sign2

        V = self._calculateV(ttau, sign2)
        G01 = self._G01

        Lk = self._calculateLk(G01, V)

        G01tV = ddot(G01.T, V, left=False)
        H = stl(Lk, G01tV)

        HK = self._ldotK(H)

        sig2 = self._dKn() - sign2**2*V - dotd(G01, dot(dot(G01tV, G01), G01.T))\
            - 2.0*sign2*dotd(G01, G01tV) + dotd(HK.T, HK)

        assert NP.all(NP.isfinite(sig2)), 'sig2 should be finite.'

        u = self._mean + self._rdotK(tnu)

        mu = u - self._rdotK(V * u) + self._rdotK(H.T.dot(H.dot(u)))
        assert NP.all(NP.isfinite(mu)), 'mu should be finite.'

        return (sig2, mu)
Example #10
 def _calculateLk(self, G01, D):
     Bk = dot(G01.T, ddot(D, G01, left=True))
     Bk[NP.diag_indices_from(Bk)] += 1.0
     Lk = cholesky(Bk, lower=True, check_finite=False)
     return Lk
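`_calculateLk` factorises only the small k-by-k matrix Bk = I + G01^T diag(D) G01. Combined with H = Lk^{-1} G01^T diag(V) (see `_updateApproximationEnd` above), this appears to exploit the usual low-rank (Woodbury) identity diag(V) - H^T H = (diag(V)^{-1} + G01 G01^T)^{-1}, replacing an n-by-n inversion by a k-by-k Cholesky. A minimal numerical check of that identity, with random stand-ins for `G01` and the positive diagonal `V`:

    import numpy as NP
    from scipy.linalg import cholesky, solve_triangular

    rng = NP.random.default_rng(0)
    n, k = 8, 3
    G = rng.standard_normal((n, k))    # stands in for the low-rank factor G01
    V = rng.uniform(0.5, 2.0, size=n)  # positive diagonal (e.g. scaled site precisions)

    # Bk = I + G^T diag(V) G, factorised as in _calculateLk
    Bk = G.T @ (V[:, None] * G)
    Bk[NP.diag_indices_from(Bk)] += 1.0
    Lk = cholesky(Bk, lower=True, check_finite=False)

    # H = Lk^{-1} G^T diag(V), as in _updateApproximationEnd
    H = solve_triangular(Lk, G.T * V, lower=True)

    # Woodbury identity: diag(V) - H^T H == (diag(V)^{-1} + G G^T)^{-1}
    lhs = NP.diag(V) - H.T @ H
    rhs = NP.linalg.inv(NP.diag(1.0 / V) + G @ G.T)
    assert NP.allclose(lhs, rhs)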
Example #11
 def _calculateLn(self, K, D):
     Bn = ddot(D, ddot(K, D, left=False), left=True)
     Bn[NP.diag_indices_from(Bn)] += 1.0
     Ln = cholesky(Bn, lower=True, check_finite=False)
     return Ln
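`_calculateLn` builds the dense counterpart Bn = I + diag(D) K diag(D). If D is W^{1/2} (plausibly the `Wsq` that appears in the gradient examples), this is the familiar B = I + W^{1/2} K W^{1/2} matrix of GP inference, whose Cholesky gives numerically stable solves with K + W^{-1} via (K + W^{-1})^{-1} = W^{1/2} B^{-1} W^{1/2}. A small sketch of that relation, with a random SPD matrix standing in for the kernel K:

    import numpy as NP
    from scipy.linalg import cholesky, cho_solve

    rng = NP.random.default_rng(1)
    n = 6
    A = rng.standard_normal((n, n))
    K = A @ A.T + n * NP.eye(n)        # random SPD stand-in for the kernel matrix
    W = rng.uniform(0.5, 2.0, size=n)  # positive curvature/site terms
    Wsq = NP.sqrt(W)

    # Bn = I + W^{1/2} K W^{1/2}, factorised as in _calculateLn with D = Wsq
    Bn = Wsq[:, None] * K * Wsq[None, :]
    Bn[NP.diag_indices_from(Bn)] += 1.0
    Ln = cholesky(Bn, lower=True, check_finite=False)

    # (K + W^{-1})^{-1} == W^{1/2} Bn^{-1} W^{1/2}
    lhs = NP.linalg.inv(K + NP.diag(1.0 / W))
    rhs = Wsq[:, None] * cho_solve((Ln, True), NP.diag(Wsq))
    assert NP.allclose(lhs, rhs)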
Example #12
    def _rmll_gradient(self, optSig02=True, optSig12=True, optSign2=True, optBeta=True):
        self._updateConstants()
        self._updateApproximation()

        f, a = self._f, self._a
        W, Wsq = self._W, self._Wsq
        Lk = self._Lk

        m = self._mean
        X = self._X
        G0 = self._G0
        G1 = self._G1
        sign2 = self._sign2
        G01 = self._G01

        #g = self._likelihood.gradient_log(f)
        #a==g

        h = self._likelihood.third_derivative_log(f)

        V = W/self._A

        d = self._dKn()
        G01tV = ddot(G01.T, V, left=False)
        H = stl(Lk, G01tV)
        dkH = self._ldotK(H)
        diags = (d - sign2**2 * V - dotd(G01, dot(dot(G01tV, G01), G01.T))\
            - 2.0*sign2*dotd(G01, G01tV) + dotd(dkH.T, dkH)) * h

        ret = []
        
        if optSig02:
            dK0a = dot(G0, dot(G0.T, a))
            t = V*dK0a - dot(H.T, dot(H, dK0a))
            dF0 = dK0a - self._rdotK(t)

            LkG01VG0 = dot(H, G0)
            VG0 = ddot(V, G0, left=True)

            ret0 = dot(a, dF0) - 0.5*dot(a, dK0a) + dot(f-m, t)\
                + 0.5*NP.sum( diags*dF0 )\
                + -0.5*trace2(VG0, G0.T) + 0.5*trace2( LkG01VG0.T, LkG01VG0 )
            
            ret.append(ret0)

        if optSig12:
            dK1a = dot(G1, dot(G1.T, a))
            t = V*dK1a - dot(H.T, dot(H, dK1a))
            dF1 = dK1a - self._rdotK(t)

            LkG01VG1 = dot(H, G1)
            VG1 = ddot(V, G1, left=True)

            ret1 = dot(a, dF1)- 0.5*dot(a, dK1a) + dot(f-m, t)\
                + 0.5*NP.sum( diags*dF1 )\
                + -0.5*trace2(VG1, G1.T) + 0.5*trace2( LkG01VG1.T, LkG01VG1 )
            
            ret.append(ret1)

        if optSign2:
            t = V*a - dot(H.T, dot(H, a))
            dFn = a - self._rdotK(t)

            retn = dot(a, dFn)- 0.5*dot(a, a) + dot(f-m, t)\
                + 0.5*NP.sum( diags*dFn )\
                + -0.5*NP.sum(V) + 0.5*trace2( H.T, H )
            
            ret.append(retn)

        if optBeta:
            t = ddot(V, X, left=True) - dot(H.T, dot(H, X))
            dFbeta = X - self._rdotK(t)

            retbeta = dot(a, dFbeta) + dot(f-m, t)
            for i in range(dFbeta.shape[1]):
                retbeta[i] += 0.5*NP.sum( diags*dFbeta[:,i] )

            ret.extend(retbeta)

        ret = NP.array(ret)
        assert NP.all(NP.isfinite(ret)), 'Not finite regular marginal loglikelihood gradient.'
        
        return ret
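Analytic gradients like `_rmll_gradient` are easy to get subtly wrong, and a standard sanity check is to compare them against central finite differences of the objective. A generic sketch of such a check; the `objective` and `gradient` callables here are hypothetical placeholders, not part of this class:

    import numpy as NP

    def check_gradient(objective, gradient, x0, eps=1e-6, tol=1e-5):
        # compare an analytic gradient with central finite differences at x0
        x0 = NP.asarray(x0, dtype=float)
        g = NP.asarray(gradient(x0), dtype=float)
        fd = NP.empty_like(g)
        for i in range(x0.size):
            step = NP.zeros_like(x0)
            step[i] = eps
            fd[i] = (objective(x0 + step) - objective(x0 - step)) / (2.0 * eps)
        return NP.max(NP.abs(g - fd)) < tol

    # example with a quadratic whose gradient is known in closed form
    f = lambda x: 0.5 * NP.dot(x, x)
    df = lambda x: x
    print(check_gradient(f, df, NP.array([0.3, -1.2, 2.0])))  # True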
Example #13
    def _rmll_gradient(self,
                       optSig02=True,
                       optSig12=True,
                       optSign2=True,
                       optBeta=True):
        self._updateConstants()
        self._updateApproximation()

        (f, a) = (self._f, self._a)
        (W, Wsq) = (self._W, self._Wsq)
        Lk = self._Lk

        m = self._mean
        X = self._X
        G0 = self._G0
        G1 = self._G1
        sign2 = self._sign2
        G01 = self._G01

        #g = self._likelihood.gradient_log(f)
        #a==g

        h = self._likelihood.third_derivative_log(f)

        V = W / self._A

        d = self._dKn()
        G01tV = ddot(G01.T, V, left=False)
        H = stl(Lk, G01tV)
        dkH = self._ldotK(H)
        diags = (d - sign2**2 * V - dotd(G01, dot(dot(G01tV, G01), G01.T))\
            - 2.0*sign2*dotd(G01, G01tV) + dotd(dkH.T, dkH)) * h

        ret = []

        if optSig02:
            dK0a = dot(G0, dot(G0.T, a))
            t = V * dK0a - dot(H.T, dot(H, dK0a))
            dF0 = dK0a - self._rdotK(t)

            LkG01VG0 = dot(H, G0)
            VG0 = ddot(V, G0, left=True)

            ret0 = dot(a, dF0) - 0.5*dot(a, dK0a) + dot(f-m, t)\
                + 0.5*NP.sum( diags*dF0 )\
                + -0.5*trace2(VG0, G0.T) + 0.5*trace2( LkG01VG0.T, LkG01VG0 )

            ret.append(ret0)

        if optSig12:
            dK1a = dot(G1, dot(G1.T, a))
            t = V * dK1a - dot(H.T, dot(H, dK1a))
            dF1 = dK1a - self._rdotK(t)

            LkG01VG1 = dot(H, G1)
            VG1 = ddot(V, G1, left=True)

            ret1 = dot(a, dF1)- 0.5*dot(a, dK1a) + dot(f-m, t)\
                + 0.5*NP.sum( diags*dF1 )\
                + -0.5*trace2(VG1, G1.T) + 0.5*trace2( LkG01VG1.T, LkG01VG1 )

            ret.append(ret1)

        if optSign2:
            t = V * a - dot(H.T, dot(H, a))
            dFn = a - self._rdotK(t)

            retn = dot(a, dFn)- 0.5*dot(a, a) + dot(f-m, t)\
                + 0.5*NP.sum( diags*dFn )\
                + -0.5*NP.sum(V) + 0.5*trace2( H.T, H )

            ret.append(retn)

        if optBeta:
            t = ddot(V, X, left=True) - dot(H.T, dot(H, X))
            dFbeta = X - self._rdotK(t)

            retbeta = dot(a, dFbeta) + dot(f - m, t)
            for i in range(dFbeta.shape[1]):
                retbeta[i] += 0.5 * NP.sum(diags * dFbeta[:, i])

            ret.extend(retbeta)

        ret = NP.array(ret)
        assert NP.all(NP.isfinite(
            ret)), 'Not finite regular marginal loglikelihood gradient.'

        return ret