Example #1
    def _predict(self, meanstar, kstar, kstarstar, prob):
        self._updateConstants()
        self._updateApproximation()

        m = self._mean
        tnu = self._tnu
        H = self._H
        V = self._V

        Ktnu = self._rdotK(tnu)
        mKtnu = m + Ktnu
        Vkstar = ddot(V, kstar, left=True)
        Hkstar = dot(H, kstar)

        mustar = meanstar + dot(kstar.T, tnu) - dot(Vkstar.T, mKtnu) + dot(Hkstar.T, dot(H, mKtnu))

        if prob is False:
            # Hard classification: return the sign of the predictive mean.
            if mustar > 0.0:
                return +1.0
            return -1.0

        sig2star = kstarstar - dotd(kstar.T, Vkstar)\
            + dotd(dot(Hkstar.T, H), kstar) + self._sign2

        return LH.probit_sigmoid(mustar/NP.sqrt(1.0 + sig2star))
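These snippets rely on a few linear-algebra helpers (dot, dotd, ddot, stl, trace2) whose definitions are not shown. The following is a minimal sketch of plausible NumPy/SciPy implementations, inferred only from how the examples use them; the actual library versions may differ (for instance by also handling 1-D operands in dotd).

import numpy as np
from scipy.linalg import solve_triangular

dot = np.dot

def dotd(A, B):
    # diag(A @ B) without forming the full product (2-D operands assumed).
    return np.einsum('ij,ji->i', A, B)

def ddot(A, B, left=True):
    # Product with a diagonal matrix stored as a 1-D array:
    # left=True  -> diag(A) @ B  (A is the 1-D array),
    # left=False -> A @ diag(B)  (B is the 1-D array).
    if left:
        return A * B if B.ndim == 1 else A[:, None] * B
    return A * B

def stl(L, B):
    # Solve the lower-triangular system L @ X = B.
    return solve_triangular(L, B, lower=True)

def trace2(A, B):
    # tr(A @ B) computed from an elementwise product.
    return np.sum(A * B.T)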
Example #2
    def _predict(self, meanstar, kstar, kstarstar, prob):
        self._updateConstants()
        self._updateApproximation()

        m = self._mean
        tnu = self._tnu
        LnSsq = self._LnSsq
        LnSsqkstar = dot(LnSsq, kstar)
        LnSsqK = self._LnSsqK

        mustar = meanstar + dot(kstar.T, tnu) - dot(
            LnSsqkstar.T,
            dot(LnSsq, m) + dot(LnSsqK, tnu))

        if prob is False:
            # Hard classification: return the sign of the predictive mean.
            if mustar > 0.0:
                return +1.0
            return -1.0

        sig2star = kstarstar - dotd(dot(LnSsqkstar.T, LnSsq),
                                    kstar) + self._sign2

        return LH.probit_sigmoid(mustar / NP.sqrt(1.0 + sig2star))
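Both _predict variants squash the predictive mean by the predictive variance before applying the probit link. Assuming LH.probit_sigmoid is the standard normal CDF (an assumption; only its name appears in these snippets), the returned probability amounts to the sketch below.

# Hedged sketch of the predictive probability used above, assuming
# LH.probit_sigmoid is the standard normal CDF Phi (not shown in the source):
# p(y* = +1 | data) = Phi(mustar / sqrt(1 + sig2star)).
import numpy as np
from scipy.stats import norm

def probit_predictive(mustar, sig2star):
    return norm.cdf(mustar / np.sqrt(1.0 + sig2star))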
Example #3
    def _xMeanCov(self, xX, xG0, xG1):
        '''
        Computes the mean and covariance between the latent variables.
        You can provide one or n latent variables.
        ----------------------------------------------------------------------------
        Input:
        xX              : [D] array of fixed effects or
                          [n*D] array of fixed effects for each latent variable
        xG0             : [k0] array of random effects or
                          [n*k0] array of random effects for each latent variable
        xG1             : [k1] array of random effects or
                          [n*k1] array of random effects for each latent variable
        -----------------------------------------------------------------------------
        Output:
        xmean           : float or [n] means of the provided latent variables
        xK01            : [N] or [n*N] covariance between provided and prior latent
                          variables
        xkk             : float or [n] covariance between provided latent variables 
        -----------------------------------------------------------------------------
        '''
        xmean = xX.dot(self._beta)

        if xG0 is not None:
            xK01 = self._sig02*(xG0.dot(self._G0.T))
            if len(xG0.shape)==1:
                xkk = self._sig02 * xG0.dot(xG0) + self._sign2
            else:
                xkk = self._sig02 * dotd(xG0, xG0.T) + self._sign2
        else:
            # Without the first random effects, only noise contributes to xkk.
            if len(xX.shape)==1:
                xK01 = NP.zeros(self._N)
                xkk = self._sign2
            else:
                xK01 = NP.zeros((xX.shape[0], self._N))
                xkk = NP.full(xX.shape[0], self._sign2)

        if xG1 is not None:
            xK01 += self._sig12*(xG1.dot(self._G1.T))
            if len(xG1.shape)==1:
                xkk += self._sig12 * xG1.dot(xG1)
            else:
                xkk += self._sig12 * dotd(xG1, xG1.T)

        return (xmean, xK01, xkk)
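A hypothetical usage sketch for _xMeanCov, matching the docstring's single-query and batch shapes (the names model, Xnew, G0new, G1new are illustrative, not from the source):

# Hypothetical usage; model, Xnew, G0new, G1new are illustrative names.
mean1, K01_1, kk1 = model._xMeanCov(Xnew[0], G0new[0], G1new[0])  # float, [N], float
meanN, K01_N, kkN = model._xMeanCov(Xnew, G0new, G1new)           # [n], [n*N], [n]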
Example #4
    def _calculateSig2Mu(self, ttau, tnu, mean):
        Ssq = NP.sqrt(ttau)

        Ln = self._calculateLn(self._K, Ssq)

        LnSsq = stl(Ln, NP.diag(Ssq))
        V = dot(LnSsq, self._K)
        sig2 = self._dKn() - dotd(V.T, V)
        mu = mean + dot(self._K, tnu) - dot(V.T, dot(LnSsq, mean)) - dot(V.T, dot(V, tnu))

        return (sig2, mu)
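This follows the standard EP posterior identities (cf. Rasmussen and Williams, Algorithm 3.5): with B = I + S^(1/2) K S^(1/2), Ln = chol(B) and V = Ln^-1 S^(1/2) K, one gets Sigma = K - V'V and mu = Sigma (K^-1 m + tnu). A self-contained numeric check of both identities, under the assumption that _calculateLn returns exactly that Cholesky factor:

# Numeric check of the identities behind _calculateSig2Mu, assuming
# _calculateLn(K, Ssq) == cholesky(I + diag(Ssq) @ K @ diag(Ssq)).
import numpy as np
from scipy.linalg import solve_triangular

rng = np.random.default_rng(0)
A = rng.standard_normal((5, 5))
K = A @ A.T + 5.0 * np.eye(5)                # an SPD covariance
ttau = rng.random(5) + 0.1                   # site precisions
tnu = rng.standard_normal(5)                 # site natural means
mean = rng.standard_normal(5)

Ssq = np.sqrt(ttau)
Ln = np.linalg.cholesky(np.eye(5) + (Ssq[:, None] * K) * Ssq[None, :])
LnSsq = solve_triangular(Ln, np.diag(Ssq), lower=True)
V = LnSsq @ K
sig2 = np.diag(K) - np.sum(V * V, axis=0)    # dotd(V.T, V)
mu = mean + K @ tnu - V.T @ (LnSsq @ mean) - V.T @ (V @ tnu)

Sigma = np.linalg.inv(np.linalg.inv(K) + np.diag(ttau))
assert np.allclose(sig2, np.diag(Sigma))
assert np.allclose(mu, Sigma @ (np.linalg.solve(K, mean) + tnu))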
Example #5
    def _calculateSig2Mu(self, ttau, tnu, mean):
        sign2 = self._sign2

        V = self._calculateV(ttau, sign2)
        G01 = self._G01
    
        Lk = self._calculateLk(G01, V)
    
        G01tV = ddot(G01.T, V, left=False)
        H = stl(Lk, G01tV)

        HK = self._ldotK(H)
    
        sig2 = self._dKn() - sign2**2*V - dotd(G01, dot(dot(G01tV, G01), G01.T))\
            - 2.0*sign2*dotd(G01, G01tV) + dotd(HK.T, HK)

        assert NP.all(NP.isfinite(sig2)), 'sig2 should be finite.'

        u = self._mean + self._rdotK(tnu)
    
        mu = u - self._rdotK(V*u) + self._rdotK(H.T.dot(H.dot(u)))
        assert NP.all(NP.isfinite(mu)), 'mu should be finite.'

        return (sig2, mu)
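This variant appears to exploit a low-rank covariance K = G01 G01' + sign2*I (with G01 plausibly the column-stack of the scaled G0 and G1), expanding diag((K^-1 + diag(ttau))^-1) via the Woodbury identity so that K is never inverted densely. A numeric sketch under assumed definitions of _calculateV and _calculateLk (assumptions inferred from how they are used):

# Sketch of the low-rank identity behind this _calculateSig2Mu.  The
# definitions of V and Lk below are assumptions inferred from usage.
import numpy as np
from scipy.linalg import solve_triangular

rng = np.random.default_rng(1)
n, k = 6, 3
G01 = rng.standard_normal((n, k))      # stacked, scaled random effects
sign2 = 0.3
ttau = rng.random(n) + 0.2
K = G01 @ G01.T + sign2 * np.eye(n)

V = ttau / (1.0 + sign2 * ttau)        # assumed: diag of (ttau**-1 + sign2*I)**-1
Lk = np.linalg.cholesky(np.eye(k) + G01.T @ (V[:, None] * G01))  # assumed
G01tV = G01.T * V[None, :]             # ddot(G01.T, V, left=False)
H = solve_triangular(Lk, G01tV, lower=True)
HK = H @ K                             # self._ldotK(H)

d = np.diag(K)                         # self._dKn()
sig2 = d - sign2**2 * V \
    - np.einsum('ij,ji->i', G01, (G01tV @ G01) @ G01.T) \
    - 2.0 * sign2 * np.einsum('ij,ji->i', G01, G01tV) \
    + np.sum(HK * HK, axis=0)          # dotd(HK.T, HK)

Sigma = np.linalg.inv(np.linalg.inv(K) + np.diag(ttau))
assert np.allclose(sig2, np.diag(Sigma))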
Example #6
    def _dKn(self):
        d = self._sig02 * dotd(self._G0, self._G0.T) + self._sign2
        if self._G1 is not None:
            d += self._sig12 * dotd(self._G1, self._G1.T)
        return d
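_dKn returns the diagonal of the implied covariance without ever forming it; a small numeric check, assuming K = sig02*G0 G0' + sig12*G1 G1' + sign2*I as the other snippets suggest:

# Check that _dKn's diagonal matches the full covariance (assumed form).
import numpy as np

rng = np.random.default_rng(1)
G0, G1 = rng.standard_normal((6, 3)), rng.standard_normal((6, 2))
sig02, sig12, sign2 = 0.7, 0.4, 0.1
d = sig02 * np.sum(G0 * G0, axis=1) + sign2   # dotd(G0, G0.T) + noise
d += sig12 * np.sum(G1 * G1, axis=1)          # dotd(G1, G1.T)
K = sig02 * G0 @ G0.T + sig12 * G1 @ G1.T + sign2 * np.eye(6)
assert np.allclose(d, np.diag(K))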
Example #7
    def _rmll_gradient(self, optSig02=True, optSig12=True, optSign2=True, optBeta=True):
        self._updateConstants()
        self._updateApproximation()

        W = self._W
        Wsq = self._Wsq
        f = self._f
        K = self._K
        K0 = self._K0
        K1 = self._K1
        m = self._mean
        a = self._a
        Ln = self._Ln
        X = self._X

        LnWsq = stl(Ln, NP.diag(Wsq))
        LnWsqK = dot(LnWsq, K)

        d = self._dKn()
        h = self._likelihood.third_derivative_log(f)
        diags = (d - dotd(LnWsqK.T, LnWsqK)) * h

        ret = []

        if optSig02:
            dK0a = dot(K0, a)
            dF0 = dK0a - dot(LnWsqK.T, dot(LnWsq, dK0a))

            # The dot(a, dF0) term cancels against its own negative.
            r = 0.5*dot(a, dK0a)\
                + 0.5*NP.sum( diags*dF0 )\
                - 0.5*trace2( LnWsq.T, dot(LnWsq, K0) )

            ret.append(r)

        if optSig12:
            dK1a = dot(K1, a)
            dF1 = dK1a - dot(LnWsqK.T, dot(LnWsq, dK1a))

            r = 0.5*dot(a, dK1a)\
                + 0.5*NP.sum( diags*dF1 )\
                - 0.5*trace2( LnWsq.T, dot(LnWsq, K1) )

            ret.append(r)

        if optSign2:
            dFn = a - dot(LnWsqK.T, dot(LnWsq, a))

            r = 0.5*dot(a, a)\
                + 0.5*NP.sum( diags*dFn )\
                - 0.5*trace2( LnWsq.T, LnWsq )

            ret.append(r)

        if optBeta:
            dFmb = -dot(LnWsqK.T, dot(LnWsq, X))
            dFb = dFmb + X

            r = dot(a, dFb) - dot(a, dFmb)\
                + 0.5*NP.sum( diags*dFb.T, 1)

            ret += list(r)

        ret = NP.array(ret)
        assert NP.all(NP.isfinite(ret)), 'Gradient of the regular marginal log-likelihood is not finite.'

        return ret
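A generic way to validate a hand-derived gradient such as _rmll_gradient is central differences. This sketch assumes nothing about the class; f and g are a scalar objective and its gradient that you supply:

# Generic central-difference gradient check (f and g are user-supplied
# callables; nothing here comes from the source class).
import numpy as np

def check_grad(f, g, x, eps=1e-6):
    num = np.array([(f(x + eps * e) - f(x - eps * e)) / (2.0 * eps)
                    for e in np.eye(len(x))])
    return np.max(np.abs(num - g(x)))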
Example #8
    def _rmll_gradient(self, optSig02=True, optSig12=True, optSign2=True, optBeta=True):
        self._updateConstants()
        self._updateApproximation()

        f = self._f
        a = self._a
        W = self._W
        Lk = self._Lk

        m = self._mean
        X = self._X
        G0 = self._G0
        G1 = self._G1
        sign2 = self._sign2
        G01 = self._G01

        # Note: a equals the likelihood gradient at f,
        # i.e. a == self._likelihood.gradient_log(f).

        h = self._likelihood.third_derivative_log(f)

        V = W/self._A

        d = self._dKn()
        G01tV = ddot(G01.T, V, left=False)
        H = stl(Lk, G01tV)
        dkH = self._ldotK(H)
        diags = (d - sign2**2 * V - dotd(G01, dot(dot(G01tV, G01), G01.T))\
            - 2.0*sign2*dotd(G01, G01tV) + dotd(dkH.T, dkH)) * h

        ret = []
        
        if optSig02:
            dK0a = dot(G0, dot(G0.T, a))
            t = V*dK0a - dot(H.T, dot(H, dK0a))
            dF0 = dK0a - self._rdotK(t)

            LkG01VG0 = dot(H, G0)
            VG0 = ddot(V, G0, left=True)

            ret0 = dot(a, dF0) - 0.5*dot(a, dK0a) + dot(f-m, t)\
                + 0.5*NP.sum( diags*dF0 )\
                - 0.5*trace2(VG0, G0.T) + 0.5*trace2( LkG01VG0.T, LkG01VG0 )
            
            ret.append(ret0)

        if optSig12:
            dK1a = dot(G1, dot(G1.T, a))
            t = V*dK1a - dot(H.T, dot(H, dK1a))
            dF1 = dK1a - self._rdotK(t)

            LkG01VG1 = dot(H, G1)
            VG1 = ddot(V, G1, left=True)

            ret1 = dot(a, dF1) - 0.5*dot(a, dK1a) + dot(f-m, t)\
                + 0.5*NP.sum( diags*dF1 )\
                - 0.5*trace2(VG1, G1.T) + 0.5*trace2( LkG01VG1.T, LkG01VG1 )
            
            ret.append(ret1)

        if optSign2:
            t = V*a - dot(H.T, dot(H, a))
            dFn = a - self._rdotK(t)

            retn = dot(a, dFn) - 0.5*dot(a, a) + dot(f-m, t)\
                + 0.5*NP.sum( diags*dFn )\
                - 0.5*NP.sum(V) + 0.5*trace2( H.T, H )
            
            ret.append(retn)

        if optBeta:
            t = ddot(V, X, left=True) - dot(H.T, dot(H, X))
            dFbeta = X - self._rdotK(t)

            retbeta = dot(a, dFbeta) + dot(f-m, t)
            for i in range(dFbeta.shape[1]):
                retbeta[i] += 0.5*NP.sum( diags*dFbeta[:,i] )

            ret.extend(retbeta)

        ret = NP.array(ret)
        assert NP.all(NP.isfinite(ret)), 'Gradient of the regular marginal log-likelihood is not finite.'

        return ret
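For context, gradients like these are typically fed to a quasi-Newton optimizer. A hypothetical hook-up sketch follows; the _setParams and _rmll names are illustrative stand-ins, as the source shows neither:

# Hypothetical hook-up to scipy.optimize; _setParams and _rmll are
# illustrative stand-ins, not methods shown in the source.
from scipy.optimize import minimize

def neg_obj(theta):
    model._setParams(theta)
    return -model._rmll()

def neg_grad(theta):
    model._setParams(theta)
    return -model._rmll_gradient()

res = minimize(neg_obj, theta0, jac=neg_grad, method='L-BFGS-B')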