Example #1
 def test_sumprod(self,thr=1e-6):
     """ Test the sum of products of several mean functions.
     """
     m1 = mean.one()
     m2 = mean.linear(a=np.random.rand(self.D))
     m3 = mean.one()
     m4 = mean.linear(a=np.random.rand(self.D))
     self.run_verifications(2.3*m1+m2*1.2+m3*m4,thr)
Example #2
 def test_prodsum(self,thr=1e-6):
     """ Test the product of sums of several mean functions.
     """
     m1 = mean.one()
     m2 = mean.linear(a=np.random.rand(self.D))
     m3 = mean.one()
     m4 = mean.linear(a=np.random.rand(self.D))
     self.run_verifications((2.3+m1)*(m2+1.2)*(m3+m4),thr)
Example #3
 def test_one(self,thr=1e-6):
     """ Test one mean.
     """
     n,D,X = self.n,self.D,self.X
     m = mean.one()
     self.run_verifications(m,thr)
     d = np.linalg.norm( m(X)-1.0 )
     self.assertLessEqual(d,thr)
Example #4
 def test_pow(self,thr=1e-6):
     """ Test power of a mean function.
     """
     n,D,X = self.n,self.D,self.X
     m1 = mean.one()
     m2 = mean.linear(a=np.random.rand(self.D))
     m = 1.2*m1 * m2**2
     d = np.linalg.norm( 1.2*m1(X)*m2(X)**2 - m(X) )
     self.assertLessEqual(d,thr)
     self.run_verifications(m,thr)
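The tests above rely on the arithmetic operators (+, *, **) being overloaded on mean objects. A minimal usage sketch of that algebra, assuming the mean module from this code base is importable as mean (as in the tests):

    import numpy as np
    import mean                                   # mean-function module exercised by these tests (assumed importable)

    D = 3
    X = np.random.rand(5, D)                      # five random D-dimensional inputs
    m = 2.3*mean.one() + mean.linear(a=np.random.rand(D))**2
    fx = m(X)                                     # evaluate the composite mean function at X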
Example #5
 def test_prod(self,thr=1e-6):
     """ Test the product of several mean functions.
     """
     n,D,X = self.n,self.D,self.X
     m1 = mean.one()
     m2 = mean.linear(a=np.random.rand(self.D))
     m = m1*m2*1.7
     d = np.linalg.norm( m1(X)*m2(X)*1.7 - m(X) )
     self.assertLessEqual(d,thr)
     self.run_verifications(m,thr)
Example #6
 def test_sum(self,thr=1e-6):
     """ Test the sum of several meanfunctions.
     """
     n,D,X = self.n,self.D,self.X
     m1 = mean.one()
     m2 = mean.linear(a=np.random.rand(self.D))
     m = m1+m2+1.2
     d = np.linalg.norm( m1(X)+m2(X)+1.2 - m(X) )
     self.assertLessEqual(d,thr)
     self.run_verifications(m,thr)
Example #7
 def test_ep(self,thr=1e-4):
     """ Test expectation propagation approximate inference.
     """
     im = inf.ep
     cf,mf,lf = 1.1*cov.se(ell=0.8),mean.zero(),lik.erf()
     self.run_verifications(im,cf,mf,lf,thr)
     lf = lik.gauss(sn=0.2)
     self.run_verifications(im,cf,mf,lf,thr)
     mf = 2.3*mean.one()
     self.run_verifications(im,cf,mf,lf,thr)
     cf = 1.3*cov.se(ell=[0.8,0.7,0.3])
     self.run_verifications(im,cf,mf,lf,thr)
     mf = 1.3*mean.linear(a=[0.8,0.7,0.3])
     self.run_verifications(im,cf,mf,lf,thr)
Example #8
 def test_exact(self,thr=1e-4):
     """ Test exact inference.
     """
     im = inf.exact
     cf,mf,lf = 1.1*cov.se(ell=0.8),mean.zero(),lik.gauss(sn=1e-5)
     self.run_verifications(im,cf,mf,lf,thr)
     lf = lik.gauss(sn=0.2)
     self.run_verifications(im,cf,mf,lf,thr)
     mf = 2.3*mean.one()
     self.run_verifications(im,cf,mf,lf,thr)
     cf = 1.3*cov.se(ell=[0.8,0.7,0.3])
     self.run_verifications(im,cf,mf,lf,thr)
     mf = 1.3*mean.linear(a=[0.8,0.7,0.3])
     self.run_verifications(im,cf,mf,lf,thr)
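The two inference tests above combine covariance, mean and likelihood objects before handing them to an inference routine via run_verifications. A minimal construction sketch, assuming the cov, mean, lik and inf modules used by these tests are importable:

    import cov, mean, lik, inf                    # modules exercised by the tests above (assumed importable)

    cf = 1.3*cov.se(ell=[0.8, 0.7, 0.3])          # scaled squared-exponential covariance with per-dimension lengthscales
    mf = 1.3*mean.linear(a=[0.8, 0.7, 0.3])       # scaled linear mean
    lf = lik.gauss(sn=0.2)                        # Gaussian likelihood with noise parameter sn=0.2
    im = inf.exact                                # exact inference routine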
Example #9
            kss = self.cov(Xs[idx], diag=True)  # self variance
            Ks = self.cov(Xs[idx], X)  # cross-covariances
            ms = self.mean(Xs[idx])

            al, sW, L, C = self.post.alpha, self.post.sW, self.post.L, self.post.C
            fmu[idx] = ms + np.dot(Ks, al)
            if L is None:  # no Cholesky factor: assume post.C stores the alternative parametrisation
                fs2[idx] = kss + np.sum(Ks * np.dot(Ks, C), axis=1)
            else:
                V = np.linalg.solve(L, sW * Ks.T)
                fs2[idx] = kss - np.sum(V * V, axis=0)
            if np.isscalar(ys) and ys == 0: yi = 0  # no test targets supplied (0 used as a sentinel)
            else: yi = ys[idx]
            lp[idx], ymu[idx], ys2[idx] = self.lik.pred(yi, fmu[idx], fs2[idx])
            na += nb

        return fmu, fs2, ymu, ys2, lp


if __name__ == "__main__":

    def f(x):
        return x * np.sin(x)

    X = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T
    y = f(X).ravel()
    Xs = np.atleast_2d(np.linspace(0, 10, 2007)).T
    from gp import GaussianProcess as GP
    from mean import one                          # constant mean function (assumed to live in the mean module)
    model = GP(mean=1.0 * one())                  # GP prior with a constant mean of 1.0
    post, nlZ, dnlZ = model.inference(X, y, deriv=True)
    fmu, fs2, ymu, ys2, lp = model.predict(X, y, Xs)
Example #10
        ymu,ys2 = np.zeros(ns),np.zeros(ns)
        lp = np.zeros(ns)
        while na<ns:
            idx = np.arange(na,min(na+nb,ns))
            kss = self.cov(Xs[idx],diag=True)                   # self variance
            Ks = self.cov(Xs[idx],X)                        # cross-covariances
            ms = self.mean(Xs[idx])
            
            al,sW,L,C = self.post.alpha,self.post.sW,self.post.L,self.post.C
            fmu[idx] = ms + np.dot(Ks,al)
            if L is None:               # no Cholesky factor: assume post.C stores the alternative parametrisation
                fs2[idx] = kss + np.sum(Ks*np.dot(Ks,C),axis=1)
            else:
                V = np.linalg.solve(L,sW*Ks.T)
                fs2[idx] = kss - np.sum(V*V,axis=0)
            if np.isscalar(ys) and ys==0: yi = 0    # no test targets supplied (0 used as a sentinel)
            else:                         yi = ys[idx]
            lp[idx],ymu[idx],ys2[idx] = self.lik.pred(yi,fmu[idx],fs2[idx])
            na += nb

        return fmu,fs2,ymu,ys2,lp

if __name__ == "__main__":
    def f(x): return x * np.sin(x)
    X = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T
    y = f(X).ravel()
    Xs = np.atleast_2d(np.linspace(0, 10, 2007)).T
    from gp import GaussianProcess as GP
    from mean import one                      # constant mean function (assumed to live in the mean module)
    model = GP(mean=1.0*one())                # GP prior with a constant mean of 1.0
    post,nlZ,dnlZ = model.inference(X,y,deriv=True)
    fmu,fs2,ymu,ys2,lp = model.predict(X,y,Xs)
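The predict call returns per-point predictive means (ymu) and variances (ys2). A common follow-up, sketched here under the assumption of Gaussian predictive marginals, is to form a 95% interval:

    lower = ymu - 1.96*np.sqrt(ys2)           # lower bound of the 95% predictive interval
    upper = ymu + 1.96*np.sqrt(ys2)           # upper bound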