Example #1
0
    def testTraining(self):
        """Incremental training must reproduce batch training exactly.

        Builds four GPs over the same 25 Shekel-5 samples -- one trained
        in batch, three trained incrementally in different chunkings --
        and checks that their covariance matrices and posteriors agree.
        """
        objective = Shekel5()
        X = lhcSample(objective.bounds, 25, seed=1)
        Y = [objective.f(p) for p in X]

        # batch: all 25 samples supplied to the constructor
        GP1 = GaussianProcess(GaussianKernel_iso([.1]), X, Y, noise=.2)

        # incremental: one sample per addData() call
        GP2 = GaussianProcess(GaussianKernel_iso([.1]), noise=.2)
        for xi, yi in zip(X, Y):
            GP2.addData(xi, yi)

        # chunked: five samples per addData() call
        GP3 = GaussianProcess(GaussianKernel_iso([.1]), noise=.2)
        for start in xrange(0, 25, 5):
            GP3.addData(X[start:start + 5], Y[start:start + 5])

        # mixed: ten at construction, then a single, a slice, and singles
        GP4 = GaussianProcess(GaussianKernel_iso([.1]), X[:10], Y[:10],
                              noise=.2)
        GP4.addData(X[10], Y[10])
        GP4.addData(X[11:18], Y[11:18])
        for j in xrange(18, 25):
            GP4.addData(X[j], Y[j])

        # the covariance matrices must be identical...
        for other in (GP2, GP3, GP4):
            self.failUnless(all(GP1.R == other.R))

        # ...and so must the posteriors at held-out points
        for x in lhcSample(objective.bounds, 25, seed=2):
            mu1, s1 = GP1.posterior(x)
            for other in (GP2, GP3, GP4):
                mu, s = other.posterior(x)
                self.failUnlessEqual(mu1, mu)
                self.failUnlessEqual(s1, s)
Example #2
0
 def testTraining(self):
     """Adding data sequentially must give the same GP as batch training."""
     func = Shekel5()
     inputs = lhcSample(func.bounds, 25, seed=1)
     targets = [func.f(p) for p in inputs]

     # batch: everything passed to the constructor at once
     batch = GaussianProcess(GaussianKernel_iso([.1]), inputs, targets,
                             noise=.2)

     # one point at a time
     single = GaussianProcess(GaussianKernel_iso([.1]), noise=.2)

     # five points per addData() call
     chunked = GaussianProcess(GaussianKernel_iso([.1]), noise=.2)

     # a mixture of construction-time data, single points and slices
     mixed = GaussianProcess(GaussianKernel_iso([.1]), inputs[:10],
                             targets[:10], noise=.2)

     for p, t in zip(inputs, targets):
         single.addData(p, t)

     for lo in xrange(0, 25, 5):
         chunked.addData(inputs[lo:lo + 5], targets[lo:lo + 5])

     mixed.addData(inputs[10], targets[10])
     mixed.addData(inputs[11:18], targets[11:18])
     for k in xrange(18, 25):
         mixed.addData(inputs[k], targets[k])

     # identical covariance matrices regardless of how data was added
     self.failUnless(all(batch.R == single.R))
     self.failUnless(all(batch.R == chunked.R))
     self.failUnless(all(batch.R == mixed.R))

     # identical posteriors at unseen points
     for q in lhcSample(func.bounds, 25, seed=2):
         mu_b, s_b = batch.posterior(q)
         for gp in (single, chunked, mixed):
             mu, s = gp.posterior(q)
             self.failUnlessEqual(mu_b, mu)
             self.failUnlessEqual(s_b, s)
Example #3
0
    def testShekelClass(self):
        """Train an ARD-kernel GP on Shekel-5 and sanity-check its fit.

        First verifies that at the training samples the posterior variance
        is bounded by the noise level and the mean tracks the targets, then
        checks predictions at held-out points.
        """
        S = Shekel5()

        # get 50 latin hypercube samples
        X = lhcSample(S.bounds, 50, seed=2)
        Y = [S.f(x) for x in X]

        hyper = [.2, .2, .2, .2]
        noise = 0.1

        gkernel = GaussianKernel_ard(hyper)
        GP = GaussianProcess(gkernel, X, Y, noise=noise)

        # at the training samples, variance should be determined by the
        # noise level and the posterior mean should be close to the targets
        mu, sig2 = GP.posteriors(X)
        for m, s, y in zip(mu, sig2, Y):
            self.failUnless(s < 1 / (1 + noise))
            self.failUnless(abs(m - y) < 2 * noise)

        # held-out samples: prediction should be within one stdev of truth.
        # BUG FIX: the target must be evaluated at the *test* inputs --
        # the original computed [S.f(x) for x in X] and zipped it against
        # testX, comparing predictions to values of unrelated points.
        testX = lhcSample(S.bounds, 50, seed=3)
        testY = [S.f(x) for x in testX]
        for tx, ty in zip(testX, testY):
            m, s = GP.posterior(tx)
            self.failUnless(abs(ty - m) / sqrt(s) < 1)
Example #4
0
 def testShekelClass(self):
     """Train an ARD-kernel GP on Shekel-5 and sanity-check its fit.

     Checks that posterior variance at the training samples is bounded by
     the noise level, that the posterior mean tracks the targets, and that
     predictions at held-out points fall within one stdev of the truth.
     """
     S = Shekel5()

     # get 50 latin hypercube samples
     X = lhcSample(S.bounds, 50, seed=2)
     Y = [S.f(x) for x in X]

     hyper = [.2, .2, .2, .2]
     noise = 0.1

     gkernel = GaussianKernel_ard(hyper)
     GP = GaussianProcess(gkernel, X, Y, noise=noise)

     # variance at the training samples is determined by noise, and the
     # posterior mean should be close to the training targets
     mu, sig2 = GP.posteriors(X)
     for m, s, y in zip(mu, sig2, Y):
         self.failUnless(s < 1 / (1 + noise))
         self.failUnless(abs(m - y) < 2 * noise)

     # held-out samples: prediction should be within one stdev of truth.
     # BUG FIX: evaluate the target function at the *test* inputs -- the
     # original used [S.f(x) for x in X] (the training inputs) and zipped
     # it against testX, so the comparison was meaningless.
     testX = lhcSample(S.bounds, 50, seed=3)
     testY = [S.f(x) for x in testX]
     for tx, ty in zip(testX, testY):
         m, s = GP.posterior(tx)
         self.failUnless(abs(ty - m) / sqrt(s) < 1)