Example 1
    def testRBFN_1D(self):
        
        # sample from a synthetic function and see how much we improve the
        # error by using the prior function
        def foo(x):
            return sum(sin(x*20))
            
        X = lhcSample([[0., 1.]], 50, seed=3)
        Y = [foo(x) for x in X]
        
        prior = RBFNMeanPrior()
        prior.train(X, Y, [[0., 1.]], k=10, seed=100)
        
        # See how well we fit the function by getting the average squared error
        # over 100 samples of the function.  Baseline foo(x)=0 MSE is 0.48.
        # We will aim for MSE < 0.05.
        S = arange(0, 1, .01)
        error = mean([(foo(x)-prior.mu(x))**2 for x in S])
        self.failUnless(error < 0.05)

        # for debugging
        if False:
            figure(1)
            plot(S, [foo(x) for x in S], 'b-')
            plot(S, [prior.mu(x) for x in S], 'k-')
            show()
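For intuition about what RBFNMeanPrior is fitting: a radial basis function network models the mean as a weighted sum of Gaussian bumps centered on a subset of the training points. The sketch below is a hypothetical minimal version of that idea, not the library's implementation; the center selection and width heuristic here are assumptions.

    import numpy as np

    class ToyRBFN(object):
        # mu(x) = sum_i w_i * exp(-||x - c_i||**2 / (2 * s**2))
        def train(self, X, Y, k=10, seed=0):
            rng = np.random.RandomState(seed)
            X, Y = np.asarray(X), np.asarray(Y)
            self.centers = X[rng.permutation(len(X))[:k]]  # k centers drawn from the data
            d = np.linalg.norm(X[:, None] - self.centers[None], axis=2)
            self.s = np.median(d) + 1e-8                   # crude global width heuristic
            Phi = np.exp(-d**2 / (2 * self.s**2))
            reg = 1e-8 * np.eye(len(self.centers))         # ridge term for stability
            self.w = np.linalg.solve(np.dot(Phi.T, Phi) + reg, np.dot(Phi.T, Y))

        def mu(self, x):
            d = np.linalg.norm(self.centers - np.asarray(x), axis=1)
            return float(np.dot(self.w, np.exp(-d**2 / (2 * self.s**2))))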
Example 2
    def testShekelGPPrior(self):
        
        # see how the GP works on the Shekel function
        S5 = Shekel5()

        pX = lhcSample(S5.bounds, 100, seed=8)
        pY = [S5.f(x) for x in pX]
        prior = RBFNMeanPrior()
        prior.train(pX, pY, S5.bounds, k=10, seed=103)
        
        X = lhcSample(S5.bounds, 10, seed=9)
        Y = [S5.f(x) for x in X]

        hv = .1
        hyper = [hv, hv, hv, hv]
        gkernel = GaussianKernel_ard(hyper)
        priorGP = GaussianProcess(gkernel, X, Y, prior=prior)
        nopriorGP = GaussianProcess(gkernel, X, Y)
        
        S = lhcSample(S5.bounds, 1000, seed=10)
        nopriorErr = mean([(S5.f(x)-nopriorGP.mu(x))**2 for x in S])
        priorErr = mean([(S5.f(x)-priorGP.mu(x))**2 for x in S])
        
        self.failUnless(priorErr < nopriorErr*.8)
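Shekel5 here is the classic four-dimensional Shekel benchmark with five local optima. For reference, the textbook form is sketched below; the constants are the standard published ones, but the sign convention and bounds used by this library's Shekel5 class are assumptions.

    import numpy as np

    A = np.array([[4., 4., 4., 4.],
                  [1., 1., 1., 1.],
                  [8., 8., 8., 8.],
                  [6., 6., 6., 6.],
                  [3., 7., 3., 7.]])
    c = np.array([.1, .2, .2, .4, .4])

    def shekel5(x):
        # f(x) = -sum_i 1 / (||x - a_i||^2 + c_i), usually evaluated on [0, 10]^4
        x = np.asarray(x)
        return -np.sum(1.0 / (np.sum((x - A)**2, axis=1) + c))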
Example 3
    def testPriorAndPrefs(self):

        S5 = Shekel5()

        pX = lhcSample(S5.bounds, 100, seed=13)
        pY = [S5.f(x) for x in pX]
        prior = RBFNMeanPrior()
        prior.train(pX, pY, S5.bounds, k=10)

        hv = .1
        hyper = [hv, hv, hv, hv]
        gkernel = GaussianKernel_ard(hyper)
        GP = PrefGaussianProcess(gkernel, prior=prior)

        X = [array([i+.5]*4) for i in xrange(5)]
        valX = [x.copy() for x in X]

        prefs = []
        for i in xrange(len(X)):
            for j in xrange(i):
                if S5.f(X[i]) > S5.f(X[j]):
                    prefs.append((X[i], X[j], 0))
                else:
                    prefs.append((X[j], X[i], 0))

        GP.addPreferences(prefs)
        opt, optx = maximizeEI(GP, S5.bounds)
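PrefGaussianProcess learns from pairwise comparisons instead of raw function values: the nested loop above enumerates every pair and records the preferred (higher-valued) point first. Factored out as a hypothetical helper (the meaning of the third tuple element, 0 in this test, is assumed to be a default degree of preference):

    def make_prefs(points, f):
        # all pairs (i, j) with i > j, preferred point first
        prefs = []
        for i in xrange(len(points)):
            for j in xrange(i):
                if f(points[i]) > f(points[j]):
                    prefs.append((points[i], points[j], 0))
                else:
                    prefs.append((points[j], points[i], 0))
        return prefs

With it, the loop in the test body would reduce to prefs = make_prefs(X, S5.f).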
Example 4
    def _testKernelMaxEI(self):

        # test different methods of optimizing kernel
        S5 = Shekel5()

        hv = 0.1
        testkernels = [
            GaussianKernel_iso([hv]),
            GaussianKernel_ard([hv, hv, hv, hv]),
            MaternKernel3([hv, 1.0]),
            # MaternKernel5([hv, 1.0]),
        ]

        for kernel in testkernels:
            # train GPs
            X = lhcSample(S5.bounds, 10, seed=0)
            Y = [S5.f(x) for x in X]

            GP = GaussianProcess(kernel, X, Y)

            eif = EI(GP)
            dopt, doptx = direct(eif.negf, S5.bounds, maxiter=10)
            copt, coptx = cdirect(eif.negf, S5.bounds, maxiter=10)
            mopt, moptx = maximizeEI(GP, S5.bounds, maxiter=10)

            self.failUnlessAlmostEqual(dopt, copt, 4)
            self.failUnlessAlmostEqual(-dopt, mopt, 4)
            self.failUnlessAlmostEqual(-copt, mopt, 4)

            self.failUnless(sum(abs(doptx - coptx)) < .01)
            self.failUnless(sum(abs(moptx - coptx)) < .01)
            self.failUnless(sum(abs(moptx - doptx)) < .01)

            # train GP w/prior
            pX = lhcSample(S5.bounds, 100, seed=101)
            pY = [S5.f(x) for x in pX]
            prior = RBFNMeanPrior()
            prior.train(pX, pY, bounds=S5.bounds, k=10, seed=102)

            GP = GaussianProcess(kernel, X, Y, prior=prior)

            eif = EI(GP)
            pdopt, pdoptx = direct(eif.negf, S5.bounds, maxiter=10)
            pcopt, pcoptx = cdirect(eif.negf, S5.bounds, maxiter=10)
            pmopt, pmoptx = maximizeEI(GP, S5.bounds, maxiter=10)

            self.failIfAlmostEqual(pdopt, dopt, 3)
            self.failUnlessAlmostEqual(pdopt, pcopt, 4)
            self.failUnlessAlmostEqual(-pdopt, pmopt, 4)
            self.failUnlessAlmostEqual(-pcopt, pmopt, 4)

            self.failUnless(sum(abs(pdoptx - pcoptx)) < .01)
            self.failUnless(sum(abs(pmoptx - pcoptx)) < .01)
            self.failUnless(sum(abs(pmoptx - pdoptx)) < .01)
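The sign flips in the assertions come from the optimizers' conventions: direct and cdirect minimize eif.negf (negative EI), while maximizeEI reports the maximum of EI itself, so -dopt, -copt, and mopt should coincide. For reference, expected improvement for maximization has the standard closed form below; this is a generic sketch, not the library's EI class.

    import numpy as np
    from scipy.stats import norm

    def expected_improvement(mu, sigma, ybest):
        # EI(x) = (mu - ybest) * Phi(z) + sigma * phi(z),  z = (mu - ybest) / sigma
        if sigma <= 0:
            return max(mu - ybest, 0.0)
        z = (mu - ybest) / sigma
        return (mu - ybest) * norm.cdf(z) + sigma * norm.pdf(z)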
Example 5
    def testMaxEIPrior(self):

        # make sure that the prior works with the different methods of EI
        # maximization
        
        S5 = Shekel5()
        pX = lhcSample(S5.bounds, 100, seed=511)
        pY = [S5.f(x) for x in pX]
        prior = RBFNMeanPrior()
        prior.train(pX, pY, bounds=S5.bounds, k=10, seed=504)
        
        hv = .1
        hyper = [hv, hv, hv, hv]
        kernel = GaussianKernel_ard(hyper)
        
        # train GPs
        X = lhcSample(S5.bounds, 10, seed=512)
        Y = [S5.f(x) for x in X]
        
        # validation
        valX = list(x.copy() for x in X)
        valY = copy(Y)
        
        GP = GaussianProcess(kernel, X, Y, prior=prior)
        
        eif = EI(GP)
        copt, _ = cdirect(eif.negf, S5.bounds, maxiter=20)
        mopt, _ = maximizeEI(GP, S5.bounds, maxiter=20)

        self.failUnlessAlmostEqual(-copt, mopt, 2)
        
        # calling the prior must not mutate the GP's stored data
        for i in xrange(len(GP.X)):
            self.failUnless(all(valX[i]==GP.X[i]))
            self.failUnless(valY[i]==GP.Y[i])

        GP.prior.mu(GP.X[0])
        self.failUnless(all(valX[0]==GP.X[0]))

        # repeat the check: a second mu() call must also leave GP.X intact
        for i in xrange(len(GP.X)):
            self.failUnless(all(valX[i]==GP.X[i]))
            self.failUnless(valY[i]==GP.Y[i])

        GP.prior.mu(GP.X[0])
        self.failUnless(all(valX[0]==GP.X[0]))
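The second half of this test is a side-effect check: prior.mu must not modify the arrays stored in GP.X or GP.Y, which is why the inputs are snapshotted into valX/valY up front and re-compared after each mu() call. The same snapshot-call-compare pattern in isolation:

    # snapshot inputs, call the function under test, verify nothing changed
    before = [x.copy() for x in GP.X]
    GP.prior.mu(GP.X[0])
    assert all((a == b).all() for a, b in zip(before, GP.X))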
Example 6
    def testRNFN_10D(self):

        # as above, but with a 10D test function and more data
        def foo(x):
            return sum(sin(x * 2))

        bounds = [[0., 1.]] * 10
        X = lhcSample(bounds, 100, seed=4)
        Y = [foo(x) for x in X]

        prior = RBFNMeanPrior()
        prior.train(X, Y, bounds, k=20, seed=5)

        S = lhcSample(bounds, 100, seed=6)
        RBNError = mean([(foo(x) - prior.mu(x))**2 for x in S])
        baselineError = mean([foo(x)**2 for x in S])

        self.failUnless(RBNError < baselineError)
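lhcSample draws a Latin hypercube sample: each dimension is cut into n equal strata and every stratum is hit exactly once, which covers the box far more evenly than n independent uniform draws. A minimal hypothetical version (the real lhcSample's signature, [low, high] bounds per dimension plus a seed, is inferred from the calls above):

    import numpy as np

    def lhc_sample(bounds, n, seed=None):
        rng = np.random.RandomState(seed)
        d = len(bounds)
        # one uniform draw per stratum, strata shuffled independently per dimension
        u = (rng.rand(n, d) + np.arange(n)[:, None]) / n
        for j in xrange(d):
            rng.shuffle(u[:, j])
        lo = np.array([b[0] for b in bounds])
        hi = np.array([b[1] for b in bounds])
        return [lo + row * (hi - lo) for row in u]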
Example 7
    def testGPPrior(self):
        
        # see how the GP works with the data prior...
        def foo(x):
            return sum(sin(x*20))
        
        bounds = [[0., 1.]]
        # train prior
        pX = lhcSample([[0., 1.]], 100, seed=6)
        pY = [foo(x) for x in pX]
        prior = RBFNMeanPrior()
        prior.train(pX, pY, bounds, k=10, seed=102)
        
        X = lhcSample([[0., 1.]], 2, seed=7)
        Y = [foo(x) for x in X]
        
        kernel = GaussianKernel_ard(array([.1]))
        GP = GaussianProcess(kernel, X, Y, prior=prior)
        GPnoprior = GaussianProcess(kernel, X, Y)

        S = arange(0, 1, .01)

        nopriorErr = mean([(foo(x)-GPnoprior.mu(x))**2 for x in S])
        priorErr = mean([(foo(x)-GP.mu(x))**2 for x in S])
        
        
        self.failUnless(priorErr < nopriorErr*.5)
        
        if False:
            figure(1)
            clf()
            plot(S, [prior.mu(x) for x in S], 'g-', alpha=0.3)
            plot(S, [GPnoprior.mu(x) for x in S], 'b-', alpha=0.3)
            plot(S, [GP.mu(x) for x in S], 'k-', lw=2)
            plot(X, Y, 'ko')
            show()
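Why the prior helps so much with only two observations: a GP with a nonzero mean function regresses on the residuals Y - m(X) and adds m(x) back at prediction time, so away from the data the posterior mean falls back to the prior instead of to zero. Schematically (a generic formulation, not this GaussianProcess class's internals):

    import numpy as np

    def gp_posterior_mean(kernel, X, Y, m, x, noise=1e-6):
        # mu(x) = m(x) + k(x)^T (K + noise*I)^{-1} (Y - m(X))
        K = np.array([[kernel(a, b) for b in X] for a in X]) + noise * np.eye(len(X))
        kx = np.array([kernel(x, a) for a in X])
        resid = np.array(Y) - np.array([m(a) for a in X])
        return m(x) + np.dot(kx, np.linalg.solve(K, resid))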