def testShekelGPPrior(self):
    # see how the GP works on the Shekel function
    S5 = Shekel5()

    # train the RBFN mean prior on 100 Latin hypercube samples of Shekel-5
    pX = lhcSample(S5.bounds, 100, seed=8)
    pY = [S5.f(x) for x in pX]
    prior = RBFNMeanPrior()
    prior.train(pX, pY, S5.bounds, k=10, seed=103)

    # build two GPs on the same 10 samples: one with the prior, one without
    hv = .1
    hyper = [hv, hv, hv, hv]
    gkernel = GaussianKernel_ard(hyper)
    X = lhcSample(S5.bounds, 10, seed=9)
    Y = [S5.f(x) for x in X]
    priorGP = GaussianProcess(gkernel, X, Y, prior=prior)
    nopriorGP = GaussianProcess(gkernel, X, Y, prior=None)

    # the prior should cut the held-out MSE by at least 20%
    S = lhcSample(S5.bounds, 1000, seed=10)
    nopriorErr = mean([(S5.f(x) - nopriorGP.mu(x))**2 for x in S])
    priorErr = mean([(S5.f(x) - priorGP.mu(x))**2 for x in S])
    # print '\nno prior Err =', nopriorErr
    # print 'prior Err =', priorErr
    self.failUnless(priorErr < nopriorErr * .8)
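# For reference: `Shekel5` is defined elsewhere in this package.  A minimal
# sketch, assuming it wraps the standard 4-dimensional Shekel function with
# m=5 optima (the usual A and c parameters from the global-optimization
# literature); the real class may differ in naming and sign convention.
class Shekel5Sketch(object):

    A = array([[4., 4., 4., 4.],
               [1., 1., 1., 1.],
               [8., 8., 8., 8.],
               [6., 6., 6., 6.],
               [3., 7., 3., 7.]])
    c = array([.1, .2, .2, .4, .4])
    bounds = [[0., 10.]] * 4

    def f(self, x):
        # f(x) = -sum_i 1 / (||x - a_i||^2 + c_i)
        x = array(x)
        return -sum(1. / (((x - self.A)**2).sum(axis=1) + self.c))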
def testRBFN_1D(self):
    # sample from a synthetic function and see how much we improve the
    # error by using the prior function
    def foo(x):
        return sum(sin(x * 20))

    X = lhcSample([[0., 1.]], 50, seed=3)
    Y = [foo(x) for x in X]
    prior = RBFNMeanPrior()
    prior.train(X, Y, [[0., 1.]], k=10, seed=100)

    # See how well we fit the function by getting the mean squared error
    # over 100 samples of the function.  Baseline foo(x)=0 MSE is 0.48.
    # We will aim for MSE < 0.05.
    S = arange(0, 1, .01)
    error = mean([(foo(x) - prior.mu(x))**2 for x in S])
    self.failUnless(error < 0.05)

    # for debugging
    if False:
        figure(1)
        plot(S, [foo(x) for x in S], 'b-')
        plot(S, [prior.mu(x) for x in S], 'k-')
        show()
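# A minimal sketch of the idea behind `RBFNMeanPrior` (the real class is
# defined elsewhere and its internals may differ): fit a radial basis
# function network to (X, Y) and use its prediction as the GP mean.  The
# width heuristic and the use of `seed` to pick centers are assumptions,
# and this relies on the module's assumed `from numpy import *`.
class RBFNMeanPriorSketch(object):

    def train(self, X, Y, bounds, k=10, seed=None):
        random.seed(seed)
        X = array(X)
        # choose k training points as RBF centers
        idx = random.permutation(len(X))[:k]
        self.centers = X[idx]
        # width heuristic: a fraction of the design-space diagonal (assumed)
        diag = sqrt(sum((array(bounds)[:, 1] - array(bounds)[:, 0])**2))
        self.width = diag / k
        # solve a linear least-squares problem for the RBF weights
        Phi = self._phi(X)
        self.w = linalg.lstsq(Phi, array(Y))[0]

    def _phi(self, X):
        # N x k matrix of Gaussian RBF activations
        d2 = ((array(X)[:, newaxis, :] - self.centers)**2).sum(axis=2)
        return exp(-d2 / (2 * self.width**2))

    def mu(self, x):
        return dot(self._phi([x])[0], self.w)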
def testRBFN_10D(self):
    # as above, but with a 10D test function and more data
    def foo(x):
        return sum(sin(x * 2))

    bounds = [[0., 1.]] * 10
    X = lhcSample(bounds, 100, seed=4)
    Y = [foo(x) for x in X]
    prior = RBFNMeanPrior()
    prior.train(X, Y, bounds, k=20, seed=5)

    # the RBFN should beat the trivial foo(x)=0 baseline on held-out samples
    S = lhcSample(bounds, 100, seed=6)
    RBFNError = mean([(foo(x) - prior.mu(x))**2 for x in S])
    baselineError = mean([foo(x)**2 for x in S])
    # print '\nRBFN err =', RBFNError
    # print 'baseline =', baselineError
    self.failUnless(RBFNError < baselineError)
def testGPPrior(self):
    # see how the GP works with the data prior...
    def foo(x):
        return sum(sin(x * 20))

    bounds = [[0., 1.]]

    # train the prior on dense samples of foo
    pX = lhcSample([[0., 1.]], 100, seed=6)
    pY = [foo(x) for x in pX]
    prior = RBFNMeanPrior()
    prior.train(pX, pY, bounds, k=10, seed=102)

    # the GPs themselves see only two samples
    X = lhcSample([[0., 1.]], 2, seed=7)
    Y = [foo(x) for x in X]
    kernel = GaussianKernel_ard(array([.1]))
    GP = GaussianProcess(kernel, X, Y, prior=prior)
    GPnoprior = GaussianProcess(kernel, X, Y)

    # with so little data, the prior should cut the MSE by at least half
    S = arange(0, 1, .01)
    nopriorErr = mean([(foo(x) - GPnoprior.mu(x))**2 for x in S])
    priorErr = mean([(foo(x) - GP.mu(x))**2 for x in S])
    # print '\nno prior Err =', nopriorErr
    # print 'prior Err =', priorErr
    self.failUnless(priorErr < nopriorErr * .5)

    # for debugging
    if False:
        figure(1)
        clf()
        plot(S, [prior.mu(x) for x in S], 'g-', alpha=0.3)
        plot(S, [GPnoprior.mu(x) for x in S], 'b-', alpha=0.3)
        plot(S, [GP.mu(x) for x in S], 'k-', lw=2)
        plot(X, Y, 'ko')
        show()
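# What the tests above compare, in one place: with a mean prior m(.), the
# GP posterior mean is
#     mu(x) = m(x) + k(x)^T K^{-1} (y - m(X))
# and prior=None corresponds to m == 0.  A minimal noise-free sketch (the
# real GaussianProcess may regularize K or handle noise differently, and
# `kernel.cov(a, b)` is a hypothetical covariance call, not necessarily
# GaussianKernel_ard's actual method name):
def gp_posterior_mean_sketch(kernel, X, Y, x, prior=None):
    m = prior.mu if prior is not None else (lambda z: 0.)
    K = array([[kernel.cov(a, b) for b in X] for a in X])
    kx = array([kernel.cov(x, a) for a in X])
    resid = array([y - m(a) for a, y in zip(X, Y)])
    return m(x) + dot(kx, linalg.solve(K, resid))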