Example #1
    def testShekelClass(self):

        S = Shekel5()

        # get 50 latin hypercube samples
        X = lhcSample(S.bounds, 50, seed=2)
        Y = [S.f(x) for x in X]

        hyper = [.2, .2, .2, .2]
        noise = 0.1

        gkernel = GaussianKernel_ard(hyper)
        # print gkernel.sf2
        GP = GaussianProcess(gkernel, X, Y, noise=noise)

        # let's take a look at the trained GP.  first, make sure variance at
        # the samples is determined by noise
        mu, sig2 = GP.posteriors(X)
        for m, s, y in zip(mu, sig2, Y):
            # print m, s
            self.failUnless(s < 1 / (1 + noise))
            self.failUnless(abs(m - y) < 2 * noise)

        # now get some test samples and see how well we are fitting the function
        testX = lhcSample(S.bounds, 50, seed=3)
        testY = [S.f(x) for x in testX]
        for tx, ty in zip(testX, testY):
            m, s = GP.posterior(tx)
            # the true value should be within one stdev of the predicted mean
            self.failUnless(abs(ty - m) / sqrt(s) < 1)
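lhcSample is used throughout these examples but never shown. For reference, a minimal Latin hypercube sampler with the same assumed signature (bounds, number of samples, seed) could look like the numpy sketch below; the library's actual implementation may differ.

    import numpy as np

    def lhc_sample(bounds, n, seed=None):
        # One sample per stratum: permute the n strata independently in each
        # dimension, then jitter uniformly within each stratum.
        rng = np.random.RandomState(seed)
        d = len(bounds)
        strata = np.array([rng.permutation(n) for _ in range(d)]).T  # (n, d)
        u = (strata + rng.rand(n, d)) / float(n)                     # in [0, 1)
        lo = np.array([b[0] for b in bounds])
        hi = np.array([b[1] for b in bounds])
        return lo + u * (hi - lo)  # n points, each of dimension d

Each coordinate lands in a distinct one-of-n stratum, which is why a modest number of samples already covers S.bounds fairly evenly.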
Example #2
    def testShekelGPPrior(self):

        # see how the GP works on the Shekel function
        S5 = Shekel5()

        pX = lhcSample(S5.bounds, 100, seed=8)
        pY = [S5.f(x) for x in pX]
        prior = RBFNMeanPrior()
        prior.train(pX, pY, S5.bounds, k=10, seed=103)

        hv = .1
        hyper = [hv, hv, hv, hv]
        gkernel = GaussianKernel_ard(hyper)
        X = lhcSample(S5.bounds, 10, seed=9)
        Y = [S5.f(x) for x in X]
        priorGP = GaussianProcess(gkernel, X, Y, prior=prior)
        nopriorGP = GaussianProcess(gkernel, X, Y, prior=None)

        S = lhcSample(S5.bounds, 1000, seed=10)
        nopriorErr = mean([(S5.f(x) - nopriorGP.mu(x))**2 for x in S])
        priorErr = mean([(S5.f(x) - priorGP.mu(x))**2 for x in S])

        # print '\nno prior Err =', nopriorErr
        # print 'prior Err =', priorErr
        self.failUnless(priorErr < nopriorErr * .8)
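The prior= argument presumably supplies a nonzero mean function for the GP. The standard construction, sketched below in plain numpy (not this library's internals), regresses on the residuals from the prior mean m(·), which is why a good prior cuts the error so sharply away from the data:

    import numpy as np

    def gp_mean_with_prior(x, X, Y, m, kern, noise=0.0):
        # Standard GP posterior mean with an explicit prior mean m(.):
        #   mu(x) = m(x) + k(x, X) (K + noise*I)^{-1} (Y - m(X))
        K = np.array([[kern(a, b) for b in X] for a in X])
        K += noise * np.eye(len(X))
        kstar = np.array([kern(x, b) for b in X])
        resid = np.array(Y) - np.array([m(a) for a in X])
        return m(x) + kstar.dot(np.linalg.solve(K, resid))

With prior=None the same formula applies with m ≡ 0, so far from the data the posterior mean decays to zero rather than to the prior's estimate.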
Example #3
    def _testTreeAccuracy(self):

        RF1 = RandomForest(ntrees=1, m=4, ndata=2, pRetrain=0.2)
        RF10 = RandomForest(ntrees=10, m=4, ndata=2, pRetrain=0.2)
        RF100 = RandomForest(ntrees=100, m=4, ndata=2, pRetrain=0.2)

        tf = Branin()
        X = lhcSample(tf.bounds, 20, seed=0)
        Y = [tf.f(x) for x in X]

        RF1.addData(X, Y)
        RF10.addData(X, Y)
        RF100.addData(X, Y)

        # accumulated squared errors (note: the quantities printed below are
        # mean squared errors; no square root is taken)
        rmse1 = 0.0
        rmse10 = 0.0
        rmse100 = 0.0

        nsamp = 1000

        for testx in lhcSample(tf.bounds, nsamp, seed=1):
            testy = tf.f(testx)
            rmse1 += (RF1.mu(testx) - testy) ** 2
            rmse10 += (RF10.mu(testx) - testy) ** 2
            rmse100 += (RF100.mu(testx) - testy) ** 2

        # this isn't consistent, since random forests are, you know, random
        print "RMSE 1 = %.4f" % (rmse1 / nsamp)
        print "RMSE 10 = %.4f" % (rmse10 / nsamp)
        print "RMSE 100 = %.4f" % (rmse100 / nsamp)

        self.failUnless(rmse1 > rmse100)
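The comparison rests on bagging: averaging learners trained on bootstrap resamples reduces variance, so the ensemble's MSE should drop as ntrees grows. A hedged, self-contained demo of the effect using 1-nearest-neighbour base learners (standing in for this RandomForest class, not reproducing it):

    import numpy as np

    def bagged_nn_mse(nlearners, seed=0):
        rng = np.random.RandomState(seed)
        X = rng.rand(40)
        Y = np.sin(X * 6) + 0.3 * rng.randn(40)        # noisy 1-D training data
        Xt = np.linspace(0, 1, 200)
        Yt = np.sin(Xt * 6)                            # noise-free truth
        preds = np.zeros(len(Xt))
        for _ in range(nlearners):
            idx = rng.randint(0, len(X), len(X))       # bootstrap resample
            bx, by = X[idx], Y[idx]
            nearest = np.abs(Xt[:, None] - bx[None, :]).argmin(axis=1)
            preds += by[nearest]                       # 1-NN base learner
        return np.mean((preds / nlearners - Yt) ** 2)

    print(bagged_nn_mse(1))    # single learner: high-variance fit
    print(bagged_nn_mse(100))  # averaged ensemble: typically much lower MSE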
Example #4
    def testTraining(self):

        # test that sequential training gives the same result as batch

        tf = Shekel5()
        X = lhcSample(tf.bounds, 25, seed=1)
        Y = [tf.f(x) for x in X]

        # GP1 adds all data during initialization
        GP1 = GaussianProcess(GaussianKernel_iso([.1]), X, Y, noise=.2)

        # GP2 adds data one at a time
        GP2 = GaussianProcess(GaussianKernel_iso([.1]), noise=.2)

        # GP3 uses addData()
        GP3 = GaussianProcess(GaussianKernel_iso([.1]), noise=.2)

        # GP4 adds using various methods
        GP4 = GaussianProcess(GaussianKernel_iso([.1]),
                              X[:10],
                              Y[:10],
                              noise=.2)

        for x, y in zip(X, Y):
            GP2.addData(x, y)

        for i in xrange(0, 25, 5):
            GP3.addData(X[i:i + 5], Y[i:i + 5])

        GP4.addData(X[10], Y[10])
        GP4.addData(X[11:18], Y[11:18])
        for i in xrange(18, 25):
            GP4.addData(X[i], Y[i])

        self.failUnless(all(GP1.R == GP2.R))
        self.failUnless(all(GP1.R == GP3.R))
        self.failUnless(all(GP1.R == GP4.R))

        testX = lhcSample(tf.bounds, 25, seed=2)
        for x in testX:
            mu1, s1 = GP1.posterior(x)
            mu2, s2 = GP2.posterior(x)
            mu3, s3 = GP3.posterior(x)
            mu4, s4 = GP4.posterior(x)
            self.failUnlessEqual(mu1, mu2)
            self.failUnlessEqual(mu1, mu3)
            self.failUnlessEqual(mu1, mu4)
            self.failUnlessEqual(s1, s2)
            self.failUnlessEqual(s1, s3)
            self.failUnlessEqual(s1, s4)
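For sequential addData to reproduce the batch factor GP.R exactly, the Cholesky factorization of the kernel matrix can be extended one point at a time rather than recomputed from scratch. A numpy sketch of that rank-one extension (an assumption about how such an update could work, not this library's code):

    import numpy as np

    def chol_append(L, k_new, k_nn):
        # Given lower-triangular L with L L^T = K, extend it to factor
        #   [[K,        k_new],
        #    [k_new^T,  k_nn ]]
        c = np.linalg.solve(L, k_new)       # forward-solve L c = k_new
        d = np.sqrt(k_nn - c.dot(c))        # new diagonal entry
        n = L.shape[0]
        L2 = np.zeros((n + 1, n + 1))
        L2[:n, :n] = L
        L2[n, :n] = c
        L2[n, n] = d
        return L2

Here k_new holds the kernel values between the old inputs and the new one, and k_nn its own kernel value plus the noise term. The exact equality the assertions above demand additionally requires that both code paths perform the arithmetic in the same order.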
Example #5
    def test1DGP(self):
        f = lambda x: float(sin(x * 5.))
        X = lhcSample([[0., 1.]], 5, seed=25)
        Y = [f(x) for x in X]

        kernel = GaussianKernel_ard(array([1.0, 1.0]))
        GP = GaussianProcess(kernel, X=X, Y=Y)
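The conventional ARD (automatic relevance determination) Gaussian kernel behind a name like GaussianKernel_ard is sketched below. Note the test passes two hyperparameters for a 1-D input, so the library's parameterization likely appends something extra such as a signal variance (the commented-out gkernel.sf2 in Example #1 hints at this); treat the exact layout as an assumption.

    import numpy as np

    def gaussian_kernel_ard(x1, x2, lengthscales, sf2=1.0):
        # k(x, x') = sf2 * exp(-0.5 * sum_d ((x_d - x'_d) / l_d)^2)
        z = (np.asarray(x1) - np.asarray(x2)) / np.asarray(lengthscales)
        return sf2 * np.exp(-0.5 * (z ** 2).sum())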
Example #6
    def testRBFN_1D(self):

        # sample from a synthetic function and see how much we improve the
        # error by using the prior function
        def foo(x):
            return sum(sin(x * 20))

        X = lhcSample([[0., 1.]], 50, seed=3)
        Y = [foo(x) for x in X]

        prior = RBFNMeanPrior()
        prior.train(X, Y, [[0., 1.]], k=10, seed=100)

        # See how well we fit the function by getting the average squared error
        # over 100 samples of the function.  Baseline foo(x)=0 MSE is 0.48.
        # We will aim for MSE < 0.05.
        S = arange(0, 1, .01)
        error = mean([(foo(x) - prior.mu(x))**2 for x in S])
        self.failUnless(error < 0.05)

        # for debugging
        if False:
            figure(1)
            plot(S, [foo(x) for x in S], 'b-')
            plot(S, [prior.mu(x) for x in S], 'k-')
            show()
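RBFNMeanPrior itself isn't shown. The usual construction behind such a prior, k Gaussian basis functions with weights fit by least squares, can be sketched as follows; the random centers, fixed width, and train(X, Y, bounds, k, seed) signature are assumptions modeled on its usage above:

    import numpy as np

    class TinyRBFN:
        # Minimal radial-basis-function network: k Gaussian bumps at random
        # centers inside the bounds, linear weights fit by least squares.
        def train(self, X, Y, bounds, k=10, width=0.1, seed=None):
            rng = np.random.RandomState(seed)
            lo = np.array([b[0] for b in bounds])
            hi = np.array([b[1] for b in bounds])
            self.centers = lo + rng.rand(k, len(bounds)) * (hi - lo)
            self.width = width
            Phi = self._phi(np.asarray(X))
            self.w = np.linalg.lstsq(Phi, np.asarray(Y), rcond=None)[0]

        def _phi(self, X):
            d2 = ((X[:, None, :] - self.centers[None, :, :]) ** 2).sum(axis=2)
            return np.exp(-d2 / (2 * self.width ** 2))

        def mu(self, x):
            return self._phi(np.atleast_2d(x)).dot(self.w)[0]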
Example #7
    def testRBFN_10D(self):

        # as above, but with a 10D test function and more data
        def foo(x):
            return sum(sin(x * 2))

        bounds = [[0., 1.]] * 10
        X = lhcSample(bounds, 100, seed=4)
        Y = [foo(x) for x in X]

        prior = RBFNMeanPrior()
        prior.train(X, Y, bounds, k=20, seed=5)

        S = lhcSample(bounds, 100, seed=6)
        RBNError = mean([(foo(x) - prior.mu(x))**2 for x in S])
        baselineError = mean([foo(x)**2 for x in S])

        # print '\nRBN err  =', RBNError
        # print 'baseline =', baselineError
        self.failUnless(RBNError < baselineError)
Example #8
    def testGPPrior(self):

        # see how GP works with the dataprior...
        def foo(x):
            return sum(sin(x * 20))

        bounds = [[0., 1.]]
        # train prior
        pX = lhcSample([[0., 1.]], 100, seed=6)
        pY = [foo(x) for x in pX]
        prior = RBFNMeanPrior()
        prior.train(pX, pY, bounds, k=10, seed=102)

        X = lhcSample([[0., 1.]], 2, seed=7)
        Y = [foo(x) for x in X]

        kernel = GaussianKernel_ard(array([.1]))
        GP = GaussianProcess(kernel, X, Y, prior=prior)
        GPnoprior = GaussianProcess(kernel, X, Y)

        S = arange(0, 1, .01)

        nopriorErr = mean([(foo(x) - GPnoprior.mu(x))**2 for x in S])
        priorErr = mean([(foo(x) - GP.mu(x))**2 for x in S])

        # print '\nno prior Err =', nopriorErr
        # print 'prior Err =', priorErr

        self.failUnless(priorErr < nopriorErr * .5)

        if False:
            figure(1)
            clf()
            plot(S, [prior.mu(x) for x in S], 'g-', alpha=0.3)
            plot(S, [GPnoprior.mu(x) for x in S], 'b-', alpha=0.3)
            plot(S, [GP.mu(x) for x in S], 'k-', lw=2)
            plot(X, Y, 'ko')
            show()
Example #9
    def testOneTree(self):

        forest = RandomForest(ntrees=1, m=4, ndata=2, pRetrain=.2)
        self.failUnlessEqual(forest.ntrees, 1)
        self.failUnlessEqual(forest.m, 4)
        self.failUnlessEqual(forest.ndata, 2)
        self.failUnlessEqual(forest.pRetrain, .2)

        tf = Branin()
        X = lhcSample(tf.bounds, 20, seed=0)
        Y = [tf.f(x) for x in X]

        forest.addData(X, Y)
        self.failUnlessEqual(len(forest.forest), 1)
        # checkTree(forest.forest[0])

        # maximizeEI(forest, tf.bounds)
        # print forest.forest[0]

        if False:
            figure(1, figsize=(5, 10))
            c0 = [(i / 100.) * (tf.bounds[0][1] - tf.bounds[0][0]) +
                  tf.bounds[0][0] for i in xrange(101)]
            c1 = [(i / 100.) * (tf.bounds[1][1] - tf.bounds[1][0]) +
                  tf.bounds[1][0] for i in xrange(101)]

            ax = subplot(121)
            mu = array([[forest.mu(array([i, j])) for i in c0] for j in c1])
            cs = ax.contourf(c0, c1, mu, 50)
            colorbar(cs)
            ax.plot([x[0] for x in X], [x[1] for x in X], 'ro', alpha=.2)
            ax.set_xbound(tf.bounds[0][0], tf.bounds[0][1])
            ax.set_ybound(tf.bounds[1][0], tf.bounds[1][1])
            ax.set_title(r'$\mu$')

            ax = subplot(122)
            mu = array([[forest.sigma2(array([i, j])) for i in c0]
                        for j in c1])
            cs = ax.contourf(c0, c1, mu, 50)
            colorbar(cs)
            ax.plot([x[0] for x in X], [x[1] for x in X], 'ro', alpha=.2)
            ax.set_xbound(tf.bounds[0][0], tf.bounds[0][1])
            ax.set_ybound(tf.bounds[1][0], tf.bounds[1][1])
            ax.set_title(r'$\sigma^2$')

            show()
Example #10
    def testForestUCB(self):

        RF = RandomForest(ntrees=2)
        tf = Branin()
        X = lhcSample(tf.bounds, 20, seed=0)
        Y = [tf.f(x) for x in X]
        RF.addData(X, Y)

        ucbf = UCB(RF, len(tf.bounds))
        dopt, doptx = direct(ucbf.negf, tf.bounds, maxiter=10)
        copt, coptx = cdirect(ucbf.negf, tf.bounds, maxiter=10)
        mopt, moptx = maximizeUCB(RF, tf.bounds, maxiter=10)

        self.failUnlessAlmostEqual(dopt, copt, 4)
        self.failUnlessAlmostEqual(-dopt, mopt, 4)
        self.failUnlessAlmostEqual(-copt, mopt, 4)

        self.failUnless(sum(abs(doptx - coptx)) < 0.01)
        self.failUnless(sum(abs(moptx - coptx)) < 0.01)
        self.failUnless(sum(abs(moptx - doptx)) < 0.01)
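The UCB acquisition being maximized is, in its textbook form, just the posterior mean plus a scaled posterior standard deviation; the kappa weighting below is an assumption (the UCB(RF, len(tf.bounds)) call suggests the library scales it with the input dimension). The negf attribute handed to the minimizers would then be its negation:

    import numpy as np

    def ucb(model, x, kappa=1.0):
        # Upper confidence bound: reward high predicted mean and high
        # uncertainty, trading exploitation against exploration via kappa.
        mu, sigma2 = model.posterior(x)
        return mu + kappa * np.sqrt(sigma2)

    def neg_ucb(model, x, kappa=1.0):
        return -ucb(model, x, kappa)  # for minimizers such as direct/cdirect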
Example #11
    def testForestPI(self):

        RF = RandomForest(ntrees=2)
        tf = Branin()
        X = lhcSample(tf.bounds, 20, seed=0)
        Y = [tf.f(x) for x in X]
        RF.addData(X, Y)
        mu, sigma = RF.posterior(ones(len(tf.bounds)) * 0.4)
        print "[python] = 0.4 x 2, mu =", mu, "  sigma =", sigma

        pif1 = PI(RF)
        dopt1, doptx1 = direct(pif1.negf, tf.bounds, maxiter=10)
        copt1, coptx1 = cdirect(pif1.negf, tf.bounds, maxiter=10)
        mopt1, moptx1 = maximizePI(RF, tf.bounds, maxiter=10)

        self.failUnlessAlmostEqual(dopt1, copt1, 4)
        self.failUnlessAlmostEqual(-dopt1, mopt1, 4)
        self.failUnlessAlmostEqual(-copt1, mopt1, 4)

        self.failUnless(sum(abs(doptx1 - coptx1)) < 0.01)
        self.failUnless(sum(abs(moptx1 - coptx1)) < 0.01)
        self.failUnless(sum(abs(moptx1 - doptx1)) < 0.01)

        pif2 = PI(RF, xi=0.5)
        dopt2, doptx2 = direct(pif2.negf, tf.bounds, maxiter=10)
        copt2, coptx2 = cdirect(pif2.negf, tf.bounds, maxiter=10)
        mopt2, moptx2 = maximizePI(RF, tf.bounds, xi=0.5, maxiter=10)

        self.failUnlessAlmostEqual(dopt2, copt2, 4)
        self.failUnlessAlmostEqual(-dopt2, mopt2, 4)
        self.failUnlessAlmostEqual(-copt2, mopt2, 4)

        self.failUnless(sum(abs(doptx2 - coptx2)) < 0.01)
        self.failUnless(sum(abs(moptx2 - coptx2)) < 0.01)
        self.failUnless(sum(abs(moptx2 - doptx2)) < 0.01)

        self.failIfAlmostEqual(dopt1, dopt2, 4)
        self.failIfAlmostEqual(copt1, copt2, 4)
        self.failIfAlmostEqual(mopt1, mopt2, 4)
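Probability of improvement explains the final failIfAlmostEqual checks: the xi margin demands a larger improvement over the incumbent before a point scores well, so pif2's optimum differs from pif1's. The standard definition, sketched with scipy (maximization and the incumbent y_best are assumptions):

    import numpy as np
    from scipy.stats import norm

    def probability_of_improvement(model, x, y_best, xi=0.0):
        # PI(x) = Phi((mu(x) - y_best - xi) / sigma(x))
        mu, sigma2 = model.posterior(x)
        sigma = np.sqrt(sigma2)
        if sigma == 0.0:
            return 0.0  # no uncertainty left at x
        return norm.cdf((mu - y_best - xi) / sigma)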