Example #1
    def test2DpyEI(self):
        
        f = lambda x: sum(sin(x))
        bounds = [[0., 5.], [0., 5.]]
        X = lhcSample(bounds, 5, seed=24)
        Y = [f(x) for x in X]

        kernel = GaussianKernel_ard(array([1.0, 1.0]))
        GP = GaussianProcess(kernel, X, Y)

        maxei = maximizeEI(GP, bounds)
        
        if False:
            figure(1)
            c0 = [(i/50.)*(bounds[0][1]-bounds[0][0])+bounds[0][0] for i in xrange(51)]
            c1 = [(i/50.)*(bounds[1][1]-bounds[1][0])+bounds[1][0] for i in xrange(51)]
            z = array([[GP.ei(array([i, j])) for i in c0] for j in c1])

            ax = plt.subplot(111)
            cs = ax.contour(c0, c1, z, 10, alpha=0.5, cmap=cm.Blues_r)
            plot([x[0] for x in X], [x[1] for x in X], 'ro')
            for i in xrange(len(X)):
                annotate('%.2f' % Y[i], X[i])
            plot(maxei[1][0], maxei[1][1], 'ko')
            show()
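
Note: these snippets are shown without their import headers. Below is a minimal sketch of what they appear to assume, with the ego module paths taken from Examples #12/#17 and the rest inferred from the names used, so the exact paths may differ from the original source files:

from numpy import array, arange, sin, sqrt
from pylab import figure, clf, plot, annotate, subplot, show, draw
import matplotlib.pyplot as plt
from matplotlib import cm

from ego.gaussianprocess import GaussianProcess
from ego.gaussianprocess.kernel import GaussianKernel_ard, GaussianKernel_iso
# lhcSample, maximizeEI, maximizeUCB, EI, PI, UCB, direct, cdirect and
# RBFNMeanPrior also come from the ego package; their exact modules are
# not visible in these snippets.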
Example #2
    def test2DpyEI(self):

        f = lambda x: sum(sin(x))
        bounds = [[0., 5.], [0., 5.]]
        X = lhcSample(bounds, 5, seed=24)
        Y = [f(x) for x in X]

        kernel = GaussianKernel_ard(array([1.0, 1.0]))
        GP = GaussianProcess(kernel, X, Y)

        maxei = maximizeEI(GP, bounds)

        if False:
            figure(1)
            c0 = [(i / 50.) * (bounds[0][1] - bounds[0][0]) + bounds[0][0]
                  for i in range(51)]
            c1 = [(i / 50.) * (bounds[1][1] - bounds[1][0]) + bounds[1][0]
                  for i in range(51)]
            z = array([[GP.ei(array([i, j])) for i in c0] for j in c1])

            ax = plt.subplot(111)
            cs = ax.contour(c0, c1, z, 10, alpha=0.5, cmap=cm.Blues_r)
            plot([x[0] for x in X], [x[1] for x in X], 'ro')
            for i in range(len(X)):
                annotate('%.2f' % Y[i], X[i])
            plot(maxei[1][0], maxei[1][1], 'ko')
            show()
Example #3
def test():

    GP = GaussianProcess(GaussianKernel_iso([0.2, 1.0]))
    X = array([[0.2], [0.3], [0.5], [1.5]])
    Y = [1, 0, 1, 0.75]
    GP.addData(X, Y)

    figure(1)
    A = arange(0, 2, 0.01)
    mu = array([GP.mu(x) for x in A])
    sig2 = array([GP.posterior(x)[1] for x in A])

    Ei = EI(GP)
    ei = [-Ei.negf(x) for x in A]

    Pi = PI(GP)
    pi = [-Pi.negf(x) for x in A]

    Ucb = UCB(GP, 1, T=2)
    ucb = [-Ucb.negf(x) for x in A]

    ax = subplot(1, 1, 1)
    ax.plot(A, mu, "k-", lw=2)
    xv, yv = poly_between(A, mu - sig2, mu + sig2)
    ax.fill(xv, yv, color="#CCCCCC")

    ax.plot(A, ei, "g-", lw=2, label="EI")
    ax.plot(A, ucb, "g--", lw=2, label="UCB")
    ax.plot(A, pi, "g:", lw=2, label="PI")
    ax.plot(X, Y, "ro")
    ax.legend()
    draw()
    show()
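
For reference, a textbook expected-improvement computation under a Gaussian posterior (maximization form). This is only a sketch of the quantity that EI(GP).negf(x) negates; the package's own implementation may treat the xi offset and the zero-variance case differently.

from scipy.stats import norm
from numpy import sqrt

def expected_improvement(mu, sig2, ybest, xi=0.0):
    # improvement of the posterior N(mu, sig2) over the incumbent ybest
    sig = sqrt(sig2)
    if sig <= 0.0:
        return 0.0
    z = (mu - ybest - xi) / sig
    return (mu - ybest - xi) * norm.cdf(z) + sig * norm.pdf(z)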
Example #4
    def _testKernelMaxEI(self):
        
        # test different methods of optimizing kernel
        S5 = Shekel5()
        
        hv = 0.1
        testkernels = [GaussianKernel_iso([hv]),
                       GaussianKernel_ard([hv, hv, hv, hv]),
                       MaternKernel3([hv, 1.0])]
                       # MaternKernel5([hv, 1.0])]

        for kernel in testkernels:
            # print
            # print kernel.__class__
            
        
            # train GPs
            X = lhcSample(S5.bounds, 10, seed=0)
            Y = [S5.f(x) for x in X]
        
            GP = GaussianProcess(kernel, X, Y)
        
            eif = EI(GP)
            dopt, doptx = direct(eif.negf, S5.bounds, maxiter=10)
            copt, coptx = cdirect(eif.negf, S5.bounds, maxiter=10)
            mopt, moptx = maximizeEI(GP, S5.bounds, maxiter=10)
            # print dopt, doptx
            # print copt, coptx
            # print mopt, moptx
        
            self.failUnlessAlmostEqual(dopt, copt, 4)
            self.failUnlessAlmostEqual(-dopt, mopt, 4)
            self.failUnlessAlmostEqual(-copt, mopt, 4)
        
            self.failUnless(sum(abs(doptx-coptx)) < .01)
            self.failUnless(sum(abs(moptx-coptx)) < .01)
            self.failUnless(sum(abs(moptx-doptx)) < .01)
        
            # train GP w/prior
            pX = lhcSample(S5.bounds, 100, seed=101)
            pY = [S5.f(x) for x in pX]
            prior = RBFNMeanPrior()
            prior.train(pX, pY, bounds=S5.bounds, k=10, seed=102)
        
            GP = GaussianProcess(kernel, X, Y, prior=prior)        
        
            eif = EI(GP)
            pdopt, pdoptx = direct(eif.negf, S5.bounds, maxiter=10)
            pcopt, pcoptx = cdirect(eif.negf, S5.bounds, maxiter=10)
            pmopt, pmoptx = maximizeEI(GP, S5.bounds, maxiter=10)
        
            self.failIfAlmostEqual(pdopt, dopt, 3)
            self.failUnlessAlmostEqual(pdopt, pcopt, 4)
            self.failUnlessAlmostEqual(-pdopt, pmopt, 4)
            self.failUnlessAlmostEqual(-pcopt, pmopt, 4)
        
            self.failUnless(sum(abs(pdoptx-pcoptx)) < .01)
            self.failUnless(sum(abs(pmoptx-pcoptx)) < .01)
            self.failUnless(sum(abs(pmoptx-pdoptx)) < .01)
Example #5
    def testGDelta(self):

        # usually, Gdelta==G
        GP = GaussianProcess(GaussianKernel_iso([0.05]))
        X = lhcSample([[0., 1.]], 5, seed=10)
        Y = [x**2 for x in X]
        GP.train(X, Y)

        G = (Y[0]-max(Y)) / (Y[0]-1)
        self.failUnlessEqual(G, Gdelta(GP, [[0., 1.]], Y[0], 1.0, 0.01))

        # sometimes, though, Gdelta > G -- this GP has a very high confidence
        # prediction of a very good point at x ~ .65
        GP = GaussianProcess(GaussianKernel_iso([0.1]))
        X = array([[.5], [.51], [.59], [.6]])
        Y = array([1., 2., 2., 1.])
        GP.train(X, Y)
        # figure(1)
        # A = arange(0, 1, 0.01)
        # post = [GP.posterior(x) for x in A]
        # plot(A, [p[0] for p in post], 'k-')
        # plot(A, [p[0]+p[1] for p in post], 'k:')
        # show()
        G = (Y[0]-max(Y)) / (Y[0]-4.0)
        Gd = Gdelta(GP, [[0., 1.]], Y[0], 4.0, 0.01)
        self.failUnless(G < Gd)

        # however, if there is more variance, we will collapse back to G
        GP = GaussianProcess(GaussianKernel_iso([.001]))
        GP.train(X, Y)
        G = (Y[0]-max(Y)) / (Y[0]-4.0)
        self.failUnlessEqual(G, Gdelta(GP, [[0., 1.]], Y[0], 4.0, 0.01))
Example #6
class Synthetic(TestFunction):
    """
    randomly-generated synthetic function
    """
    def __init__(self, kernel, bounds, NX, noise=0.05, xstar=None, **kwargs):
        super(Synthetic, self).__init__("Synthetic", 0, None, bounds, **kwargs)
        
        self.name += ' %d'%len(bounds)
        
        self.GP = GaussianProcess(kernel)
        X = lhcSample(bounds, NX)
        self.GP.addData([X[0]], [normal(0, 1)])
        if xstar is not None:
            ystar = min(self.GP.Y[0]-1.0, -2.0)
            self.GP.addData(xstar, ystar)
        for x in X[1:]:
            mu, sig2 = self.GP.posterior(x)
            y = normal(mu, sqrt(sig2)) + normal(0, noise)
            # preserve min if necessary
            if xstar is not None and y < ystar+.5:
                y = ystar+.5
            self.GP.addData(x, y)
            
        # now, try minimizing with BFGS
        start = self.GP.X[argmin(self.GP.Y)]
        xopt = fmin_bfgs(self.GP.mu, start, disp=False)
        
        print "\t[synthetic] optimization started at %s, ended at %s" % (start, xopt)
        
        if xstar is not None:
            print '\t[synthetic] realigning minimum'
            # now, align minimum with what we specified
            for i, (target, origin) in enumerate(zip(xstar, xopt)):
                self.GP.X[:,i] += target-origin
            xopt = xstar
            
        
        self.minimum = self.GP.mu(xopt)
        self.xstar = xopt
        
        # print self.GP.X
        # print self.GP.Y
        print '\t[synthetic] x+ = %s, f(x+) = %.3f' % (self.xstar, self.f(self.xstar))
            
            
    def f(self, x):
        
        y = self.GP.mu(x)
        if y < self.minimum:
            self.minimum = y
            
        if self.maximize:
            return -y
        else:
            return y
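
A hypothetical usage sketch for the Synthetic class above; the kernel widths, bounds, sample count and xstar are illustrative values, not taken from the source:

from numpy import array
from ego.gaussianprocess.kernel import GaussianKernel_ard

syn = Synthetic(GaussianKernel_ard(array([0.3, 0.3])),
                bounds=[[0., 1.], [0., 1.]], NX=20, xstar=array([0.5, 0.5]))
print syn.xstar, syn.minimum    # the (re)aligned minimizer and its value
print syn.f(array([0.4, 0.6]))  # evaluate the synthetic objective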
Example #7
    def testGPPrior(self):

        # see how GP works with the dataprior...
        def foo(x):
            return sum(sin(x * 20))

        bounds = [[0., 1.]]
        # train prior
        pX = lhcSample([[0., 1.]], 100, seed=6)
        pY = [foo(x) for x in pX]
        prior = RBFNMeanPrior()
        prior.train(pX, pY, bounds, k=10, seed=102)

        X = lhcSample([[0., 1.]], 2, seed=7)
        Y = [foo(x) for x in X]

        kernel = GaussianKernel_ard(array([.1]))
        GP = GaussianProcess(kernel, X, Y, prior=prior)
        GPnoprior = GaussianProcess(kernel, X, Y)

        S = arange(0, 1, .01)

        nopriorErr = mean([(foo(x) - GPnoprior.mu(x))**2 for x in S])
        priorErr = mean([(foo(x) - GP.mu(x))**2 for x in S])

        self.failUnless(priorErr < nopriorErr * .5)

        if False:
            figure(1)
            clf()
            plot(S, [prior.mu(x) for x in S], 'g-', alpha=0.3)
            plot(S, [GPnoprior.mu(x) for x in S], 'b-', alpha=0.3)
            plot(S, [GP.mu(x) for x in S], 'k-', lw=2)
            plot(X, Y, 'ko')
            show()
Example #8
    def testNoise(self):

        tf = Branin()

        X = lhcSample(tf.bounds, 10, seed=0)
        Y = [tf.f(x) for x in X]
        GP1 = GaussianProcess(MaternKernel3([1.0, 1.0]), X, Y, noise=1e-4)
        self.failUnlessEqual(GP1.noise, 1e-4)

        eif1 = EI(GP1)
        dopt1, _ = direct(eif1.negf, tf.bounds, maxiter=10)
        copt1, _ = cdirect(eif1.negf, tf.bounds, maxiter=10)
        mopt1, _ = maximizeEI(GP1, tf.bounds, maxiter=10)

        self.failUnlessAlmostEqual(dopt1, copt1, 4)
        self.failUnlessAlmostEqual(-dopt1, mopt1, 4)
        self.failUnlessAlmostEqual(-copt1, mopt1, 4)

        GP2 = GaussianProcess(MaternKernel3([1.0, 1.0]), X, Y, noise=0.01)
        self.failUnlessEqual(GP2.noise, 0.01)

        eif2 = EI(GP2)
        dopt2, _ = direct(eif2.negf, tf.bounds, maxiter=10)
        copt2, _ = cdirect(eif2.negf, tf.bounds, maxiter=10)
        mopt2, _ = maximizeEI(GP2, tf.bounds, maxiter=10)
        self.failUnlessAlmostEqual(dopt2, copt2, 4)
        self.failUnlessAlmostEqual(-dopt2, mopt2, 4)
        self.failUnlessAlmostEqual(-copt2, mopt2, 4)

        self.failIfAlmostEqual(dopt1, dopt2, 4)
        self.failIfAlmostEqual(copt1, copt2, 4)
        self.failIfAlmostEqual(mopt1, mopt2, 4)

        GP3 = GaussianProcess(MaternKernel3([1.0, 1.0]), X, Y, noise=0.1)
        self.failUnlessEqual(GP3.noise, 0.1)
        eif3 = EI(GP3)
        dopt3, _ = direct(eif3.negf, tf.bounds, maxiter=10)
        copt3, _ = cdirect(eif3.negf, tf.bounds, maxiter=10)
        mopt3, _ = maximizeEI(GP3, tf.bounds, maxiter=10)
        self.failUnlessAlmostEqual(dopt3, copt3, 4)
        self.failUnlessAlmostEqual(-dopt3, mopt3, 4)
        self.failUnlessAlmostEqual(-copt3, mopt3, 4)

        self.failIfAlmostEqual(dopt1, dopt3, 4)
        self.failIfAlmostEqual(copt1, copt3, 4)
        self.failIfAlmostEqual(mopt1, mopt3, 4)
        self.failIfAlmostEqual(dopt2, dopt3, 4)
        self.failIfAlmostEqual(copt2, copt3, 4)
        self.failIfAlmostEqual(mopt2, mopt3, 4)
Example #9
    def testXi(self):
        
        S5 = Shekel5()
        
        GP1 = GaussianProcess(GaussianKernel_iso([.2]))
        # self.failUnlessEqual(GP1.xi, 0.0)
        X = lhcSample(S5.bounds, 10, seed=0)
        Y = [S5.f(x) for x in X]
        GP1.addData(X, Y)

        eif1 = EI(GP1, xi=0.0)
        dopt1, _ = direct(eif1.negf, S5.bounds, maxiter=10)
        copt1, _ = cdirect(eif1.negf, S5.bounds, maxiter=10)
        mopt1, _ = maximizeEI(GP1, S5.bounds, xi=0.0, maxiter=10)

        self.failUnlessAlmostEqual(dopt1, copt1, 4)
        self.failUnlessAlmostEqual(-dopt1, mopt1, 4)
        self.failUnlessAlmostEqual(-copt1, mopt1, 4)

        GP2 = GaussianProcess(GaussianKernel_iso([.3]), X, Y)
        eif2 = EI(GP2, xi=0.01)    
        self.failUnlessEqual(eif2.xi, 0.01)    
        dopt2, _ = direct(eif2.negf, S5.bounds, maxiter=10)
        copt2, _ = cdirect(eif2.negf, S5.bounds, maxiter=10)
        mopt2, _ = maximizeEI(GP2, S5.bounds, xi=0.01, maxiter=10)
        self.failUnlessAlmostEqual(dopt2, copt2, 4)
        self.failUnlessAlmostEqual(-dopt2, mopt2, 4)
        self.failUnlessAlmostEqual(-copt2, mopt2, 4)

        self.failIfAlmostEqual(dopt1, dopt2, 4)
        self.failIfAlmostEqual(copt1, copt2, 4)
        self.failIfAlmostEqual(mopt1, mopt2, 4)

        GP3 = GaussianProcess(GaussianKernel_iso([.3]), X, Y)
        eif3 = EI(GP3, xi=0.1)    
        dopt3, _ = direct(eif3.negf, S5.bounds, maxiter=10)
        copt3, _ = cdirect(eif3.negf, S5.bounds, maxiter=10)
        mopt3, _ = maximizeEI(GP3, S5.bounds, xi=0.1, maxiter=10)
        self.failUnlessAlmostEqual(dopt3, copt3, 4)
        self.failUnlessAlmostEqual(-dopt3, mopt3, 4)
        self.failUnlessAlmostEqual(-copt3, mopt3, 4)

        self.failIfAlmostEqual(dopt1, dopt3, 4)
        self.failIfAlmostEqual(copt1, copt3, 4)
        self.failIfAlmostEqual(mopt1, mopt3, 4)
        self.failIfAlmostEqual(dopt2, dopt3, 4)
        self.failIfAlmostEqual(copt2, copt3, 4)
        self.failIfAlmostEqual(mopt2, mopt3, 4)
Example #10
    def testXi(self):
        
        S5 = Shekel5()
        
        GP1 = GaussianProcess(GaussianKernel_iso([.2]))
        # self.failUnlessEqual(GP1.xi, 0.0)
        X = lhcSample(S5.bounds, 10, seed=0)
        Y = [S5.f(x) for x in X]
        GP1.addData(X, Y)

        ucbf1 = UCB(GP1, len(S5.bounds), scale=0.5)
        dopt1, _ = direct(ucbf1.negf, S5.bounds, maxiter=10)
        copt1, _ = cdirect(ucbf1.negf, S5.bounds, maxiter=10)
        mopt1, _ = maximizeUCB(GP1, S5.bounds, scale=0.5, maxiter=10)

        self.failUnlessAlmostEqual(dopt1, copt1, 4)
        self.failUnlessAlmostEqual(-dopt1, mopt1, 4)
        self.failUnlessAlmostEqual(-copt1, mopt1, 4)

        GP2 = GaussianProcess(GaussianKernel_iso([.3]), X, Y)
        ucbf2 = UCB(GP2, len(S5.bounds), scale=0.01)    
        dopt2, _ = direct(ucbf2.negf, S5.bounds, maxiter=10)
        copt2, _ = cdirect(ucbf2.negf, S5.bounds, maxiter=10)
        mopt2, _ = maximizeUCB(GP2, S5.bounds, scale=.01, maxiter=10)
        self.failUnlessAlmostEqual(dopt2, copt2, 4)
        self.failUnlessAlmostEqual(-dopt2, mopt2, 4)
        self.failUnlessAlmostEqual(-copt2, mopt2, 4)

        self.failIfAlmostEqual(dopt1, dopt2, 4)
        self.failIfAlmostEqual(copt1, copt2, 4)
        self.failIfAlmostEqual(mopt1, mopt2, 4)

        GP3 = GaussianProcess(GaussianKernel_iso([.3]), X, Y)
        ucbf3 = UCB(GP3, len(S5.bounds), scale=.9)    
        dopt3, _ = direct(ucbf3.negf, S5.bounds, maxiter=10)
        copt3, _ = cdirect(ucbf3.negf, S5.bounds, maxiter=10)
        mopt3, _ = maximizeUCB(GP3, S5.bounds, scale=0.9, maxiter=10)
        self.failUnlessAlmostEqual(dopt3, copt3, 4)
        self.failUnlessAlmostEqual(-dopt3, mopt3, 4)
        self.failUnlessAlmostEqual(-copt3, mopt3, 4)

        self.failIfAlmostEqual(dopt1, dopt3, 4)
        self.failIfAlmostEqual(copt1, copt3, 4)
        self.failIfAlmostEqual(mopt1, mopt3, 4)
        self.failIfAlmostEqual(dopt2, dopt3, 4)
        self.failIfAlmostEqual(copt2, copt3, 4)
        self.failIfAlmostEqual(mopt2, mopt3, 4)
Example #11
    def test1DcEI(self):
        
        f = lambda x: float(sin(x*5.))
        X = lhcSample([[0., 1.]], 5, seed=22)
        Y = [f(x) for x in X]

        kernel = GaussianKernel_ard(array([1.0]))
        GP = GaussianProcess(kernel)
        GP.addData(X, Y)
        
        # should use optimizeGP.cpp
        maxei = maximizeEI(GP, [[0., 1.]])
        
        if False:
            figure(1)
            plot(X, Y, 'ro')
            plot([x/100. for x in xrange(100)], [GP.ei(x/100.) for x in xrange(100)])
            plot(maxei[1][0], maxei[0], 'ko')
            show()
Example #12
    def load_results(self, *args):
        super(IM_EGO_Test, self).load_results(*args)

        from ego.gaussianprocess import GaussianProcess
        from ego.gaussianprocess.kernel import GaussianKernel_ard

        # Build the gp
        kernel = GaussianKernel_ard(self.kernel_hyperparms)
        self.gp = GaussianProcess(kernel, noise=self.gp_noise)

        for x, y in zip(self.gp_X, self.gp_Y):
            self.gp.addData(x, y)
Example #13
    def testShekelGPPrior(self):
        
        # see how the GP works on the Shekel function
        S5 = Shekel5()

        pX = lhcSample(S5.bounds, 100, seed=8)
        pY = [S5.f(x) for x in pX]
        prior = RBFNMeanPrior()
        prior.train(pX, pY, S5.bounds, k=10, seed=103)
        
        X = lhcSample(S5.bounds, 10, seed=9)
        Y = [S5.f(x) for x in X]

        hv = .1
        hyper = [hv, hv, hv, hv]
        gkernel = GaussianKernel_ard(hyper)
        priorGP = GaussianProcess(gkernel, X, Y, prior=prior)
        nopriorGP = GaussianProcess(gkernel, X, Y)
        
        S = lhcSample(S5.bounds, 1000, seed=10)
        nopriorErr = mean([(S5.f(x)-nopriorGP.mu(x))**2 for x in S])
        priorErr = mean([(S5.f(x)-priorGP.mu(x))**2 for x in S])
        
        # print '\nno prior Err =', nopriorErr
        # print 'prior Err =', priorErr
        self.failUnless(priorErr < nopriorErr*.8)
Example #14
def test():
    GP = GaussianProcess(GaussianKernel_iso([.2, 1.0]))
    X = array([[.2], [.3], [.5], [1.5]])
    Y = [1, 0, 1, .75]
    GP.addData(X, Y)

    figure(1)
    A = arange(0, 2, 0.01)
    mu = array([GP.mu(x) for x in A])
    sig2 = array([GP.posterior(x)[1] for x in A])

    Ei = EI(GP)
    ei = [-Ei.negf(x) for x in A]

    Pi = PI(GP)
    pi = [-Pi.negf(x) for x in A]

    Ucb = UCB(GP, 1, T=2)
    ucb = [-Ucb.negf(x) for x in A]

    ax = subplot(1, 1, 1)
    ax.plot(A, mu, 'k-', lw=2)
    xv, yv = poly_between(A, mu - sig2, mu + sig2)
    ax.fill(xv, yv, color="#CCCCCC")

    ax.plot(A, ei, 'g-', lw=2, label='EI')
    ax.plot(A, ucb, 'g--', lw=2, label='UCB')
    ax.plot(A, pi, 'g:', lw=2, label='PI')
    ax.plot(X, Y, 'ro')
    ax.legend()
    draw()
    show()
Example #15
    def test1DcUCB(self):

        f = lambda x: float(sin(x * 5.))
        X = lhcSample([[0., 1.]], 5, seed=22)
        Y = [f(x) for x in X]

        kernel = GaussianKernel_ard(array([1.0]))
        GP = GaussianProcess(kernel)
        GP.addData(X, Y)

        # should use optimizeGP.cpp
        ucbf = UCB(GP, 1)
        dopt, doptx = direct(ucbf.negf, [[0., 1.]], maxiter=10)
        copt, coptx = cdirect(ucbf.negf, [[0., 1.]], maxiter=10)
        mopt, moptx = maximizeUCB(GP, [[0., 1.]], maxiter=10)

        self.failUnlessAlmostEqual(dopt, copt, 4)
        self.failUnlessAlmostEqual(-dopt, mopt, 4)
        self.failUnlessAlmostEqual(-copt, mopt, 4)

        self.failUnless(sum(abs(doptx - coptx)) < .01)
        self.failUnless(sum(abs(moptx - coptx)) < .01)
        self.failUnless(sum(abs(moptx - doptx)) < .01)
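
The UCB acquisition exercised here is, generically, the posterior mean plus a scaled posterior standard deviation. Below is a sketch of that quantity; the package's UCB class derives its exploration weight from the dimensionality and T internally, so the beta parameter is an assumption for illustration only.

from numpy import sqrt

def ucb(mu, sig2, beta=1.0):
    # maximization-form GP-UCB: favor both high mean and high uncertainty
    return mu + beta * sqrt(sig2)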
Example #16
    def test1DcUCB(self):
        
        f = lambda x: float(sin(x*5.))
        X = lhcSample([[0., 1.]], 5, seed=22)
        Y = [f(x) for x in X]

        kernel = GaussianKernel_ard(array([1.0]))
        GP = GaussianProcess(kernel)
        GP.addData(X, Y)
        
        # should use optimizeGP.cpp
        ucbf = UCB(GP, 1)
        dopt, doptx = direct(ucbf.negf, [[0., 1.]], maxiter=10)
        copt, coptx = cdirect(ucbf.negf, [[0., 1.]], maxiter=10)
        mopt, moptx = maximizeUCB(GP, [[0., 1.]], maxiter=10)
        
        self.failUnlessAlmostEqual(dopt, copt, 4)
        self.failUnlessAlmostEqual(-dopt, mopt, 4)
        self.failUnlessAlmostEqual(-copt, mopt, 4)
    
        self.failUnless(sum(abs(doptx-coptx)) < .01)
        self.failUnless(sum(abs(moptx-coptx)) < .01)
        self.failUnless(sum(abs(moptx-doptx)) < .01)
Example #17
    def load_results(self, *args):
        super(SARD_EGO_Test, self).load_results(*args)

        from ego.gaussianprocess import GaussianProcess
        from ego.gaussianprocess.kernel import GaussianKernel_ard

        # Build the gp
        kernel = GaussianKernel_ard(self.kernel_hyperparms)
        self.gp = GaussianProcess(kernel, noise=self.gp_noise)

        for x, y in zip(self.gp_X, self.gp_Y):
            self.gp.addData(x, y)

        print shape(self.S_end)
        print shape(self.E_samples)
        print shape(self.MH_ratios)
        print shape(self.E_proposeds)
Example #18
    def testMaxEIPrior(self):

        # make sure that the prior works with the different methods of EI
        # maximization
        
        S5 = Shekel5()
        pX = lhcSample(S5.bounds, 100, seed=511)
        pY = [S5.f(x) for x in pX]
        prior = RBFNMeanPrior()
        prior.train(pX, pY, bounds=S5.bounds, k=10, seed=504)
        
        hv = .1
        hyper = [hv, hv, hv, hv]
        kernel = GaussianKernel_ard(hyper)
        
        # train GPs
        X = lhcSample(S5.bounds, 10, seed=512)
        Y = [S5.f(x) for x in X]
        
        # validation
        valX = list(x.copy() for x in X)
        valY = copy(Y)
        
        GP = GaussianProcess(kernel, X, Y, prior=prior)
        
        eif = EI(GP)
        copt, _ = cdirect(eif.negf, S5.bounds, maxiter=20)
        mopt, _ = maximizeEI(GP, S5.bounds, maxiter=20)

        self.failUnlessAlmostEqual(-copt, mopt, 2)
        
        for i in xrange(len(GP.X)):
            self.failUnless(all(valX[i]==GP.X[i]))
            self.failUnless(valY[i]==GP.Y[i])
        
        GP.prior.mu(GP.X[0])
        self.failUnless(all(valX[0]==GP.X[0]))
        # print GP.X
        
        for i in xrange(len(GP.X)):
            self.failUnless(all(valX[i]==GP.X[i]))
            self.failUnless(valY[i]==GP.Y[i])
        
        GP.prior.mu(GP.X[0])
        self.failUnless(all(valX[0]==GP.X[0]))
Example #19
    def testXi(self):

        S5 = Shekel5()

        GP1 = GaussianProcess(GaussianKernel_iso([.2]))
        # self.failUnlessEqual(GP1.xi, 0.0)
        X = lhcSample(S5.bounds, 10, seed=0)
        Y = [S5.f(x) for x in X]
        GP1.addData(X, Y)

        eif1 = EI(GP1, xi=0.0)
        dopt1, _ = direct(eif1.negf, S5.bounds, maxiter=10)
        copt1, _ = cdirect(eif1.negf, S5.bounds, maxiter=10)
        mopt1, _ = maximizeEI(GP1, S5.bounds, xi=0.0, maxiter=10)

        self.failUnlessAlmostEqual(dopt1, copt1, 4)
        self.failUnlessAlmostEqual(-dopt1, mopt1, 4)
        self.failUnlessAlmostEqual(-copt1, mopt1, 4)

        GP2 = GaussianProcess(GaussianKernel_iso([.3]), X, Y)
        eif2 = EI(GP2, xi=0.01)
        self.failUnlessEqual(eif2.xi, 0.01)
        dopt2, _ = direct(eif2.negf, S5.bounds, maxiter=10)
        copt2, _ = cdirect(eif2.negf, S5.bounds, maxiter=10)
        mopt2, _ = maximizeEI(GP2, S5.bounds, xi=0.01, maxiter=10)
        self.failUnlessAlmostEqual(dopt2, copt2, 4)
        self.failUnlessAlmostEqual(-dopt2, mopt2, 4)
        self.failUnlessAlmostEqual(-copt2, mopt2, 4)

        self.failIfAlmostEqual(dopt1, dopt2, 4)
        self.failIfAlmostEqual(copt1, copt2, 4)
        self.failIfAlmostEqual(mopt1, mopt2, 4)

        GP3 = GaussianProcess(GaussianKernel_iso([.3]), X, Y)
        eif3 = EI(GP3, xi=0.1)
        dopt3, _ = direct(eif3.negf, S5.bounds, maxiter=10)
        copt3, _ = cdirect(eif3.negf, S5.bounds, maxiter=10)
        mopt3, _ = maximizeEI(GP3, S5.bounds, xi=0.1, maxiter=10)
        self.failUnlessAlmostEqual(dopt3, copt3, 4)
        self.failUnlessAlmostEqual(-dopt3, mopt3, 4)
        self.failUnlessAlmostEqual(-copt3, mopt3, 4)

        self.failIfAlmostEqual(dopt1, dopt3, 4)
        self.failIfAlmostEqual(copt1, copt3, 4)
        self.failIfAlmostEqual(mopt1, mopt3, 4)
        self.failIfAlmostEqual(dopt2, dopt3, 4)
        self.failIfAlmostEqual(copt2, copt3, 4)
        self.failIfAlmostEqual(mopt2, mopt3, 4)
Example #20
    def testXi(self):

        S5 = Shekel5()

        GP1 = GaussianProcess(GaussianKernel_iso([.2]))
        # self.failUnlessEqual(GP1.xi, 0.0)
        X = lhcSample(S5.bounds, 10, seed=0)
        Y = [S5.f(x) for x in X]
        GP1.addData(X, Y)

        ucbf1 = UCB(GP1, len(S5.bounds), scale=0.5)
        dopt1, _ = direct(ucbf1.negf, S5.bounds, maxiter=10)
        copt1, _ = cdirect(ucbf1.negf, S5.bounds, maxiter=10)
        mopt1, _ = maximizeUCB(GP1, S5.bounds, scale=0.5, maxiter=10)

        self.failUnlessAlmostEqual(dopt1, copt1, 4)
        self.failUnlessAlmostEqual(-dopt1, mopt1, 4)
        self.failUnlessAlmostEqual(-copt1, mopt1, 4)

        GP2 = GaussianProcess(GaussianKernel_iso([.3]), X, Y)
        ucbf2 = UCB(GP2, len(S5.bounds), scale=0.01)
        dopt2, _ = direct(ucbf2.negf, S5.bounds, maxiter=10)
        copt2, _ = cdirect(ucbf2.negf, S5.bounds, maxiter=10)
        mopt2, _ = maximizeUCB(GP2, S5.bounds, scale=.01, maxiter=10)
        self.failUnlessAlmostEqual(dopt2, copt2, 4)
        self.failUnlessAlmostEqual(-dopt2, mopt2, 4)
        self.failUnlessAlmostEqual(-copt2, mopt2, 4)

        self.failIfAlmostEqual(dopt1, dopt2, 4)
        self.failIfAlmostEqual(copt1, copt2, 4)
        self.failIfAlmostEqual(mopt1, mopt2, 4)

        GP3 = GaussianProcess(GaussianKernel_iso([.3]), X, Y)
        ucbf3 = UCB(GP3, len(S5.bounds), scale=.9)
        dopt3, _ = direct(ucbf3.negf, S5.bounds, maxiter=10)
        copt3, _ = cdirect(ucbf3.negf, S5.bounds, maxiter=10)
        mopt3, _ = maximizeUCB(GP3, S5.bounds, scale=0.9, maxiter=10)
        self.failUnlessAlmostEqual(dopt3, copt3, 4)
        self.failUnlessAlmostEqual(-dopt3, mopt3, 4)
        self.failUnlessAlmostEqual(-copt3, mopt3, 4)

        self.failIfAlmostEqual(dopt1, dopt3, 4)
        self.failIfAlmostEqual(copt1, copt3, 4)
        self.failIfAlmostEqual(mopt1, mopt3, 4)
        self.failIfAlmostEqual(dopt2, dopt3, 4)
        self.failIfAlmostEqual(copt2, copt3, 4)
        self.failIfAlmostEqual(mopt2, mopt3, 4)
Example #21
    def testGPPrior(self):
        
        # see how GP works with the dataprior...
        def foo(x):
            return sum(sin(x*20))
        
        bounds = [[0., 1.]]
        # train prior
        pX = lhcSample([[0., 1.]], 100, seed=6)
        pY = [foo(x) for x in pX]
        prior = RBFNMeanPrior()
        prior.train(pX, pY, bounds, k=10, seed=102)
        
        X = lhcSample([[0., 1.]], 2, seed=7)
        Y = [foo(x) for x in X]
        
        kernel = GaussianKernel_ard(array([.1]))
        GP = GaussianProcess(kernel, X, Y, prior=prior)
        GPnoprior = GaussianProcess(kernel, X, Y)

        S = arange(0, 1, .01)

        nopriorErr = mean([(foo(x)-GPnoprior.mu(x))**2 for x in S])
        priorErr = mean([(foo(x)-GP.mu(x))**2 for x in S])
        
        # print '\nno prior Err =', nopriorErr
        # print 'prior Err =', priorErr
        
        self.failUnless(priorErr < nopriorErr*.5)
        
        if False:
            figure(1)
            clf()
            plot(S, [prior.mu(x) for x in S], 'g-', alpha=0.3)
            plot(S, [GPnoprior.mu(x) for x in S], 'b-', alpha=0.3)
            plot(S, [GP.mu(x) for x in S], 'k-', lw=2)
            plot(X, Y, 'ko')
            show()
Example #22
def fastUCBGallery(GP, bounds, N, useBest=True, samples=300, useCDIRECT=True, xi=0, passback={}):
    """
    Use UCB to generate a gallery of N instances using Monte Carlo to 
    approximate the optimization of the utility function.
    """
    gallery = []

    if len(GP.X) > 0:
        if useBest:
            # find best sample already seen, that lies within the bounds
            bestY = -inf
            bestX = None
            for x, y in zip(GP.X, GP.Y):
                if y > bestY:
                    for v, b in zip(x, bounds):
                        if v < b[0] or v > b[1]:
                            break
                    else:
                        bestY = y
                        bestX = x
            if bestX is not None:
                gallery.append(bestX)
    
        # create a "fake" GP from the GP that was passed in (can't just copy 
        # b/c original could have been PrefGP)
        hallucGP = GaussianProcess(deepcopy(GP.kernel), deepcopy(GP.X), deepcopy(GP.Y), prior=GP.prior)
    elif GP.prior is None:            
        # if we have no data and no prior, start in the center
        x = array([(b[0]+b[1])/2. for b in bounds])
        gallery.append(x)
        hallucGP = GaussianProcess(deepcopy(GP.kernel), [x], [0.0], prior=GP.prior)
    else:
        # optimize from prior
        if DEBUG: print 'GET DATA FROM PRIOR'
        bestmu = -inf
        bestX = None
        for m in GP.prior.means:
            argmin = fmin_bfgs(GP.negmu, m, disp=False)
            if DEBUG: print argmin,
            for i in xrange(len(argmin)):
                argmin[i] = clip(argmin[i], bounds[i][0], bounds[i][1])
            # if DEBUG: print 'converted to', argmin
            if GP.mu(argmin) > bestmu:
                bestX = argmin
                bestmu = GP.mu(argmin)
                if DEBUG: print '***** bestmu =', bestmu
                if DEBUG: print '***** bestX =', bestX
        gallery.append(bestX)
        hallucGP = GaussianProcess(deepcopy(GP.kernel), bestX, bestmu, prior=GP.prior)
        
        
    while len(gallery) < N:
        if DEBUG: print '\n\n\thave %d data for gallery' % len(gallery)
        bestUCB = -inf
        bestX = None
        # ut = UCB(hallucGP, len(bounds), N)
        ut = EI(hallucGP, xi=xi)
        
        if DEBUG: print '\tget with max EI'
        opt, optx = maximizeEI(hallucGP, bounds, xi=xi, useCDIRECT=useCDIRECT)
        #if len(gallery)==0 or min(norm(optx-gx) for gx in gallery) > .5:
        #    if DEBUG: print '\tgot one'
        bestUCB = opt
        bestX = optx
        #else:
        #    if DEBUG: print '\ttoo close to existing'
        '''        
        # try some random samples
        if DEBUG: print '\ttry random samples'
        for x in lhcSample(bounds, samples):
            u = -ut.negf(x)
            if u > bestUCB and min(norm(x-gx) for gx in gallery) > .5:
                '\they, this one is even better!'
                bestUCB = u
                bestX = x
                passback['found_random'] = 1
        
        # now try the prior means
        if hallucGP.prior is not None:
            if DEBUG: print '\ttry prior means (bestUCB = %f)'%bestUCB
            for x in hallucGP.prior.means:
                x = array([clip(x[i], bounds[i][0], bounds[i][1]) for i in xrange(len(x))])
                x = x * hallucGP.prior.width + hallucGP.prior.lowerb
                u = -ut.negf(x)
                # if DEBUG: print 'u = %f', u
                if u > bestUCB:
                    if len(gallery)==0 or min(norm(x-gx) for gx in gallery) > .5:
                        if DEBUG: print '\tthis one is even better!  prior mean %s has u = %f' % (x, u)
                        bestUCB = u
                        bestX = x
                        passback['found_prior'] = 1
        '''
                    
        gallery.append(bestX)
        if len(gallery) < N-1:
            hallucGP.addData(bestX, hallucGP.mu(bestX))

    # these can be optionally passed back if the caller passes in its own dict
    passback['hallucGP'] = hallucGP
    passback['utility'] = ut
        
    return gallery
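
A hypothetical usage sketch for fastUCBGallery; the kernel widths, bounds, noise level and Y values are illustrative, not taken from the source:

from numpy import array

kernel = GaussianKernel_ard(array([0.5, 0.5]))
GP = GaussianProcess(kernel, noise=0.1)
bounds = [[0., 1.], [0., 1.]]
GP.addData(lhcSample(bounds, 5, seed=1), [0.0, 0.2, 0.1, 0.5, 0.3])
gallery = fastUCBGallery(GP, bounds, N=4)   # four diverse candidate points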
Example #23
def demoObservations():
    """
    Simple demo for a scenario where we have direct observations (i.e. ratings
    or responses) with noise.  The model has three parameters, but after
    initial training, we fix one to be 1.0 and optimize the other two.  At
    each step, we visualize the posterior mean, variance and expected
    improvement.  We then find the point of maximum expected improvement and
    ask the user for the scalar response value.  
    
    To see how the model adapts to inputs, try rating the first few values 
    higher or lower than predicted and see what happens to the visualizations.
    """

    # the kernel parameters control the impact of different values on the 
    # parameters.  we are defining a model with three parameters
    kernel = GaussianKernel_ard(array([.5, .5, .3]))
    
    # we want to allow some noise in the observations -- the noise parameter
    # is the variance of the additive Gaussian noise   Y + N(0, noise)
    noise = 0.1
    
    # create the Gaussian Process using the kernel we've just defined
    GP = GaussianProcess(kernel, noise=noise)
    
    # add some data to the model.  the data must have the same dimensionality 
    # as the kernel
    X = [array([1, 1.5, 0.9]),
         array([.8, -.2, -0.1]),
         array([2, .8, -.2]),
         array([0, 0, .5])]
    Y = [1, .7, .6, -.1]
    
    print 'adding data to model'
    for x, y in zip(X, Y):
        print '\tx = %s, y = %.1f' % (x, y)
        
    GP.addData(X, Y)
    
    # the GP.posterior(x) function returns, for x, the posterior distribution
    # at x, characterized as a normal distribution with mean mu, variance 
    # sigma^2
    testX = [array([1, 1.45, 1.0]),
             array([-10, .5, -10])]
    
    for tx in testX:
        mu, sig2 = GP.posterior(tx)
        print 'the posterior of %s is a normal distribution N(%.3f, %.3f)' % (tx, mu, sig2)
        
    # now, let's find the best points to evaluate next.  we fix the first 
    # dimension to be 1 and for the others, we search the range [-2, 2]
    bound = [[1, 1], [-1.99, 1.98], [-1.99, 1.98]]
    
    figure(1, figsize=(5, 10))
    while True:
        _, optx = maximizeEI(GP, bound, xi=.1, useCDIRECT=False)
        print "X"

        # visualize the mean, variance and expected improvement functions on 
        # the free parameters
        x1 = arange(bound[1][0], bound[1][1], 0.1)
        x2 = arange(bound[2][0], bound[2][1], 0.1)
        X1, X2 = meshgrid(x1, x2)
        ei = zeros_like(X1)
        m = zeros_like(X1)
        v = zeros_like(X1)
        for i in xrange(X1.shape[0]):
            for j in xrange(X1.shape[1]):
                z = array([1.0, X1[i,j], X2[i,j]])
                ei[i,j] = -EI(GP).negf(z)
                m[i,j], v[i,j] = GP.posterior(z)
        
        clf()
        for i, (func, title) in enumerate(([m, 'prediction (posterior mean)'], [v, 'uncertainty (posterior variance)'], [ei, 'utility (expected improvement)'])):
            ax = subplot(3, 1, i+1)
            cs = ax.contourf(X1, X2, func, 20)
            ax.plot(optx[1], optx[2], 'wo')
            colorbar(cs)
            ax.set_title(title)
            ax.set_xlabel('x[1]')
            ax.set_ylabel('x[2]')
            ax.set_xticks([-2,0,2])
            ax.set_yticks([-2,0,2])


        m, v = GP.posterior(optx)
        try:
            response = input('\nmaximum expected improvement is at parameters x = [%.3f, %.3f, %.3f], where mean is %.3f, variance is %.3f.  \nwhat is the value there (non-numeric to quit)? ' % (optx[0], optx[1], optx[2], m, v))
        except:
            break
        GP.addData(optx, response)
        print 'updating model.'
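
To run the demo interactively, an entry point along these lines would work (assuming the imports sketched after Example #1):

if __name__ == '__main__':
    demoObservations()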
Example #24
class SARD_EGO_Test(SARD_Test):
    name = "SARD_EGO"

    nadapted_moves = None
    nruns_per_param_update = None
    nadaptations = None

    saved_res_fields = \
        set(['nadapted_moves', 'nruns_per_param_update',
             'nadaptations', 'gp_X', 'gp_Y', 'rewards',
             'kernel_hyperparms', 'gp_noise', 'policy']) | \
        SARD_Test.saved_res_fields

    def __init__(self, graph,
                 nmoves, nadaptations, nruns_per_param_update,
                 policy_size,
                 gamma_hi_bounds, gamma_lo_bounds,
                 iters_per_move_bounds, strategy, exp_temp, MH_rate,
                 max_SAW_length_bounds, min_SAW_length_bounds,
                 S_in=None, return_samples=1):
        super(SARD_EGO_Test, self).__init__\
            (graph, nmoves, gamma_hi_bounds, gamma_lo_bounds,
             iters_per_move_bounds,
             max_SAW_length_bounds, min_SAW_length_bounds, S_in)

        self.max_SAW_length_bounds = [self.max_SAW_length_bounds[i] -
            self.min_SAW_length_bounds[i] for i in range(len(self.min_SAW_length_bounds))]

        self.gamma_hi_bounds = [self.gamma_hi_bounds[i] -
            self.gamma_lo_bounds[i] for i in range(len(self.gamma_lo_bounds))]

        self.policy_size = policy_size
        self.hdf5_base_loc += "/p" + str(self.policy_size)

        self.nadapted_moves = \
            array([get_num_adapted_moves(nruns_per_param_update,
                                         nadaptations)])
        self.nadaptations = nadaptations
        self.nruns_per_param_update = nruns_per_param_update

        h_effective_in = self.graph.get_effective_fields(self.S_in)

        self.sampler_fn = \
            lambda : sard.SARDRun_EGO \
            (self.graph.nbrs_list_i, self.graph.incident_edge_list_i,
             self.graph.edges_i, self.graph.J, self.graph.h,
             self.S_in, h_effective_in, self.graph.beta_true,
             self.gamma_hi_bounds, self.gamma_lo_bounds,
             self.iters_per_move_bounds,
             self.max_SAW_length_bounds, self.min_SAW_length_bounds,
             self.nmoves, self.nadaptations,
             self.nruns_per_param_update, self.policy_size, strategy, exp_temp, MH_rate, return_samples)

    def make_policy_dict(self):
        reshaped = self.policy.reshape(8, self.policy_size)

        policy = map(lambda x, y: (round(x[0], 0), round(y[0], 0)),
                     self.policy[1::8], self.policy[1::8] + self.policy[2::8])
                     
        policy_dict = {}
        for x in policy:
            policy_dict[x] = policy_dict.get(x, 0) + 1

        return policy_dict

    def plot_policy_as_histogram(self):
        plot_name = "policy_histogram"

        import matplotlib.pyplot as plt
        
        xpos = []
        ypos = []
        dz = []
        
        policy_dict = self.make_policy_dict()
        for k, v in policy_dict.iteritems():
            xpos.append(k[0])
            ypos.append(k[1])
            dz.append(v)

        dx_size = (self.min_SAW_length_bounds[1] - self.min_SAW_length_bounds[0]) / 200
        dy_size = (self.max_SAW_length_bounds[1] - self.max_SAW_length_bounds[0]) / 200 + dx_size

        zpos = zeros_like(xpos)
        dx = dx_size * ones_like(xpos)
        dy = dy_size * ones_like(ypos)

        colour_max = float(max(dz))
        colours = map(lambda x: plt.cm.jet(x / colour_max), dz)

        import matplotlib.pyplot as plt
        from mpl_toolkits.mplot3d import Axes3D
        from matplotlib import cm
        from matplotlib.ticker import LinearLocator, FormatStrFormatter
        
        fig = plt.figure()
        ax = Axes3D(fig, elev=70)

        ax.w_zaxis.set_major_locator(LinearLocator(10))
        ax.w_zaxis.set_major_formatter(FormatStrFormatter('%.03f'))

        ax.bar3d(xpos, ypos, zpos, dx, dy, dz,
                 edgecolors=(0.0, 0.0, 0.0, 0.0),
                 color=colours)
        ax.set_xlim3d(self.min_SAW_length_bounds[0], self.min_SAW_length_bounds[1])
        ax.set_ylim3d(self.min_SAW_length_bounds[0] + self.max_SAW_length_bounds[0],
                      self.min_SAW_length_bounds[1] + self.max_SAW_length_bounds[1])

        self.save_current_plot(plot_name)

    def run(self):
        self.S_end, self.E_samples, self.E_proposeds, \
        self.log_f_fwds, self.log_f_revs, \
        self.MH_ratios, self.move_types, \
        self.used_sigma_lengths, self.used_rho_lengths, \
        self.used_iters_per_move, \
        self.used_gammas_his, self.used_gammas_los, \
        self.used_P_LL, self.used_P_LH, self.used_P_HL, \
        self.rewards, gp, self.policy, self.samples \
        = self.sampler_fn()

        self.gp_X = gp.X
        self.gp_Y = gp.Y

        self.kernel_hyperparms = gp.kernel.getHyperparams()
        self.gp_noise = gp.noise
    
    def load_results(self, *args):
        super(SARD_EGO_Test, self).load_results(*args)

        from ego.gaussianprocess import GaussianProcess
        from ego.gaussianprocess.kernel import GaussianKernel_ard

        # Build the gp
        kernel = GaussianKernel_ard(self.kernel_hyperparms)
        self.gp = GaussianProcess(kernel, noise=self.gp_noise)

        for x, y in zip(self.gp_X, self.gp_Y):
            self.gp.addData(x, y)

        print shape(self.S_end)
        print shape(self.E_samples)
        print shape(self.MH_ratios)
        print shape(self.E_proposeds)
Example #25
    def testGDelta(self):

        # usually, Gdelta==G
        GP = GaussianProcess(GaussianKernel_iso([0.05]))
        X = lhcSample([[0., 1.]], 5, seed=10)
        Y = [x**2 for x in X]
        GP.train(X, Y)

        G = (Y[0] - max(Y)) / (Y[0] - 1)
        self.failUnlessEqual(G, Gdelta(GP, [[0., 1.]], Y[0], 1.0, 0.01))

        # sometimes, though, Gdelta > G -- this GP has a very high confidence
        # prediction of a very good point at x ~ .65
        GP = GaussianProcess(GaussianKernel_iso([0.1]))
        X = array([[.5], [.51], [.59], [.6]])
        Y = array([1., 2., 2., 1.])
        GP.train(X, Y)
        # figure(1)
        # A = arange(0, 1, 0.01)
        # post = [GP.posterior(x) for x in A]
        # plot(A, [p[0] for p in post], 'k-')
        # plot(A, [p[0]+p[1] for p in post], 'k:')
        # show()
        G = (Y[0] - max(Y)) / (Y[0] - 4.0)
        Gd = Gdelta(GP, [[0., 1.]], Y[0], 4.0, 0.01)
        self.failUnless(G < Gd)

        # however, if there is more variance, we will collapse back to G
        GP = GaussianProcess(GaussianKernel_iso([.001]))
        GP.train(X, Y)
        G = (Y[0] - max(Y)) / (Y[0] - 4.0)
        self.failUnlessEqual(G, Gdelta(GP, [[0., 1.]], Y[0], 4.0, 0.01))
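
For context, the plain gap measure G that Gdelta is compared against in this test: the fraction of the possible improvement (from the first sample Y[0] up to the known optimum fstar) realized by the best sample so far. This is a sketch inferred from the test's arithmetic, not the package's own code:

def gap(Y, fstar):
    # 0 if no progress beyond Y[0]; 1 once a sample reaches fstar
    return (Y[0] - max(Y)) / (Y[0] - fstar)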
Example #26
class IM_EGO_Test(IM_Test):
    name = "IM_EGO"

    nadapted_moves = None
    nruns_per_param_update = None
    nadaptations = None

    gamma_min = None
    gamma_max = None

    saved_res_fields = \
        set(['nadapted_moves', 'nruns_per_param_update',
             'nadaptations', 'gp_X', 'gp_Y', 'rewards',
             'kernel_hyperparms', 'gp_noise',
             'gamma_min', 'gamma_max', 'policy']) | \
        IM_Test.saved_res_fields

    def __init__(self, graph,
                 nmoves, nadaptations, nruns_per_param_update,
                 policy_size,
                 SAW_length_min, SAW_length_max, strategy, exp_temp, 
                 MH_rate, S_ref=None, S_in=None):
        super(IM_EGO_Test, self).__init__\
            (graph, nmoves,
             SAW_length_min, SAW_length_max, S_ref, S_in)

        self.exp_temp = exp_temp
        self.MH_rate = MH_rate
        self.strategy = strategy
        self.policy_size = policy_size
        self.hdf5_base_loc += "/p" + str(self.policy_size)

        self.nadapted_moves = \
            array([get_num_adapted_moves(nruns_per_param_update,
                                         nadaptations)])
        self.nadaptations = nadaptations
        self.nruns_per_param_update = nruns_per_param_update

        self.gammas = arange(0,2.0001,0.2) * self.graph.beta_true
        self.h_effective_in = self.graph.get_effective_fields(self.S_in)

        self.sampler_fn = \
            lambda : im.IMRun_EGO\
            (self.graph.nbrs_list_i, self.graph.incident_edge_list_i,
             self.graph.edges_i, self.graph.J, self.graph.h,
             self.S_ref, self.S_in, self.h_effective_in,
             self.graph.beta_true, self.gammas,
             self.SAW_length_min, self.SAW_length_max,
             self.nmoves, self.nadaptations,
             self.nruns_per_param_update, self.policy_size,
             self.strategy, self.exp_temp, self.MH_rate)

    def run(self):
        self.S_end, self.E_samples, self.E_proposeds, \
        self.log_f_fwds, self.log_f_revs, \
        self.MH_ratios, self.SAW_lengths, self.used_gammas, \
        self.rewards, gp, self.gamma_min, self.gamma_max, \
        self.policy \
        = self.sampler_fn()

        self.gp_X = gp.X
        self.gp_Y = gp.Y

        self.kernel_hyperparms = gp.kernel.getHyperparams()
        self.gp_noise = gp.noise

    def load_results(self, *args):
        super(IM_EGO_Test, self).load_results(*args)

        from ego.gaussianprocess import GaussianProcess
        from ego.gaussianprocess.kernel import GaussianKernel_ard

        # Build the gp
        kernel = GaussianKernel_ard(self.kernel_hyperparms)
        self.gp = GaussianProcess(kernel, noise=self.gp_noise)

        for x, y in zip(self.gp_X, self.gp_Y):
            self.gp.addData(x, y)

    def save_current_plot(self, plot_name):
        plot_dir = "plots/%s" % (self.graph.name)
        plot_fn = "%s/%s_%s_p%d_r%d.pdf" % \
                  (plot_dir, plot_name, self.name,
                   self.policy_size, self.res_num)
        self.save_current_plot_to_file(plot_fn)


    def compute_gp_posterior_over_grid(self):
        x1 = arange(self.SAW_length_min, self.SAW_length_max, 1, dtype=float)
        x2 = arange(self.gamma_min, self.gamma_max,
                    (self.gamma_max - self.gamma_min) / 100,
                    dtype=float)
        
        X1, X2 = meshgrid(x1, x2)

        m = zeros_like(X1)
        v = zeros_like(X1)

        for i in xrange(X1.shape[0]):
            for j in xrange(X1.shape[1]):
                z = array([X1[i, j], X2[i, j]])

                res = self.gp.posterior(z)
                m[i, j] = res[0]
                v[i, j] = res[1]

        return X1, X2, m, v

    def plot_surface_on_grid(self, X1, X2, s):
        import matplotlib.pyplot as plt
        from mpl_toolkits.mplot3d import Axes3D
        from matplotlib import cm
        from matplotlib.ticker import LinearLocator, FormatStrFormatter
        
        fig = plt.figure()
        ax = Axes3D(fig, elev=70)
        surf = ax.plot_surface(X1, X2, s, rstride=1, cstride=1, cmap=cm.jet,
                       linewidth=0, antialiased=True)
        ax.set_zlim3d(-1.01, 1.01)

        ax.w_zaxis.set_major_locator(LinearLocator(10))
        ax.w_zaxis.set_major_formatter(FormatStrFormatter('%.03f'))

        fig.colorbar(surf, shrink=0.5, aspect=5)

    def plot_gp_mean(self):
        plot_name = "gp_mean"
        
        X1, X2, m, _ = self.compute_gp_posterior_over_grid()
        self.plot_surface_on_grid(X1, X2, m)
        self.save_current_plot(plot_name)

    def plot_flat_gp_mean(self):
        plot_name = "flat_gp_mean"

        import matplotlib.pyplot as plt
        from matplotlib import cm

        _, _, m, _ = self.compute_gp_posterior_over_grid()
        
        fig = plt.figure()
        ax = fig.add_subplot(1, 1, 1)
        ax.imshow(m, cmap=cm.jet, interpolation='nearest', origin='lower',
                  extent=[self.SAW_length_min, self.SAW_length_max,
                          self.gamma_min, self.gamma_max],
                  aspect=self.SAW_length_max/self.gamma_max)
        ax.set_xlabel('SAW length $k$')
        ax.set_ylabel('Energy-biasing parameter $\gamma$')
        
        self.save_current_plot(plot_name)

    def plot_average_gp_mean(self):
        plot_name = "average_gp_mean"

        nresults = self.get_num_results()
        
        X1 = None
        X2 = None
        m_avg = None

        if self.res_num is not None:
            old_res_num = self.res_num
        else:
            old_res_num = 0

        for i in xrange(nresults):
            self.load_results(i)
            X1, X2, m, _ = self.compute_gp_posterior_over_grid()

            if m_avg is None:
                m_avg = m
            else:
                m_avg += m

        m_avg /= nresults

        self.plot_surface_on_grid(X1, X2, m_avg)
        self.save_current_plot(plot_name)

        self.load_results(old_res_num)

    def plot_flat_average_gp_mean(self):
        plot_name = "flat_average_gp_mean"

        import matplotlib.pyplot as plt
        from matplotlib import cm

        nresults = self.get_num_results()

        m_avg = None

        if self.res_num is not None:
            old_res_num = self.res_num
        else:
            old_res_num = 0
            
        for i in xrange(nresults):
            self.load_results(i)
            _, _, m, _ = self.compute_gp_posterior_over_grid()

            if m_avg is None:
                m_avg = m
            else:
                m_avg += m

        m_avg /= nresults

        fig = plt.figure()
        ax = fig.add_subplot(1, 1, 1)
        ax.imshow(m_avg, cmap=cm.jet, interpolation='nearest', origin='lower',
                  extent=[self.SAW_length_min, self.SAW_length_max,
                          self.gamma_min, self.gamma_max],
                  aspect=self.SAW_length_max/self.gamma_max)
        ax.set_xlabel('SAW length $k$')
        ax.set_ylabel('Energy-biasing parameter $\gamma$')

        self.save_current_plot(plot_name)

        self.load_results(old_res_num)

    def plot_gp_var(self):
        plot_name = "gp_var"
        
        X1, X2, _, v = self.compute_gp_posterior_over_grid()
        self.plot_surface_on_grid(X1, X2, v)
        self.save_current_plot(plot_name)

    def plot_gp_query_points(self):
        plot_name = "gp_query_points"
        import matplotlib.pyplot as plt
        
        fig = plt.figure()
        ax = fig.add_subplot(1, 1, 1)
        ax.scatter(self.gp.X[:, 0], self.gp.X[:, 1], c=self.gp.Y)

        self.save_current_plot(plot_name)

    def plot_parameter_cdf(self):
        pass

    def make_policy_dict(self):
        policy = map(lambda x, y: (round(x[0],3), round(y[0], 3)),
                     self.policy[::2], self.policy[1::2])
        policy_dict = {}
        for x in policy:
            policy_dict[x] = policy_dict.get(x, 0) + 1

        return policy_dict

    def plot_policy_as_histogram(self):
        plot_name = "policy_histogram"

        import matplotlib.pyplot as plt
        
        xpos = []
        ypos = []
        dz = []
        
        policy_dict = self.make_policy_dict()
        for k, v in policy_dict.iteritems():
            xpos.append(k[0])
            ypos.append(k[1])
            dz.append(v)

        dy_size = (self.gammas[-1] - self.gammas[0]) / 100

        zpos = zeros_like(xpos)
        dx = ones_like(xpos)
        dy = dy_size * ones_like(ypos)

        colour_max = float(max(dz))
        colours = map(lambda x: plt.cm.jet(x / colour_max), dz)

        import matplotlib.pyplot as plt
        from mpl_toolkits.mplot3d import Axes3D
        from matplotlib import cm
        from matplotlib.ticker import LinearLocator, FormatStrFormatter
        
        fig = plt.figure()
        ax = Axes3D(fig, elev=70)

        ax.w_zaxis.set_major_locator(LinearLocator(10))
        ax.w_zaxis.set_major_formatter(FormatStrFormatter('%.03f'))

        ax.bar3d(xpos, ypos, zpos, dx, dy, dz,
                 edgecolors=(0.0, 0.0, 0.0, 0.0),
                 color=colours)
        ax.set_xlim3d(self.SAW_length_min, self.SAW_length_max)
        ax.set_ylim3d(self.gammas[0], self.gammas[-1])

        self.save_current_plot(plot_name)

    def plot_policy_as_flat_surface(self):
        plot_name = 'flat_policy'
Example #27
def demoObservations():
    """
    Simple demo for a scenario where we have direct observations (i.e. ratings
    or responses) with noise.  The model has three parameters, but after
    initial training, we fix one to be 1.0 and optimize the other two.  At
    each step, we visualize the posterior mean, variance and expected
    improvement.  We then find the point of maximum expected improvement and
    ask the user for the scalar response value.  
    
    To see how the model adapts to inputs, try rating the first few values 
    higher or lower than predicted and see what happens to the visualizations.
    """

    # the kernel parameters control the impact of different values on the
    # parameters.  we are defining a model with three parameters
    kernel = GaussianKernel_ard(array([.5, .5, .3]))

    # we want to allow some noise in the observations -- the noise parameter
    # is the variance of the additive Gaussian noise   Y + N(0, noise)
    noise = 0.1

    # create the Gaussian Process using the kernel we've just defined
    GP = GaussianProcess(kernel, noise=noise)

    # add some data to the model.  the data must have the same dimensionality
    # as the kernel
    X = [
        array([1, 1.5, 0.9]),
        array([.8, -.2, -0.1]),
        array([2, .8, -.2]),
        array([0, 0, .5])
    ]
    Y = [1, .7, .6, -.1]

    print 'adding data to model'
    for x, y in zip(X, Y):
        print '\tx = %s, y = %.1f' % (x, y)

    GP.addData(X, Y)

    # the GP.posterior(x) function returns, for x, the posterior distribution
    # at x, characterized as a normal distribution with mean mu, variance
    # sigma^2
    testX = [array([1, 1.45, 1.0]), array([-10, .5, -10])]

    for tx in testX:
        mu, sig2 = GP.posterior(tx)
        print 'the posterior of %s is a normal distribution N(%.3f, %.3f)' % (
            tx, mu, sig2)

    # now, let's find the best points to evaluate next.  we fix the first
    # dimension to be 1 and for the others, we search the range [-2, 2]
    bound = [[1, 1], [-1.99, 1.98], [-1.99, 1.98]]

    figure(1, figsize=(5, 10))
    while True:
        _, optx = maximizeEI(GP, bound, xi=.1)

        # visualize the mean, variance and expected improvement functions on
        # the free parameters
        x1 = arange(bound[1][0], bound[1][1], 0.1)
        x2 = arange(bound[2][0], bound[2][1], 0.1)
        X1, X2 = meshgrid(x1, x2)
        ei = zeros_like(X1)
        m = zeros_like(X1)
        v = zeros_like(X1)
        for i in xrange(X1.shape[0]):
            for j in xrange(X1.shape[1]):
                z = array([1.0, X1[i, j], X2[i, j]])
                ei[i, j] = -EI(GP).negf(z)
                m[i, j], v[i, j] = GP.posterior(z)

        clf()
        for i, (func, title) in enumerate(
            ([m, 'prediction (posterior mean)'
              ], [v, 'uncertainty (posterior variance)'],
             [ei, 'utility (expected improvement)'])):
            ax = subplot(3, 1, i + 1)
            cs = ax.contourf(X1, X2, func, 20)
            ax.plot(optx[1], optx[2], 'wo')
            colorbar(cs)
            ax.set_title(title)
            ax.set_xlabel('x[1]')
            ax.set_ylabel('x[2]')
            ax.set_xticks([-2, 0, 2])
            ax.set_yticks([-2, 0, 2])

        show()

        m, v = GP.posterior(optx)
        try:
            response = input(
                '\nmaximum expected improvement is at parameters x = [%.3f, %.3f, %.3f], where mean is %.3f, variance is %.3f.  \nwhat is the value there (non-numeric to quit)? '
                % (optx[0], optx[1], optx[2], m, v))
        except:
            break
        GP.addData(optx, response)
        print 'updating model.'
Example #28
def fastUCBGallery(GP, bounds, N, useBest=True, samples=300, useCDIRECT=True):
    """
    Use UCB to generate a gallery of N instances using Monte Carlo to 
    approximate the optimization of the utility function.
    """
    gallery = []

    if len(GP.X) > 0:
        if useBest:
            # find best sample already seen, that lies within the bounds
            bestY = -inf
            bestX = None
            for x, y in zip(GP.X, GP.Y):
                if y > bestY:
                    for v, b in zip(x, bounds):
                        if v < b[0] or v > b[1]:
                            break
                    else:
                        bestY = y
                        bestX = x
            if bestX is not None:
                gallery.append(bestX)

        # create a "fake" GP from the GP that was passed in (can't just copy
        # b/c original could have been PrefGP)
        hallucGP = GaussianProcess(deepcopy(GP.kernel),
                                   deepcopy(GP.X),
                                   deepcopy(GP.Y),
                                   prior=GP.prior)
    elif GP.prior is None:
        # if we have no data and no prior, start in the center
        x = array([(b[0] + b[1]) / 2. for b in bounds])
        gallery.append(x)
        hallucGP = GaussianProcess(deepcopy(GP.kernel), [x], [0.0],
                                   prior=GP.prior)
    else:
        # optimize from prior
        bestmu = -inf
        bestX = None
        for m in GP.prior.means:
            argmin = fmin_bfgs(GP.negmu, m, disp=False)
            if GP.mu(argmin) > bestmu:
                bestX = argmin
                bestmu = GP.mu(argmin)
        gallery.append(bestX)
        hallucGP = GaussianProcess(deepcopy(GP.kernel),
                                   bestX,
                                   bestmu,
                                   prior=GP.prior)

    while len(gallery) < N:
        bestUCB = -inf
        bestX = None
        # ut = UCB(hallucGP, len(bounds), N)
        ut = EI(hallucGP, xi=.4)

        opt, optx = maximizeEI(hallucGP, bounds, xi=.3, useCDIRECT=useCDIRECT)
        if len(gallery) == 0 or min(norm(optx - gx) for gx in gallery) > .5:
            bestUCB = opt
            bestX = optx

        # try some random samples
        for x in lhcSample(bounds, samples):
            u = -ut.negf(x)
            if u > bestUCB and min(norm(x - gx) for gx in gallery) > .5:
                # hey, this one is even better!
                bestUCB = u
                bestX = x

        # now try the prior means
        if hallucGP.prior is not None:
            for x in hallucGP.prior.means:
                x = array([
                    clip(x[i], bounds[i][0], bounds[i][1])
                    for i in xrange(len(x))
                ])
                x = x * hallucGP.prior.width + hallucGP.prior.lowerb
                u = -ut.negf(x)
                if u > bestUCB:
                    if len(gallery) == 0 or min(
                            norm(x - gx) for gx in gallery) > .5:
                        bestUCB = u
                        bestX = x

        gallery.append(bestX)

        hallucGP.addData(bestX, hallucGP.mu(bestX))

    return gallery