Example 1
# assumes the surrounding module provides the GaPP-style `gp` wrapper
from numpy import concatenate, reshape


def recthread(i, th, sc0, X, Y, Sigma, covfunction, Xstar, mu, muargs):
    try:
        if not sc0:
            theta = th
            scale = 1
        else:
            theta = th[:-1]
            scale = th[-1]
        g = gp.GaussianProcess(X,
                               Y,
                               Sigma,
                               covfunction,
                               theta,
                               Xstar,
                               mu=mu,
                               muargs=muargs,
                               thetatrain='False',
                               scale=scale,
                               scaletrain='False')
        nstar = len(Xstar)
        (fmean, fstd) = g.gp(unpack='True')[1:3]
        pred = concatenate((reshape(fmean,
                                    (nstar, 1)), reshape(fstd, (nstar, 1))),
                           axis=1)
        return (i, pred)
    except KeyboardInterrupt:
        return
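Since recthread takes a chunk index and returns (i, pred), it is clearly meant
to be fanned out over a process pool. A minimal driver sketch, assuming the
module above is importable and that `chunks` is a list of Xstar blocks (both
the driver function and `chunks` are hypothetical):

from multiprocessing import Pool

def parallel_predict(th, sc0, X, Y, Sigma, covfunction, chunks, mu, muargs):
    # one task per block of prediction points
    args = [(i, th, sc0, X, Y, Sigma, covfunction, Xstar_i, mu, muargs)
            for i, Xstar_i in enumerate(chunks)]
    with Pool() as pool:
        results = pool.starmap(recthread, args)
    # reassemble the per-block predictions in their original order
    results.sort(key=lambda r: r[0])
    return [pred for _, pred in results]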
Example 2
import numpy as np


def mcmc_log_likelihood(th, sc0, X, Y_mu, Sigma, covfunction, prior,
                        priorargs):
    try:
        if np.min(th) < 0.0:
            return -np.inf  # reject negative hyperparameters outright
        if not sc0:
            theta = th
            scale = 1
        else:
            theta = th[:-1]
            scale = th[-1]
        g = gp.GaussianProcess(X,
                               Y_mu,
                               Sigma,
                               covfunction,
                               theta,
                               prior=prior,
                               priorargs=priorargs,
                               scale=scale)
        logp = g.log_likelihood()
        return logp
    except KeyboardInterrupt:
        return
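The hard -inf for negative hyperparameters makes this a log-posterior that any
generic MCMC sampler can drive. A sketch using emcee (the sampler choice,
walker count, and dimensionality below are assumptions):

import emcee
import numpy as np

ndim, nwalkers = 3, 32  # e.g. two kernel hyperparameters plus a scale
p0 = np.abs(np.random.randn(nwalkers, ndim))  # positive initial walkers
sampler = emcee.EnsembleSampler(
    nwalkers, ndim, mcmc_log_likelihood,
    args=(True, X, Y_mu, Sigma, covfunction, prior, priorargs))
sampler.run_mcmc(p0, 1000)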
Example 3
def optim():
    values = []
    evals = []
    gps = []
    nj = 100
    for j in range(0, nj):
        evals.append([])
        gps.append([])
        print(optime, j)
        fail = False

        #kernel = GP.SquaredExponentialKernel([2.], 1.)#, [1e-2, 1e-2])
        #gp = GP.GaussianProcess(kernel, orig_bounds, noise=[1e-4, [1e-5]], noiseGP=True)
        #gp.explore(2, objective)

        kernel = GP.SquaredExponentialKernel([2., 0.1], 1.)  #, [1e-2, 1e-2])

        def constraint(x):
            return sum(x) - 30.6

        gp = GP.GaussianProcess(kernel,
                                orig_bounds,
                                noise=[1e-4, [1e-5, 1e-3]],
                                noiseGP=True,
                                cons=constraint)
        gp.explore(4, objective)
        #orig_bounds = np.array([[-3., 3], [-3, 3.]])
        #kernel = GP.SquaredExponentialKernel([1., 1], 100.)#, [1e-2, 1e-2])
        #gp = GP.GaussianProcess(kernel, orig_bounds, noise=[sig**2, sig**2])
        #gp.explore(4, test_func)

        x1 = np.linspace(gp.bounds[0, 0], gp.bounds[0, 1], 40)
        xs = x1.reshape(-1, 1)
        #x2 = np.linspace(gp.bounds[1,0], gp.bounds[1,1], 40)
        #x1, x2 = np.meshgrid(x1, x2)
        #xs = np.vstack((x1.flatten(), x2.flatten())).T

        ei = GP.ExpectedImprovement(gp)

        # acquisition function improvements:
        # * noise: using EI with std for now
        # * batch
        # * finite budget

    print()
        for i in range(0, 100):
            #x = ei.optimize()
            try:
                x = ei.optimize()
            except Exception:  # a bare except would also swallow KeyboardInterrupt
                fail = True
                break
            evals[-1].append(gp.data_min())
            gps[-1].append(gp.posterior_min())
            print('ei choice:', i, x)

            #eix = ei.evaluate(xs)
            #plt.plot(x1, eix.reshape(x1.shape))
            #plt.contourf(x1, x2, eix.reshape(x1.shape), 100)
            #plt.colorbar()
            ##plt.savefig('ei_{}.pdf'.format(i))
            ##plt.clf()
            #plt.show()

            #mu, cov = gp.evaluate(xs)
            ##mu, cov = gp.noiseGP[1].exponential(xs)
            ##plt.plot(x1, mu*gp.noise[0])
            ##plt.show()
            ##std = np.diag(cov)**0.5
            #plt.contourf(x1, x2, mu.reshape(x1.shape), 100)
            #plt.colorbar()
            ###plt.savefig('mu_{}.pdf'.format(i))
            ###plt.clf()
            #plt.show()
            #plt.contourf(x1, x2, std.reshape(x1.shape), 1000)
            #plt.colorbar()
            #plt.savefig('cov_{}.pdf'.format(i))
            #plt.clf()

            y, yd, yn, ydn = objective_single(x)
            #y, yd, yn, ydn = test_func(x)
            gp.train(x, y, yd, yn, ydn)
            print()
        if not fail:
            values.append(gp.y)
        else:
            evals = evals[:-1]
            gps = gps[:-1]
        with open('{}.pkl'.format(optime), 'wb') as f:  # pickle needs binary mode
            pkl.dump([evals, gps, values], f)
Example 4
# `datapath_base` and the project's `gp` module come from the surrounding file
import os
import pickle

import numpy as np
from sklearn.neighbors import NearestNeighbors


def transition_model(x_query,
                     X_train_normalized,
                     y_train_normalized,
                     k1=1000,
                     m=3,
                     k2=100):
    file_path = os.path.join(datapath_base, 'nbrs.pkl')
    if os.path.exists(file_path):
        with open(file_path, 'rb') as f:
            nbrs = pickle.load(f)
    else:
        nbrs = NearestNeighbors(n_neighbors=k2,
                                algorithm='ball_tree').fit(X_train_normalized)
        print("This is going to take about 15 min...")
        with open(file_path, 'wb') as f:  # save to the same path the load above expects
            pickle.dump(nbrs, f)

    # Step1: Find K1 (=1000) nearest points in training data. Let the closest pair to $(x,a)$ be $(x_h, a_h)$
    distances, indices = nbrs.kneighbors(x_query.reshape(1, -1))
    #print(distances[0,0])
    #print(X_train_normalized.shape)
    X_selected = X_train_normalized[indices.reshape(-1), :]
    y_selected = y_train_normalized[indices.reshape(-1), :]

    # Step2: A diffusion map is created from these 1000 points, yielding reduced-dimensional data
    #embedding = SpectralEmbedding(n_components=m)
    #X_spectral = embedding.fit_transform(X_selected)
    # y_spectral = embedding.fit_transform(y_selected)

    # Step3: K2 (=100) closest points in the reduced dimensional data to the $(x_h, a_h)$ in the diffusion map is found
    #nbrs_diffusion = NearestNeighbors(n_neighbors=k2, algorithm='ball_tree').fit(X_spectral)
    #distances_diffusion, indices_diffusion = nbrs_diffusion.kneighbors(X_spectral[0, :].reshape(1, -1))

    # Step4: Perform GP regression on these 100 points.
    X_GP_input = X_selected
    y_GP_input = y_selected
    #print("X_GP_input.shape: {}".format(X_GP_input.shape))
    #print("y_GP_input.shape: {}".format(y_GP_input.shape))
    """
    #kernel = DotProduct() + WhiteKernel()
    y_GP_input = y_GP_input - X_GP_input[:, :4]
    gpr = GaussianProcessRegressor(kernel=None,random_state=0,n_restarts_optimizer=10).fit(X_GP_input, y_GP_input)
    ds_next = gpr.predict(x_query.reshape(1,-1)) 
    print("ds_next.shape: {}".format(ds_next.shape))
    s_next = x_query[0, :4] + ds_next
    print("s_next.shape: {}".format(s_next.shape))
    """
    ### Avishai's stuff

    ds_next = np.zeros((4, ))
    std_next = np.zeros((4, ))
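    # regress the state increment ds = s' - s, fitting one independent GP
    # per output dimension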
    y_GP_input = y_GP_input - X_GP_input[:, :4]
    for i in range(4):
        gpr = gp.GaussianProcess(X_GP_input,
                                 y_GP_input[:, i],
                                 optimize=True,
                                 theta=None)
        mm, vv = gpr.predict(x_query.reshape(-1))
        ds_next[i] = mm
        std_next[i] = np.sqrt(np.diag(vv))
    print("std_next: {}".format(std_next))
    s_next = x_query[0, :4] + ds_next.reshape(1, -1)
    # alternatively, sample: np.random.normal(ds_next, std_next).reshape(1, -1)

    #kernel = GPy.kern.RBF(input_dim=6)
    #model = GPy.models.GPRegression(X_GP_input, y_GP_input, kernel)
    #model.optimize()
    #y_pred = model.predict(x_query.reshape(1,-1))
    #print(y_pred)
    #print(s_next)
    return s_next
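The triple-quoted block above hints at a scikit-learn variant of the same idea.
A self-contained sketch of that local-GP transition model (the function name,
neighbourhood size, and array shapes are assumptions):

import numpy as np
from sklearn.neighbors import NearestNeighbors
from sklearn.gaussian_process import GaussianProcessRegressor

def transition_model_sklearn(x_query, X_train, y_train, k=100):
    # select a local neighbourhood around the query point
    nbrs = NearestNeighbors(n_neighbors=k).fit(X_train)
    _, idx = nbrs.kneighbors(x_query.reshape(1, -1))
    X_loc, y_loc = X_train[idx.ravel()], y_train[idx.ravel()]
    # regress the state increment, as the original does
    dy = y_loc - X_loc[:, :4]
    gpr = GaussianProcessRegressor(n_restarts_optimizer=10).fit(X_loc, dy)
    ds = gpr.predict(x_query.reshape(1, -1))
    return x_query[0, :4] + ds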
Example 5
def optim():

    orig_bounds = np.array([[0., 1.], [0, 1], [0, 1], [0, 1]])
    L, sigma = 0.5, 0.001
    #L, sigma = 0.3, 0.003
    mean = 0.0092
    if grad:
        kernel = GP2.SquaredExponentialKernel([L, L, L, L], sigma)
        gp = GP2.GaussianProcess(kernel,
                                 orig_bounds,
                                 noise=[5e-9, [1e-9, 1e-7, 1e-8, 1e-7]],
                                 noiseGP=True,
                                 cons=constraint)
        ei = GP2.ExpectedImprovement(gp)
    else:
        kernel = GP.SquaredExponentialKernel([L, L, L, L], sigma)
        gp = GP.GaussianProcess(kernel,
                                orig_bounds,
                                noise=5e-9,
                                noiseGP=True,
                                cons=constraint)
        ei = GP.ExpectedImprovement(gp)

    kernel = GP2.SquaredExponentialKernel([L, L, L, L], sigma)
    gp2 = GP2.GaussianProcess(kernel,
                              orig_bounds,
                              noise=[5e-9, [1e-9, 1e-7, 1e-8, 1e-7]],
                              noiseGP=True,
                              cons=constraint)
    #ei = GP2.ExpectedImprovement(gp)

    assert os.path.exists(stateFile)
    state = load_state()
    #for index in range(0, len(state['state'])):
    #    if get_state(state, index) != 'DONE':
    #        x = state['points'][index]
    #        res = evaluate(x, state, index)
    #        state['evals'].append(res)
    #        update_state(state, 'DONE', index)
    #        exit(1)
    n = len(state['points'])
    if len(state['evals']) < len(state['points']):
        n -= 1
    m = 8
    x = state['points'][:n]
    y = [res[0] - mean for res in state['evals']][:n]
    yd = [res[2:6] for res in state['evals']][:n]
    yn = [res[1] for res in state['evals']][:n]
    ydn = [res[6:10] for res in state['evals']][:n]
    #print yd
    #print ydn
    #print yd
    #print ydn
    #exit(1)
    gp2.train(x[:m], y[:m], yd[:m], yn[:m], ydn[:m])
    for i in range(m, n):
        gp2.train(x[i], y[i], yd[i], yn[i], ydn[i])
        # GRAD based optim
        #pmin = gp2.posterior_min()
        #print pmin[1] + mean, pmin[2]
    #exit(1)
    # GRAD based optim

    x = x[:m]
    y = y[:m]
    yn = yn[:m]
    yd = yd[:m]
    ydn = ydn[:m]

    if grad:
        gp.train(x, y, yd, yn, ydn)
    else:
        gp.train(x, y, yn)

    for i in range(m, 25):
        dmin = gp.data_min()
        pmin = gp.posterior_min()
        #print 'data min:', dmin[0], dmin[1] + mean
        #print 'posterior min:', pmin[0], pmin[1] + mean
        print('{},{},{},{}'.format(i, ','.join(np.char.mod('%f', pmin[0])),
                                   pmin[1] + mean, pmin[2]))

        x = ei.optimize()

        if grad:
            y, _ = gp2.evaluate_grad(x)
            y, yd = y[0], y[1:]
            yn, ydn = gp2.get_noise(x)
            yn = yn[0]
            ydn = np.array(ydn).flatten()
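            # draw synthetic noisy observations consistent with the
            # surrogate's noise estimates before feeding them to `gp`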
            z = np.random.randn() * np.sqrt(yn) + y
            zd = np.random.randn(ydn.shape[0]) * np.sqrt(ydn) + yd
            gp.train(x, z, zd, yn, ydn)
        else:
            y, _ = gp2.evaluate(x)
            y = y[0]
            yn = gp2.get_noise(x)[0]
            z = np.random.randn() * np.sqrt(yn) + y
            gp.train(x, z, yn)
Example 6
def optim():
    values = []
    evals = []
    gps = []
    #nj = 1500
    #ne = 35
    nj = 1000
    ne = 50
    for j in range(0, nj):
        evals.append([])
        gps.append([])
        print(j)
        fail = False

        #kernel = GP.SquaredExponentialKernel([2.], 1.)#, [1e-2, 1e-2])
        #gp = GP.GaussianProcess(kernel, orig_bounds, noise=[1e-4, [1e-5]], noiseGP=True)
        #gp.explore(2, objective)

        #kernel = GP.SquaredExponentialKernel([2., 0.1], 1.)#, [1e-2, 1e-2])
        #def constraint(x):
        #    return sum(x) - 30.6
        #gp = GP.GaussianProcess(kernel, orig_bounds, noise=[1e-4, [1e-5, 1e-3]], noiseGP=True, cons=constraint)
        #gp.explore(4, objective)
        #kernel = GP.SquaredExponentialKernel([1., 1.], 100)#, [1e-2, 1e-2])
        kernel = GP.SquaredExponentialKernel([1., 1.], 50.)  #, [1e-2, 1e-2])
        gp = GP.GaussianProcess(kernel,
                                orig_bounds,
                                noise=[sig**2, [sig**2, sig**2]],
                                noiseGP=True)
        #gp = GP.GaussianProcess(kernel, orig_bounds, noise=[sig**2, sig**2], noiseGP=False)
        gp.explore(4, objective_single)
        #gp.explore(100, objective_single)

        x1 = np.linspace(gp.bounds[0, 0], gp.bounds[0, 1], 40)
        xs = x1.reshape(-1, 1)
        x2 = np.linspace(gp.bounds[1, 0], gp.bounds[1, 1], 40)
        x1, x2 = np.meshgrid(x1, x2)
        xs = np.vstack((x1.flatten(), x2.flatten())).T

        ei = GP.ExpectedImprovement(gp)

        # acquisition function improvements:
        # * noise: using EI with std for now
        # * batch
        # * finite budget
        #print

        for i in range(0, ne):
            print(j, i)
            x = ei.optimize()
            #try:
            #    x = ei.optimize()
            #except:
            #    fail = True
            #    break
            #x = ei.optimize()
            evals[-1].append(gp.data_min())
            gps[-1].append(gp.posterior_min())
            #print 'ei choice:', i, x
            #print 'data min', evals[-1][-1]
            #print 'posterior min', gps[-1][-1]

            #eix = ei.evaluate(xs)
            #plt.contourf(x1, x2, eix.reshape(x1.shape), 100)
            #plt.colorbar()
            #plt.savefig('debug/ei_{}.pdf'.format(i))
            #plt.clf()
            #plt.show()

            #mu, cov = gp.evaluate(xs)
            #plt.contourf(x1, x2, mu.reshape(x1.shape), 100)
            #plt.colorbar()
            #plt.savefig('debug/mu_{}.pdf'.format(i))
            #plt.clf()
            #plt.show()

            y, yd, yn, ydn = objective_single(x)
            #y, yd, yn, ydn = test_func(x)
            gp.train(x, y, yd, yn, ydn)
            #print
        if not fail:
            values.append(gp.y)
        else:
            evals = evals[:-1]
            gps = gps[:-1]
        with open('{}.pkl'.format(optime), 'wb') as f:  # pickle needs binary mode
            pkl.dump([evals, gps, values], f)
Example 7
# initialize gp model
import kernels
import gp
import numpy as np

kernel = kernels.two_plus_three_body
kernel_grad = kernels.two_plus_three_body_grad
hyps = np.array([1, 1, 0.1, 1, 1e-3])  # sig2, ls2, sig3, ls3, noise std
cutoffs = np.array([4.9, 4.9])  # (don't need to optimize for lab)
energy_force_kernel = kernels.two_plus_three_force_en

gp_model = gp.GaussianProcess(kernel,
                              kernel_grad,
                              hyps,
                              cutoffs,
                              energy_force_kernel=energy_force_kernel)

# import calculator and set potential
import os
from ase.calculators.espresso import Espresso
pot_file = os.environ.get('LAMMPS_POTENTIALS') + '/Al_zhou.eam.alloy'
pseudopotentials = {'Al': pot_file}
input_data = {'system': {'ecutwfc': 29, 'ecutrho': 143}, 'disk_io': 'low'}
calc = Espresso(pseudopotentials=pseudopotentials,
                tstress=True,
                tprnfor=True,
                kpts=(4, 4, 1),
                input_data=input_data)

# create structure as an FCC surface with an adsorbate
from ase.build import fcc111, add_adsorbate
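The snippet ends at this import; a plausible continuation that builds the slab
(the element, cell size, and adsorption site below are assumptions):

slab = fcc111('Al', size=(2, 2, 3), vacuum=10.0)
add_adsorbate(slab, 'Al', height=2.5, position='fcc')
slab.calc = calc  # attach the Espresso calculator defined above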
Example 8
# the surrounding imports (math, torch, logging, the project's `gp` module)
# are not shown, and the snippet begins mid-call; the logging setup is
# reconstructed here without its original filename argument
import logging
import math

import torch

logging.basicConfig(level=logging.INFO)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

x = torch.rand(100)
y = torch.rand(100)
train_X = torch.cat((x[:, None], y[:, None]), dim=-1)
dfdx = 2 * math.pi * torch.cos((2 * math.pi) * x) * torch.cos(
    (2 * math.pi) * y) + torch.randn(x.size()) * 0.2
dfdy = -2 * math.pi * torch.sin((2 * math.pi) * x) * torch.sin(
    (2 * math.pi) * y) + torch.randn(x.size()) * 0.2
train_Y = torch.cat((dfdx[:, None], dfdy[:, None]), dim=-1)

C = torch.tensor([5.0])
l = torch.rand(2)
kernel = gp.Deriv2Matern52(C, l)
gaussprocess = gp.GaussianProcess(gp.ZeroVectorMean(), kernel, 1e-2)
optimizer = torch.optim.Adam(gaussprocess.parameters(), lr=0.1)
training_iter = 50
gp.train((train_X, train_Y),
         gaussprocess,
         optimizer,
         training_iter,
         device,
         workdir="data/%s" % (current_time))

x1 = torch.ones(51) * 0.1
y1 = torch.linspace(0, 1, 51)
test_X = torch.cat((x1[:, None], y1[:, None]), dim=-1)
# test_xx, test_yy = torch.meshgrid(x1, y1)
# test_X = torch.cat((test_xx.flatten()[:, None], test_yy.flatten()[:, None]), dim=-1)
test_Y_mean, test_Y_var = gp.evaluate(test_X, (train_X, train_Y), gaussprocess,
                                      device)  # trailing args assumed; the source snippet truncates here
Example 9
def optim():

    orig_bounds = np.array([[0., 1.], [0, 1], [0, 1], [0, 1]])
    L, sigma = 0.5, 0.001
    #L, sigma = 0.3, 0.003
    mean = 0.0092
    if grad:
        kernel = GP2.SquaredExponentialKernel([L, L, L, L], sigma)
        gp = GP2.GaussianProcess(kernel,
                                 orig_bounds,
                                 noise=[5e-9, [1e-9, 1e-7, 1e-8, 1e-7]],
                                 noiseGP=True,
                                 cons=constraint)
        ei = GP2.ExpectedImprovement(gp)
    else:
        kernel = GP.SquaredExponentialKernel([L, L, L, L], sigma)
        gp = GP.GaussianProcess(kernel,
                                orig_bounds,
                                noise=5e-9,
                                noiseGP=True,
                                cons=constraint)
        ei = GP.ExpectedImprovement(gp)

    kernel = GP2.SquaredExponentialKernel([L, L, L, L], sigma)
    gp2 = GP2.GaussianProcess(kernel,
                              orig_bounds,
                              noise=[5e-9, [1e-9, 1e-7, 1e-8, 1e-7]],
                              noiseGP=True,
                              cons=constraint)
    #ei = GP2.ExpectedImprovement(gp)

    assert os.path.exists(stateFile)
    state = load_state()
    #for index in range(0, len(state['state'])):
    #    if get_state(state, index) != 'DONE':
    #        x = state['points'][index]
    #        res = evaluate(x, state, index)
    #        state['evals'].append(res)
    #        update_state(state, 'DONE', index)
    #        exit(1)
    n = len(state['points'])
    if len(state['evals']) < len(state['points']):
        n -= 1
    m = 8
    x = state['points'][:n]
    y = [res[0] - mean for res in state['evals']][:n]
    yd = [res[2:6] for res in state['evals']][:n]
    yn = [res[1] for res in state['evals']][:n]
    ydn = [res[6:10] for res in state['evals']][:n]
    #print yd
    #print ydn
    #print yd
    #print ydn
    #exit(1)
    gp2.train(x[:m], y[:m], yd[:m], yn[:m], ydn[:m])
    mus = []
    varis = []
    for i in range(m, n):
        mu, vari = gp2.evaluate(x[i])
        mus.append(mu[0] + mean)
        varis.append(vari[0, 0]**0.5)
        gp2.train(x[i], y[i], yd[i], yn[i], ydn[i])
        # GRAD based optim
        #pmin = gp2.posterior_min()
        #print pmin[1] + mean, pmin[2]
    mus = np.array(mus)
    varis = np.array(varis)
    cm = plt.get_cmap('RdYlBu')  # plt.cm.get_cmap was removed in Matplotlib 3.9
    z = np.arange(0, len(mus))
    plt.locator_params(axis='x', numticks=4)
    plt.locator_params(axis='y', numticks=4)
    sc = plt.scatter(varis, mus, c=z, s=100, cmap=cm)  # use the colormap defined above
    for i, x in enumerate(z):
        plt.annotate(x, (varis[i] + 1e-5, mus[i]), fontsize=16)
    plt.colorbar(sc, ticks=z + 1)
    d = 0.0001
    plt.xlim([varis.min() - d, varis.max() + d])
    plt.ylim([mus.min() - d, mus.max() + d])
    plt.xlabel('standard deviation of GP at evaluation')
    plt.ylabel('mean of GP at evaluation')
    plt.show()
    exit(1)
    # GRAD based optim

    x = x[:m]
    y = y[:m]
    yn = yn[:m]
    yd = yd[:m]
    ydn = ydn[:m]

    if grad:
        gp.train(x, y, yd, yn, ydn)
    else:
        gp.train(x, y, yn)

    for i in range(m, 25):
        dmin = gp.data_min()
        pmin = gp.posterior_min()
        #print 'data min:', dmin[0], dmin[1] + mean
        #print 'posterior min:', pmin[0], pmin[1] + mean
        print('{},{},{},{}'.format(i, ','.join(np.char.mod('%f', pmin[0])),
                                   pmin[1] + mean, pmin[2]))

        x = ei.optimize()

        if grad:
            y, _ = gp2.evaluate_grad(x)
            y, yd = y[0], y[1:]
            yn, ydn = gp2.get_noise(x)
            yn = yn[0]
            ydn = np.array(ydn).flatten()
            z = np.random.randn() * np.sqrt(yn) + y
            zd = np.random.randn(ydn.shape[0]) * np.sqrt(ydn) + yd
            gp.train(x, z, zd, yn, ydn)
        else:
            y, _ = gp2.evaluate(x)
            y = y[0]
            yn = gp2.get_noise(x)[0]
            z = np.random.randn() * np.sqrt(yn) + y
            gp.train(x, z, yn)
Example 10
current_time = time.strftime('%Y-%m-%d_%H-%M-%S', time.localtime(time.time()))
Path("data/%s" % (current_time)).mkdir(parents=True, exist_ok=True)
logging.basicConfig(filename="data/%s/exactGP.log" % (current_time),
                    level=logging.INFO)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

train_x = torch.linspace(0, 1, 100)
train_y = torch.sin(train_x *
                    (2 * math.pi)) + torch.randn(train_x.size()) * 0.2
train_x = train_x[:, None]

C = torch.tensor([1.0])
l = torch.rand(1)
kernel = gp.Matern52(C, l)
gaussprocess = gp.GaussianProcess(gp.ZeroScalarMean(), kernel, 2e-2)
optimizer = torch.optim.LBFGS(gaussprocess.parameters(), lr=0.1)
training_iter = 20

gp.train((train_x, train_y),
         gaussprocess,
         optimizer,
         training_iter,
         device,
         workdir="data/%s" % (current_time))
test_x = torch.linspace(0, 1, 51)
test_y = torch.sin(test_x * (2 * math.pi))
test_x = test_x[:, None]
mse = torch.nn.MSELoss()
(test_y_mean, test_y_var), _ = gp.evaluate((test_x, test_y),
                                           (train_x, train_y),
                                           gaussprocess)  # final args assumed; the source snippet truncates here
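The snippet defines `mse` but is cut off before using it; presumably it ends by
scoring the predictive mean, along these lines (a hypothetical continuation):

print('test MSE: %.4g' % mse(test_y_mean, test_y).item())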
Example 11
File: optim.py Project: qiqi/adFVM
def optim():

    orig_bounds = np.array([[0., 1.], [0, 1], [0, 1], [0, 1]])
    L, sigma = 0.5, 0.001
    #L, sigma = 0.3, 0.003
    mean = 0.0092
    kernel = GP.SquaredExponentialKernel([L, L, L, L], sigma)
    gp = GP.GaussianProcess(kernel,
                            orig_bounds,
                            noise=[5e-9, [1e-9, 1e-7, 1e-8, 1e-7]],
                            noiseGP=True,
                            cons=constraint)
    ei = GP.ExpectedImprovement(gp)

    assert os.path.exists(stateFile)
    state = load_state()
    for index in range(0, len(state['state'])):
        if get_state(state, index) != 'DONE':
            x = state['points'][index]
            res = evaluate(x, state, index)
            state['evals'].append(res)
            update_state(state, 'DONE', index)
            exit(1)
    print(state)
    x = state['points']
    y = [res[0] - mean for res in state['evals']]
    yd = [res[2:6] for res in state['evals']]
    yn = [res[1] for res in state['evals']]
    ydn = [res[6:10] for res in state['evals']]
    #print yd
    #print ydn
    #print yd
    #print ydn
    #exit(1)

    gp.train(x, y, yd, yn, ydn)

    for i in range(len(state['points']), 100):
        dmin = gp.data_min()
        pmin = gp.posterior_min()
        print('data min:', dmin[0], dmin[1] + mean)
        print('posterior min:', pmin[0], pmin[1] + mean)

        x = ei.optimize()
        print('ei choice:', i, x)
        #exit(1)
        state['points'].append(x)
        state['state'].append('BEGIN')
        save_state(state)
        res = evaluate(x, state)
        print('result:', res)
        state['evals'].append(res)
        update_state(state, 'DONE')
        exit(1)
        resm = [x for x in res]
        resm[0] = res[0] - mean

        gp.train(x, *resm)

        #eix = ei.evaluate(xs)
        #plt.plot(x1, eix.reshape(x1.shape))
        #plt.contourf(x1, x2, eix.reshape(x1.shape), 100)
        #plt.colorbar()
        ##plt.savefig('ei_{}.pdf'.format(i))
        ##plt.clf()
        #plt.show()

        #mu, cov = gp.evaluate(xs)
        ##mu, cov = gp.noiseGP[1].exponential(xs)
        ##plt.plot(x1, mu*gp.noise[0])
        ##plt.show()
        ##std = np.diag(cov)**0.5
        #plt.contourf(x1, x2, mu.reshape(x1.shape), 100)
        #plt.colorbar()
        ###plt.savefig('mu_{}.pdf'.format(i))
        ###plt.clf()
        #plt.show()
        #plt.contourf(x1, x2, std.reshape(x1.shape), 1000)
        #plt.colorbar()
        #plt.savefig('cov_{}.pdf'.format(i))
        #plt.clf()
        print()