Example #1
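This test case (like the other snippets on this page) omits its imports. A plausible shared preamble is sketched below; the robo.* module paths are assumptions based on early RoBO layouts and may need adjusting for your version:

import os
import unittest

import GPy
import matplotlib.pyplot as plt
import numpy as np

# Module paths below are guesses for an early RoBO release; adjust as needed.
from robo.models.gpy_model import GPyModel
from robo.acquisition.ei import EI
from robo.acquisition.log_ei import LogEI
from robo.acquisition.pi import PI
from robo.maximizers.stochastic_local_search import stochastic_local_search
from robo.recommendation.incumbent import compute_incumbent
from robo import plotting  # plotting helper location uncertain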
class LogEITestCase1(unittest.TestCase):
    def setUp(self):
        self.x = np.array([[0.62971589], [0.63273273], [0.17867868],
                           [0.17447447], [1.88558559]])
        self.y = np.array([[-3.69925653], [-3.66221988], [-3.65560591],
                           [-3.58907791], [-8.06925984]])
        self.kernel = GPy.kern.RBF(input_dim=1,
                                   variance=30.1646253727,
                                   lengthscale=0.435343653946)
        self.noise = 1e-20
        self.model = GPyModel(self.kernel,
                              noise_variance=self.noise,
                              optimize=False)
        self.model.train(self.x, self.y)

    def test(self):
        X_upper = np.array([2.1])
        X_lower = np.array([-2.1])

        x_test = np.array([[1.7], [2.0]])
        log_ei_estimator = LogEI(self.model,
                                 X_lower,
                                 X_upper,
                                 compute_incumbent=compute_incumbent)

        assert log_ei_estimator(x_test[0, np.newaxis])[0] > -np.inf
        assert log_ei_estimator(x_test[1, np.newaxis])[0] > -np.inf

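        # The last training point (x = 1.8856) has the lowest observed y;
        # with near-zero noise the model predicts no improvement there,
        # so EI is 0 and its log is -inf.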
        assert log_ei_estimator(self.x[-1, np.newaxis])[0] == -np.inf
Example #2
class PITestCase1(unittest.TestCase):
    def setUp(self):
        self.x = np.array([[0.62971589], [0.63273273], [0.17867868],
                           [0.17447447], [1.88558559]])
        self.y = np.array([[-3.69925653], [-3.66221988], [-3.65560591],
                           [-3.58907791], [-8.06925984]])
        self.kernel = GPy.kern.RBF(input_dim=1,
                                   variance=30.1646253727,
                                   lengthscale=0.435343653946)
        self.noise = 1e-20
        self.model = GPyModel(self.kernel,
                              noise_variance=self.noise,
                              optimize=False)
        self.model.train(self.x, self.y)

    def test(self):
        X_upper = np.array([2.1])
        X_lower = np.array([-2.1])

        x_test = np.array([[1.7], [2.0]])
        pi_estimator = PI(self.model, X_lower, X_upper)

        assert pi_estimator(x_test[0, np.newaxis],
                            incumbent=np.array([1.88558559]))[0] > 0.0
        assert pi_estimator(x_test[1, np.newaxis],
                            incumbent=np.array([1.88558559]))[0] > 0.0

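        # At the incumbent itself the probability of improving on the
        # best observed value is numerically zero.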
        self.assertAlmostEqual(pi_estimator(self.x[-1, np.newaxis],
                                            incumbent=np.array([1.88558559]))[0],
                               0.0,
                               delta=1e-4)
Example #3
class EITestCase1(unittest.TestCase):

    def setUp(self):
        self.x = np.array([[0.62971589], [0.63273273], [0.17867868], [0.17447447], [1.88558559]])
        self.y = np.array([[-3.69925653], [-3.66221988], [-3.65560591], [-3.58907791], [-8.06925984]])
        self.kernel = GPy.kern.RBF(input_dim=1, variance=30.1646253727, lengthscale=0.435343653946)
        self.noise = 1e-20
        self.model = GPyModel(self.kernel, noise_variance=self.noise, optimize=False)
        self.model.train(self.x, self.y)

    def test(self):
        X_upper = np.array([2.1])
        X_lower = np.array([-2.1])

        best = np.argmin(self.y)
        incumbent = self.x[best]

        ei_par = EI(self.model, X_upper=X_upper, X_lower=X_lower, compute_incumbent=compute_incumbent, par=0.0)

        out0 = ei_par(incumbent[:, np.newaxis], derivative=True)
        value0 = out0[0]
        derivative0 = out0[1]
        assert value0[0] <= 1e-5

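        # Step a small random distance away from the incumbent: both EI
        # and the magnitude of its gradient should increase.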
        x_value = incumbent + np.random.randint(1, 11) / 1000.
        out1 = ei_par(x_value[:, np.newaxis], derivative=True)
        value1 = out1[0]
        derivative1 = out1[1]

        assert np.all(value0 < value1)
        assert np.all(np.abs(derivative0) < np.abs(derivative1))
Example #4
def run():
    # Defining the bounds and dimensions of the input space
    X_lower = np.array([0])
    X_upper = np.array([6])
    dims = 1

    # Set the method that we will use to optimize the acquisition function
    maximizer = stochastic_local_search

    # Defining the method to model the objective function
    kernel = GPy.kern.Matern52(input_dim=dims)
    model = GPyModel(kernel, optimize=True, noise_variance=1e-4, num_restarts=10)

    # The acquisition function that we optimize in order to pick a new x
    acquisition_func = EI(
        model, X_upper=X_upper, X_lower=X_lower, compute_incumbent=compute_incumbent, par=0.1
    )  # par is the minimum improvement that a point has to obtain

    # Draw one random point and evaluate it to initialize BO
    X = np.array([np.random.uniform(X_lower, X_upper, dims)])
    Y = objective_function(X)

    # Fit the model on the data we observed so far
    model.train(X, Y)
    # Update the acquisition function model with the retrained model
    acquisition_func.update(model)

    # Optimize the acquisition function to obtain a new point
    new_x = maximizer(acquisition_func, X_lower, X_upper)

    # Evaluate the point and add the new observation to our set of previously seen points
    new_y = objective_function(np.array(new_x))
    X = np.append(X, new_x, axis=0)
    Y = np.append(Y, new_y, axis=0)

    # Visualize the objective function, model and the acquisition function
    fig = plt.figure()
    # Sub plot for the model and the objective function
    ax1 = fig.add_subplot(2, 1, 1)
    # Sub plot for the acquisition function
    ax2 = fig.add_subplot(2, 1, 2)
    resolution = 0.1
    # Call plot_model function
    ax1 = plotting.plot_model(model, X_lower, X_upper, ax1, resolution, "b", "blue", "Posterior Mean", 3, True)
    # Call plot_objective_function
    ax1 = plotting.plot_objective_function(
        objective_function, X_lower, X_upper, X, Y, ax1, resolution, "black", "ObjectiveFunction", True
    )
    ax1.set_title("Model + Objective Function")
    # Call plot_acquisition_function
    ax2 = plotting.plot_acquisition_function(
        acquisition_func, X_lower, X_upper, X, ax2, resolution, "AcquisitionFunction", True
    )
    plt.savefig("test2.png")
    os.system("eog test2.png&")
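run() relies on an objective_function defined elsewhere in the script. A minimal 1-D stand-in that matches the (n, 1) array shapes used above; this particular function is an illustration in the spirit of RoBO's tutorial examples, not part of the original snippet:

def objective_function(x):
    # Toy objective on [0, 6]; accepts and returns arrays of shape (n, 1),
    # as the BO loop above expects.
    return np.sin(3 * x) * 4 * (x - 1) * (x + 2)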
Example #5
yv = np.array([[
    mixexpert(X,
              Y,
              xv[0][0],
              xv[0][1],
              2,
              2,
              cvfolds=cvfolds,
              slowness=slowness,
              ploterr=True)
]])
print(yv)

for i in range(500):
    # Fit the model on the data we observed so far
    model.train(xv, yv)

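    # For UCB, anneal the exploration weight with a beta_t schedule in the
    # style of Srinivas et al. (2010), controlled by the confidence
    # parameter deltab.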
    if acqf == 'ucb':
        deltab = 0.5
        acquisition_func.par = np.sqrt(
            2. * np.log((i + 1) ** 2 * 2 * np.pi ** 2 / (3 * deltab))
            + 2. * dims * np.log((i + 1) ** 2 * dims
                                 * np.log(4 * dims / deltab) ** 0.5))
    print('par', acquisition_func.par)

    # Update the acquisition function model with the retrained model
    acquisition_func.update(model)

    # Optimize the acquisition function to obtain a new point
    new_x = maximizer(acquisition_func, X_lower, X_upper)
    print(new_x)
    # Evaluate the point and add the new observation to our set of previous seen points
Example #6
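This fragment assumes the dimensions, bounds, maximizer, and objective function were defined earlier; a plausible setup mirroring Example #4 (reconstructed for context, not part of the original snippet):

dims = 1
X_lower = np.array([0])
X_upper = np.array([6])
maximizer = stochastic_local_search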
# Defining the method to model the objective function
kernel = GPy.kern.Matern52(input_dim=dims)
model = GPyModel(kernel, optimize=True, noise_variance=1e-4, num_restarts=10)

# The acquisition function that we optimize in order to pick a new x
acquisition_func = EI(model, X_upper=X_upper, X_lower=X_lower, compute_incumbent=compute_incumbent, par=0.1)  # par is the minimum improvement that a point has to obtain

# Draw one random point and evaluate it to initialize BO
X = np.array([np.random.uniform(X_lower, X_upper, dims)])
Y = objective_function(X)

# This is the main Bayesian optimization loop
for i in range(10):
    # Fit the model on the data we observed so far
    model.train(X, Y)

    # Update the acquisition function model with the retrained model
    acquisition_func.update(model)

    # Optimize the acquisition function to obtain a new point 
    new_x = maximizer(acquisition_func, X_lower, X_upper)

    # Evaluate the point and add the new observation to our set of previously seen points
    new_y = objective_function(np.array(new_x))
    X = np.append(X, new_x, axis=0)
    Y = np.append(Y, new_y, axis=0)

    # Visualize the objective function, model and the acquisition function
    fig = plt.figure()
    ax1 = fig.add_subplot(1, 1, 1)