Example #1
class LCBTestCase(unittest.TestCase):

    def setUp(self):
        self.X_lower = np.array([0])
        self.X_upper = np.array([1])
        self.X = init_random_uniform(self.X_lower, self.X_upper, 4)
        self.Y = np.sin(self.X)
        self.kernel = GPy.kern.RBF(input_dim=1)

        self.model = GPyModel(self.kernel)
        self.model.train(self.X, self.Y)
        self.lcb = LCB(self.model,
                    X_upper=self.X_upper,
                    X_lower=self.X_lower)

    def test_general_interface(self):

        X_test = init_random_uniform(self.X_lower, self.X_upper, 10)

        a, dadx = self.lcb(X_test, True)

        assert len(a.shape) == 2
        assert a.shape[0] == X_test.shape[0]
        assert a.shape[1] == 1
        assert len(dadx.shape) == 2
        assert dadx.shape[0] == X_test.shape[0]
        assert dadx.shape[1] == X_test.shape[1]

    def test_check_grads(self):
        x_ = np.array([[np.random.rand()]])

        assert check_grad(self.lcb, lambda x: -self.lcb(x, True)[1], x_) < 1e-5
Example #2
class TestMaximizers2D(unittest.TestCase):
    def setUp(self):

        self.branin = Branin()

        n_points = 5
        self.X = np.random.rand(n_points, self.branin.n_dims)

        self.X[:, 0] = self.X[:, 0].dot(self.branin.X_upper[0] - self.branin.X_lower[0]) + self.branin.X_lower[0]
        self.X[:, 1] = self.X[:, 1].dot(self.branin.X_upper[1] - self.branin.X_lower[1]) + self.branin.X_lower[1]

        self.Y = self.branin.evaluate(self.X)

        kernel = GPy.kern.Matern52(input_dim=self.branin.n_dims)
        self.model = GPyModel(kernel, optimize=True, noise_variance=1e-4, num_restarts=10)

        self.model.train(self.X, self.Y)
        self.acquisition_func = EI(
            self.model,
            X_upper=self.branin.X_upper,
            X_lower=self.branin.X_lower,
            compute_incumbent=compute_incumbent,
            par=0.1,
        )

    def test_direct(self):
        maximizer = Direct(self.acquisition_func, self.branin.X_lower, self.branin.X_upper)
        x = maximizer.maximize()

        assert x.shape[0] == 1
        assert x.shape[1] == self.branin.n_dims
        assert np.all(x[:, 0] >= self.branin.X_lower[0])
        assert np.all(x[:, 1] >= self.branin.X_lower[1])
        assert np.all(x[:, 0] <= self.branin.X_upper[0])
        assert np.all(x[:, 1] <= self.branin.X_upper[1])
        assert np.all(x < self.branin.X_upper)

    def test_stochastic_local_search(self):
        maximizer = StochasticLocalSearch(self.acquisition_func, self.branin.X_lower, self.branin.X_upper)
        x = maximizer.maximize()

        assert x.shape[0] == 1
        assert x.shape[1] == self.branin.n_dims
        assert np.all(x[:, 0] >= self.branin.X_lower[0])
        assert np.all(x[:, 1] >= self.branin.X_lower[1])
        assert np.all(x[:, 0] <= self.branin.X_upper[0])
        assert np.all(x[:, 1] <= self.branin.X_upper[1])
        assert np.all(x < self.branin.X_upper)

    def test_cmaes(self):
        maximizer = CMAES(self.acquisition_func, self.branin.X_lower, self.branin.X_upper)
        x = maximizer.maximize(verbose=False)

        assert x.shape[0] == 1
        assert x.shape[1] == self.branin.n_dims
        assert np.all(x[:, 0] >= self.branin.X_lower[0])
        assert np.all(x[:, 1] >= self.branin.X_lower[1])
        assert np.all(x[:, 0] <= self.branin.X_upper[0])
        assert np.all(x[:, 1] <= self.branin.X_upper[1])
        assert np.all(x < self.branin.X_upper)
Example #3
    def train(self, X, Y, *args):
        self.X = X
        self.Y = Y
        if X.size == 0 or Y.size == 0:
            return

        m = GPy.models.GPRegression(self.X, self.Y, self.kernel)
        # Add exponential prior for the noise
        m.likelihood.unconstrain()
        m.likelihood.variance.set_prior(GPy.priors.Exponential(1))
        m.likelihood.variance.constrain_positive()
        
        
        if self.hmc is None:
            self.hmc = GPy.inference.mcmc.hmc.HMC(m, stepsize=5e-2)
            # Burnin
            self.hmc.sample(num_samples=self.burnin)
        else:
            self.hmc.model = m
        # Start the mcmc chain
        self.mcmc_chain = self.hmc.sample(num_samples=(self.chain_length))
        self.samples = self.mcmc_chain[list(range(0, self.chain_length, self.chain_length // self.n_hypers))]

        self.models = []
        for sample in self.samples:
            # Instantiate a model for each hyperparam configuration
            kernel = deepcopy(self.kernel)

            for i in range(len(sample) - 1):
                kernel.param_array[i] = sample[i]
            model = GPyModel(kernel, noise_variance=sample[-1])
            model.train(self.X, self.Y, optimize=False)
            self.models.append(model)
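
    # --- Not part of the original snippet: a minimal sketch of how predictions
    # could be marginalized over the sampled hyperparameters. It assumes each
    # entry of self.models follows the GPyModel interface used above, i.e.
    # model.predict(X) returns an (N, 1) mean and an (N, 1) variance.
    def predict(self, X):
        means = np.zeros([len(self.models), X.shape[0]])
        variances = np.zeros([len(self.models), X.shape[0]])
        for i, model in enumerate(self.models):
            m, v = model.predict(X)
            means[i] = m.flatten()
            variances[i] = v.flatten()
        # Law of total variance: mean of the per-sample variances plus the
        # variance of the per-sample means.
        mean = means.mean(axis=0)
        variance = variances.mean(axis=0) + means.var(axis=0)
        return mean[:, np.newaxis], variance[:, np.newaxis]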
Example #4
class Test(unittest.TestCase):
    def func(self, x):
        return x ** x + 0.5

    def setUp(self):
        X = np.array([[0.0, 0.25, 0.75, 1.0]])
        X = X.T
        y = self.func(X)
        k = GPy.kern.RBF(input_dim=1)
        self.m = GPyModel(k, noise_variance=1e-3)
        self.m.train(X, y)
        self.X_lower = np.array([0])
        self.X_upper = np.array([1.0])

    def tearDown(self):
        pass

    def test_optimize_posterior_mean(self):
        inc, inc_val = optimize_posterior_mean(self.m, self.X_lower, self.X_upper, with_gradients=True)

        assert len(inc.shape) == 1
        assert np.all(inc >= self.X_lower)
        assert np.all(inc <= self.X_upper)
        assert np.all(inc_val <= compute_incumbent(self.m)[1])

    def test_optimize_posterior_mean_and_std(self):
        inc, inc_val = optimize_posterior_mean_and_std(self.m, self.X_lower, self.X_upper, with_gradients=True)

        assert len(inc.shape) == 1
        assert np.all(inc >= self.X_lower)
        assert np.all(inc <= self.X_upper)
        assert np.all(inc_val <= compute_incumbent(self.m)[1])
Example #5
class LCBTestCase(unittest.TestCase):
    def setUp(self):
        self.X_lower = np.array([0])
        self.X_upper = np.array([1])
        self.X = init_random_uniform(self.X_lower, self.X_upper, 4)
        self.Y = np.sin(self.X)
        self.kernel = GPy.kern.RBF(input_dim=1)

        self.model = GPyModel(self.kernel)
        self.model.train(self.X, self.Y)
        self.lcb = LCB(self.model, X_upper=self.X_upper, X_lower=self.X_lower)

    def test_general_interface(self):

        X_test = init_random_uniform(self.X_lower, self.X_upper, 10)

        a, dadx = self.lcb(X_test, True)

        assert len(a.shape) == 2
        assert a.shape[0] == X_test.shape[0]
        assert a.shape[1] == 1
        assert len(dadx.shape) == 2
        assert dadx.shape[0] == X_test.shape[0]
        assert dadx.shape[1] == X_test.shape[1]

    def test_check_grads(self):
        x_ = np.array([[np.random.rand()]])

        assert check_grad(self.lcb, lambda x: -self.lcb(x, True)[1], x_) < 1e-5
Example #6
class LogEITestCase(unittest.TestCase):

    def setUp(self):
        self.X_lower = np.array([0])
        self.X_upper = np.array([1])
        self.X = np.random.rand(10)[:, np.newaxis]
        self.Y = np.sin(self.X)
        self.kernel = GPy.kern.RBF(input_dim=1)

        self.model = GPyModel(self.kernel)
        self.model.train(self.X, self.Y)
        self.log_ei = LogEI(self.model,
                    X_upper=self.X_upper,
                    X_lower=self.X_lower)

    def test_general_interface(self):

        X_test = init_random_uniform(self.X_lower, self.X_upper, 10)

#        a, dadx = self.log_ei(X_test, True)
        a = self.log_ei(X_test)

        assert len(a.shape) == 2
        assert a.shape[0] == X_test.shape[0]
        assert a.shape[1] == 1
Example #7
class PITestCase(unittest.TestCase):

    def setUp(self):
        self.X_lower = np.array([0])
        self.X_upper = np.array([1])
        self.X = init_random_uniform(self.X_lower, self.X_upper, 10)
        self.Y = np.sin(self.X)
        self.kernel = GPy.kern.RBF(input_dim=1)

        self.model = GPyModel(self.kernel)
        self.model.train(self.X, self.Y)
        self.pi = PI(self.model,
                    X_upper=self.X_upper,
                    X_lower=self.X_lower)

    def test_general_interface(self):

        X_test = init_random_uniform(self.X_lower, self.X_upper, 10)
        # Just check if PI is always greater equal than 0

        a, dadx = self.pi(X_test, True)

        assert len(a.shape) == 2
        assert a.shape[0] == X_test.shape[0]
        assert a.shape[1] == 1
        assert np.all(a >= 0.0)
        assert len(dadx.shape) == 2
        assert dadx.shape[0] == X_test.shape[0]
        assert dadx.shape[1] == X_test.shape[1]

    def test_check_grads(self):
        x_ = np.array([[np.random.rand()]])

        assert check_grad(self.pi, lambda x: -self.pi(x, True)[1], x_) < 1e-5
Example #8
class EITestCase(unittest.TestCase):
    def setUp(self):
        self.X_lower = np.array([0])
        self.X_upper = np.array([1])
        self.X = init_random_uniform(self.X_lower, self.X_upper, 10)
        self.Y = np.sin(self.X)
        self.kernel = GPy.kern.RBF(input_dim=1)

        self.model = GPyModel(self.kernel)
        self.model.train(self.X, self.Y)
        self.ei = EI(self.model, X_upper=self.X_upper, X_lower=self.X_lower)

    def test_general_interface(self):

        X_test = init_random_uniform(self.X_lower, self.X_upper, 10)
        # Just check if EI is always greater equal than 0

        a, dadx = self.ei(X_test, True)

        assert len(a.shape) == 2
        assert a.shape[0] == X_test.shape[0]
        assert a.shape[1] == 1
        assert np.all(a >= 0.0)
        assert len(dadx.shape) == 2
        assert dadx.shape[0] == X_test.shape[0]
        assert dadx.shape[1] == X_test.shape[1]

    def test_check_grads(self):
        x_ = np.array([[np.random.rand()]])

        assert check_grad(self.ei, lambda x: -self.ei(x, True)[1], x_) < 1e-5
Example #9
class TestMaximizers1D(unittest.TestCase):
    def setUp(self):

        self.X_lower = np.array([0])
        self.X_upper = np.array([6])
        self.dims = 1

        self.X = np.array([[1], [3.8], [0.9], [5.2], [3.4]])

        self.X[:, 0] = self.X[:, 0].dot(self.X_upper[0] -
                                        self.X_lower[0]) + self.X_lower[0]

        self.Y = objective_function(self.X)

        kernel = GPy.kern.Matern52(input_dim=self.dims)
        self.model = GPyModel(kernel,
                              optimize=True,
                              noise_variance=1e-4,
                              num_restarts=10)

        self.model.train(self.X, self.Y)
        self.acquisition_func = EI(self.model,
                                   X_upper=self.X_upper,
                                   X_lower=self.X_lower,
                                   par=0.1)

    def test_direct(self):
        maximizer = Direct(self.acquisition_func, self.X_lower, self.X_upper)
        x = maximizer.maximize()

        assert x.shape[0] == 1
        assert x.shape[1] == self.dims
        assert np.all(x[:, 0] >= self.X_lower[0])
        assert np.all(x[:, 0] <= self.X_upper[0])
        assert np.all(x < self.X_upper)

    def test_stochastic_local_search(self):
        maximizer = StochasticLocalSearch(self.acquisition_func, self.X_lower,
                                          self.X_upper)
        x = maximizer.maximize()

        assert x.shape[0] == 1
        assert x.shape[1] == self.dims
        assert np.all(x[:, 0] >= self.X_lower[0])
        assert np.all(x[:, 0] <= self.X_upper[0])
        assert np.all(x < self.X_upper)

    def test_grid_search(self):
        maximizer = GridSearch(self.acquisition_func, self.X_lower,
                               self.X_upper)
        x = maximizer.maximize()

        assert x.shape[0] == 1
        assert x.shape[1] == self.dims
        assert np.all(x[:, 0] >= self.X_lower[0])
        assert np.all(x[:, 0] <= self.X_upper[0])
        assert np.all(x < self.X_upper)
Example #10
    def setUp(self):
        self.X_lower = np.array([0])
        self.X_upper = np.array([1])
        self.X = init_random_uniform(self.X_lower, self.X_upper, 10)
        self.Y = np.sin(self.X)
        self.kernel = GPy.kern.RBF(input_dim=1)

        self.model = GPyModel(self.kernel)
        self.model.train(self.X, self.Y)
        self.ei = EI(self.model, X_upper=self.X_upper, X_lower=self.X_lower)
Example #11
class TestMaximizers1D(unittest.TestCase):

    def setUp(self):

        self.X_lower = np.array([0])
        self.X_upper = np.array([6])
        self.dims = 1

        self.X = np.array([[1], [3.8], [0.9], [5.2], [3.4]])

        self.X[:, 0] = self.X[:, 0].dot(self.X_upper[0] - self.X_lower[0]) + self.X_lower[0]

        self.Y = objective_function(self.X)

        kernel = GPy.kern.Matern52(input_dim=self.dims)
        self.model = GPyModel(kernel, optimize=True,
                              noise_variance=1e-4, num_restarts=10)

        self.model.train(self.X, self.Y)
        self.acquisition_func = EI(self.model, X_upper=self.X_upper,
                                   X_lower=self.X_lower,
                                   par=0.1)

    def test_direct(self):
        maximizer = Direct(self.acquisition_func, self.X_lower, self.X_upper)
        x = maximizer.maximize()

        assert x.shape[0] == 1
        assert x.shape[1] == self.dims
        assert np.all(x[:, 0] >= self.X_lower[0])
        assert np.all(x[:, 0] <= self.X_upper[0])
        assert np.all(x < self.X_upper)

    def test_stochastic_local_search(self):
        maximizer = StochasticLocalSearch(self.acquisition_func,
                                          self.X_lower, self.X_upper)
        x = maximizer.maximize()

        assert x.shape[0] == 1
        assert x.shape[1] == self.dims
        assert np.all(x[:, 0] >= self.X_lower[0])
        assert np.all(x[:, 0] <= self.X_upper[0])
        assert np.all(x < self.X_upper)

    def test_grid_search(self):
        maximizer = GridSearch(self.acquisition_func,
                               self.X_lower,
                               self.X_upper)
        x = maximizer.maximize()

        assert x.shape[0] == 1
        assert x.shape[1] == self.dims
        assert np.all(x[:, 0] >= self.X_lower[0])
        assert np.all(x[:, 0] <= self.X_upper[0])
        assert np.all(x < self.X_upper)
Example #12
    def setUp(self):
        self.X_lower = np.array([0])
        self.X_upper = np.array([1])
        self.X = np.random.rand(10)[:, np.newaxis]
        self.Y = np.sin(self.X)
        self.kernel = GPy.kern.RBF(input_dim=1)

        self.model = GPyModel(self.kernel)
        self.model.train(self.X, self.Y)
        self.log_ei = LogEI(self.model,
                            X_upper=self.X_upper,
                            X_lower=self.X_lower)
Example #13
    def setUp(self):
        self.x = np.array([[0.62971589], [0.63273273], [0.17867868], [0.17447447], [1.88558559]])
        self.y = np.array([[-3.69925653], [-3.66221988], [-3.65560591], [-3.58907791], [-8.06925984]])
        self.kernel = GPy.kern.RBF(input_dim=1, variance=30.1646253727, lengthscale=0.435343653946)
        self.noise = 1e-20
        self.model = GPyModel(self.kernel, noise_variance=self.noise, optimize=False)
        self.model.train(self.x, self.y)
Example #14
    def setUp(self):
        X = np.array([[0.0, 0.25, 0.75, 1.0]])
        X = X.T
        y = self.func(X)
        k = GPy.kern.RBF(input_dim=1)
        self.m = GPyModel(k, noise_variance=1e-3)
        self.m.train(X, y)
        self.X_lower = np.array([0])
        self.X_upper = np.array([1.0])
Example #15
    def setUp(self):

        self.X_lower = np.array([0])
        self.X_upper = np.array([6])
        self.dims = 1

        self.X = np.array([[1], [3.8], [0.9], [5.2], [3.4]])

        self.X[:, 0] = self.X[:, 0].dot(self.X_upper[0] - self.X_lower[0]) + self.X_lower[0]

        self.Y = objective_function(self.X)

        kernel = GPy.kern.Matern52(input_dim=self.dims)
        self.model = GPyModel(kernel, optimize=True,
                              noise_variance=1e-4, num_restarts=10)

        self.model.train(self.X, self.Y)
        self.acquisition_func = EI(self.model, X_upper=self.X_upper,
                                   X_lower=self.X_lower,
                                   par=0.1)
Example #16
class PITestCase1(unittest.TestCase):
    def setUp(self):
        self.x = np.array([[0.62971589], [0.63273273], [0.17867868], [0.17447447], [1.88558559]])
        self.y = np.array([[-3.69925653], [-3.66221988], [-3.65560591], [-3.58907791], [-8.06925984]])
        self.kernel = GPy.kern.RBF(input_dim=1, variance=30.1646253727, lengthscale=0.435343653946)
        self.noise = 1e-20
        self.model = GPyModel(self.kernel, noise_variance=self.noise, optimize=False)
        self.model.train(self.x, self.y)

    def test(self):
        X_upper = np.array([2.1])
        X_lower = np.array([-2.1])

        x_test = np.array([[1.7], [2.0]])
        pi_estimator = PI(self.model, X_lower, X_upper, compute_incumbent)

        assert pi_estimator(x_test[0, np.newaxis])[0] > 0.0
        assert pi_estimator(x_test[1, np.newaxis])[0] > 0.0

        self.assertAlmostEqual(pi_estimator(self.x[-1, np.newaxis])[0], 0.0, delta=10E-5)
Example #17
    def setUp(self):
        self.X_lower = np.array([0])
        self.X_upper = np.array([1])
        self.X = init_random_uniform(self.X_lower, self.X_upper, 10)
        self.Y = np.sin(self.X)
        self.kernel = GPy.kern.RBF(input_dim=1)

        self.model = GPyModel(self.kernel)
        self.model.train(self.X, self.Y)
        self.pi = PI(self.model,
                    X_upper=self.X_upper,
                    X_lower=self.X_lower)
Example #18
    def setUp(self):
        self.X_lower = np.array([0])
        self.X_upper = np.array([1])
        self.X = np.random.rand(10)[:, np.newaxis]
        self.Y = np.sin(self.X)
        self.kernel = GPy.kern.RBF(input_dim=1)

        self.model = GPyModel(self.kernel)
        self.model.train(self.X, self.Y)
        self.log_ei = LogEI(self.model,
                    X_upper=self.X_upper,
                    X_lower=self.X_lower)
Example #19
    def setUp(self):

        self.branin = Branin()

        n_points = 5
        rng = np.random.RandomState(42)
        self.X = init_random_uniform(self.branin.X_lower,
                                     self.branin.X_upper,
                                     n_points,
                                     rng=rng)

        self.Y = self.branin.evaluate(self.X)

        kernel = GPy.kern.Matern52(input_dim=self.branin.n_dims)
        self.model = GPyModel(kernel,
                              optimize=True,
                              noise_variance=1e-4,
                              num_restarts=10)

        self.model.train(self.X, self.Y)
        self.acquisition_func = EI(self.model,
                                   X_upper=self.branin.X_upper,
                                   X_lower=self.branin.X_lower,
                                   par=0.1)
Example #20
class LogEITestCase(unittest.TestCase):
    def setUp(self):
        self.X_lower = np.array([0])
        self.X_upper = np.array([1])
        self.X = np.random.rand(10)[:, np.newaxis]
        self.Y = np.sin(self.X)
        self.kernel = GPy.kern.RBF(input_dim=1)

        self.model = GPyModel(self.kernel)
        self.model.train(self.X, self.Y)
        self.log_ei = LogEI(self.model,
                            X_upper=self.X_upper,
                            X_lower=self.X_lower)

    def test_general_interface(self):

        X_test = init_random_uniform(self.X_lower, self.X_upper, 10)

        #        a, dadx = self.log_ei(X_test, True)
        a = self.log_ei(X_test)

        assert len(a.shape) == 2
        assert a.shape[0] == X_test.shape[0]
        assert a.shape[1] == 1
Example #21
    def setUp(self):

        self.X_lower = np.array([0])
        self.X_upper = np.array([6])
        self.dims = 1

        n_points = 5
        self.X = np.random.rand(n_points, self.dims)

        self.X[:, 0] = self.X[:, 0].dot(self.X_upper[0] - self.X_lower[0]) + self.X_lower[0]

        self.Y = objective_function(self.X)

        kernel = GPy.kern.Matern52(input_dim=self.dims)
        self.model = GPyModel(kernel, optimize=True, noise_variance=1e-4, num_restarts=10)

        self.model.train(self.X, self.Y)
        self.acquisition_func = EI(
            self.model, X_upper=self.X_upper, X_lower=self.X_lower, compute_incumbent=compute_incumbent, par=0.1
        )
Example #22
    def setUp(self):

        self.X_lower = np.array([0])
        self.X_upper = np.array([6])
        self.dims = 1

        self.X = np.array([[1], [3.8], [0.9], [5.2], [3.4]])

        self.X[:, 0] = self.X[:, 0].dot(self.X_upper[0] - self.X_lower[0]) + self.X_lower[0]

        self.Y = objective_function(self.X)

        kernel = GPy.kern.Matern52(input_dim=self.dims)
        self.model = GPyModel(kernel, optimize=True,
                              noise_variance=1e-4, num_restarts=10)

        self.model.train(self.X, self.Y)
        self.acquisition_func = EI(self.model, X_upper=self.X_upper,
                                   X_lower=self.X_lower,
                                   par=0.1)
Example #23
    def setUp(self):

        self.branin = Branin()

        n_points = 5
        self.X = np.random.rand(n_points, self.branin.n_dims)

        self.X[:, 0] = self.X[:, 0].dot(self.branin.X_upper[0] - self.branin.X_lower[0]) + self.branin.X_lower[0]
        self.X[:, 1] = self.X[:, 1].dot(self.branin.X_upper[1] - self.branin.X_lower[1]) + self.branin.X_lower[1]

        self.Y = self.branin.evaluate(self.X)

        kernel = GPy.kern.Matern52(input_dim=self.branin.n_dims)
        self.model = GPyModel(kernel, optimize=True, noise_variance=1e-4, num_restarts=10)

        self.model.train(self.X, self.Y)
        self.acquisition_func = EI(
            self.model,
            X_upper=self.branin.X_upper,
            X_lower=self.branin.X_lower,
            compute_incumbent=compute_incumbent,
            par=0.1,
        )
Example #24
    def setUp(self):

        self.branin = Branin()

        n_points = 5
        rng = np.random.RandomState(42)
        self.X = init_random_uniform(self.branin.X_lower,
                                     self.branin.X_upper,
                                     n_points,
                                     rng=rng)
        

        self.Y = self.branin.evaluate(self.X)

        kernel = GPy.kern.Matern52(input_dim=self.branin.n_dims)
        self.model = GPyModel(kernel, optimize=True,
                              noise_variance=1e-4,
                              num_restarts=10)

        self.model.train(self.X, self.Y)
        self.acquisition_func = EI(self.model,
                                   X_upper=self.branin.X_upper,
                                   X_lower=self.branin.X_lower,
                                   par=0.1)
Example #25
class TestMaximizers2D(unittest.TestCase):

    def setUp(self):

        self.branin = Branin()

        n_points = 5
        rng = np.random.RandomState(42)
        self.X = init_random_uniform(self.branin.X_lower,
                                     self.branin.X_upper,
                                     n_points,
                                     rng=rng)
        

        self.Y = self.branin.evaluate(self.X)

        kernel = GPy.kern.Matern52(input_dim=self.branin.n_dims)
        self.model = GPyModel(kernel, optimize=True,
                              noise_variance=1e-4,
                              num_restarts=10)

        self.model.train(self.X, self.Y)
        self.acquisition_func = EI(self.model,
                                   X_upper=self.branin.X_upper,
                                   X_lower=self.branin.X_lower,
                                   par=0.1)

    def test_direct(self):
        maximizer = Direct(self.acquisition_func,
                           self.branin.X_lower,
                           self.branin.X_upper)
        x = maximizer.maximize()

        assert x.shape[0] == 1
        assert x.shape[1] == self.branin.n_dims
        assert np.all(x[:, 0] >= self.branin.X_lower[0])
        assert np.all(x[:, 1] >= self.branin.X_lower[1])
        assert np.all(x[:, 0] <= self.branin.X_upper[0])
        assert np.all(x[:, 1] <= self.branin.X_upper[1])
        assert np.all(x < self.branin.X_upper)

    def test_stochastic_local_search(self):
        maximizer = StochasticLocalSearch(self.acquisition_func,
                                          self.branin.X_lower,
                                          self.branin.X_upper)
        x = maximizer.maximize()

        assert x.shape[0] == 1
        assert x.shape[1] == self.branin.n_dims
        assert np.all(x[:, 0] >= self.branin.X_lower[0])
        assert np.all(x[:, 1] >= self.branin.X_lower[1])
        assert np.all(x[:, 0] <= self.branin.X_upper[0])
        assert np.all(x[:, 1] <= self.branin.X_upper[1])
        assert np.all(x < self.branin.X_upper)

    def test_cmaes(self):
        maximizer = CMAES(self.acquisition_func,
                          self.branin.X_lower,
                          self.branin.X_upper)

        x = maximizer.maximize()

        assert x.shape[0] == 1
        assert x.shape[1] == self.branin.n_dims
        assert np.all(x[:, 0] >= self.branin.X_lower[0])
        assert np.all(x[:, 1] >= self.branin.X_lower[1])
        assert np.all(x[:, 0] <= self.branin.X_upper[0])
        assert np.all(x[:, 1] <= self.branin.X_upper[1])
        assert np.all(x < self.branin.X_upper)
Example #26
    def test(self):
        X_lower = np.array([0])
        X_upper = np.array([1])
        X = init_random_uniform(X_lower, X_upper, 10)
        Y = np.sin(X)
        kernel = GPy.kern.Matern52(input_dim=1)
        model = GPyModel(kernel)
        model.train(X, Y)

        x_test = init_random_uniform(X_lower, X_upper, 3)

        # Shape matching predict
        m, v = model.predict(x_test, full_cov=True)
        assert len(m.shape) == 2
        assert m.shape[0] == x_test.shape[0]
        assert m.shape[1] == 1
        assert len(v.shape) == 2
        assert v.shape[0] == x_test.shape[0]
        assert v.shape[1] == x_test.shape[0]

        # Check gradients
        dm, dv = model.predictive_gradients(x_test)
        assert len(dm.shape) == 2
        assert dm.shape[0] == x_test.shape[0]
        assert dm.shape[1] == x_test.shape[1]
        assert len(dv.shape) == 2
        assert dv.shape[0] == x_test.shape[0]
        assert dv.shape[1] == 1

        # Shape matching function sampling
        x_ = np.linspace(X_lower, X_upper, 10)
        x_ = x_[:, np.newaxis]
        funcs = model.sample_functions(x_, n_funcs=2)
        assert len(funcs.shape) == 2
        assert funcs.shape[0] == 2
        assert funcs.shape[1] == x_.shape[0]

        # Shape matching predict variance
        x_test2 = np.array([np.random.rand(1)])
        x_test1 = np.random.rand(10)[:, np.newaxis]
        var = model.predict_variance(x_test1, x_test2)
        assert len(var.shape) == 2
        assert var.shape[0] == x_test1.shape[0]
        assert var.shape[1] == 1

        # Check compatibility with all acquisition functions
        acq_func = EI(model,
                     X_upper=X_upper,
                     X_lower=X_lower)
        acq_func.update(model)
        acq_func(x_test)

        acq_func = PI(model,
                     X_upper=X_upper,
                     X_lower=X_lower)
        acq_func.update(model)
        acq_func(x_test)

        acq_func = LCB(model,
                     X_upper=X_upper,
                     X_lower=X_lower)
        acq_func.update(model)
        acq_func(x_test)

        acq_func = InformationGain(model,
                     X_upper=X_upper,
                     X_lower=X_lower)
        acq_func.update(model)
        acq_func(x_test)

        # Check compatibility with all incumbent estimation methods
        rec = BestObservation(model, X_lower, X_upper)
        inc, inc_val = rec.estimate_incumbent(None)
        assert len(inc.shape) == 2
        assert inc.shape[0] == 1
        assert inc.shape[1] == X_upper.shape[0]
        assert len(inc_val.shape) == 2
        assert inc_val.shape[0] == 1
        assert inc_val.shape[1] == 1

        rec = PosteriorMeanOptimization(model, X_lower, X_upper)
        startpoints = init_random_uniform(X_lower, X_upper, 4)
        inc, inc_val = rec.estimate_incumbent(startpoints)
        assert len(inc.shape) == 2
        assert inc.shape[0] == 1
        assert inc.shape[1] == X_upper.shape[0]
        assert len(inc_val.shape) == 2
        assert inc_val.shape[0] == 1
        assert inc_val.shape[1] == 1

        rec = PosteriorMeanAndStdOptimization(model, X_lower, X_upper)
        startpoints = init_random_uniform(X_lower, X_upper, 4)
        inc, inc_val = rec.estimate_incumbent(startpoints)
        assert len(inc.shape) == 2
        assert inc.shape[0] == 1
        assert inc.shape[1] == X_upper.shape[0]
        assert len(inc_val.shape) == 2
        assert inc_val.shape[0] == 1
        assert inc_val.shape[1] == 1
Example #27
import numpy as np
import GPy

from robo.task.rembo import REMBO
from robo.task.synthetic_functions.branin import Branin
from robo.models.gpy_model import GPyModel
from robo.maximizers.cmaes import CMAES
from robo.solver.bayesian_optimization import BayesianOptimization
from robo.acquisition.ei import EI


class BraninInBillionDims(REMBO):
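    # Pads Branin's 2-d bounds with 999,998 extra dimensions, giving a
    # 10^6-dimensional search box; REMBO then optimizes within a random
    # 2-dimensional embedding of that box (d=2).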
    def __init__(self):
        self.b = Branin()
        X_lower = np.concatenate((self.b.X_lower, np.zeros([999998])))
        X_upper = np.concatenate((self.b.X_upper, np.ones([999998])))
        super(BraninInBillionDims, self).__init__(X_lower, X_upper, d=2)

    def objective_function(self, x):
        return self.b.objective_function(x[:, :2])

task = BraninInBillionDims()
kernel = GPy.kern.Matern52(input_dim=task.n_dims)
model = GPyModel(kernel, optimize=True, num_restarts=10)
acquisition_func = EI(model, task.X_lower, task.X_upper)
maximizer = CMAES(acquisition_func, task.X_lower, task.X_upper)
bo = BayesianOptimization(acquisition_func=acquisition_func,
                          model=model,
                          maximize_func=maximizer,
                          task=task)

bo.run(500)
Example #28
class TestMaximizers2D(unittest.TestCase):
    def setUp(self):

        self.branin = Branin()

        n_points = 5
        rng = np.random.RandomState(42)
        self.X = init_random_uniform(self.branin.X_lower,
                                     self.branin.X_upper,
                                     n_points,
                                     rng=rng)

        self.Y = self.branin.evaluate(self.X)

        kernel = GPy.kern.Matern52(input_dim=self.branin.n_dims)
        self.model = GPyModel(kernel,
                              optimize=True,
                              noise_variance=1e-4,
                              num_restarts=10)

        self.model.train(self.X, self.Y)
        self.acquisition_func = EI(self.model,
                                   X_upper=self.branin.X_upper,
                                   X_lower=self.branin.X_lower,
                                   par=0.1)

    def test_direct(self):
        maximizer = Direct(self.acquisition_func, self.branin.X_lower,
                           self.branin.X_upper)
        x = maximizer.maximize()

        assert x.shape[0] == 1
        assert x.shape[1] == self.branin.n_dims
        assert np.all(x[:, 0] >= self.branin.X_lower[0])
        assert np.all(x[:, 1] >= self.branin.X_lower[1])
        assert np.all(x[:, 0] <= self.branin.X_upper[0])
        assert np.all(x[:, 1] <= self.branin.X_upper[1])
        assert np.all(x < self.branin.X_upper)

    def test_stochastic_local_search(self):
        maximizer = StochasticLocalSearch(self.acquisition_func,
                                          self.branin.X_lower,
                                          self.branin.X_upper)
        x = maximizer.maximize()

        assert x.shape[0] == 1
        assert x.shape[1] == self.branin.n_dims
        assert np.all(x[:, 0] >= self.branin.X_lower[0])
        assert np.all(x[:, 1] >= self.branin.X_lower[1])
        assert np.all(x[:, 0] <= self.branin.X_upper[0])
        assert np.all(x[:, 1] <= self.branin.X_upper[1])
        assert np.all(x < self.branin.X_upper)

    def test_cmaes(self):
        maximizer = CMAES(self.acquisition_func, self.branin.X_lower,
                          self.branin.X_upper)

        x = maximizer.maximize()

        assert x.shape[0] == 1
        assert x.shape[1] == self.branin.n_dims
        assert np.all(x[:, 0] >= self.branin.X_lower[0])
        assert np.all(x[:, 1] >= self.branin.X_lower[1])
        assert np.all(x[:, 0] <= self.branin.X_upper[0])
        assert np.all(x[:, 1] <= self.branin.X_upper[1])
        assert np.all(x < self.branin.X_upper)
Example #29
'''
@author: Aaron Klein
'''

import setup_logger

import GPy
from robo.models.gpy_model import GPyModel
from robo.acquisition.ei import EI
from robo.maximizers.cmaes import CMAES
from robo.task.synthetic_functions.branin import Branin
from robo.solver.bayesian_optimization import BayesianOptimization
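
# End-to-end Bayesian optimization of the Branin function: a GP surrogate with
# a Matern-5/2 kernel, EI as the acquisition function, CMA-ES as the acquisition
# maximizer, run for 10 iterations.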

branin = Branin()

kernel = GPy.kern.Matern52(input_dim=branin.n_dims)
model = GPyModel(kernel)

acquisition_func = EI(model,
                      X_upper=branin.X_upper,
                      X_lower=branin.X_lower,
                      par=0.1)

maximizer = CMAES(acquisition_func, branin.X_lower, branin.X_upper)

bo = BayesianOptimization(acquisition_func=acquisition_func,
                          model=model,
                          maximize_func=maximizer,
                          task=branin)

bo.run(10)