def test_optimize_constraint(self):
        """Constrained BO: an unsatisfiable threshold fails, a permissive one succeeds."""
        for verbose in (False, True):
            with self.test_session():
                # threshold=-1 can never be met, so the run must report
                # failure and return every evaluated point.
                pof = gpflowopt.acquisition.ProbabilityOfFeasibility(
                    create_parabola_model(self.domain), threshold=-1)
                bo = gpflowopt.BayesianOptimizer(self.domain, pof,
                                                 verbose=verbose)
                res = bo.optimize(lambda X: parabola2d(X)[0], n_iter=1)
                self.assertFalse(res.success)
                self.assertEqual(
                    res.message,
                    'No evaluations satisfied all the constraints')
                self.assertEqual(res.nfev, 1,
                                 "Only 1 evaluations permitted")
                self.assertTupleEqual(res.x.shape, (17, 2))
                self.assertTupleEqual(res.fun.shape, (17, 0))
                self.assertTupleEqual(res.constraints.shape, (17, 1))

                # A permissive threshold of 0.3 yields a successful run
                # returning only the feasible subset.
                pof = gpflowopt.acquisition.ProbabilityOfFeasibility(
                    create_parabola_model(self.domain), threshold=0.3)
                bo = gpflowopt.BayesianOptimizer(self.domain, pof,
                                                 verbose=verbose)
                res = bo.optimize(lambda X: parabola2d(X)[0], n_iter=1)
                self.assertTrue(res.success)
                self.assertEqual(res.nfev, 1, "Only 1 evaluation permitted")
                self.assertTupleEqual(res.x.shape, (5, 2))
                self.assertTupleEqual(res.fun.shape, (5, 0))
                self.assertTupleEqual(res.constraints.shape, (5, 1))
    def test_callback_recompile_mcmc(self):
        """A prior-changing callback must rebuild the MCMC model copies.

        After each optimize call the root model (operand 0) is the same
        object, while every hyperparameter-draw copy has been replaced.
        """
        class PriorInjector(object):
            def __init__(self):
                self.no_models = 0

            def __call__(self, models):
                c = np.random.randint(2, 10)
                models[0].kern.variance.prior = gpflow.priors.Gamma(c, 1. / c)
                self.no_models = len(models)

        callback = PriorInjector()
        optimizer = gpflowopt.BayesianOptimizer(self.domain,
                                                self.acquisition,
                                                hyper_draws=5,
                                                callback=callback)

        for iteration in range(2):
            before = optimizer.acquisition.operands
            optimizer.optimize(lambda X: parabola2d(X)[0], n_iter=1)
            if iteration == 0:
                # The callback only ever sees the single underlying model.
                self.assertEqual(callback.no_models, 1)
            after = optimizer.acquisition.operands
            self.assertIs(before[0], after[0])
            for old_copy, new_copy in zip(before[1:], after[1:]):
                self.assertIsNot(old_copy, new_copy)
Exemple #3
0
 def test_nongpr_model(self):
     """BayesianOptimizer must also accept non-GPR models (here: VGP)."""
     design = gpflowopt.design.LatinHyperCube(16, self.domain)
     X = design.generate()
     Y = parabola2d(design.generate())[0]
     model = gpflow.vgp.VGP(X, Y,
                            gpflow.kernels.RBF(2, ARD=True),
                            likelihood=gpflow.likelihoods.Gaussian())
     ei = gpflowopt.acquisition.ExpectedImprovement(model)
     result = gpflowopt.BayesianOptimizer(self.domain, ei).optimize(
         lambda X: parabola2d(X)[0], n_iter=1)
     self.assertTrue(result.success)
Exemple #4
0
    def test_mcmc(self):
        """hyper_draws wraps the acquisition in an MCMCAcquistion with N operands."""
        with self.test_session():
            opt = gpflowopt.BayesianOptimizer(self.domain, self.acquisition,
                                              hyper_draws=10)
            acq = opt.acquisition
            self.assertIsInstance(acq, gpflowopt.acquisition.MCMCAcquistion)
            self.assertEqual(len(acq.operands), 10)
            self.assertEqual(acq.operands[0], self.acquisition)

            # The parabola's optimum sits at the origin with value 0.
            result = opt.optimize(lambda X: parabola2d(X)[0], n_iter=20)
            self.assertTrue(result.success)
            self.assertTrue(np.allclose(result.x, 0),
                            msg="Optimizer failed to find optimum")
            self.assertTrue(np.allclose(result.fun, 0),
                            msg="Incorrect function value returned")
Exemple #5
0
    def test_callback(self):
        """The callback must fire exactly once per optimization iteration."""
        class CountingCallback(object):
            def __init__(self):
                self.counter = 0

            def __call__(self, models):
                self.counter += 1

        cb = CountingCallback()
        opt = gpflowopt.BayesianOptimizer(self.domain, self.acquisition,
                                          callback=cb)
        opt.optimize(lambda X: parabola2d(X)[0], n_iter=2)
        self.assertEqual(cb.counter, 2)
Exemple #6
0
 def test_optimize_multi_objective(self):
     """Multi-objective run on VLMOP2: every returned point must be non-dominated."""
     with self.test_session():
         m1, m2 = create_vlmop2_model()
         acq = (gpflowopt.acquisition.ExpectedImprovement(m1) +
                gpflowopt.acquisition.ExpectedImprovement(m2))
         result = gpflowopt.BayesianOptimizer(self.domain, acq).optimize(
             vlmop2, n_iter=2)
         self.assertTrue(result.success)
         self.assertEqual(result.nfev, 2, "Only 2 evaluations permitted")
         self.assertTupleEqual(result.x.shape, (7, 2))
         self.assertTupleEqual(result.fun.shape, (7, 2))
         # Rank 0 in the non-dominated sort means Pareto-optimal.
         _, ranks = gpflowopt.pareto.non_dominated_sort(result.fun)
         self.assertTrue(np.all(ranks == 0))
Exemple #7
0
    def test_initial_design(self):
        """An initial design is evaluated on the first run and then consumed."""
        with self.test_session():
            initial = gpflowopt.design.RandomDesign(5, self.domain)
            opt = gpflowopt.BayesianOptimizer(self.domain, self.acquisition,
                                              initial=initial)

            # First call with n_iter=0: only the 5 initial points run.
            result = opt.optimize(lambda X: parabola2d(X)[0], n_iter=0)
            self.assertTrue(result.success)
            self.assertEqual(result.nfev, 5, "Evaluated only initial")
            self.assertTupleEqual(opt.acquisition.data[0].shape, (21, 2))
            self.assertTupleEqual(opt.acquisition.data[1].shape, (21, 1))

            # Second call: the initial design must not be evaluated again.
            result = opt.optimize(lambda X: parabola2d(X)[0], n_iter=0)
            self.assertTrue(result.success)
            self.assertEqual(result.nfev, 0, "Initial was not reset")
            self.assertTupleEqual(opt.acquisition.data[0].shape, (21, 2))
            self.assertTupleEqual(opt.acquisition.data[1].shape, (21, 1))
def predict_with_BO(X_test2predict, joint, models, normalizer):
    """Run a per-row Bayesian optimization over the strategy parameters.

    For each row of ``X_test2predict`` the observed feature values are frozen
    into degenerate ``[value, value]`` domain parameters, one free ``[0, 1]``
    parameter is added per strategy, and a BayesianOptimizer is run against
    the surrogate models.

    Relies on the module-level globals ``mappnames`` (1-indexed strategy
    names), ``names_features`` and ``townsend``.

    Args:
        X_test2predict: 2-D array-like of test feature rows.
        joint: acquisition function handed to the BayesianOptimizer.
        models: surrogate models, each wrapped via ``partial(townsend, model=...)``.
        normalizer: fitted scaler exposing ``transform``.

    Returns:
        None.  TODO(review): results are only printed; collect and return
        the per-row predictions instead of the ``None`` placeholder.
    """
    # Append one zeroed column per strategy, then normalize the joint matrix.
    add_strategy_features = np.zeros((X_test2predict.shape[0], len(mappnames)))
    new_X = copy.deepcopy(np.matrix(X_test2predict))

    new_X = np.hstack((new_X, add_strategy_features))
    new_X = normalizer.transform(new_X)

    for i in range(X_test2predict.shape[0]):

        # Freeze every observed feature with a degenerate [v, v] range so
        # only the strategy parameters below remain free to optimize.
        domain = None
        for feature_name_i in range(len(names_features)):
            param = gpflowopt.domain.ContinuousParameter(
                names_features[feature_name_i],
                new_X[i, feature_name_i],
                new_X[i, feature_name_i])
            # Fix: identity test instead of 'type(domain) == type(None)'.
            domain = param if domain is None else domain + param

        # One free [0, 1] parameter per strategy (mappnames is 1-indexed).
        for strategy_i in range(len(mappnames)):
            domain += gpflowopt.domain.ContinuousParameter(
                mappnames[strategy_i + 1], 0, 1)

        # MC warm-up stage followed by four L-BFGS-B refinement stages.
        optimization_stages = [gpflowopt.optim.MCOptimizer(domain,
                                                           200)]  # accuracy
        for _ in range(4):
            optimization_stages.append(gpflowopt.optim.SciPyOptimizer(domain))
        acquisition_opt = gpflowopt.optim.StagedOptimizer(optimization_stages)

        # Then run the BayesianOptimizer
        optimizer = gpflowopt.BayesianOptimizer(domain,
                                                joint,
                                                optimizer=acquisition_opt,
                                                verbose=True)

        surrogate_functions = [partial(townsend, model=m) for m in models]
        result = optimizer.optimize(surrogate_functions, n_iter=100)

        print(result)

    predictions = None
    return predictions
 def test_optimize(self):
     """A plain EI run on the parabola must converge to the origin."""
     for verbose in (False, True):
         with self.test_session():
             ei = gpflowopt.acquisition.ExpectedImprovement(
                 create_parabola_model(self.domain))
             opt = gpflowopt.BayesianOptimizer(self.domain, ei,
                                               verbose=verbose)
             result = opt.optimize(lambda X: parabola2d(X)[0], n_iter=20)
             self.assertTrue(result.success)
             self.assertEqual(result.nfev, 20,
                              "Only 20 evaluations permitted")
             # Optimum of the parabola is f(0, 0) = 0.
             self.assertTrue(np.allclose(result.x, 0),
                             msg="Optimizer failed to find optimum")
             self.assertTrue(np.allclose(result.fun, 0),
                             msg="Incorrect function value returned")
Exemple #10
0
    def test_callback_recompile(self):
        """Changing a prior in the callback must flag the model for recompilation."""
        class PriorInjector(object):
            def __init__(self):
                self.recompile = False

            def __call__(self, models):
                gamma = np.random.randint(2, 10)
                models[0].kern.variance.prior = gpflow.priors.Gamma(gamma,
                                                                    1. / gamma)
                self.recompile = models[0]._needs_recompile

        cb = PriorInjector()
        opt = gpflowopt.BayesianOptimizer(self.domain, self.acquisition,
                                          callback=cb)
        # Make sure its run and setup to skip
        self.acquisition.evaluate(np.zeros((1, 2)))
        opt.optimize(lambda X: parabola2d(X)[0], n_iter=1)
        self.assertFalse(cb.recompile)
        opt.optimize(lambda X: parabola2d(X)[0], n_iter=1)
        self.assertTrue(cb.recompile)
        self.assertFalse(self.acquisition.models[0]._needs_recompile)
Exemple #11
0
 def setUp(self):
     """Attach a default EI-on-parabola BayesianOptimizer to the test case."""
     super(TestBayesianOptimizer, self).setUp()
     ei = gpflowopt.acquisition.ExpectedImprovement(
         create_parabola_model(self.domain))
     self.optimizer = gpflowopt.BayesianOptimizer(self.domain, ei)
Exemple #12
0
     
     # Models (one model for each objective)
     objective_models = [gpflow.gpr.GPR(X.copy(), Y[:,[i]].copy(), gpflow.kernels.Matern52(2, ARD=True)) for i in range(Y.shape[1])]
     for model in objective_models:
         model.likelihood.variance = 0.01
     
     hvpoi = gpflowopt.acquisition.HVProbabilityOfImprovement(objective_models)
     
     # First setup the optimization strategy for the acquisition function
     # Combining MC step followed by L-BFGS-B
     acquisition_opt = gpflowopt.optim.StagedOptimizer([gpflowopt.optim.MCOptimizer(domain, 1000),
                                                        gpflowopt.optim.SciPyOptimizer(domain)])
     
     # Then run the BayesianOptimizer for 20 iterations
     print('Run BO ...')
     optimizer = gpflowopt.BayesianOptimizer(domain, hvpoi, optimizer=acquisition_opt, verbose=True)
     
     result = optimizer.optimize([af.obj_func], n_iter=n_eval)
     
     run_time = time.time() - t0
     print('Wall time: {:.1f}s'.format(run_time))
     
     # Save optimization history
     x_hist = af.synthesize(hvpoi.data[0])
     y_hist = hvpoi.data[1]
     np.save(x_hist_path, x_hist)
     np.save(y_hist_path, y_hist)
 
     print('Results:')
     print(result)
 
Exemple #13
0
    def __init__(self,
                 model_class,
                 problem_name,
                 initial_data_options,
                 anti_ideal_point,
                 optimization_domain,
                 fixed_hyperparams,
                 instance_options=None,
                 num_instances=0,
                 num_replications=1,
                 num_workers=1,
                 plot_options=None,
                 output_base_dir=''):
        """Set up the multi-objective (privacy vs. utility) Bayesian optimizer.

        Builds the privacy and utility GPs from initial data (loaded from the
        file given via ``--init-data-file``, or generated), constructs the
        GPFlowOpt domain and HVPoI acquisition, and wires up the staged
        acquisition optimizer plus the BayesianOptimizer itself.

        Args:
            model_class: class of the model under evaluation.
            problem_name: identifier of the benchmark problem.
            initial_data_options: options dict passed to _get_initial_points.
            anti_ideal_point: [privacy, utility] reference point for HVPoI.
            optimization_domain: dict hyperparam-name -> range to optimize.
            fixed_hyperparams: hyperparams held constant during optimization.
            instance_options: per-instance options (default: empty dict).
            num_instances: number of problem instances.
            num_replications: replications per evaluation.
            num_workers: multiprocessing worker count.
            plot_options: plotting options (default: empty dict).
            output_base_dir: base directory for result output.
        """
        # Fix: replace mutable default arguments ({}), which are shared
        # across instances, with None sentinels.
        instance_options = {} if instance_options is None else instance_options
        plot_options = {} if plot_options is None else plot_options

        mp.set_start_method(
            'spawn', force=True
        )  # workaround for weird matplotlib+multiprocessing compatibility issue

        self._model_class = model_class
        self._problem_name = problem_name
        self._num_instances = num_instances
        self._num_replications = num_replications

        # This is used as an ordering for consistently converting the hyperparams dict of
        # hyperparam-name-string -> hyperparam-value that we use into a numpy array of hyperparam
        # values that GPFlowOpt uses.
        self._optimizable_hyperparams_names = list(optimization_domain.keys())

        # NOTE(review): parse_args() aborts on any unrecognized CLI flag;
        # consider parse_known_args() if this class is embedded elsewhere.
        parser = argparse.ArgumentParser()
        parser.add_argument('--init-data-file')
        args = parser.parse_args()

        # Loads initial data into numpy arrays from file or generates it.
        if args.init_data_file is not None:
            initial_data_options["filename"] = args.init_data_file
        initial_data = self._get_initial_points(initial_data_options)
        self._initial_hyperparams_vals = initial_data[0]
        self._initial_privacy_vals = initial_data[1]
        self._initial_utility_vals = initial_data[2]
        self._max_initial_utility_variance = initial_data[3]

        # Transforms the initial data into a format that GPFlowOpt can use.
        transformed_data = self._create_initial_transformations(initial_data)
        self._transformed_initial_hyperparams_vals = transformed_data[0]
        self._transformed_initial_privacy_vals = transformed_data[1]
        self._transformed_initial_utility_vals = transformed_data[2]

        # Creates separate GPs for modeling privacy and utility.
        self._privacy_gp = self._create_privacy_gp()
        self._utility_gp = self._create_utility_gp()

        # Runs a hard-coded sanity check against data from the loaded file.
        self._print_sanity_check()

        # Sets the information for the reference (aka anti-ideal) point and its transformation.
        self._anti_ideal_point = anti_ideal_point
        transformed_anti_ideal_eps = self._transform_privacy(
            self._anti_ideal_point[0])
        transformed_anti_ideal_err = self._transform_utility(
            self._anti_ideal_point[1])
        self._transformed_anti_ideal_point = [
            transformed_anti_ideal_eps, transformed_anti_ideal_err
        ]

        # Optimization domain dictates the range of each to-be-optimized-over hyperparamter. Fixed hyperparams
        # are the hyperparams that aren't optimized over.
        self._optimization_domain = optimization_domain
        self._gpflowopt_domain = self._construct_gpflowopt_domain(
            optimization_domain)
        self._fixed_hyperparams = fixed_hyperparams
        self._instance_options = instance_options

        # Create acquisition function, set it's anti-ideal point, and create it's optimizer.
        self._acquisition = gpflowopt.acquisition.HVProbabilityOfImprovement(
            [self._privacy_gp, self._utility_gp])
        self._acquisition.reference = np.array(
            [self._transformed_anti_ideal_point])
        self._acquisition_optimizer = gpflowopt.optim.StagedOptimizer([
            gpflowopt.optim.MCOptimizer(self._gpflowopt_domain, 5000),
            gpflowopt.optim.SciPyOptimizer(self._gpflowopt_domain)
        ])

        # Create the Bayesian Optimizer.
        self._bayesian_optimizer = gpflowopt.BayesianOptimizer(
            self._gpflowopt_domain,
            self._acquisition,
            optimizer=self._acquisition_optimizer,
            scaling=False)

        # Multiprocessing worker info
        self._num_workers = num_workers

        # Misc fields
        self._saved_hypervolumes = []
        self._all_results = []
        self._output_base_dir = output_base_dir
        self._run_name = int(time.time() * 1000)  # millisecond timestamp as run id
        self._save_results = True
        self._make_results_directory()
        self._show_plot = plot_options.get('show_plot', False)
        self._plot_options = plot_options
Exemple #14
0
def baysian_opt():
    """Multi-objective BO over inference time and power consumption, then plot.

    Uses the module-level ``domain`` and ``objective_func``; samples an
    initial Latin hypercube design, fits one GP per objective, runs HVPoI
    Bayesian optimization for 30 iterations and shows Pareto/convergence
    plots.
    """
    global is_sampling
    n_samples = 10
    design = gpflowopt.design.LatinHyperCube(n_samples, domain)
    X = np.array(design.generate())
    Y = objective_func(X)

    # Drop samples where the objective produced no output (None entries).
    bad_rows = np.where(Y[:n_samples] == None)
    Y = np.delete(Y, (bad_rows[0]), axis=0)
    X = np.delete(X, (bad_rows[0]), axis=0)
    Y = np.array(Y, dtype=float)
    n_samples = len(X)

    # One independent Matern-5/2 GP per objective column.
    objective_models = []
    for col in range(Y.shape[1]):
        gp = gpflow.gpr.GPR(X.copy(), Y[:, [col]].copy(),
                            gpflow.kernels.Matern52(domain.size, ARD=True))
        gp.likelihood.variance = 0.01
        objective_models.append(gp)

    hvpoi = gpflowopt.acquisition.HVProbabilityOfImprovement(objective_models)
    stages = [gpflowopt.optim.MCOptimizer(domain, n_samples),
              gpflowopt.optim.SciPyOptimizer(domain)]
    acquisition_opt = gpflowopt.optim.StagedOptimizer(stages)

    # Run the BayesianOptimizer for 30 iterations.
    optimizer = gpflowopt.BayesianOptimizer(domain,
                                            hvpoi,
                                            optimizer=acquisition_opt,
                                            verbose=True)
    optimizer.optimize(objective_func, n_iter=30)

    pf, dom = gpflowopt.pareto.non_dominated_sort(hvpoi.data[1])

    # Scatter of the evaluated objectives, colored by dominance rank.
    plt.scatter(hvpoi.data[1][:, 0], hvpoi.data[1][:, 1], c=dom)
    plt.title('Pareto set')
    plt.xlabel('Inference Time')
    plt.ylabel('Power Consumption')
    plt.show()

    # Running minimum of each objective over the evaluation history.
    evals = np.arange(0, hvpoi.data[0].shape[0])
    for col, color, label in ((0, 'b', 'Inference Time'),
                              (1, 'g', 'Power Consumption')):
        plt.plot(evals,
                 np.minimum.accumulate(hvpoi.data[1][:, col]),
                 color,
                 label=label)
        plt.ylabel('fmin')
        plt.xlabel('Number of evaluated points')
        plt.legend()
        plt.show()