Example #1
    def test_duplicate_with_ignored_and_pending(self):
        space = [
            {'name': 'var_1', 'type': 'continuous', 'domain':(-3,1), 'dimensionality': 1},
            {'name': 'var_2', 'type': 'discrete', 'domain': (0,1,2,3)},
            {'name': 'var_3', 'type': 'categorical', 'domain': (0, 1)}
        ]
        design_space = Design_space(space)

        np.random.seed(666)

        number_points = 5

        zipped_X = initial_design("random",design_space,number_points)
        pending_zipped_X = initial_design("random", design_space, number_points)
        ignored_zipped_X = initial_design("random", design_space, number_points)

        d = DuplicateManager(design_space, zipped_X, pending_zipped_X, ignored_zipped_X)

        duplicate_in_pending_state = np.atleast_2d(pending_zipped_X[0,:].copy())

        assert d.is_zipped_x_duplicate(duplicate_in_pending_state)

        assert d.is_unzipped_x_duplicate(design_space.unzip_inputs(duplicate_in_pending_state))

        duplicate_in_ignored_state = np.atleast_2d(ignored_zipped_X[0,:].copy())

        assert d.is_zipped_x_duplicate(duplicate_in_ignored_state)

        assert d.is_unzipped_x_duplicate(design_space.unzip_inputs(duplicate_in_ignored_state))
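The snippets in this listing assume a handful of GPyOpt imports that the original test files provide. A minimal standalone sketch of the pattern above (import paths assumed from a recent GPyOpt release; the categorical variable is dropped so the zipped matrix stays two columns wide):

import numpy as np
from GPyOpt.core.task.space import Design_space
from GPyOpt.experiment_design import initial_design
from GPyOpt.util.duplicate_manager import DuplicateManager

space = Design_space([
    {'name': 'var_1', 'type': 'continuous', 'domain': (-3, 1), 'dimensionality': 1},
    {'name': 'var_2', 'type': 'discrete', 'domain': (0, 1, 2, 3)},
])
X = initial_design('random', space, 5)   # zipped sample matrix, shape (5, 2)
dm = DuplicateManager(space, X)          # pending/ignored matrices are optional
assert dm.is_zipped_x_duplicate(np.atleast_2d(X[0].copy()))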
Example #2
    def test_grid_design(self):
        init_points_count = 3
        samples = initial_design('grid', self.design_space, init_points_count)
        self.assertEqual(len(samples), init_points_count)
        self.assert_samples_against_space(samples)

        init_points_count = 1000
        samples = initial_design('grid', self.design_space, init_points_count)
        self.assertEqual(len(samples), init_points_count)
        self.assert_samples_against_space(samples)
Example #5
    def test_grid_design_with_multiple_continuous_variables(self):
        self.space.extend([
            {'name': 'var_5', 'type': 'continuous', 'domain':(0,5), 'dimensionality': 2},
            {'name': 'var_6', 'type': 'continuous', 'domain':(-5,5), 'dimensionality': 1}
        ])
        self.design_space = Design_space(self.space)

        init_points_count = 10
        samples = initial_design('grid', self.design_space, init_points_count)
        self.assertEqual(len(samples), 1)

        init_points_count = 100
        samples = initial_design('grid', self.design_space, init_points_count)
        self.assertEqual(len(samples), 3**4)
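The two assertions are explained by how the grid design rounds the requested count: with d continuous dimensions it keeps k points per dimension, where k is the largest integer with k**d <= init_points_count. Assuming the base self.space contributes one continuous dimension, the extended space spans four, so a request of 10 collapses to 1**4 = 1 point and a request of 100 to 3**4 = 81. A sketch of that rounding rule (inferred from the assertions above, not copied from GPyOpt's GridDesign source):

def grid_points_count(requested, continuous_dims):
    # largest k with k ** continuous_dims <= requested
    k = int(requested ** (1.0 / continuous_dims))
    while (k + 1) ** continuous_dims <= requested:  # guard against float round-off
        k += 1
    return k ** continuous_dims

assert grid_points_count(10, 4) == 1        # matches assertEqual(len(samples), 1)
assert grid_points_count(100, 4) == 3 ** 4  # matches assertEqual(len(samples), 3**4)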
Example #6
    def start(self):
        if self.Y is None:
            if "Y_init" in self.module and "X_init" in self.module:
                self.X = self.module["X_init"]
                self.Y = self.module["Y_init"]
            else:
                if "X_init" in self.module:
                    self.X = self.module["X_init"]
                elif self.X is None:
                    self.X = initial_design(self.initial_design_type,
                                            self.space,
                                            self.initial_design_numdata)
                self.Y = self.run_initial(
                    self.X)  # FIXME: wait for all these to finish

        assert len(self.X) == len(self.Y)
        print("Initial: {} points. Max exp num: {}.".format(
            len(self.X), self.max_exp))

        opt = GPyOpt.methods.BayesianOptimization(
            f=None,
            domain=self.domain,
            constraints=self.constraints,
            cost_withGradients=None,
            model_type='GP',
            X=self.X,
            Y=self.Y,
            acquisition_type='EI',
            acquisition_optimizer_type='lbfgs',
            evaluator_type='local_penalization',
            batch_size=self.num_processes)
        self.init_opt(opt)
        first_batch_X = opt.suggest_next_locations()
        self.run_batch(first_batch_X)
        self.wait_and_run()
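start() drives GPyOpt in external-objective mode: passing f=None together with explicit X and Y makes suggest_next_locations() propose a batch without evaluating anything itself, and the caller runs the evaluations (here via run_batch and a job queue). A reduced sketch of that loop with a stand-in quadratic objective (the GPyOpt calls are the documented API; the domain and objective are invented for illustration):

import numpy as np
import GPyOpt
from GPyOpt.experiment_design import initial_design

domain = [{'name': 'x', 'type': 'continuous', 'domain': (-1, 1), 'dimensionality': 2}]
space = GPyOpt.Design_space(domain)
X = initial_design('random', space, 4)
Y = np.sum(X ** 2, axis=1, keepdims=True)  # evaluated outside of GPyOpt

for _ in range(3):  # each round: refit on all data, suggest a batch, evaluate it
    bo = GPyOpt.methods.BayesianOptimization(
        f=None, domain=domain, X=X, Y=Y,
        acquisition_type='EI', evaluator_type='local_penalization', batch_size=2)
    X_batch = bo.suggest_next_locations()
    X = np.vstack((X, X_batch))
    Y = np.vstack((Y, np.sum(X_batch ** 2, axis=1, keepdims=True)))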
Example #7
    def test_duplicate(self):
        space = [
            {'name': 'var_1', 'type': 'continuous', 'domain':(-3,1), 'dimensionality': 1},
            {'name': 'var_2', 'type': 'discrete', 'domain': (0,1,2,3)},
            {'name': 'var_3', 'type': 'categorical', 'domain': (0, 1)}
        ]
        design_space = Design_space(space)

        np.random.seed(666)

        number_points = 5

        zipped_X = initial_design("random",design_space,number_points)

        d = DuplicateManager(design_space, zipped_X)

        duplicate = np.atleast_2d(zipped_X[0,:].copy())

        assert d.is_zipped_x_duplicate(duplicate)

        assert d.is_unzipped_x_duplicate(design_space.unzip_inputs(duplicate))

        non_duplicate = np.array([[-2.5,  2., 0.]])

        for x in zipped_X:
            assert not np.all(non_duplicate==x)

        assert not d.is_zipped_x_duplicate(non_duplicate)

        assert not d.is_unzipped_x_duplicate(design_space.unzip_inputs(non_duplicate))
Example #8
    def suggest_sample(self, number_of_samples=1):
        """
        Returns a suggested next point to evaluate.
        """
        suggested_sample = initial_design('random',
                                          self.decision_context_space, 1)
        return suggested_sample
    def _compute_final_evaluations(self,
                                   pending_zipped_X=None,
                                   ignored_zipped_X=None,
                                   re_use=False):
        """
        Computes the location of the new evaluation (optimizes the acquisition in the standard case).
        :param pending_zipped_X: matrix of input configurations that are in a pending state (i.e., do not have an evaluation yet).
        :param ignored_zipped_X: matrix of input configurations that the user black-lists, i.e., those configurations will not be suggested again.
        :return:
        """
        ## --- Update the context if any

        self.acquisition.optimizer.context_manager = ContextManager(
            self.space,
            self.context,
        )
        print("compute next evaluation")
        if self.sample_from_acq:
            print("suggest next location given THOMPSON SAMPLING")
            candidate_points = initial_design('latin', self.space, 2000)
            aux_var = self.acquisition._compute_acq(candidate_points)
        else:
            if self.constraint is not None:
                aux_var = self.last_step_evaluator.compute_batch(
                    duplicate_manager=None, re_use=re_use, constrained=True)
            else:
                aux_var = self.last_step_evaluator.compute_batch(
                    duplicate_manager=None, re_use=re_use, constrained=False)

        return self.space.zip_inputs(aux_var[0])
Example #11
    def test_random_design_with_bandit_only(self):
        space = [self.bandit_variable]
        self.design_space = Design_space(space)
        initial_points_count = 3

        samples = initial_design('random', self.design_space, initial_points_count)

        self.assertEqual(len(samples), initial_points_count)
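Unlike the other variable types, a bandit variable's domain is a 2-D array whose rows are the available arms, and the random design draws whole rows. A sketch of what self.bandit_variable plausibly looks like (the arm values here are invented):

import numpy as np
from GPyOpt.core.task.space import Design_space
from GPyOpt.experiment_design import initial_design

bandit_variable = {'name': 'arms', 'type': 'bandit',
                   'domain': np.array([[-1, 0], [0, 1], [1, 2]])}  # hypothetical arms
space = Design_space([bandit_variable])
samples = initial_design('random', space, 3)
assert len(samples) == 3
# every sample should be one of the rows (arms) of the bandit domain
assert all(any(np.array_equal(s, arm) for arm in bandit_variable['domain']) for s in samples)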
Example #12
    def verbosity_plot_2D(self):
        ####plots
        print("generating plots")
        design_plot = initial_design('random', self.space, 1000)

        # precision = []
        # for i in range(20):
        #     kg_f = -self.acquisition._compute_acq(design_plot)
        #     precision.append(np.array(kg_f).reshape(-1))

        # print("mean precision", np.mean(precision, axis=0), "std precision",  np.std(precision, axis=0), "max precision", np.max(precision, axis=0), "min precision",np.min(precision, axis=0))
        ac_f = self.expected_improvement(design_plot)

        Y, _ = self.objective.evaluate(design_plot)
        C, _ = self.constraint.evaluate(design_plot)
        pf = self.probability_feasibility_multi_gp(design_plot, self.model_c).reshape(-1, 1)
        mu_f = self.model.predict(design_plot)[0]

        bool_C = np.prod(np.concatenate(C, axis=1) < 0, axis=1)
        func_val = Y * bool_C.reshape(-1, 1)

        # kg_f = -self.acquisition._compute_acq(design_plot)
        fig, axs = plt.subplots(2, 2)
        axs[0, 0].set_title('True Function')
        axs[0, 0].scatter(design_plot[:, 0], design_plot[:, 1], c=np.array(func_val).reshape(-1))
        axs[0, 0].scatter(self.X[:, 0], self.X[:, 1], color="red", label="sampled")
        #suggested_sample_value = self.objective.evaluate(self.suggested_sample)
        axs[0, 0].scatter(self.suggested_sample[:,0], self.suggested_sample[:,1], marker="x", color="red",
                          label="suggested")
        axs[0, 0].legend()

        axs[0, 1].set_title('approximation Acqu Function')
        axs[0, 1].scatter(design_plot[:,0],design_plot[:,1], c=np.array(ac_f).reshape(-1))
        axs[0, 1].legend()

        # axs[1, 0].set_title("KG")
        # axs[1, 0].scatter(design_plot[:,0],design_plot[:,1],c= np.array(kg_f).reshape(-1))
        # axs[1, 0].legend()

        axs[1, 1].set_title("mu pf")
        axs[1, 1].scatter(design_plot[:,0],design_plot[:,1],c= np.array(mu_f).reshape(-1) * np.array(pf).reshape(-1))
        axs[1, 1].legend()

        # axs[2, 1].set_title('approximation kg Function')
        # axs[2, 1].scatter(design_plot, np.array(kg_f).reshape(-1))
        # axs[2, 1].legend()
        # import os
        # folder = "IMAGES"
        # subfolder = "new_branin"
        # cwd = os.getcwd()
        # print("cwd", cwd)
        # time_taken = time.time()
        # path = cwd + "/" + folder + "/" + subfolder + '/im_' +str(time_taken) +str(self.X.shape[0]) + '.pdf'
        # if os.path.isdir(cwd + "/" + folder + "/" + subfolder) == False:
        #     os.makedirs(cwd + "/" + folder + "/" + subfolder)
        # plt.savefig(path)
        plt.show()
Example #13
    def test_random_design_with_constraints(self):
        constraints = [{'name': 'const_1', 'constraint': 'x[:,0]**2 - 1'}]
        self.design_space = Design_space(self.space, constraints=constraints)
        initial_points_count = 10

        samples = initial_design('random', self.design_space, initial_points_count)

        self.assert_samples_against_space(samples)
        self.assertTrue((samples[:,0]**2 - 1 < 0).all())
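Constraint entries are plain Python expressions evaluated over the zipped sample matrix x, and a point counts as feasible when every expression comes out negative, which is exactly what the final assertion checks. Only the random design can honour constraints, by drawing candidates and rejecting infeasible rows; a self-contained sketch of that rejection step (the eval-based evaluation mirrors the constraint-string format, with eval's usual safety caveats):

import numpy as np

def rejection_filter(X, constraint_exprs):
    # keep rows where every constraint expression evaluates below zero
    keep = np.ones(len(X), dtype=bool)
    for expr in constraint_exprs:
        keep &= eval(expr, {'x': X, 'np': np}) < 0
    return X[keep]

X = np.random.uniform(-2, 2, size=(100, 2))
feasible = rejection_filter(X, ['x[:,0]**2 - 1'])
assert (feasible[:, 0] ** 2 - 1 < 0).all()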
Example #15
    def next_evaluation_params(self):
        """
        Returns the next evaluation points requested by this optimiser
        """
        if self.evaluated_init:
            x_new = self.optimiser._compute_next_evaluations()
        else:
            x_new = initial_design(self.optimiser.initial_design_type,
                                   self.optimiser.space, self._nb_init)
        return x_new
    def create_initial_jobs(self):
        print("\nCreating initial jobs.\n")
        self.safe_query(
            "UPDATE job_runners SET current_task=\"Creating Initial Jobs\" WHERE id={0}"
            .format(self.runner_id))
        space = gpo.Design_space(self.domain)
        X_init = initial_design('sobol', space,
                                self.job_properties['initial_iterations'])
        for X in X_init:
            self.safe_query(self.insert_query_from_X(X, True))
    def true_best_value(self):
        from scipy.optimize import minimize

        X = initial_design('random', self.space, 1000)

        fval = self.func_val(X)

        anchor_point = np.array(X[np.argmin(fval)]).reshape(-1)
        anchor_point = anchor_point.reshape(1, -1)
        print("anchor_point",anchor_point)
        best_design = minimize(self.func_val, anchor_point, method='Nelder-Mead', tol=1e-8).x

        self.true_best_stats["true_best"].append(self.func_val(best_design))
        self.true_best_stats["mean_gp"].append(self.model.posterior_mean(best_design))
        self.true_best_stats["std gp"].append(self.model.posterior_variance(best_design, noise=False))
        self.true_best_stats["pf"].append(self.probability_feasibility_multi_gp(best_design,self.model_c).reshape(-1,1))
        mean = self.model_c.posterior_mean(best_design)
        var = self.model_c.posterior_variance(best_design, noise=False)
        residual_noise = self.model_c.posterior_variance(self.X[1], noise=False)
        self.true_best_stats["mu_pf"].append(mean)
        self.true_best_stats["var_pf"].append(var)
        self.true_best_stats["residual_noise"].append(residual_noise)

        if False:
            fig, axs = plt.subplots(3, 2)
            N = len(np.array(self.true_best_stats["std gp"]).reshape(-1))
            GAP = np.array(np.abs(np.abs(self.true_best_stats["true_best"]).reshape(-1) - np.abs(self.true_best_stats["mean_gp"]).reshape(-1))).reshape(-1)
            print("GAP len", len(GAP))
            print("N",N)
            axs[0, 0].set_title('GAP')
            axs[0, 0].plot(range(N),GAP)
            axs[0, 0].set_yscale("log")

            axs[0, 1].set_title('VAR')
            axs[0, 1].plot(range(N),np.array(self.true_best_stats["std gp"]).reshape(-1))
            axs[0, 1].set_yscale("log")

            axs[1, 0].set_title("PF")
            axs[1, 0].plot(range(N),np.array(self.true_best_stats["pf"]).reshape(-1))

            axs[1, 1].set_title("mu_PF")
            axs[1, 1].plot(range(N),np.abs(np.array(self.true_best_stats["mu_pf"]).reshape(-1)))
            axs[1, 1].set_yscale("log")

            axs[2, 1].set_title("std_PF")
            axs[2, 1].plot(range(N),np.sqrt(np.array(self.true_best_stats["var_pf"]).reshape(-1)))
            axs[2, 1].set_yscale("log")

            axs[2, 0].set_title("Irreducible noise")
            axs[2, 0].plot(range(N), np.sqrt(np.array(self.true_best_stats["residual_noise"]).reshape(-1)))
            axs[2, 0].set_yscale("log")

            plt.show()
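true_best_value estimates the ground-truth optimum with a cheap two-stage pattern: take the best point of a large random design as an anchor, then polish it with a local Nelder-Mead run. The same pattern in isolation, with a stand-in quadratic in place of self.func_val and plain uniform sampling in place of initial_design:

import numpy as np
from scipy.optimize import minimize

def func_val(x):  # stand-in objective; any vectorisable function works
    x = np.atleast_2d(x)
    return np.sum((x - 0.3) ** 2, axis=1)

X = np.random.uniform(-1, 1, size=(1000, 2))
anchor_point = X[np.argmin(func_val(X))].reshape(1, -1)  # best random point
best_design = minimize(lambda x: func_val(x)[0], anchor_point,
                       method='Nelder-Mead', tol=1e-8).x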
Example #18
    def _set_initial_values(self):
        if self.X is None:
            self.X = initial_design(self.initial_design_type, self.subspace, self.initial_design_numdata)
            self.Y, _ = self.objective.evaluate(self.map_to_original_space(x=self.X))
        elif self.X is not None and self.Y is None:
            self.Y, _ = self.objective.evaluate(self.map_to_original_space(x=self.X))

        # save initial values
        self.initial_X = deepcopy(self.X)
        if self.maximize:
            self.initial_Y = -deepcopy(self.Y)
        else:
            self.initial_Y = deepcopy(self.Y)
Example #19
def example_initial_design():
    func = GPyOpt.objective_examples.experimentsNd.alpine1(input_dim=2)

    mixed_domain = [{
        'name': 'var1_2',
        'type': 'continuous',
        'domain': (-10, 10),
        'dimensionality': 1
    }, {
        'name': 'var5',
        'type': 'continuous',
        'domain': (-1, 5)
    }]

    space = GPyOpt.Design_space(mixed_domain)
    data_init = 500

    ### --- Grid design
    X = initial_design('grid', space, data_init)
    plt.plot(X[:, 0], X[:, 1], 'b.')
    plt.title('Grid design')

    ### --- Random initial design
    X = initial_design('random', space, data_init)
    plt.plot(X[:, 0], X[:, 1], 'b.')
    plt.title('Random design')

    ### --- Latin design
    X = initial_design('latin', space, data_init)
    plt.plot(X[:, 0], X[:, 1], 'b.')
    plt.title('Latin design')

    ### --- Sobol design
    X = initial_design('sobol', space, data_init)
    plt.plot(X[:, 0], X[:, 1], 'b.')
    plt.title('Sobol design')
    pass
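As written, the four plt.plot calls above draw into the same axes, so the designs are overplotted and only the last title survives. A variant that keeps all four designs visible (reusing space and data_init from the function above; only the matplotlib layout changes):

fig, axes = plt.subplots(2, 2, figsize=(8, 8))
for ax, design in zip(axes.ravel(), ['grid', 'random', 'latin', 'sobol']):
    X = initial_design(design, space, data_init)
    ax.plot(X[:, 0], X[:, 1], 'b.')
    ax.set_title('{} design'.format(design.capitalize()))
plt.show()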
Example #20
    def step(self):
        """
        Runs Bayesian Optimization every loop
        """
        if self.Y.shape[0] < self.initial_design_numdata:
            self.suggested_sample = initial_design('random', self.space, 1)
        else:
            self.suggested_sample = self._compute_next_evaluations()

        self.X = np.vstack((self.X, self.suggested_sample))

        # --- Update current evaluation time and function evaluations
        self.num_acquisitions += 1

        if self.verbosity:
            print("num acquisition: {}".format(self.num_acquisitions))

        return np.array(self.suggested_sample[0, :])
Example #22
    def test_nonrandom_designs_with_constrains(self):
        constraints = [{'name': 'const_1', 'constraint': 'x[:,0]**2 - 1'}]
        self.design_space = Design_space(self.space, constraints=constraints)
        initial_points_count = 10

        with self.assertRaises(InvalidConfigError):
            initial_design('grid', self.design_space, initial_points_count)

        with self.assertRaises(InvalidConfigError):
            initial_design('latin', self.design_space, initial_points_count)

        with self.assertRaises(InvalidConfigError):
            initial_design('sobol', self.design_space, initial_points_count)
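The test pins down a contract: the deterministic designs (grid, latin, sobol) refuse constrained spaces with InvalidConfigError, so only 'random', which can reject infeasible draws, is usable there. A defensive wrapper built on that contract (a sketch; has_constraints() is assumed to be part of Design_space's public API):

def safe_initial_design(design_type, design_space, points_count):
    # fall back to the rejection-sampling random design when constraints are present
    if design_space.has_constraints() and design_type != 'random':
        design_type = 'random'
    return initial_design(design_type, design_space, points_count)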
Example #23
    def init(self, max_iter=0, context=None, verbosity=False):
        """
        Runs Bayesian Optimization 
        :param verbosity: flag to print the optimization results after each iteration (default, False).
        :param context: fixes specified variables to a particular context (values) for the optimization run (default, None).
        """
        # --- Save the options to print and save the results
        self.max_iter = max_iter
        self.verbosity = verbosity
        self.context = context

        # --- Initialize iterations and running time
        self.time_zero = time.time()
        self.cum_time = 0
        self.num_acquisitions = 0
        self.suggested_sample = initial_design('random', self.space, 1)
        self.X = self.suggested_sample
        self.Y_new = self.Y
        return np.array(self.suggested_sample[0, :])
Example #24
    def _init_design_chooser(self):
        """
        Initializes the choice of X and Y based on the selected initial design and number of points selected.
        """

        # If objective function was not provided, we require some initial sample data
        if self.f is None and (self.X is None or self.Y is None):
            raise InvalidConfigError(
                "Initial data for both X and Y is required when objective function is not provided")

        # Case 1:
        if self.X is None:
            self.X = initial_design(
                self.initial_design_type,
                self.space,
                self.initial_design_numdata)
            self.Y, _ = self.objective.evaluate(self.X)
        # Case 2
        elif self.X is not None and self.Y is None:
            self.Y, _ = self.objective.evaluate(self.X)
Example #25
    def test_random_design(self):
        init_points_count = 10
        samples = initial_design('random', self.design_space, init_points_count)
        self.assertEqual(len(samples), init_points_count)
        self.assert_samples_against_space(samples)
    def optimize(self,
                 f=None,
                 df=None,
                 f_df=None,
                 duplicate_manager=None,
                 f_aux=None):
        """
        Optimizes the input function.

        :param f: function to optimize.
        :param df: gradient of the function to optimize.
        :param f_df: returns both the function to optimize and its gradient.

        """
        self.f = f
        self.df = df
        self.f_df = f_df

        ## --- Update the optimizer, in case context has been passed.
        self.optimizer = choose_optimizer(
            self.optimizer_name, self.context_manager.noncontext_bounds)

        ## --- Selecting the anchor points and removing duplicates
        X_init = initial_design(random_design_type, self.space,
                                self.n_starting)
        fX_init = f(X_init)
        scores = fX_init.flatten()
        anchor_points = X_init[
            np.argsort(scores)[:min(len(scores), self.n_anchor)], :]
        anchor_points_values = np.sort(
            scores)[0:min(len(scores), self.n_anchor)]

        ## -- Select the anchor points (with context)
        x_min_anchor = np.atleast_2d(anchor_points[0])
        fx_min_anchor = anchor_points_values[0]
        x_min = np.atleast_2d(anchor_points[0])
        fx_min = anchor_points_values[0]
        print('anchor points')
        print(anchor_points)
        print(anchor_points_values)
        parallel = True
        if parallel:
            n_cores = 4
            pool = Pool(n_cores)
            i = 0
            while i < self.n_anchor:
                points_to_optimize = anchor_points[i:i + 4, :]
                optimized_points = pool.map(
                    self._parallel_optimization_wrapper, points_to_optimize)
                x_aux, fx_aux = min(optimized_points, key=lambda t: t[1])
                if fx_aux < fx_min + 1e-2:
                    x_min = x_aux
                    fx_min = fx_aux
                    if i > 0:
                        break
                else:
                    fx_aux = f(np.atleast_2d(x_aux))
                    if fx_aux < fx_min + 1e-2:
                        x_min = x_aux
                        fx_min = fx_aux
                        if i > 0:
                            break
                i += 4
        else:
            optimized_points = [
                apply_optimizer(self.optimizer,
                                a,
                                f=f,
                                df=None,
                                f_df=f_df,
                                duplicate_manager=duplicate_manager,
                                context_manager=self.context_manager,
                                space=self.space) for a in anchor_points
            ]
            x_min, fx_min = min(optimized_points, key=lambda t: t[1])

        print('min and min value before bo')
        print(x_min)
        print(fx_min)
        if fx_min_anchor < fx_min + 1e-3:
            try:
                aux_objective = GPyOpt2.core.task.SingleObjective(f)
                aux_model = GPyOpt2.models.GPModel_MCMC()
                aux_acq_opt = GPyOpt.optimization.AcquisitionOptimizer(
                    optimizer='lbfgs', space=self.space)
                aux_acquisition = GPyOpt2.acquisitions.AcquisitionEI_MCMC(
                    aux_model, self.space, optimizer=aux_acq_opt)
                aux_evaluator = GPyOpt2.core.evaluators.Sequential(
                    aux_acquisition)
                bo = GPyOpt2.core.BO(aux_model, self.space, aux_objective,
                                     aux_acquisition, aux_evaluator, X_init,
                                     fX_init)
                bo.run_optimization(max_iter=75)
                x_min, fx_min = bo.get_results()
                x_min = np.atleast_2d(x_min)
                print('min and min value after bo')
                print(x_min)
                print(fx_min)
            except:
                pass
        return x_min, fx_min
    def get_samples(self, n_samples, log_p_function, burn_in_steps=50):
        samples = initial_design('latin', self.space, n_samples)
        samples_log = np.array([[i] for i in range(n_samples)])

        return samples, samples_log
    def verbosity_plot_2D_constrained(self):
        ####plots
        print("generating plots")
        design_plot = initial_design('random', self.space, 1000)

        # precision = []
        # for i in range(20):
        # kg_f = -self.acquisition._compute_acq(design_plot)
        #     precision.append(np.array(kg_f).reshape(-1))

        # print("mean precision", np.mean(precision, axis=0), "std precision",  np.std(precision, axis=0), "max precision", np.max(precision, axis=0), "min precision",np.min(precision, axis=0))

        # self.acquisition._gradient_sanity_check_2D(f=self.acquisition._compute_acq, grad_f = self.acquisition.acquisition_Gradients, x_value = self.suggested_sample, delta=1e-4)

        Y, _ = self.objective.evaluate(design_plot)
        Y = np.concatenate(Y, axis=1)
        C, _ = self.constraint.evaluate(design_plot)
        pf = self.probability_feasibility_multi_gp(design_plot,
                                                   self.model_c).reshape(
                                                       -1, 1)
        mu_f = self.model.posterior_mean(design_plot)
        bool_C = np.prod(np.concatenate(C, axis=1) < 0, axis=1)
        bool_C = np.array(bool_C, dtype=bool)

        func_val = Y[bool_C]
        mu_predicted_best = self.model.posterior_mean(self.suggested_sample)
        # mu_predicted_final_best = self.model.posterior_mean(self.suggested_final_evaluation)
        feasable_mu_index = np.array(pf > 0.51, dtype=bool).reshape(-1)

        # HVI = self.acquisition._compute_acq(design_plot[feasable_mu_index ])
        # kg_f = -self.acquisition._compute_acq(design_plot)
        # HVI_optimiser = self.acquisition._compute_acq(self.suggested_sample)
        # print("optimiser best", HVI_optimiser, "discretisation best", np.max(np.array(HVI).reshape(-1)))
        # print("x", design_plot[np.argmax(np.array(HVI).reshape(-1))])
        # x_suggested_discretisation = design_plot[np.argmax(np.array(HVI).reshape(-1))]
        # print("ac with grad info optimised", self.acquisition._compute_acq_withGradients(self.suggested_sample), "ac info optimised",self.acquisition._compute_acq(self.suggested_sample))
        # print("best discretisation ac", self.acquisition._compute_acq(x_suggested_discretisation ))
        # print("best discretisation ac with gradients", self.acquisition._compute_acq_withGradients(x_suggested_discretisation))
        # print("mu predicted best opt", mu_predicted_best[0], mu_predicted_best[1])
        # print("mu predicted best discretisation", mu_f[0][np.argmax(np.array(HVI).reshape(-1))], mu_f[1][np.argmax(np.array(HVI).reshape(-1))])

        fig, axs = plt.subplots(2, 2)
        axs[0, 0].set_title('True PF Function')
        axs[0, 0].scatter(func_val[:, 0], func_val[:, 1])

        axs[0, 1].set_title("HVI")
        axs[0, 1].scatter(mu_f[0], mu_f[1],
                          color="green")  # , c=np.array(HVI).reshape(-1))
        axs[0, 1].scatter(mu_f[0][feasable_mu_index],
                          mu_f[1][feasable_mu_index],
                          color="blue")  #, c=np.array(HVI).reshape(-1))
        axs[0, 1].scatter(mu_predicted_best[0],
                          mu_predicted_best[1],
                          color="red",
                          label="optimiser best")
        # axs[0, 1].scatter(mu_predicted_final_best[0], mu_predicted_final_best[1], color="red", label="optimiser final best")
        #axs[0, 1].scatter(mu_f[0][np.argmax(np.array(HVI).reshape(-1))], mu_f[1][np.argmax(np.array(HVI).reshape(-1))], color="red",label="discretisation best")
        axs[0, 1].legend()

        axs[1, 0].set_title('Opportunity Cost')
        axs[1, 0].plot(range(len(self.Opportunity_Cost["Hypervolume"])),
                       self.Opportunity_Cost["Hypervolume"])
        axs[1, 0].set_yscale("log")
        axs[1, 0].legend()

        Y_reccomended, _ = self.objective.evaluate(self.suggested_sample)
        # Y_reccomended = np.concatenate(Y_reccomended, axis=1)

        axs[1, 1].set_title('True PF Function with sampled points')
        axs[1, 1].scatter(func_val[:, 0], func_val[:, 1])
        axs[1, 1].scatter(Y_reccomended[0],
                          Y_reccomended[1],
                          color="red",
                          label="sampled")
        axs[1, 1].scatter(self.Y[0], self.Y[1], color="green")

        # import os
        # folder = "IMAGES"
        # subfolder = "new_branin"
        # cwd = os.getcwd()
        # print("cwd", cwd)
        # time_taken = time.time()
        # path = cwd + "/" + folder + "/" + subfolder + '/im_' +str(time_taken) +str(self.X.shape[0]) + '.pdf'
        # if os.path.isdir(cwd + "/" + folder + "/" + subfolder) == False:
        #     os.makedirs(cwd + "/" + folder + "/" + subfolder)
        # plt.savefig(path)
        plt.show()
    def verbosity_plot_2D_unconstrained(self):
        ####plots
        print("generating plots")
        design_plot = initial_design('random', self.space, 10000)

        # precision = []
        # for i in range(20):
        # kg_f = -self.acquisition._compute_acq(design_plot)
        #     precision.append(np.array(kg_f).reshape(-1))

        # print("mean precision", np.mean(precision, axis=0), "std precision",  np.std(precision, axis=0), "max precision", np.max(precision, axis=0), "min precision",np.min(precision, axis=0))

        func_val, _ = self.objective.evaluate(design_plot)
        func_val = np.concatenate(func_val, axis=1)

        mu_f = self.model.posterior_mean(design_plot)
        var_f = self.model.posterior_variance(design_plot, noise=False)
        mu_predicted_best = self.model.posterior_mean(self.suggested_sample)

        HVI = self.acquisition._compute_acq(design_plot)
        fig, axs = plt.subplots(2, 2)
        axs[0, 0].set_title('True PF Function')
        axs[0, 0].scatter(func_val[:, 0], func_val[:, 1])

        print("self.suggested_sample", self.suggested_sample)
        axs[0, 1].set_title("GP(X)")
        axs[0, 1].scatter(
            design_plot[:, 0],
            design_plot[:, 1],
            c=np.array(mu_f).reshape(-1))  #,c= np.array(HVI).reshape(-1))
        axs[0, 1].scatter(self.suggested_sample[:, 0],
                          self.suggested_sample[:, 1],
                          color="magenta")
        axs[0, 1].legend()

        print("self.suggested_sample", self.suggested_sample)
        axs[1, 0].set_title("Var[GP(X)]")
        axs[1, 0].scatter(
            design_plot[:, 0],
            design_plot[:, 1],
            c=np.array(var_f).reshape(-1))  #,c= np.array(HVI).reshape(-1))
        axs[1, 0].scatter(self.suggested_sample[:, 0],
                          self.suggested_sample[:, 1],
                          color="magenta")
        axs[1, 0].legend()

        axs[1, 1].set_title("acq(X)")
        axs[1, 1].scatter(
            design_plot[:, 0],
            design_plot[:, 1],
            c=np.array(HVI).reshape(-1))  #,c= np.array(HVI).reshape(-1))
        axs[1, 1].scatter(self.suggested_sample[:, 0],
                          self.suggested_sample[:, 1],
                          color="magenta")
        axs[1, 1].legend()

        # axs[1, 1].set_title("mu pf")
        # axs[1, 1].scatter(design_plot[:,0],design_plot[:,1],c= np.array(mu_f).reshape(-1) * np.array(pf).reshape(-1))
        # axs[1, 1].legend()
        #
        # axs[1, 0].set_title('Opportunity Cost')
        # axs[1, 0].plot(range(len(self.Opportunity_Cost)), self.Opportunity_Cost)
        # axs[1, 0].set_yscale("log")
        # axs[1, 0].legend()

        # axs[1, 1].set_title('True PF Function with sampled points')
        # axs[1, 1].scatter(func_val[:, 0], func_val[:, 1])
        # axs[1, 1].scatter(self.Y[0],self.Y[1], color="red", label="sampled")

        # import os
        # folder = "IMAGES"
        # subfolder = "new_branin"
        # cwd = os.getcwd()
        # print("cwd", cwd)
        # time_taken = time.time()
        # path = cwd + "/" + folder + "/" + subfolder + '/im_' +str(time_taken) +str(self.X.shape[0]) + '.pdf'
        # if os.path.isdir(cwd + "/" + folder + "/" + subfolder) == False:
        #     os.makedirs(cwd + "/" + folder + "/" + subfolder)
        # plt.savefig(path)
        plt.show()
    def get(self,
            num_anchor=5,
            duplicate_manager=None,
            unique=False,
            context_manager=None):

        ## --- We use the context handler to remove duplicates only over the non-context variables
        if context_manager and not self.space._has_bandit():
            # print("In AnchorPointsGenerator: ")
            # print("         space.config_space_expanded        : ", self.space.config_space_expanded)
            # print("         context_manager.nocontext_index_obj: ", context_manager.noncontext_bounds)
            space_configuration_without_context = [
                self.space.config_space_expanded[idx]
                for idx in context_manager.nocontext_index_obj
            ]
            space = Design_space(space_configuration_without_context,
                                 context_manager.space.constraints)
            add_context = lambda x: context_manager._expand_vector(x)
        else:
            space = self.space
            add_context = lambda x: x

        ## --- Generate initial design
        X = initial_design(self.design_type, space, self.num_samples)

        if unique:
            sorted_design = sorted(list({tuple(x) for x in X}))
            X = space.unzip_inputs(np.vstack(sorted_design))
        else:
            X = space.unzip_inputs(X)

        ## --- Add context variables
        X = add_context(X)

        if duplicate_manager:
            is_duplicate = duplicate_manager.is_unzipped_x_duplicate
        else:
            # In absence of duplicate manager, we never detect duplicates
            is_duplicate = lambda _: False

        non_duplicate_anchor_point_indexes = [
            index for index, x in enumerate(X) if not is_duplicate(x)
        ]

        if not non_duplicate_anchor_point_indexes:
            raise FullyExploredOptimizationDomainError(
                "No anchor points could be generated ({} used samples, {} requested anchor points)."
                .format(self.num_samples, num_anchor))

        if len(non_duplicate_anchor_point_indexes) < num_anchor:
            # Since logging has not been setup yet, I do not know how to express warnings...I am using standard print for now.
            print("Warning: expecting {} anchor points, only {} available.".
                  format(num_anchor, len(non_duplicate_anchor_point_indexes)))

        X = X[non_duplicate_anchor_point_indexes, :]

        scores = self.get_anchor_point_scores(X)

        anchor_points = X[np.argsort(scores)[:min(len(scores), num_anchor)], :]

        return anchor_points
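The core of get() is a score-and-truncate selection: generate a design, drop duplicate rows, score every candidate, and keep the num_anchor best. The selection itself reduces to a single argsort, as in this stand-alone sketch with a made-up scoring function:

import numpy as np

X = np.random.uniform(-1, 1, size=(25, 3))
scores = np.sum(X ** 2, axis=1)  # hypothetical anchor-point scores (lower is better)
num_anchor = 5
anchor_points = X[np.argsort(scores)[:min(len(scores), num_anchor)], :]
assert anchor_points.shape == (num_anchor, 3)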
Example #32
    def _marginal_acq_with_gradient(self, X, utility_params_samples):
        """
        """
        marginal_acqX = np.zeros((X.shape[0], len(utility_params_samples)))
        marginal_dacq_dX = np.zeros(
            (X.shape[0], X.shape[1], len(utility_params_samples)))
        n_h = 1  # Number of GP hyperparameters samples.
        # gp_hyperparameters_samples = self.model.get_hyperparameters_samples(n_h)

        n_z = len(self.Z_samples_obj)  #2  # Number of samples of Z.
        Z_samples = self.Z_samples_obj  #np.random.normal(size=n_z)
        Z_samples_c = self.Z_samples_const

        for h in range(n_h):
            # self.model.set_hyperparameters(h)
            varX = self.model.posterior_variance(X, noise=True)
            dvar_dX = self.model.posterior_variance_gradient(X)

            varX_c = self.model_c.posterior_variance(X, noise=True)
            dvar_c_dX = self.model_c.posterior_variance_gradient(X)
            for i in range(0, len(X)):
                x = np.atleast_2d(X[i])
                self.model.partial_precomputation_for_covariance(x)
                self.model.partial_precomputation_for_covariance_gradient(x)

                self.model_c.partial_precomputation_for_covariance(x)
                self.model_c.partial_precomputation_for_covariance_gradient(x)

                for l in range(0, len(utility_params_samples)):
                    # Precompute aux1 and aux2 for computational efficiency.
                    aux = np.multiply(np.square(utility_params_samples[l]),
                                      np.reciprocal(varX[:, i]))
                    aux2 = np.multiply(np.square(utility_params_samples[l]),
                                       np.square(np.reciprocal(varX[:, i])))

                    aux_c = np.reciprocal(varX_c[:, i])
                    aux2_c = np.square(np.reciprocal(varX_c[:, i]))
                    for z in range(len(Z_samples)):
                        # inner function of maKG acquisition function.
                        def inner_func(X_inner):
                            X_inner = np.atleast_2d(X_inner)
                            muX_inner = self.model.posterior_mean(X_inner)
                            cov = self.model.posterior_covariance_between_points_partially_precomputed(
                                X_inner, x)[:, :, 0]
                            a = np.matmul(utility_params_samples[l], muX_inner)
                            # a = support[t]*muX_inner
                            b = np.sqrt(np.matmul(aux, np.square(cov)))
                            func_val = np.reshape(a + b * Z_samples[z],
                                                  (len(X_inner), 1))

                            grad_c = gradients(
                                x_new=x,
                                model=self.model_c,
                                Z=Z_samples_c[z],
                                aux=aux_c,
                                X_inner=X_inner
                            )  # , test_samples = initial_design('random', self.space, 1000))
                            Fz = grad_c.compute_probability_feasibility_multi_gp(
                                x=X_inner, l=0)

                            func_val_constrained = func_val * Fz
                            return -func_val_constrained

                        # inner function of maKG acquisition function with its gradient.
                        def inner_func_with_gradient(X_inner):
                            X_inner = np.atleast_2d(X_inner)
                            muX_inner = self.model.posterior_mean(X_inner)
                            dmu_dX_inner = self.model.posterior_mean_gradient(
                                X_inner)
                            cov = self.model.posterior_covariance_between_points_partially_precomputed(
                                X_inner, x)[:, :, 0]
                            dcov_dX_inner = self.model.posterior_covariance_gradient_partially_precomputed(
                                X_inner, x)
                            a = np.matmul(utility_params_samples[l], muX_inner)
                            # a = support[t]*muX_inner
                            da_dX_inner = np.tensordot(
                                utility_params_samples[l],
                                dmu_dX_inner,
                                axes=1)
                            b = np.sqrt(np.matmul(aux, np.square(cov)))
                            for k in range(X_inner.shape[1]):
                                dcov_dX_inner[:, :, k] = np.multiply(
                                    cov, dcov_dX_inner[:, :, k])
                            db_dX_inner = np.tensordot(aux,
                                                       dcov_dX_inner,
                                                       axes=1)
                            db_dX_inner = np.multiply(np.reciprocal(b),
                                                      db_dX_inner.T).T
                            func_val = np.reshape(a + b * Z_samples[z],
                                                  (len(X_inner), 1))
                            func_gradient = np.reshape(
                                da_dX_inner + db_dX_inner * Z_samples[z],
                                X_inner.shape)

                            grad_c = gradients(x_new=x,
                                               model=self.model_c,
                                               Z=Z_samples_c[z],
                                               aux=aux_c,
                                               X_inner=X_inner,
                                               precompute_grad=True)
                            Fz, grad_Fz = grad_c.compute_probability_feasibility_multi_gp(
                                x=X_inner, l=0, gradient_flag=True)

                            func_val_constrained = func_val * Fz
                            func_gradient_constrained = np.array(
                                func_val).reshape(-1) * grad_Fz.reshape(
                                    -1) + Fz.reshape(
                                        -1) * func_gradient.reshape(-1)

                            return -func_val_constrained, -func_gradient_constrained

                        x_opt, opt_val = self.optimizer.optimize_inner_func(
                            f=inner_func, f_df=inner_func_with_gradient)
                        marginal_acqX[i, l] -= opt_val
                        x_opt = np.atleast_2d(x_opt)

                        #mu x opt calculations
                        muX_inner = self.model.posterior_mean(x_opt)
                        cov = self.model.posterior_covariance_between_points_partially_precomputed(
                            x_opt, x)[:, :, 0]
                        a = np.matmul(utility_params_samples[l], muX_inner)
                        b = np.sqrt(np.matmul(aux, np.square(cov)))
                        mu_xopt = np.reshape(a + b * Z_samples[z],
                                             (len(x_opt), 1))

                        #grad x opt calculations
                        cov_opt = self.model.posterior_covariance_between_points_partially_precomputed(
                            x_opt, x)[:, 0, 0]
                        dcov_opt_dx = self.model.posterior_covariance_gradient(
                            x, x_opt)[:, 0, :]
                        b = np.sqrt(np.dot(aux, np.square(cov_opt)))
                        grad_mu_xopt = 0.5 * Z_samples[z] * np.reciprocal(
                            b) * np.matmul(
                                aux2, (2 * np.multiply(varX[:, i] * cov_opt,
                                                       dcov_opt_dx.T) -
                                       np.multiply(np.square(cov_opt),
                                                   dvar_dX[:, i, :].T)).T)

                        grad_c = gradients(x_new=x,
                                           model=self.model_c,
                                           Z=Z_samples_c[z],
                                           xopt=x_opt,
                                           aux=aux_c,
                                           aux2=aux2_c,
                                           varX=varX_c[:, i],
                                           dvar_dX=dvar_c_dX[:, i, :],
                                           test_samples=initial_design(
                                               'random', self.space, 1000))

                        Fz_xopt, grad_Fz_xopt = grad_c.compute_probability_feasibility_multi_gp_xopt(
                            xopt=x_opt, gradient_flag=True)

                        grad_f_val_xopt = np.array(mu_xopt).reshape(
                            -1) * np.array(grad_Fz_xopt).reshape(
                                -1) + np.array(Fz_xopt).reshape(-1) * np.array(
                                    grad_mu_xopt).reshape(-1)

                        marginal_dacq_dX[i, :, l] = grad_f_val_xopt  # grad_f_val

        marginal_acqX = marginal_acqX / (n_h * n_z)
        marginal_dacq_dX = marginal_dacq_dX / (n_h * n_z)
        return marginal_acqX, marginal_dacq_dX
Example #33
    def run_optimization(self,
                         max_iter=1,
                         max_time=np.inf,
                         eps=1e-8,
                         context=None,
                         verbosity=False,
                         evaluations_file=None):
        """
        Runs Bayesian Optimization for a number 'max_iter' of iterations (after the initial exploration data)

        :param max_iter: exploration horizon, or number of acquisitions. If nothing is provided optimizes the current acquisition.
        :param max_time: maximum exploration horizon in seconds.
        :param eps: minimum distance between two consecutive x's to keep running the model.
        :param context: fixes specified variables to a particular context (values) for the optimization run (default, None).
        :param verbosity: flag to print the optimization results after each iteration (default, False).
        :param evaluations_file: filename of the file where the evaluated points and corresponding evaluations are saved (default, None).
        """
        self.verbosity = verbosity
        if self.objective is None:
            raise InvalidConfigError(
                "Cannot run the optimization loop without the objective function"
            )

        # --- Save the options to print and save the results
        self.verbosity = verbosity
        self.evaluations_file = evaluations_file
        self.context = context

        # --- Setting up stop conditions
        self.eps = eps
        if (max_iter is None) and (max_time is None):
            self.max_iter = 0
            self.max_time = np.inf
        elif (max_iter is None) and (max_time is not None):
            self.max_iter = np.inf
            self.max_time = max_time
        elif (max_iter is not None) and (max_time is None):
            self.max_iter = max_iter
            self.max_time = np.inf
        else:
            self.max_iter = max_iter
            self.max_time = max_time

        # print("------------------------TRAINING HYPERS----------------------")
        # self._get_hyperparameters()

        # --- Initial function evaluation and model fitting
        if self.X is not None and self.Y is None:
            self.Y, cost_values = self.objective.evaluate(self.X)

            if self.constraint is not None:
                self.C, cost_values = self.constraint.evaluate(self.X)
            if self.cost.cost_type == 'evaluation_time':
                self.cost.update_cost_model(self.X, cost_values)

        #self.model.updateModel(self.X,self.Y)

        # --- Initialize iterations and running time
        self.time_zero = time.time()
        self.cum_time = 0
        self.num_acquisitions = 0
        self.suggested_sample = self.X
        self.Y_new = self.Y
        self.Opportunity_Cost = []
        value_so_far = []

        # --- Initialize time cost of the evaluations
        print("-----------------------MAIN LOOP STARTS----------------------")
        Opportunity_Cost = []
        self.true_best_stats = {
            "true_best": [],
            "mean_gp": [],
            "std gp": [],
            "pf": [],
            "mu_pf": [],
            "var_pf": [],
            "residual_noise": []
        }
        while (self.max_iter > self.num_acquisitions):

            self._update_model()

            if self.constraint is None:
                self.Opportunity_Cost_caller_unconstrained()
            else:
                self.Opportunity_Cost_caller_constrained()

            print("maKG optimizer")
            start = time.time()
            self.suggested_sample = self._compute_next_evaluations()
            finish = time.time()
            print("time optimisation point X", finish - start)

            if verbosity:
                ####plots
                design_plot = initial_design('random', self.space, 1000)
                ac_f = self.expected_improvement(design_plot)
                Y, _ = self.objective.evaluate(design_plot)
                C, _ = self.constraint.evaluate(design_plot)
                pf = self.probability_feasibility_multi_gp(
                    design_plot, self.model_c).reshape(-1, 1)
                mu_f = self.model.predict(design_plot)[0]

                bool_C = np.prod(np.concatenate(C, axis=1) < 0, axis=1)
                func_val = Y * bool_C.reshape(-1, 1)

                print("self.suggested_sample", self.suggested_sample)
                fig, axs = plt.subplots(2, 2)
                axs[0, 0].set_title('True Function')
                axs[0, 0].scatter(design_plot[:, 0],
                                  design_plot[:, 1],
                                  c=np.array(func_val).reshape(-1))
                axs[0, 0].scatter(self.X[:, 0],
                                  self.X[:, 1],
                                  color="red",
                                  label="sampled")
                axs[0, 0].scatter(self.suggested_sample[:, 0],
                                  self.suggested_sample[:, 1],
                                  marker="x",
                                  color="red",
                                  label="suggested")

                axs[0, 1].set_title('approximation Acqu Function')
                axs[0, 1].scatter(design_plot[:, 0],
                                  design_plot[:, 1],
                                  c=np.array(ac_f).reshape(-1))

                axs[1, 0].set_title("convergence")
                axs[1, 0].plot(range(len(self.Opportunity_Cost)),
                               np.array(self.Opportunity_Cost).reshape(-1))
                axs[1, 0].set_yscale("log")

                axs[1, 1].set_title("mu")
                axs[1, 1].scatter(design_plot[:, 0],
                                  design_plot[:, 1],
                                  c=np.array(mu_f).reshape(-1) *
                                  np.array(pf).reshape(-1))

                plt.show()

            self.X = np.vstack((self.X, self.suggested_sample))
            # --- Evaluate *f* in X, augment Y and update cost function (if needed)
            self.evaluate_objective()

            print("X", self.X, "Y", self.Y, "C", self.C, "OC",
                  self.Opportunity_Cost)
            # --- Update current evaluation time and function evaluations
            self.cum_time = time.time() - self.time_zero
            self.num_acquisitions += 1

        return self.X, self.Y, self.C, self.Opportunity_Cost
Example #34
    def _dropout_random(self, embedded_idx):
        return initial_design(
            'random',
            get_subspace(space=self.space, subspace_idx=embedded_idx),
            1)[0]
        'name': 'tau_0',
        'type': 'continuous',
        'domain': scaled_parameter_bounds['tau_0']
    },
    {
        'name': 'nu',
        'type': 'continuous',
        'domain': scaled_parameter_bounds['nu']
    },
]
domain = [
    item for item in full_domain
    if item['name'] not in fixed_parameters.keys()
]
space = gpo.Design_space(domain)
X_init = initial_design('sobol', space, n_initial_points)


def value_from_X(X, param_name):
    names = [item['name'] for item in domain]
    if param_name in names:
        return X[names.index(param_name)]
    else:
        return fixed_parameters[param_name]


queries = []
for X in X_init:
    query = "INSERT INTO jobs (job_name, is_initial, fish_group, machine_assigned, start_time, completed_time, objective_function,\
               delta_0, alpha_tau, alpha_d, beta, A_0, t_s_0, discriminability, flicker_frequency, tau_0, nu) VALUES \
               (\"{job_name}\", TRUE, \"{fish_group}\", NULL, NULL, NULL, NULL, \
Example #36
    def run_optimization(self,
                         max_iter=1,
                         max_time=np.inf,
                         eps=1e-8,
                         context=None,
                         verbosity=False,
                         evaluations_file=None):
        """
        Runs Bayesian Optimization for a number 'max_iter' of iterations (after the initial exploration data)

        :param max_iter: exploration horizon, or number of acquisitions. If nothing is provided optimizes the current acquisition.
        :param max_time: maximum exploration horizon in seconds.
        :param eps: minimum distance between two consecutive x's to keep running the model.
        :param context: fixes specified variables to a particular context (values) for the optimization run (default, None).
        :param verbosity: flag to print the optimization results after each iteration (default, False).
        :param evaluations_file: filename of the file where the evaluated points and corresponding evaluations are saved (default, None).
        """

        if self.objective is None:
            raise InvalidConfigError("Cannot run the optimization loop without the objective function")

        # --- Save the options to print and save the results
        self.verbosity = verbosity
        self.evaluations_file = evaluations_file
        self.context = context
    
                
        # --- Setting up stop conditions
        self.eps = eps
        if  (max_iter is None) and (max_time is None):
            self.max_iter = 0
            self.max_time = np.inf
        elif (max_iter is None) and (max_time is not None):
            self.max_iter = np.inf
            self.max_time = max_time
        elif (max_iter is not None) and (max_time is None):
            self.max_iter = max_iter
            self.max_time = np.inf
        else:
            self.max_iter = max_iter
            self.max_time = max_time

        # --- Initial function evaluation and model fitting
        if self.X is not None and self.Y is None:
            self.Y, cost_values = self.objective.evaluate(self.X)
            self.C, cost_values = self.constraint.evaluate(self.X)
            if self.cost.cost_type == 'evaluation_time':
                self.cost.update_cost_model(self.X, cost_values)
    
        #self.model.updateModel(self.X,self.Y)

        # --- Initialize iterations and running time
        self.time_zero = time.time()
        self.cum_time  = 0
        self.num_acquisitions = 0
        self.suggested_sample = self.X
        self.Y_new = self.Y
        self.Opportunity_Cost = []
        value_so_far = []

        # --- Initialize time cost of the evaluations
        print("MAIN LOOP STARTS")
        Opportunity_Cost = []
        while (self.max_iter > self.num_acquisitions ):


            # self._update_model()


            print("maKG optimizer")
            start = time.time()
            self.suggested_sample = initial_design('random', self.space, 1)
            finish = time.time()
            print("time optimisation point X", finish - start)

            if verbosity:
                self.verbosity_plot_2D()
            print("self.Opportunity_Cost",self.Opportunity_Cost)
            self.X = np.vstack((self.X,self.suggested_sample))
            # --- Evaluate *f* in X, augment Y and update cost function (if needed)
            self.evaluate_objective()

            # --- Update current evaluation time and function evaluations
            self.cum_time = time.time() - self.time_zero
            self.num_acquisitions += 1
            print("optimize_final_evaluation")
            self.optimize_final_evaluation()
            print("self.X, self.Y, self.C , self.Opportunity_Cost",self.X, self.Y, self.C , self.Opportunity_Cost)

        return self.X, self.Y, self.C , self.Opportunity_Cost
Example #37
    def suggest_sample(self, number_of_samples=1):
        """
        Returns a suggested next point to evaluate.
        """
        utility_parameter_sample = self.utility.sample_parameter(
            number_of_samples=1)
        model_sample = self.model.get_copy_of_model_sample()
        X_evaluated = np.copy(model_sample.X)
        Y_evaluated = np.copy(model_sample.Y)
        self.X_aux = np.copy(X_evaluated)
        self.Y_aux = np.copy(Y_evaluated)

        def objective_func_sample(d):
            X_new = np.vstack(
                [np.append(d, theta) for theta in self.scenario_support])
            Y_new = model_sample.posterior_samples_f(X_new,
                                                     size=1,
                                                     full_cov=True)
            self.X_aux = np.vstack((self.X_aux, X_new))
            self.Y_aux = np.vstack((self.Y_aux, Y_new))
            model_sample.set_XY(self.X_aux, self.Y_aux)
            val = 0.
            for w in range(self.scenario_support_cardinality):
                val += self.scenario_prob_dist[w] * self.utility.eval_func(
                    Y_new[w, 0], utility_parameter_sample)
            return -val

        d0 = initial_design('random', self.decision_space, 1)
        try:
            #argmax =  self.decision_space_optimizer.optimize(d0, objective_func_sample, maxfevals=200)[0]
            argmax = apply_optimizer(
                self.decision_space_optimizer,
                d0,
                f=objective_func_sample,
                context_manager=self.decision_space_context_manager,
                space=self.decision_space,
                maxfevals=200)[0]
        except:
            argmax = d0

        aux_grid = np.vstack(
            [np.append(argmax, theta) for theta in self.scenario_support])
        self.model.set_hyperparameters(0)
        var = self.model.posterior_variance(aux_grid)
        for h in range(1, self.number_of_gp_hyps_samples):
            self.model.set_hyperparameters(h)
            var += self.model.posterior_variance(aux_grid)
        var = var[:, 0]
        index = np.argmax(var)
        suggested_sample = np.append(argmax, self.scenario_support[index])

        use_suggested_sample = True
        i = 0
        min_distance = np.inf
        while use_suggested_sample and i < X_evaluated.shape[0]:
            distance_to_evaluated_point = euclidean(X_evaluated[i, :],
                                                    suggested_sample)
            if distance_to_evaluated_point < min_distance:
                min_distance = distance_to_evaluated_point
            if distance_to_evaluated_point < 1e-1 / np.sqrt(
                    X_evaluated.shape[1]):
                use_suggested_sample = False
            i += 1

        print('Minimum distance to previously evaluated point is: {}'.format(
            min_distance))

        if not use_suggested_sample:
            print(
                'Suggested point is too close to a previously evaluated point; switching to max expected value sampling policy.'
            )

            def expectation_objective_func(d):
                d = np.atleast_2d(d)
                func_val = 0.
                cross_product_grid = np.vstack(
                    [np.append(d, theta) for theta in self.scenario_support])
                for h in range(self.number_of_gp_hyps_samples):
                    self.model.set_hyperparameters(h)
                    mean, var = self.model.predict_noiseless(
                        cross_product_grid)
                    for w in range(self.scenario_support_cardinality):
                        expectation_utility = self.expectation_utility.eval_func(
                            mean[w, 0], var[w, 0], utility_parameter_sample)
                        func_val += self.scenario_prob_dist[
                            w] * expectation_utility
                func_val /= self.number_of_gp_hyps_samples
                func_val = func_val[:, 0]
                return -func_val

            #argmax =  self.decision_space_optimizer.optimize(d0, expectation_objective_func)[0]
            argmax = apply_optimizer(
                self.decision_space_optimizer,
                d0,
                f=expectation_objective_func,
                context_manager=self.decision_space_context_manager,
                space=self.decision_space)[0]

            aux_grid = np.vstack(
                [np.append(argmax, theta) for theta in self.scenario_support])
            self.model.set_hyperparameters(0)
            var = self.model.posterior_variance(aux_grid)
            for h in range(1, self.number_of_gp_hyps_samples):
                self.model.set_hyperparameters(h)
                var += self.model.posterior_variance(aux_grid)
            var = var[:, 0]
            index = np.argmax(var)
            suggested_sample = np.append(argmax, self.scenario_support[index])
            use_suggested_sample = True
            i = 0
            min_distance = np.inf
            while use_suggested_sample and i < X_evaluated.shape[0]:
                distance_to_evaluated_point = euclidean(
                    X_evaluated[i, :], suggested_sample)
                if distance_to_evaluated_point < min_distance:
                    min_distance = distance_to_evaluated_point
                if distance_to_evaluated_point < 1e-2 / np.sqrt(
                        X_evaluated.shape[1]):
                    use_suggested_sample = False
                i += 1

            print(
                'Minimum distance to previously evaluated point is: {}'.format(
                    min_distance))

        if not use_suggested_sample:
            print(
                'Suggested point is too close to a previously evaluated point; switching to max variance sampling policy.'
            )

            def posterior_variance(x):
                self.model.set_hyperparameters(0)
                var = self.model.posterior_variance(x)
                for h in range(1, self.number_of_gp_hyps_samples):
                    self.model.set_hyperparameters(h)
                    var += self.model.posterior_variance(x)
                var = var[:, 0]
                return -var

            x0 = initial_design('random', self.decision_context_space, 1)
            #suggested_sample =  self.decision_context_space_optimizer.optimize(x0, posterior_variance)[0]
            suggested_sample = apply_optimizer(
                self.decision_context_space_optimizer,
                x0,
                f=posterior_variance,
                context_manager=self.decision_context_space_context_manager,
                space=self.decision_context_space)[0]

        suggested_sample = np.atleast_2d(suggested_sample)
        return suggested_sample
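suggest_sample falls back through three policies (posterior-sample maximisation, maximum expected value, maximum posterior variance), and after each one applies the same guard: reject the candidate if it lies within roughly scale/sqrt(dim) of an already evaluated point. That guard in isolation (threshold form copied from the first check above; too_close is a hypothetical helper name):

import numpy as np
from scipy.spatial.distance import euclidean

def too_close(X_evaluated, suggested_sample, scale=1e-1):
    threshold = scale / np.sqrt(X_evaluated.shape[1])
    return any(euclidean(row, suggested_sample) < threshold for row in X_evaluated)

X_evaluated = np.array([[0.0, 0.0], [1.0, 1.0]])
assert too_close(X_evaluated, np.array([0.0, 1e-3]))      # within threshold of (0, 0)
assert not too_close(X_evaluated, np.array([0.5, 0.5]))   # far from both points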