def _compute_final_evaluations(self,
                                   pending_zipped_X=None,
                                   ignored_zipped_X=None,
                                   re_use=False):
        """
        Computes the location of the new evaluation (optimizes the acquisition in the standard case).
        :param pending_zipped_X: matrix of input configurations that are in a pending state (i.e., do not have an evaluation yet).
        :param ignored_zipped_X: matrix of input configurations that the user black-lists, i.e., those configurations will not be suggested again.
        :return:
        """
        ## --- Update the context if any

        self.acquisition.optimizer.context_manager = ContextManager(
            self.space,
            self.context,
        )
        print("compute next evaluation")
        if self.sample_from_acq:
            print("suggest next location given THOMPSON SAMPLING")
            candidate_points = initial_design('latin', self.space, 2000)
            aux_var = self.acquisition._compute_acq(candidate_points)
        else:
            if self.constraint is not None:
                aux_var = self.last_step_evaluator.compute_batch(
                    duplicate_manager=None, re_use=re_use, constrained=True)
            else:
                aux_var = self.last_step_evaluator.compute_batch(
                    duplicate_manager=None, re_use=re_use, constrained=False)

        return self.space.zip_inputs(aux_var[0])
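For reference, a small self-contained sketch of the candidate generation used in the Thompson-sampling branch above. It relies on GPyOpt's standard Design_space and initial_design helpers; the one-dimensional space below is only a placeholder for the class's self.space.

from GPyOpt.core.task.space import Design_space
from GPyOpt.experiment_design import initial_design

# Placeholder 1-D space; the method above uses self.space from its class.
space = Design_space([{'name': 'x', 'type': 'continuous', 'domain': (0, 1)}])
candidate_points = initial_design('latin', space, 2000)  # 2000 Latin-hypercube candidates
print(candidate_points.shape)                            # (2000, 1)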
Example #2
    def _compute_next_evaluations(self,
                                  pending_zipped_X=None,
                                  ignored_zipped_X=None):
        """
        Computes the location of the new evaluation (optimizes the acquisition in the standard case).
        :param pending_zipped_X: matrix of input configurations that are in a pending state (i.e., do not have an evaluation yet).
        :param ignored_zipped_X: matrix of input configurations that the user black-lists, i.e., those configurations will not be suggested again.
        :return:
        """

        ## --- Update the context if any
        self.acquisition.optimizer.context_manager = ContextManager(
            self.space, self.context)

        ### --- Activate de_duplication
        if self.de_duplication:
            duplicate_manager = DuplicateManager(
                space=self.space,
                zipped_X=self.X,
                pending_zipped_X=pending_zipped_X,
                ignored_zipped_X=ignored_zipped_X)
        else:
            duplicate_manager = None

        ### We zip the value in case there are categorical variables
        return self.space.zip_inputs(
            self.evaluator.compute_batch(
                duplicate_manager=duplicate_manager,
                context_manager=self.acquisition.optimizer.context_manager))
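A minimal sketch of how pending and blacklisted points typically reach a method like the one above through GPyOpt's public suggest_next_locations; the toy objective, domain, and array values are placeholders, not part of the original example.

import numpy as np
import GPyOpt

# Placeholder 2-D quadratic objective; de_duplication=True activates the branch above.
domain = [{'name': 'x', 'type': 'continuous', 'domain': (-1, 1), 'dimensionality': 2}]
bo = GPyOpt.methods.BayesianOptimization(f=lambda x: np.sum(x**2, axis=1, keepdims=True),
                                         domain=domain, de_duplication=True)

pending_X = np.array([[0.1, 0.2]])   # queued but not yet evaluated
ignored_X = np.array([[0.5, 0.5]])   # configurations the user blacklists
next_x = bo.suggest_next_locations(pending_X=pending_X, ignored_X=ignored_X)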
Example #3
    def compute_next_evaluations(self,
                                 pending_zipped_X=None,
                                 ignored_zipped_X=None):
        """
        Computes the location of the new evaluation (optimizes the acquisition in the standard case).
        :param pending_zipped_X: matrix of input configurations that are in a pending state (i.e., do not have an evaluation yet).
        :param ignored_zipped_X: matrix of input configurations that the user black-lists, i.e., those configurations will not be suggested again.
        :return:
        """
        # --- Initial function evaluation
        if self.X is not None and self.Y is None:
            self.Y, cost_values = self.objective.evaluate(self.X)
            if self.cost.cost_type == 'evaluation_time':
                self.cost.update_cost_model(self.X, cost_values)
        # --- Initialize model
        self.model.updateModel(self.X, self.Y)

        ## --- Update the context if any
        self.acquisition.optimizer.context_manager = ContextManager(
            self.space, self.context)

        ### We zip the value in case there are categorical variables
        #X_baseline = np.empty((self.X.shape[1], self.X.shape[1]))
        #X_baseline[0, :] = self.current_argmax
        #for i in range(1, self.X.shape[1]):
        #X_baseline[i, :] = self.current_argmax + np.random.normal(loc=0.0, scale=0.1, size=self.current_argmax.shape)
        #return self.space.zip_inputs(self.evaluator.compute_batch(duplicate_manager=None, x_baseline=X_baseline))
        return self.space.zip_inputs(
            self.evaluator.compute_batch(duplicate_manager=None,
                                         x_baseline=self.current_argmax))
Example #4
    def step(self, obs, explore=True):
        """
        Returns the policy for a single step.

        Parameters
        ----------
        obs:
            Observation from env.
        explore: bool
            If True, exploratory actions are also suggested.
        """
        # We need at least one data point before creating the BO model.
        # Until then, we sample from the action space
        if self.BO is None:
            return self.ac_space.sample()
        else:
            # Add new data to gp and optimize hyperparameters if max_iters > 0
            self.BO._update_model(self.BO.normalization_type)

            # Create context dict
            if not hasattr(obs, '__iter__'):
                obs = np.array([obs])
            obs = np.asarray(obs)
            obs_copy = obs.copy()
            context = {
                'obsvar_{}'.format(i): v
                for i, v in enumerate(obs_copy.flatten())
            }

            # Maximizing the UCB criterion trades off exploration and exploitation
            if explore:
                self.BO.num_acquisitions += 1
                self.BO.acquisition.exploration_weight = self.beta(
                    self.BO.num_acquisitions)
                self.BO.context = context
                x = self.BO._compute_next_evaluations()

            # In case we don't explore, we maximize the posterior mean
            else:
                # Optimize the exploit function (posterior mean)
                self.exploit_optimizer.context_manager = ContextManager(
                    space=self.BO.space, context=context)
                x, fx = self.exploit_optimizer.optimize(f=self.exploit_func)

            # Remove context (i.e. state) and cast to correct type
            x = np.squeeze(x)
            x_no_context = x[len(obs):]
            if isinstance(self.ac_space, Discrete):
                x_no_context = int(x_no_context[0])
            elif isinstance(self.ac_space, MultiDiscrete):
                x_no_context = x_no_context.astype(np.int64)

            return x_no_context
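A standalone illustration of the context bookkeeping in step() above: the observation becomes 'obsvar_i' context entries, and because the slice x[len(obs):] assumes those variables occupy the first positions of the suggested vector, dropping them recovers the action. All values below are placeholders.

import numpy as np

obs = np.array([0.7, -0.2])                                 # placeholder observation
context = {'obsvar_{}'.format(i): v for i, v in enumerate(obs.flatten())}
# context now maps 'obsvar_0' -> 0.7 and 'obsvar_1' -> -0.2

# Suppose the optimizer returns the full (context + action) vector:
x = np.array([[0.7, -0.2, 1.3]])                            # placeholder suggestion
x_no_context = np.squeeze(x)[len(obs):]                     # drop the fixed context part
print(x_no_context)                                         # [1.3]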
Example #5
    def setUp(self):
        np.random.seed(123)
        domain          = [{'name': 'var1', 'type': 'continuous', 'domain': (-5, 5), 'dimensionality': 5}]
        space           = Design_space(domain)
        func            = alpine1(input_dim=5, bounds=space.get_bounds())
        bo              = BayesianOptimization(f=func.f, domain=domain)
        context         = {'var1_1': 0.3, 'var1_2': 0.4}
        context_manager = ContextManager(space, context)
        x0              = np.array([[0, 0, 0, 0, 0]])

        # initialize the model in the least intrusive way possible
        bo.suggest_next_locations()

        f = bo.acquisition.acquisition_function
        f_df = bo.acquisition.acquisition_function_withGradients
        self.problem_with_context = OptimizationWithContext(x0=x0, f=f, df=None, f_df=f_df, context_manager=context_manager)
        self.x = np.array([[3, -3, 3]])
Example #6
    def setUp(self):
        self.mock_model = Mock()
        self.mock_optimizer = Mock()
        self.expected_optimum_position = [[0, 0]]
        self.mock_optimizer.optimize.return_value = self.expected_optimum_position, self.expected_optimum_position
        domain = [{
            'name': 'var_1',
            'type': 'continuous',
            'domain': (-5, 5),
            'dimensionality': 2
        }]
        self.space = Design_space(domain, None)
        self.mock_optimizer.context_manager = ContextManager(self.space)
        self.ei_acquisition = AcquisitionEI(self.mock_model, self.space,
                                            self.mock_optimizer)

        self.random_batch = RandomBatch(self.ei_acquisition, 10)
Example #7
    def _compute_setting(self, pending_zipped_X, ignored_zipped_X):
        context_manager = ContextManager(self.subspace, self.context)

        # --- Update the context if any
        self.acquisition.optimizer.context_manager = context_manager

        # --- Activate de_duplication
        if self.de_duplication:
            duplicate_manager = DuplicateManager(
                space=self.subspace,
                zipped_X=self.X,
                pending_zipped_X=pending_zipped_X,
                ignored_zipped_X=ignored_zipped_X)
        else:
            duplicate_manager = None

        return context_manager, duplicate_manager
Example #8
    def optimize_final_evaluation(self):

        out = self.acquisition.optimizer.optimize(f=self.current_best,
                                                  duplicate_manager=None)
        print("out", out)
        print("out", out[1])

        self.best_mu_all = -1 * np.array(out[1]).reshape(-1)

        self.acquisition.optimizer.context_manager = ContextManager(
            self.space, self.context)
        out = self.acquisition.optimizer.optimize(f=self.expected_improvement,
                                                  duplicate_manager=None)
        suggested_sample = self.space.zip_inputs(out[0])
        # suggested_sample = suggested_sample.astype("int")
        print("suggested_sample", suggested_sample)
        return suggested_sample
Example #9
    def _compute_next_evaluations(self, pending_zipped_X=None, ignored_zipped_X=None):
        # --- Update the context if any
        self.acquisition.optimizer.context_manager = ContextManager(self.subspace, self.context)

        # --- Activate de_duplication
        if self.de_duplication:
            duplicate_manager = DuplicateManager(
                space=self.subspace, zipped_X=self.X, pending_zipped_X=pending_zipped_X,
                ignored_zipped_X=ignored_zipped_X)
        else:
            duplicate_manager = None

        # We zip the value in case there are categorical variables
        suggested_ = self.subspace.zip_inputs(self.evaluator.compute_batch(
            duplicate_manager=duplicate_manager,
            context_manager=self.acquisition.optimizer.context_manager))

        return suggested_
Example #10
    def __init__(self,
                 space: ParameterSpace,
                 context: Context,
                 gpyopt_space: Optional[Dict[str, Any]] = None):
        """
        :param space: Parameter space of the search problem.
        :param context: Dictionary of variables and their context values.
                        These values are fixed during optimization.
        :param gpyopt_space: Same as space but in GPyOpt format.
        """
        self.space = space
        if gpyopt_space is None:
            gpyopt_space = space.convert_to_gpyopt_design_space()
        self._gpyopt_context_manager = GPyOptContextManager(
            gpyopt_space, context)
        self.contextfree_space = ParameterSpace([
            param for param in self.space.parameters
            if param.name not in context
        ])
        self.context_space = ParameterSpace([
            param for param in self.space.parameters if param.name in context
        ])
Example #11
    def test_context_hadler(self):
        space = [{
            'name': 'var1',
            'type': 'continuous',
            'domain': (-3, 1),
            'dimensionality': 3
        }, {
            'name': 'var2',
            'type': 'discrete',
            'domain': (0, 1, 2, 3)
        }, {
            'name': 'var3',
            'type': 'continuous',
            'domain': (-5, 5)
        }, {
            'name': 'var4',
            'type': 'categorical',
            'domain': (0, 1)
        }]

        context = {'var1_1': 0.45, 'var3': 0.52}

        design_space = Design_space(space)
        np.random.seed(666)

        self.context_manager = ContextManager(space=design_space,
                                              context=context)

        noncontext_bounds = [(-3, 1), (-3, 1), (0, 3), (0, 1), (0, 1)]
        noncontext_index = [1, 2, 3, 5, 6]
        expanded_vector = np.array([[0.45, 0., 0., 0., 0.52, 0., 0.]])

        assert np.all(
            noncontext_bounds == self.context_manager.noncontext_bounds)
        assert np.all(
            noncontext_index == self.context_manager.noncontext_index)
        assert np.all(expanded_vector == self.context_manager._expand_vector(
            np.array([[0, 0, 0, 0, 0]])))
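As a standalone illustration of what the expansion in the test above computes (plain numpy mirroring the expected output, not ContextManager's own code): the context values are written at their fixed indices and the optimizer-controlled values fill the remaining positions.

import numpy as np

context_index, context_values = [0, 4], [0.45, 0.52]   # var1_1 and var3 from the test
noncontext_index = [1, 2, 3, 5, 6]

x_free = np.zeros((1, 5))                               # the 5 non-context dimensions
expanded = np.zeros((x_free.shape[0], 7))
expanded[:, context_index] = context_values             # fixed context values
expanded[:, noncontext_index] = x_free                  # optimizer-controlled values
print(expanded)                                         # [[0.45 0.   0.   0.   0.52 0.   0.  ]]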
Example #12
class ContextManager:
    """
    Handles the context variables in the optimizer
    """
    def __init__(self,
                 space: ParameterSpace,
                 context: Context,
                 gpyopt_space: Optional[Dict[str, Any]] = None):
        """
        :param space: Parameter space of the search problem.
        :param context: Dictionary of variables and their context values.
                        These values are fixed during optimization.
        :param gpyopt_space: Same as space but in GPyOpt format.
        """
        self.space = space
        if gpyopt_space is None:
            gpyopt_space = space.convert_to_gpyopt_design_space()
        self._gpyopt_context_manager = GPyOptContextManager(
            gpyopt_space, context)
        self.contextfree_space = ParameterSpace([
            param for param in self.space.parameters
            if param.name not in context
        ])
        self.context_space = ParameterSpace([
            param for param in self.space.parameters if param.name in context
        ])

    def expand_vector(self, x: np.ndarray) -> np.ndarray:
        """
        Expand contextfree parameter vector by values of the context.

        :param x: Contextfree parameter values as 2d-array
        :return: Parameter values with inserted context values
        """
        if len(self.context_space.parameters) == 0:
            return x
        else:
            return self._gpyopt_context_manager._expand_vector(x)
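A sketch of how a class like this might be used, assuming emukit-style ParameterSpace and ContinuousParameter objects and that the surrounding codebase provides the convert_to_gpyopt_design_space / GPyOptContextManager pieces referenced in __init__; none of these assumptions is guaranteed by the snippet itself.

import numpy as np
from emukit.core import ContinuousParameter, ParameterSpace

space = ParameterSpace([ContinuousParameter('x1', -1, 1),
                        ContinuousParameter('x2', -1, 1)])
cm = ContextManager(space, context={'x2': 0.5})

print(cm.contextfree_space.parameter_names)    # ['x1'] -- only x1 is optimized
print(cm.context_space.parameter_names)        # ['x2'] -- x2 is held fixed
x_full = cm.expand_vector(np.array([[0.2]]))   # -> [[0.2, 0.5]], x2 re-inserted at its fixed value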