class InformedOptimizationProtocol(BaseOptimizationProtocol,
                                   metaclass=ABCMeta):
    def __init__(
        self,
        target_metric=None,
        iterations=1,
        verbose=1,
        read_experiments=True,
        reporter_parameters=None,

        #################### Optimizer Class Parameters ####################
        base_estimator='GP',
        n_initial_points=10,
        acquisition_function='gp_hedge',
        acquisition_optimizer='auto',
        random_state=32,
        acquisition_function_kwargs=None,
        acquisition_optimizer_kwargs=None,

        #################### Minimizer Parameters ####################
        n_random_starts=10,
        callbacks=None,

        #################### Other Parameters ####################
        base_estimator_kwargs=None,
    ):
        """Base class for Informed Optimization Protocols

        Parameters
        ----------
        target_metric: Tuple, default=('oof', <first key in :attr:`environment.Environment.metrics_map`>)
            A path denoting the metric to be used to compare completed Experiments within the Optimization Protocol. The first
            value should be one of ['oof', 'holdout', 'in_fold']. The second value should be the name of a metric being recorded
            according to the values supplied in :attr:`environment.Environment.metrics_params`. See the documentation for
            :func:`metrics.get_formatted_target_metric` for more info; any values returned by, or used as the `target_metric`
            input to this function are acceptable values for :attr:`BaseOptimizationProtocol.target_metric`
        iterations: Int, default=1
            The number of distinct experiments to execute
        verbose: Int 0, 1, or 2, default=1
            Verbosity mode for console logging. 0: Silent. 1: Show only logs from the Optimization Protocol. 2: In addition to
            logs shown when verbose=1, also show the logs from individual Experiments
        read_experiments: Boolean, default=True
            If True, all Experiment records that fit within the current :attr:`hyperparameter_space`, use the same
            :attr:`algorithm_name`, and match the current guidelines will be read in and used to fit any optimizers
        reporter_parameters: Dict, or None, default=None
            Additional parameters passed to :meth:`reporting.OptimizationReporter.__init__`
        base_estimator: String in ['GP', 'GBRT', 'RF', 'ET', 'DUMMY'], or an `sklearn` regressor, default='GP'
            If one of the above strings, a default model of that type will be used. Else, should inherit from
            :class:`sklearn.base.RegressorMixin`, and its :meth:`predict` should have an optional `return_std` argument, which
            returns `std(Y | x)`, along with `E[Y | x]`
        n_initial_points: Int, default=10
            The number of complete evaluation points necessary before allowing Experiments to be approximated with
            `base_estimator`. Any valid Experiment records found will count as initialization points. If fewer than
            `n_initial_points` Experiment records are found, additional points will be sampled at random
        acquisition_function: String in ['LCB', 'EI', 'PI', 'gp_hedge'], default='gp_hedge'
            Function to minimize over the posterior distribution. 'LCB': lower confidence bound. 'EI': negative expected
            improvement. 'PI': negative probability of improvement. 'gp_hedge': Probabilistically choose one of the preceding
            three acquisition functions at each iteration
        acquisition_optimizer: String in ['sampling', 'lbfgs', 'auto'], default='auto'
            Method to minimize the acquisition function. The fit model is updated with the optimal value obtained by optimizing
            `acquisition_function` with `acquisition_optimizer`. 'sampling': optimize by computing `acquisition_function` at
            `acquisition_optimizer_kwargs['n_points']` randomly sampled points. 'lbfgs': optimize by sampling
            `n_restarts_optimizer` random points, then running 'lbfgs' for 20 iterations from those points to find local minima,
            the best of which is used to update the prior. 'auto': configure on the basis of `base_estimator` and `dimensions`
        random_state: Int, `RandomState` instance, or None, default=32
            Set to something other than None for reproducible results
        acquisition_function_kwargs: Dict, or None, default=dict(xi=0.01, kappa=1.96)
            Additional arguments passed to the acquisition function
        acquisition_optimizer_kwargs: Dict, or None, default=dict(n_points=10000, n_restarts_optimizer=5, n_jobs=1)
            Additional arguments passed to the acquisition optimizer
        n_random_starts: Int, default=10
            The number of Experiments to execute with random points before checking that `n_initial_points` have been evaluated.
            Note: this parameter is currently unused (see the TODO note in :meth:`__init__`)
        callbacks: Callable, list of callables, or None, default=[]
            If callable, then `callbacks(self.optimizer_result)` is called after each update to :attr:`optimizer`. If list, then
            each callable is called
        base_estimator_kwargs: Dict, or None, default={}
            Additional arguments passed to `base_estimator` when it is initialized

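        Examples
        --------
        The sketch below shows one way a concrete descendant of this class might be used. The subclass name
        (`BayesianOptimization`), the guideline-setting method, and the model/search parameters are illustrative
        assumptions, and an active :class:`environment.Environment` is presumed to already exist; consult the concrete
        classes in :mod:`.optimization` for the actual interface::

            # NOTE: The names below are assumptions for illustration; check the concrete subclasses for the real API
            from hyperparameter_hunter import BayesianOptimization, Integer, Real
            from xgboost import XGBClassifier

            optimizer = BayesianOptimization(iterations=10, random_state=32)
            optimizer.set_experiment_guidelines(  # hypothetical: declare the model and the hyperparameter search space
                model_initializer=XGBClassifier,
                model_init_params=dict(max_depth=Integer(2, 10), subsample=Real(0.3, 0.9)),
            )
            optimizer.go()  # hypothetical: run `iterations` Experiments, learning from each completed one
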
        Notes
        -----
        To provide initial input points for evaluation, individual Experiments can be executed prior to instantiating an
        Optimization Protocol. The results of these Experiments will automatically be detected and used to fit the optimizer.

        :class:`.InformedOptimizationProtocol` and its children in :mod:`.optimization` rely heavily on the utilities provided by
        the `Scikit-Optimize` library, so thank you to the creators and contributors for their excellent work."""
        # TODO: Add 'EIps', and 'PIps' to the allowable `acquisition_function` values - Will need to return execution times

        #################### Optimizer Parameters ####################
        self.base_estimator = base_estimator
        self.n_initial_points = n_initial_points
        self.acquisition_function = acquisition_function
        self.acquisition_optimizer = acquisition_optimizer
        self.random_state = random_state
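        # Start from default acquisition kwargs, then merge in any user-provided overrides below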
        self.acquisition_function_kwargs = dict(xi=0.01, kappa=1.96)
        self.acquisition_optimizer_kwargs = dict(n_points=10000,
                                                 n_restarts_optimizer=5,
                                                 n_jobs=1)

        self.acquisition_function_kwargs.update(acquisition_function_kwargs
                                                or {})
        self.acquisition_optimizer_kwargs.update(acquisition_optimizer_kwargs
                                                 or {})

        #################### Minimizer Parameters ####################
        # TODO: n_random_starts does nothing currently - Fix that
        self.n_random_starts = n_random_starts
        self.callbacks = callbacks or []

        #################### Other Parameters ####################
        self.base_estimator_kwargs = base_estimator_kwargs or {}

        #################### Placeholder Attributes ####################
        self.optimizer = None
        self.optimizer_result = None
        self.current_hyperparameters_list = None

        super().__init__(target_metric=target_metric,
                         iterations=iterations,
                         verbose=verbose,
                         read_experiments=read_experiments,
                         reporter_parameters=reporter_parameters)

    def _set_hyperparameter_space(self):
        """Initialize :attr:`hyperparameter_space` according to the provided hyperparameter search dimensions, and
        :attr:`base_estimator` and :attr:`optimizer`"""
        self.hyperparameter_space = Space(dimensions=self.dimensions)
        self._prepare_estimator()
        self._build_optimizer()

    def _prepare_estimator(self):
        """Initialize :attr:`base_estimator` with :attr:`hyperparameter_space` and any other kwargs, using
        `skopt.utils.cook_estimator`"""
        self.base_estimator = cook_estimator(self.base_estimator,
                                             space=self.hyperparameter_space,
                                             **self.base_estimator_kwargs)

    def _build_optimizer(self):
        """Set :attr:`optimizer` to the optimizing class used to both estimate the utility of sets of hyperparameters by learning
        from executed Experiments, and suggest points at which the objective should be evaluated"""
        self.optimizer = AskingOptimizer(
            dimensions=self.hyperparameter_space,
            base_estimator=self.base_estimator,
            n_initial_points=self.n_initial_points,
            acq_func=self.acquisition_function,
            acq_optimizer=self.acquisition_optimizer,
            random_state=self.random_state,
            acq_func_kwargs=self.acquisition_function_kwargs,
            acq_optimizer_kwargs=self.acquisition_optimizer_kwargs,
        )

    def _execute_experiment(self):
        """After executing parent's :meth:`_execute_experiment`, fit :attr:`optimizer` with the set of hyperparameters that
        were used, and the utility of those hyperparameters"""
        super()._execute_experiment()

        # FLAG: Resolve switching between below options depending on `target_metric`
        # self.optimizer_result = self.optimizer.tell(self.current_hyperparameters_list, self.current_score, fit=True)
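        # Report the negated score: the optimizer minimizes its objective, while the `target_metric` is (currently)
        # assumed to improve as it increases (hence the surrounding FLAG notes about switching based on `target_metric`)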
        self.optimizer_result = self.optimizer.tell(
            self.current_hyperparameters_list, -self.current_score, fit=True)
        # FLAG: Resolve switching between above options depending on `target_metric`

        if eval_callbacks(self.callbacks, self.optimizer_result):
            return

    def _get_current_hyperparameters(self):
        """Ask :attr:`optimizer` for the upcoming set of hyperparameters that should be searched, then format them to be used
        in the next Experiment

        Returns
        -------
        current_hyperparameters: Dict
            The next set of hyperparameters that will be searched"""
        _current_hyperparameters = self.optimizer.ask()

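        # If the optimizer suggests the exact point it suggested last time, fall back to a random sample from the
        # hyperparameter space so the next Experiment is not a repeat of the previous one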
        if _current_hyperparameters == self.current_hyperparameters_list:
            new_parameters = self.hyperparameter_space.rvs(
                random_state=None)[0]
            G.debug_('REPEATED     asked={}     new={}'.format(
                _current_hyperparameters, new_parameters))
            _current_hyperparameters = new_parameters

        self.current_hyperparameters_list = _current_hyperparameters

        current_hyperparameters = dict(
            zip(self.hyperparameter_space.get_names(use_location=False),
                self.current_hyperparameters_list))

        return current_hyperparameters

    def _find_similar_experiments(self):
        """After locating similar experiments by way of the parent's :meth:`_find_similar_experiments`, fit :attr:`optimizer`
        with the hyperparameters and results of each located experiment"""
        super()._find_similar_experiments()

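        # Replay each located Experiment's hyperparameters and evaluation into the optimizer so it can learn from
        # saved results before suggesting new points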
        for _i, _experiment in enumerate(self.similar_experiments[::-1]):
            _hyperparameters = dimension_subset(
                _experiment[0], self.hyperparameter_space.get_names())
            _evaluation = _experiment[1]
            _experiment_id = _experiment[2] if len(_experiment) > 2 else None
            self.logger.print_result(_hyperparameters,
                                     _evaluation,
                                     experiment_id=_experiment_id)

            # FLAG: Resolve switching between below options depending on `target_metric`
            # self.optimizer_result = self.optimizer.tell(_hyperparameters, _evaluation)
            self.optimizer_result = self.optimizer.tell(
                _hyperparameters, -_evaluation)
            # FLAG: Resolve switching between above options depending on `target_metric`

            # self.optimizer_result = self.optimizer.tell(
            #     _hyperparameters, _evaluation, fit=(_i == len(self.similar_experiments) - 1))

            if eval_callbacks(self.callbacks, self.optimizer_result):
                return self.optimizer_result
            # FLAG: Could wrap above `tell` call in try/except, then attempt `_tell` with improper dimensions

    def _validate_parameters(self):
        """Ensure provided input parameters are properly formatted"""
        super()._validate_parameters()

        #################### callbacks ####################
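        # Normalize `callbacks` into a list of callables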
        self.callbacks = check_callback(self.callbacks)

    @property
    def search_space_size(self):
        """The number of different hyperparameter permutations possible given the current hyperparameter search dimensions.

        Returns
        -------
        :attr:`_search_space_size`: Int, or `numpy.inf`
            Infinity will be returned if any of the following constraints are met: 1) the hyperparameter dimensions include any
            real-valued boundaries, 2) the boundaries include values that are neither categorical nor integer, or 3) the search
            space size is otherwise incalculable"""
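        # Compute the search space size lazily on first access, then cache it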
        if self._search_space_size is None:
            self._search_space_size = len(self.hyperparameter_space)
        return self._search_space_size