Example #1
class RandomSampler(HyperoptSampler):
    num_samples = 10

    def __init__(self,
                 goal: str,
                 parameters: Dict[str, Any],
                 num_samples=10,
                 **kwargs) -> None:
        HyperoptSampler.__init__(self, goal, parameters)
        params_for_join_space = copy.deepcopy(parameters)
        for param_values in params_for_join_space.values():
            if param_values[TYPE] == CATEGORY:
                param_values[TYPE] = 'cat'
            if param_values[TYPE] == FLOAT:
                param_values[TYPE] = 'real'
            if param_values[TYPE] == INT or param_values[TYPE] == 'real':
                if SPACE not in param_values:
                    param_values[SPACE] = 'linear'
                param_values['range'] = (param_values['low'],
                                         param_values['high'])
                del param_values['low']
                del param_values['high']

        self.space = JointSpace(params_for_join_space)
        self.num_samples = num_samples
        self.samples = self._determine_samples()
        self.sampled_so_far = 0

    def _determine_samples(self):
        samples = []
        bnds = self.space.get_bounds()
        for _ in range(self.num_samples):
            # Draw a uniform point between the warped bounds, then unwarp it
            # back into a parameter dict.
            x = bnds[:, 0] + (bnds[:, 1] - bnds[:, 0]) * np.random.rand(
                1, len(bnds))
            sample = self.space.unwarp(x)[0]
            samples.append(sample)
        return samples

    def sample(self) -> Dict[str, Any]:
        if self.sampled_so_far >= len(self.samples):
            raise IndexError()
        sample = self.samples[self.sampled_so_far]
        self.sampled_so_far += 1
        return sample

    def update(self, sampled_parameters: Dict[str, Any], metric_score: float):
        pass

    def finished(self) -> bool:
        return self.sampled_so_far >= len(self.samples)
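
The core of _determine_samples is plain uniform sampling between the per-dimension bounds of the warped space. A minimal numpy-only sketch, with a hypothetical bounds array standing in for JointSpace.get_bounds() (shape (dim, 2), low/high columns):

import numpy as np

# Hypothetical warped bounds for a 3-dimensional space.
bounds = np.array([[0.0, 1.0], [-5.0, 5.0], [10.0, 100.0]])
num_samples = 10

lo, hi = bounds[:, 0], bounds[:, 1]
# One row per sample, one column per dimension, uniform in [lo, hi).
samples = lo + (hi - lo) * np.random.rand(num_samples, len(bounds))
print(samples.shape)  # (10, 3)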
Example #2
def suggest_dict(X, y, meta, n_suggestions=1, random=np_util.random):
    """Stateless function to create suggestions for next query point in random search optimization.

    This implements the API for general structures of different data types.

    Parameters
    ----------
    X : list(dict)
        Places where the objective function has already been evaluated. Not actually used in random search.
    y : :class:`numpy:numpy.ndarray`, shape (n,)
        Corresponding values where objective has been evaluated. Not actually used in random search.
    meta : dict(str, dict)
        Configuration of the optimization variables. See API description.
    n_suggestions : int
        Desired number of parallel suggestions in the output
    random : :class:`numpy:numpy.random.RandomState`
        Optionally pass in random stream for reproducibility.

    Returns
    -------
    next_guess : list(dict)
        List of `n_suggestions` suggestions to evaluate the objective function.
        Each suggestion is a dictionary where each key corresponds to a parameter being optimized.
    """
    # Warp and get bounds
    space_x = JointSpace(meta)
    X_warped = space_x.warp(X)
    bounds = space_x.get_bounds()
    _, n_params = _check_x_y(X_warped, y, allow_impute=True)
    lb, ub = _check_bounds(bounds, n_params)

    # Get the suggestion
    suggest_x = random.uniform(lb, ub, size=(n_suggestions, n_params))

    # Unwarp
    next_guess = space_x.unwarp(suggest_x)
    return next_guess
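
A hedged usage sketch, assuming the bayesmark-style meta format used throughout these examples ({"type": ..., "space": ..., "range"/"values": ...}); since random search ignores the history, empty X and y should be fine:

import numpy as np

meta = {
    "lr": {"type": "real", "space": "log", "range": (1e-4, 1e-1)},
    "layers": {"type": "int", "space": "linear", "range": (1, 4)},
}
next_guess = suggest_dict([], np.array([]), meta, n_suggestions=3)
# next_guess is a list of 3 dicts, e.g. [{"lr": 0.002, "layers": 3}, ...]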
Example #3
class PySOTOptimizer(AbstractOptimizer):
    primary_import = "pysot"

    def __init__(self, api_config):
        """Build wrapper class to use an optimizer in benchmark.

        Parameters
        ----------
        api_config : dict-like of dict-like
            Configuration of the optimization variables. See API description.
        """
        AbstractOptimizer.__init__(self, api_config)

        self.space_x = JointSpace(api_config)
        self.bounds = self.space_x.get_bounds()
        self.create_opt_prob()  # Sets up the optimization problem (needs self.bounds)
        self.max_evals = np.iinfo(np.int32).max  # NOTE: Largest possible int
        self.batch_size = None
        self.history = []
        self.proposals = []

    def create_opt_prob(self):
        """Create an optimization problem object."""
        opt = OptimizationProblem()
        opt.lb = self.bounds[:, 0]  # In warped space
        opt.ub = self.bounds[:, 1]  # In warped space
        opt.dim = len(self.bounds)
        opt.cont_var = np.arange(len(self.bounds))
        opt.int_var = []
        assert len(opt.cont_var) + len(opt.int_var) == opt.dim
        opt.objfun = None
        self.opt = opt

    def start(self, max_evals):
        """Starts a new pySOT run."""
        self.history = []
        self.proposals = []

        # Symmetric Latin hypercube design
        des_pts = max([self.batch_size, 2 * (self.opt.dim + 1)])
        slhd = SymmetricLatinHypercube(dim=self.opt.dim, num_pts=des_pts)

        # Warped RBF interpolant
        rbf = RBFInterpolant(dim=self.opt.dim, kernel=CubicKernel(), tail=LinearTail(self.opt.dim), eta=1e-4)
        rbf = SurrogateUnitBox(rbf, lb=self.opt.lb, ub=self.opt.ub)

        # Optimization strategy
        self.strategy = SRBFStrategy(
            max_evals=self.max_evals,
            opt_prob=self.opt,
            exp_design=slhd,
            surrogate=rbf,
            asynchronous=True,
            batch_size=1,
            use_restarts=True,
        )

    def suggest(self, n_suggestions=1):
        """Get a suggestion from the optimizer.

        Parameters
        ----------
        n_suggestions : int
            Desired number of parallel suggestions in the output

        Returns
        -------
        next_guess : list of dict
            List of `n_suggestions` suggestions to evaluate the objective
            function. Each suggestion is a dictionary where each key
            corresponds to a parameter being optimized.
        """

        if self.batch_size is None:  # First call to suggest
            self.batch_size = n_suggestions
            self.start(self.max_evals)

        # Set the tolerances as if we were running in batch mode
        d, p = float(self.opt.dim), float(n_suggestions)
        self.strategy.failtol = p * int(max(np.ceil(d / p), np.ceil(4 / p)))

        # Now we can make suggestions
        x_w = []
        self.proposals = []
        for _ in range(n_suggestions):
            proposal = self.strategy.propose_action()
            record = EvalRecord(proposal.args, status="pending")
            proposal.record = record
            proposal.accept()  # This triggers all the callbacks

            # It is possible that pySOT proposes a previously evaluated point
            # when all variables are integers, so we just abort in this case
            # since we have likely converged anyway. See PySOT issue #30.
            x = list(proposal.record.params)  # From tuple to list
            x_unwarped, = self.space_x.unwarp(x)
            if x_unwarped in self.history:
                warnings.warn("pySOT proposed the same point twice")
                self.start(self.max_evals)
                return self.suggest(n_suggestions=n_suggestions)

            # NOTE: Append unwarped to avoid rounding issues
            self.history.append(copy(x_unwarped))
            self.proposals.append(proposal)
            x_w.append(copy(x_unwarped))

        return x_w

    def _observe(self, x, y):
        # Find the matching proposal and execute its callbacks
        idx = [x == xx for xx in self.history]
        i = np.argwhere(idx)[0].item()  # Pick the first index if there are ties
        proposal = self.proposals[i]
        proposal.record.complete(y)
        self.proposals.pop(i)
        self.history.pop(i)

    def observe(self, X, y):
        """Send an observation of a suggestion back to the optimizer.

        Parameters
        ----------
        X : list of dict-like
            Places where the objective function has already been evaluated.
            Each suggestion is a dictionary where each key corresponds to a
            parameter being optimized.
        y : array-like, shape (n,)
            Corresponding values where objective has been evaluated
        """
        assert len(X) == len(y)

        for x_, y_ in zip(X, y):
            # Ignore any non-finite observations; unclear if this is the right thing to do
            if np.isfinite(y_):
                self._observe(x_, y_)
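
The optimizer follows the benchmark's suggest/observe protocol. A minimal driver loop, with api_config and evaluate as hypothetical stand-ins:

opt = PySOTOptimizer(api_config)
for _ in range(16):
    X = opt.suggest(n_suggestions=4)  # list of parameter dicts
    y = [evaluate(x) for x in X]      # objective values, lower is better
    opt.observe(X, y)                 # completes the matching pySOT proposals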
Example #4
class TurboOptimizer(AbstractOptimizer):
    primary_import = "Turbo"

    def __init__(self, api_config, **kwargs):
        """Build wrapper class to use an optimizer in benchmark.

        Parameters
        ----------
        api_config : dict-like of dict-like
            Configuration of the optimization variables. See API description.
        """
        AbstractOptimizer.__init__(self, api_config)

        self.dimensions, self.vars_types, self.param_list = TurboOptimizer.get_sk_dimensions(
            api_config)
        print("dimensions: {}".format(self.dimensions))
        print("vars_types: {}".format(self.vars_types))
        # names of variables
        print("param_list: {}".format(self.param_list))

        self.space_x = JointSpace(api_config)
        self.bounds = self.space_x.get_bounds()
        self.lb, self.ub = self.bounds[:, 0], self.bounds[:, 1]
        self.dim = len(self.bounds)
        print("lb: {}".format(self.lb))
        print("ub: {}".format(self.ub))
        print("dim: {}".format(self.dim))

        if "max_depth" in self.param_list:
            print("DT or RF")
            # max_depth
            att = "max_depth"
            print("att: {}".format(att))
            att_idx = self.param_list.index(att)
            print("old lb: {}, ub: {}".format(self.lb[att_idx],
                                              self.ub[att_idx]))
            self.lb[att_idx] = 10
            self.ub[att_idx] = 15
            print("new lb: {}, ub: {}".format(self.lb[att_idx],
                                              self.ub[att_idx]))

            # max_features
            att = "max_features"
            print("att: {}".format(att))
            att_idx = self.param_list.index(att)
            print("old lb: {}, ub: {}".format(self.lb[att_idx],
                                              self.ub[att_idx]))
            self.lb[att_idx] = logit(0.9)
            self.ub[att_idx] = logit(0.99)
            print("new lb: {}, ub: {}".format(self.lb[att_idx],
                                              self.ub[att_idx]))

            # min_impurity_decrease
            att = "min_impurity_decrease"
            print("att: {}".format(att))
            att_idx = self.param_list.index(att)
            print("old lb: {}, ub: {}".format(self.lb[att_idx],
                                              self.ub[att_idx]))
            self.lb[att_idx] = 1e-5
            self.ub[att_idx] = 1e-4
            print("new lb: {}, ub: {}".format(self.lb[att_idx],
                                              self.ub[att_idx]))
        if "beta_1" in self.param_list and "hidden_layer_sizes" in self.param_list:
            print("MLP-adam")
            # batch_size
            att = "batch_size"
            print("att: {}".format(att))
            att_idx = self.param_list.index(att)
            print("old lb: {}, ub: {}".format(self.lb[att_idx],
                                              self.ub[att_idx]))
            self.lb[att_idx] = 16
            self.ub[att_idx] = 128
            print("new lb: {}, ub: {}".format(self.lb[att_idx],
                                              self.ub[att_idx]))
            # hidden_layer_sizes
            att = "hidden_layer_sizes"
            print("att: {}".format(att))
            att_idx = self.param_list.index(att)
            print("old lb: {}, ub: {}".format(self.lb[att_idx],
                                              self.ub[att_idx]))
            self.lb[att_idx] = 64
            self.ub[att_idx] = 200
            print("new lb: {}, ub: {}".format(self.lb[att_idx],
                                              self.ub[att_idx]))
            # validation_fraction
            att = "validation_fraction"
            print("att: {}".format(att))
            att_idx = self.param_list.index(att)
            print("old lb: {}, ub: {}".format(self.lb[att_idx],
                                              self.ub[att_idx]))
            self.lb[att_idx] = logit(0.1)
            self.ub[att_idx] = logit(0.2)
            print("new lb: {}, ub: {}".format(self.lb[att_idx],
                                              self.ub[att_idx]))
        if "momentum" in self.param_list and "hidden_layer_sizes" in self.param_list:
            print("MLP-sgd")
            # batch_size
            att = "batch_size"
            print("att: {}".format(att))
            att_idx = self.param_list.index(att)
            print("old lb: {}, ub: {}".format(self.lb[att_idx],
                                              self.ub[att_idx]))
            self.lb[att_idx] = 16
            self.ub[att_idx] = 128
            print("new lb: {}, ub: {}".format(self.lb[att_idx],
                                              self.ub[att_idx]))
            # hidden_layer_sizes
            att = "hidden_layer_sizes"
            print("att: {}".format(att))
            att_idx = self.param_list.index(att)
            print("old lb: {}, ub: {}".format(self.lb[att_idx],
                                              self.ub[att_idx]))
            self.lb[att_idx] = 64
            self.ub[att_idx] = 200
            print("new lb: {}, ub: {}".format(self.lb[att_idx],
                                              self.ub[att_idx]))
            # validation_fraction
            att = "validation_fraction"
            print("att: {}".format(att))
            att_idx = self.param_list.index(att)
            print("old lb: {}, ub: {}".format(self.lb[att_idx],
                                              self.ub[att_idx]))
            self.lb[att_idx] = logit(0.1)
            self.ub[att_idx] = logit(0.2)
            print("new lb: {}, ub: {}".format(self.lb[att_idx],
                                              self.ub[att_idx]))
        if "C" in self.param_list and "gamma" in self.param_list:
            print("SVM")
            # C
            att = "C"
            print("att: {}".format(att))
            att_idx = self.param_list.index(att)
            print("old lb: {}, ub: {}".format(self.lb[att_idx],
                                              self.ub[att_idx]))
            self.lb[att_idx] = np.log(1e0)
            self.ub[att_idx] = np.log(1e3)
            print("new lb: {}, ub: {}".format(self.lb[att_idx],
                                              self.ub[att_idx]))
            # tol
            att = "tol"
            print("att: {}".format(att))
            att_idx = self.param_list.index(att)
            print("old lb: {}, ub: {}".format(self.lb[att_idx],
                                              self.ub[att_idx]))
            self.lb[att_idx] = np.log(1e-3)
            self.ub[att_idx] = np.log(1e-1)
            print("new lb: {}, ub: {}".format(self.lb[att_idx],
                                              self.ub[att_idx]))
        if "learning_rate" in self.param_list and "n_estimators" in self.param_list:
            print("ada")
            # n_estimators
            att = "n_estimators"
            print("att: {}".format(att))
            att_idx = self.param_list.index(att)
            print("old lb: {}, ub: {}".format(self.lb[att_idx],
                                              self.ub[att_idx]))
            self.lb[att_idx] = 30
            self.ub[att_idx] = 100
            print("new lb: {}, ub: {}".format(self.lb[att_idx],
                                              self.ub[att_idx]))
        if "n_neighbors" in self.param_list:
            print("kNN")
            # n_neighbors
            att = "n_neighbors"
            print("att: {}".format(att))
            att_idx = self.param_list.index(att)
            print("old lb: {}, ub: {}".format(self.lb[att_idx],
                                              self.ub[att_idx]))
            self.lb[att_idx] = 1
            self.ub[att_idx] = 15
            print("new lb: {}, ub: {}".format(self.lb[att_idx],
                                              self.ub[att_idx]))
            # p
            att = "p"
            print("att: {}".format(att))
            att_idx = self.param_list.index(att)
            print("old lb: {}, ub: {}".format(self.lb[att_idx],
                                              self.ub[att_idx]))
            self.lb[att_idx] = 1
            self.ub[att_idx] = 2
            print("new lb: {}, ub: {}".format(self.lb[att_idx],
                                              self.ub[att_idx]))

        print("new_lb: {}".format(self.lb))
        print("new_ub: {}".format(self.ub))

        self.max_evals = np.iinfo(np.int32).max  # NOTE: Largest possible int
        self.batch_size = None
        self.history = []

        self.turbo = Turbo1(
            f=None,
            lb=self.lb,
            ub=self.ub,
            n_init=2 * self.dim + 1,
            max_evals=self.max_evals,
            batch_size=1,  # We need to update this later
            verbose=False,
        )

        # count restart
        self.cnt_restart = 0
        # use smaller length_min
        self.turbo.length_min = 0.5**4
        # use distance between batch elements
        self.turbo.ele_distance = 1e-2

    # Obtain the search space configurations
    @staticmethod
    def get_sk_dimensions(api_config, transform="normalize"):
        """Help routine to setup skopt search space in constructor.

        Take api_config as argument so this can be static.
        """
        # The ordering of iteration probably makes no difference, but to be
        # safe and consistent with space.py, the keys are sorted.
        param_list = sorted(api_config.keys())

        sk_types = []
        sk_dims = []
        for param_name in param_list:
            param_config = api_config[param_name]

            param_type = param_config["type"]
            param_space = param_config.get("space", None)
            param_range = param_config.get("range", None)
            param_values = param_config.get("values", None)

            # Some setup for the case where a whitelist of values is provided:
            values_only_type = param_type in ("cat", "ordinal")
            if (param_values is not None) and (not values_only_type):
                assert param_range is None
                param_values = np.unique(param_values)
                param_range = (param_values[0], param_values[-1])
            if param_type == "int":
                # The integer space in skopt does not support any warping =>
                # need to leave the warping as linear in skopt.
                sk_dims.append(
                    Integer(param_range[0],
                            param_range[-1],
                            transform=transform,
                            name=param_name))
            elif param_type == "bool":
                assert param_range is None
                assert param_values is None
                sk_dims.append(
                    Integer(0, 1, transform=transform, name=param_name))
            elif param_type in ("cat", "ordinal"):
                assert param_range is None
                # Leave the transform as one-hot, per the skopt default
                sk_dims.append(Categorical(param_values, name=param_name))
            elif param_type == "real":
                # skopt doesn't support all our warpings, so we pick the
                # closest substitute it does support.
                # prior = "log-uniform" if param_space in ("log", "logit") else "uniform"
                if param_space == "log":
                    prior = "log-uniform"
                elif param_space == "logit":
                    prior = "logit-uniform"
                else:
                    prior = "uniform"
                sk_dims.append(
                    Real(param_range[0],
                         param_range[-1],
                         prior=prior,
                         transform=transform,
                         name=param_name))
            else:
                assert False, "type %s not handled in API" % param_type
            sk_types.append(param_type)
        return sk_dims, sk_types, param_list

    def restart(self):
        self.turbo._restart()
        self.turbo._X = np.zeros((0, self.turbo.dim))
        self.turbo._fX = np.zeros((0, 1))
        X_init = latin_hypercube(self.turbo.n_init, self.dim)
        self.X_init = from_unit_cube(X_init, self.lb, self.ub)

    def suggest(self, n_suggestions=1):
        if self.batch_size is None:  # Remember the batch size on the first call to suggest
            self.batch_size = n_suggestions
            self.turbo.batch_size = n_suggestions
            self.turbo.failtol = np.ceil(
                np.max([4.0 / self.batch_size, self.dim / self.batch_size]))
            self.turbo.n_init = max([self.turbo.n_init, self.batch_size])
            self.cnt_restart = self.cnt_restart + 1
            self.restart()

        X_next = np.zeros((n_suggestions, self.dim))

        # Pick from the initial points
        n_init = min(len(self.X_init), n_suggestions)
        if n_init > 0:
            X_next[:n_init] = deepcopy(self.X_init[:n_init, :])
            self.X_init = self.X_init[n_init:, :]  # Remove these pending points

        # Get remaining points from TuRBO
        n_adapt = n_suggestions - n_init
        print("n_adapt: {}, n_suggestions: {}, n_init: {}".format(
            n_adapt, n_suggestions, n_init))
        if n_adapt > 0:
            if len(self.turbo._X) > 0:  # Use random points if we can't fit a GP
                print("running Turbo...")
                X = to_unit_cube(deepcopy(self.turbo._X), self.lb, self.ub)
                fX = copula_standardize(deepcopy(self.turbo._fX).ravel())  # Use Copula
                X_cand, y_cand, _ = self.turbo._create_candidates(
                    X,
                    fX,
                    length=self.turbo.length,
                    n_training_steps=100,
                    hypers={})
                X_next[-n_adapt:, :] = self.turbo._select_candidates(
                    X_cand, y_cand)[:n_adapt, :]
                X_next[-n_adapt:, :] = from_unit_cube(X_next[-n_adapt:, :],
                                                      self.lb, self.ub)

        # Unwarp the suggestions
        suggestions = self.space_x.unwarp(X_next)
        return suggestions

    def observe(self, X, y):
        """Send an observation of a suggestion back to the optimizer.

        Parameters
        ----------
        X : list of dict-like
            Places where the objective function has already been evaluated.
            Each suggestion is a dictionary where each key corresponds to a
            parameter being optimized.
        y : array-like, shape (n,)
            Corresponding values where objective has been evaluated
        """
        assert len(X) == len(y)
        XX, yy = self.space_x.warp(X), np.array(y)[:, None]

        if len(self.turbo._fX) >= self.turbo.n_init:
            print("adjust region length")
            print("original region length: {}".format(self.turbo.length))
            self.turbo._adjust_length(yy)
            print("adjusted region length: {}".format(self.turbo.length))

        self.turbo.n_evals += self.batch_size

        self.turbo._X = np.vstack((self.turbo._X, deepcopy(XX)))
        self.turbo._fX = np.vstack((self.turbo._fX, deepcopy(yy)))
        self.turbo.X = np.vstack((self.turbo.X, deepcopy(XX)))
        self.turbo.fX = np.vstack((self.turbo.fX, deepcopy(yy)))

        ind_best = np.argmin(self.turbo.fX)
        f_best, x_best = self.turbo.fX[ind_best], self.turbo.X[ind_best, :]
        print("best f(x): {}, at x: {}".format(round(f_best[0], 2),
                                               np.around(x_best, 2)))
        print("x_best: {}".format(self.space_x.unwarp([x_best])))

        # Check for a restart
        print("turbo.length: {}, turbo.length_min: {}".format(
            self.turbo.length, self.turbo.length_min))
        if self.turbo.length < self.turbo.length_min:
            self.cnt_restart = self.cnt_restart + 1
            self.restart()
            print("original new region length: {}".format(self.turbo.length))
            # The current region is already exploited (length < length_min),
            # so try a new, smaller one.
            self.turbo.length = round(self.turbo.length / self.cnt_restart, 1)
            print("reduced new region length: {}".format(self.turbo.length))
Example #5
class RandomSampler(HyperoptSampler):
    num_samples = 10

    def __init__(self,
                 goal: str,
                 parameters: Dict[str, Any],
                 num_samples=10,
                 **kwargs) -> None:
        HyperoptSampler.__init__(self, goal, parameters)
        params_for_join_space = copy.deepcopy(parameters)

        cat_params_values_types = {}
        for param_name, param_values in params_for_join_space.items():
            if param_values[TYPE] == CATEGORY:
                param_values[TYPE] = 'cat'
                values_str = []
                values_types = {}
                for value in param_values['values']:
                    value_type = type(value)
                    if value_type == bool:
                        value_str = str(value)
                        value_type = str2bool
                    elif value_type == str or value_type == int or \
                            value_type == float:
                        value_str = str(value)
                    else:
                        value_str = json.dumps(value)
                        value_type = json.loads
                    values_str.append(value_str)
                    values_types[value_str] = value_type
                param_values['values'] = values_str
                cat_params_values_types[param_name] = values_types
            if param_values[TYPE] == FLOAT:
                param_values[TYPE] = 'real'
            if param_values[TYPE] == INT or param_values[TYPE] == 'real':
                if SPACE not in param_values:
                    param_values[SPACE] = 'linear'
                param_values['range'] = (param_values['low'],
                                         param_values['high'])
                del param_values['low']
                del param_values['high']

        self.cat_params_values_types = cat_params_values_types
        self.space = JointSpace(params_for_join_space)
        self.num_samples = num_samples
        self.samples = self._determine_samples()
        self.sampled_so_far = 0
        self.default_batch_size = self.num_samples

    def _determine_samples(self):
        samples = []
        bnds = self.space.get_bounds()
        for _ in range(self.num_samples):
            # Draw a uniform point between the warped bounds, then unwarp it
            # back into a parameter dict.
            x = bnds[:, 0] + (bnds[:, 1] - bnds[:, 0]) * np.random.rand(
                1, len(bnds))
            sample = self.space.unwarp(x)[0]
            samples.append(sample)
        return samples

    def sample(self) -> Dict[str, Any]:
        if self.sampled_so_far >= len(self.samples):
            raise IndexError()
        sample = self.samples[self.sampled_so_far]
        for key in sample:
            if key in self.cat_params_values_types:
                values_types = self.cat_params_values_types[key]
                sample[key] = values_types[sample[key]](sample[key])
        self.sampled_so_far += 1
        return sample

    def update(self, sampled_parameters: Dict[str, Any], metric_score: float):
        pass

    def finished(self) -> bool:
        return self.sampled_so_far >= len(self.samples)
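
The category round-trip above (serialize values to strings for JointSpace, decode them back on sampling) can be illustrated standalone; str2bool here is a stand-in for the helper the sampler imports:

import json

def str2bool(s):  # stand-in for the imported helper
    return s == 'True'

decoders = {}
for value in [True, 7, 'adam', {'layers': 2}]:
    if isinstance(value, bool):
        value_str, decoder = str(value), str2bool
    elif isinstance(value, (str, int, float)):
        value_str, decoder = str(value), type(value)
    else:
        value_str, decoder = json.dumps(value), json.loads
    decoders[value_str] = decoder

print(decoders['True']('True'))                    # True, a bool again
print(decoders['{"layers": 2}']('{"layers": 2}'))  # {'layers': 2}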
Example #6
class TurboOptimizer(AbstractOptimizer):
    primary_import = "Turbo"

    def __init__(self, api_config, **kwargs):
        """Build wrapper class to use an optimizer in benchmark.

        Parameters
        ----------
        api_config : dict-like of dict-like
            Configuration of the optimization variables. See API description.
        """
        AbstractOptimizer.__init__(self, api_config)

        self.space_x = JointSpace(api_config)
        self.bounds = self.space_x.get_bounds()
        self.lb, self.ub = self.bounds[:, 0], self.bounds[:, 1]
        self.dim = len(self.bounds)
        self.max_evals = np.iinfo(np.int32).max  # NOTE: Largest possible int
        self.batch_size = None
        self.history = []

        self.turbo = Turbo1(
            f=None,
            lb=self.bounds[:, 0],
            ub=self.bounds[:, 1],
            n_init=2 * self.dim + 1,
            max_evals=self.max_evals,
            batch_size=1,  # We need to update this later
            verbose=False,
        )

    def restart(self):
        self.turbo._restart()
        self.turbo._X = np.zeros((0, self.turbo.dim))
        self.turbo._fX = np.zeros((0, 1))
        X_init = latin_hypercube(self.turbo.n_init, self.dim)
        self.X_init = from_unit_cube(X_init, self.lb, self.ub)

    def suggest(self, n_suggestions=1):
        if self.batch_size is None:  # Remember the batch size on the first call to suggest
            self.batch_size = n_suggestions
            self.turbo.batch_size = n_suggestions
            self.turbo.failtol = np.ceil(
                np.max([4.0 / self.batch_size, self.dim / self.batch_size]))
            self.turbo.n_init = max([self.turbo.n_init, self.batch_size])
            self.restart()

        X_next = np.zeros((n_suggestions, self.dim))

        # Pick from the initial points
        n_init = min(len(self.X_init), n_suggestions)
        if n_init > 0:
            X_next[:n_init] = deepcopy(self.X_init[:n_init, :])
            self.X_init = self.X_init[n_init:, :]  # Remove these pending points

        # Get remaining points from TuRBO
        n_adapt = n_suggestions - n_init
        if n_adapt > 0:
            if len(self.turbo._X) > 0:  # Use random points if we can't fit a GP
                X = to_unit_cube(deepcopy(self.turbo._X), self.lb, self.ub)
                fX = copula_standardize(deepcopy(self.turbo._fX).ravel())  # Use Copula
                X_cand, y_cand, _ = self.turbo._create_candidates(
                    X,
                    fX,
                    length=self.turbo.length,
                    n_training_steps=100,
                    hypers={})
                X_next[-n_adapt:, :] = self.turbo._select_candidates(
                    X_cand, y_cand)[:n_adapt, :]
                X_next[-n_adapt:, :] = from_unit_cube(X_next[-n_adapt:, :],
                                                      self.lb, self.ub)

        # Unwarp the suggestions
        suggestions = self.space_x.unwarp(X_next)
        return suggestions

    def observe(self, X, y):
        """Send an observation of a suggestion back to the optimizer.

        Parameters
        ----------
        X : list of dict-like
            Places where the objective function has already been evaluated.
            Each suggestion is a dictionary where each key corresponds to a
            parameter being optimized.
        y : array-like, shape (n,)
            Corresponding values where objective has been evaluated
        """
        assert len(X) == len(y)
        XX, yy = self.space_x.warp(X), np.array(y)[:, None]

        if len(self.turbo._fX) >= self.turbo.n_init:
            self.turbo._adjust_length(yy)

        self.turbo.n_evals += self.batch_size

        self.turbo._X = np.vstack((self.turbo._X, deepcopy(XX)))
        self.turbo._fX = np.vstack((self.turbo._fX, deepcopy(yy)))
        self.turbo.X = np.vstack((self.turbo.X, deepcopy(XX)))
        self.turbo.fX = np.vstack((self.turbo.fX, deepcopy(yy)))

        # Check for a restart
        if self.turbo.length < self.turbo.length_min:
            self.restart()
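
The failtol rule applied on the first suggest() call scales TuRBO's failure tolerance to the batch size. Numerically, assuming dim = 10 and a batch of 4:

import numpy as np

dim, batch_size = 10, 4  # hypothetical problem size and batch
failtol = np.ceil(np.max([4.0 / batch_size, dim / batch_size]))
print(failtol)  # 3.0: three consecutive failing batches shrink the trust region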
Example #7
class tuSOTOptimizer(AbstractOptimizer):
    primary_import = "pysot"

    def __init__(self, api_config):
        """Build wrapper class to use an optimizer in benchmark.

        Parameters
        ----------
        api_config : dict-like of dict-like
            Configuration of the optimization variables. See API description.
        """
        AbstractOptimizer.__init__(self, api_config)

        self.space_x = JointSpace(api_config)
        self.bounds = self.space_x.get_bounds()
        self.create_opt_prob()  # Sets up the optimization problem (needs self.bounds)
        self.max_evals = np.iinfo(np.int32).max  # NOTE: Largest possible int
        self.turbo_batch_size = None
        self.pysot_batch_size = None
        self.history = []
        self.proposals = []
        self.lb, self.ub = self.bounds[:, 0], self.bounds[:, 1]
        self.dim = len(self.bounds)

        self.turbo = Turbo1(
            f=None,
            lb=self.bounds[:, 0],
            ub=self.bounds[:, 1],
            n_init=2 * self.dim + 1,
            max_evals=self.max_evals,
            batch_size=4,  # We need to update this later
            verbose=False,
        )
        self.meta_init = self.get_init_hp()

    def get_init_hp(self):
        api_config = self.api_config
        feature = get_api_config_feature(api_config)
        result, values = [], []
        for i in range(9):
            model = joblib.load('./lgb_meta_model_{}.pkl'.format(i))
            value = model.predict(np.array(feature).reshape(1, -1))[-1]
            values.append(value)
            result.append(math.e ** value)
        result_dict = {}
        for i, (k, v) in enumerate(api_config.items()):
            if v['type'] == "real":
                lower, upper = v["range"][0], v["range"][1]
                if lower < result[i] < upper:
                    out = result[i]
                else:
                    out = np.random.uniform(lower, upper)
            elif v['type'] == "int":
                lower, upper = v["range"][0], v["range"][1]
                if lower < result[i] < upper:
                    out = int(result[i])
                else:
                    out = np.random.randint(lower, upper)
            elif v['type'] == "bool":
                out = int(result[i]) >= 1
            else:
                out = api_config[k]["values"][0]
            result_dict[k] = out
        return result[:len(api_config)], values[:len(api_config)], result_dict

    def restart(self):
        self.turbo._restart()
        self.turbo._X = np.zeros((0, self.turbo.dim))
        self.turbo._fX = np.zeros((0, 1))
        X_init = latin_hypercube(self.turbo.n_init, self.dim)
        self.X_init = from_unit_cube(X_init, self.lb, self.ub)

    def create_opt_prob(self):
        """Create an optimization problem object."""
        opt = OptimizationProblem()
        opt.lb = self.bounds[:, 0]  # In warped space
        opt.ub = self.bounds[:, 1]  # In warped space
        opt.dim = len(self.bounds)
        opt.cont_var = np.arange(len(self.bounds))
        opt.int_var = []
        assert len(opt.cont_var) + len(opt.int_var) == opt.dim
        opt.objfun = None
        self.opt = opt

    def start(self):
        """Starts a new pySOT run."""
        self.history = []
        self.proposals = []

        # Symmetric Latin hypercube design
        des_pts = max([self.pysot_batch_size, 2 * (self.opt.dim + 1)])
        slhd = SymmetricLatinHypercube(dim=self.opt.dim, num_pts=des_pts)

        # Warped RBF interpolant
        rbf = RBFInterpolant(dim=self.opt.dim, kernel=CubicKernel(), tail=LinearTail(self.opt.dim), eta=1e-4)
        rbf = SurrogateUnitBox(rbf, lb=self.opt.lb, ub=self.opt.ub)

        # Optimization strategy
        self.strategy = SRBFStrategy(
            max_evals=self.max_evals,
            opt_prob=self.opt,
            exp_design=slhd,
            surrogate=rbf,
            asynchronous=True,
            batch_size=1,
            use_restarts=True,
        )

    def pysot_suggest(self, n_suggestions=1):
        if self.pysot_batch_size is None:  # First call to suggest
            self.pysot_batch_size = n_suggestions
            self.start()

        # Set the tolerances as if we were running in batch mode
        d, p = float(self.opt.dim), float(n_suggestions)
        self.strategy.failtol = p * int(max(np.ceil(d / p), np.ceil(4 / p)))

        # Now we can make suggestions
        x_w = []
        self.proposals = []
        for _ in range(n_suggestions):
            proposal = self.strategy.propose_action()
            record = EvalRecord(proposal.args, status="pending")
            proposal.record = record
            proposal.accept()  # This triggers all the callbacks

            # It is possible that pySOT proposes a previously evaluated point
            # when all variables are integers, so we just abort in this case
            # since we have likely converged anyway. See PySOT issue #30.
            x = list(proposal.record.params)  # From tuple to list
            x_unwarped, = self.space_x.unwarp(x)
            if x_unwarped in self.history:
                warnings.warn("pySOT proposed the same point twice")
                self.start()
                return self.pysot_suggest(n_suggestions=n_suggestions)

            # NOTE: Append unwarped to avoid rounding issues
            self.history.append(copy(x_unwarped))
            self.proposals.append(proposal)
            x_w.append(copy(x_unwarped))

        return x_w

    def turbo_suggest(self, n_suggestions=1):
        if self.turbo_batch_size is None:  # Remember the batch size on the first call to suggest
            self.turbo_batch_size = n_suggestions
            self.turbo.batch_size = n_suggestions
            self.turbo.failtol = np.ceil(np.max([4.0 / self.turbo_batch_size, self.dim / self.turbo_batch_size]))
            self.turbo.n_init = max([self.turbo.n_init, self.turbo_batch_size])
            self.restart()

        X_next = np.zeros((n_suggestions, self.dim))

        # Pick from the initial points
        n_init = min(len(self.X_init), n_suggestions)
        if n_init > 0:
            X_next[:n_init] = deepcopy(self.X_init[:n_init, :])
            self.X_init = self.X_init[n_init:, :]  # Remove these pending points

        # Get remaining points from TuRBO
        n_adapt = n_suggestions - n_init
        if n_adapt > 0:
            if len(self.turbo._X) > 0:  # Use random points if we can't fit a GP
                X = to_unit_cube(deepcopy(self.turbo._X), self.lb, self.ub)
                fX = copula_standardize(deepcopy(self.turbo._fX).ravel())  # Use Copula
                X_cand, y_cand, _ = self.turbo._create_candidates(
                    X, fX, length=self.turbo.length, n_training_steps=100, hypers={}
                )
                X_next[-n_adapt:, :] = self.turbo._select_candidates(X_cand, y_cand)[:n_adapt, :]
                X_next[-n_adapt:, :] = from_unit_cube(X_next[-n_adapt:, :], self.lb, self.ub)
        # Unwarp the suggestions
        suggestions = self.space_x.unwarp(X_next)
        suggestions[-1] = self.meta_init[2]
        return suggestions

    def suggest(self, n_suggestions=1):
        if n_suggestions == 1:
            return self.turbo_suggest(n_suggestions)
        else:
            suggestion = n_suggestions // 2
            return self.turbo_suggest(suggestion) + self.pysot_suggest(n_suggestions - suggestion)

    def _observe(self, x, y):
        # Find the matching proposal and execute its callbacks
        idx = [x == xx for xx in self.history]
        if np.any(idx):
            i = np.argwhere(idx)[0].item()  # Pick the first index if there are ties
            proposal = self.proposals[i]
            proposal.record.complete(y)
            self.proposals.pop(i)
            self.history.pop(i)

    def observe(self, X, y):
        """Send an observation of a suggestion back to the optimizer.

        Parameters
        ----------
        X : list of dict-like
            Places where the objective function has already been evaluated.
            Each suggestion is a dictionary where each key corresponds to a
            parameter being optimized.
        y : array-like, shape (n,)
            Corresponding values where objective has been evaluated
        """
        assert len(X) == len(y)

        for x_, y_ in zip(X, y):
            # Ignore any non-finite observations; unclear if this is the right thing to do
            if np.isfinite(y_):
                self._observe(x_, y_)

        XX, yy = self.space_x.warp(X), np.array(y)[:, None]

        if len(self.turbo._fX) >= self.turbo.n_init:
            self.turbo._adjust_length(yy)

        self.turbo.n_evals += self.turbo_batch_size

        self.turbo._X = np.vstack((self.turbo._X, deepcopy(XX)))
        self.turbo._fX = np.vstack((self.turbo._fX, deepcopy(yy)))
        self.turbo.X = np.vstack((self.turbo.X, deepcopy(XX)))
        self.turbo.fX = np.vstack((self.turbo.fX, deepcopy(yy)))

        # Check for a restart
        if self.turbo.length < self.turbo.length_min:
            self.restart()
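
suggest() splits each batch between the two sub-optimizers: TuRBO serves single-point requests on its own, and for larger batches the points are divided roughly in half. A quick sketch of the split:

n_suggestions = 8
n_turbo = n_suggestions // 2        # 4 points from turbo_suggest
n_pysot = n_suggestions - n_turbo   # 4 points from pysot_suggest
# For odd batches, e.g. n_suggestions = 5: 2 from TuRBO, 3 from pySOT.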
Example #8
class SpacePartitioningOptimizer(AbstractOptimizer):
    primary_import = 'scikit-learn'

    def __init__(self, api_config, **kwargs):
        AbstractOptimizer.__init__(self, api_config)

        print('api_config:', api_config)
        self.api_config = api_config

        self.space_x = JointSpace(api_config)
        self.bounds = self.space_x.get_bounds()
        self.lb, self.ub = self.bounds[:, 0], self.bounds[:, 1]
        self.dim = len(self.bounds)

        self.X = np.zeros((0, self.dim))
        self.y = np.zeros((0, 1))

        self.X_init = None
        self.batch_size = None
        self.turbo = None
        self.split_used = 0
        self.node = None
        self.best_values = []

        self.config = self._read_config()
        print('config:', self.config)
        optimizer_seed = self.config.get('optimizer_seed')
        fix_optimizer_seed(optimizer_seed)
        self.sampler_seed = self.config.get('sampler_seed')
        sampler.fix_sampler_seed(self.sampler_seed)

        self.is_init_batch = False
        self.init_batches = []

    def _read_config(self):
        return {
            'turbo_training_steps': 100,
            'turbo_length_retries': 10,
            'turbo_length_init_method': 'default',
            'experimental_design': 'lhs_classic_ratio',
            'n_init_points': 24,
            'max_tree_depth': 5,
            'kmeans_resplits': 10,
            'split_model': {
                'type': 'SVC',
                'args': {
                    'kernel': 'poly',
                    'gamma': 'scale',
                    'C': 745.3227447730735
                }
            },
            'reset_no_improvement': 8,
            'reset_split_after': 4,
            'turbo': {
                'budget': 128,
                'use_cylinder': 0,
                'use_pull': 0,
                'use_lcb': 0,
                'kappa': 2.0,
                'use_decay': 1,
                'decay_alpha': 0.49937937259674076,
                'decay_threshold': 0.5,
                'length_min': 1e-06,
                'length_max': 2.0,
                'length_init': 0.8,
                'length_multiplier': 2.0
            },
            'sampler_seed': 42,
            'optimizer_seed': 578330
        }

    def _init(self, n_suggestions):
        self.batch_size = n_suggestions
        n_init_points = self.config['n_init_points']
        if n_init_points == -1:
            # Special value to use the default 2*D+1 number.
            n_init_points = 2 * self.dim + 1
        self.n_init = max(self.batch_size, n_init_points)
        exp_design = self.config['experimental_design']
        if exp_design == 'latin_hypercube':
            X_init = latin_hypercube(self.n_init, self.dim)
        elif exp_design == 'halton':
            halton_sampler = sampler.Sampler(method='halton',
                                             api_config=self.api_config,
                                             n_points=self.n_init)
            X_init = halton_sampler.generate(random_state=self.sampler_seed)
            X_init = self.space_x.warp(X_init)
            X_init = to_unit_cube(X_init, self.lb, self.ub)
        elif exp_design == 'lhs_classic_ratio':
            lhs_sampler = sampler.Sampler(method='lhs',
                                          api_config=self.api_config,
                                          n_points=self.n_init,
                                          generator_kwargs={
                                              'lhs_type': 'classic',
                                              'criterion': 'ratio'
                                          })
            X_init = lhs_sampler.generate(random_state=self.sampler_seed)
            X_init = self.space_x.warp(X_init)
            X_init = to_unit_cube(X_init, self.lb, self.ub)
        else:
            raise ValueError(f'Unknown experimental design: {exp_design}.')
        self.X_init = X_init
        if DEBUG:
            print(
                f'Initialized the method with {self.n_init} points by {exp_design}:'
            )
            print(X_init)

    def _get_split_model(self, X, kmeans_labels):
        split_model_config = self.config['split_model']
        model_type = split_model_config['type']
        args = split_model_config['args']
        if model_type == 'SVC':
            split_model = SVC(**args, max_iter=10**7)
        elif model_type == 'KNeighborsClassifier':
            split_model = KNeighborsClassifier(**args)
        else:
            raise ValueError(
                f'Unknown split model type in the config: {model_type}.')

        split_model.fit(X, kmeans_labels)
        split_model_predictions = split_model.predict(X)
        split_model_matches = np.sum(split_model_predictions == kmeans_labels)
        split_model_mismatches = np.sum(
            split_model_predictions != kmeans_labels)
        print('Labels for the split model:', kmeans_labels)
        print('Predictions of the split model:', split_model_predictions)
        print(
            f'Split model matches {split_model_matches} and mismatches {split_model_mismatches}'
        )
        return split_model

    def _find_split(self, X, y) -> Optional:
        max_margin = None
        max_margin_labels = None
        for _ in range(self.config['kmeans_resplits']):
            kmeans = KMeans(n_clusters=2).fit(y)
            kmeans_labels = kmeans.labels_
            if np.count_nonzero(kmeans_labels == 1) > 0 and np.count_nonzero(
                    kmeans_labels == 0) > 0:
                if np.mean(y[kmeans_labels == 1]) > np.mean(
                        y[kmeans_labels == 0]):
                    # Reverse labels if the entries with 1s have a higher mean error, since 1s go to the left branch.
                    kmeans_labels = 1 - kmeans_labels
                margin = -(np.mean(y[kmeans_labels == 1]) -
                           np.mean(y[kmeans_labels == 0]))
                if DEBUG:
                    print('MARGIN is', margin,
                          np.count_nonzero(kmeans_labels == 1),
                          np.count_nonzero(kmeans_labels == 0))
                if max_margin is None or margin > max_margin:
                    max_margin = margin
                    max_margin_labels = kmeans_labels
        if DEBUG:
            print('MAX MARGIN is', max_margin)
        if max_margin_labels is None:
            return None
        else:
            return self._get_split_model(X, max_margin_labels)

    def _build_tree(self, X, y, depth=0):
        print('len(X) in _build_tree is', len(X))
        if depth == self.config['max_tree_depth']:
            return []
        split = self._find_split(X, y)
        if split is None:
            return []
        in_region_points = split.predict(X)
        left_subtree_size = np.count_nonzero(in_region_points == 1)
        right_subtree_size = np.count_nonzero(in_region_points == 0)
        print(
            f'{len(X)} points would be split {left_subtree_size}/{right_subtree_size}.'
        )
        if left_subtree_size < self.n_init:
            return []
        idx = (in_region_points == 1)
        splits = self._build_tree(X[idx], y[idx], depth + 1)
        return [split] + splits

    def _get_in_node_region(self, points, splits):
        in_region = np.ones(len(points))
        for split in splits:
            split_in_region = split.predict(points)
            in_region *= split_in_region
        return in_region

    def _suggest(self, n_suggestions):
        X = to_unit_cube(deepcopy(self.X), self.lb, self.ub)
        y = deepcopy(self.y)
        if not self.node:
            self.split_used = 0
            self.node = self._build_tree(X, y)
            used_budget = len(y)
            idx = (self._get_in_node_region(X, self.node) == 1)
            X = X[idx]
            y = y[idx]
            print(f'Rebuilt the tree of depth {len(self.node)}')
            model_config = self.config['turbo']
            self.turbo = Turbo1(
                f=None,
                lb=self.bounds[:, 0],
                ub=self.bounds[:, 1],
                n_init=len(X),
                max_evals=np.iinfo(np.int32).max,
                batch_size=self.batch_size,
                verbose=False,
                use_cylinder=model_config['use_cylinder'],
                budget=model_config['budget'],
                use_decay=model_config['use_decay'],
                decay_threshold=model_config['decay_threshold'],
                decay_alpha=model_config['decay_alpha'],
                use_pull=model_config['use_pull'],
                use_lcb=model_config['use_lcb'],
                kappa=model_config['kappa'],
                length_min=model_config['length_min'],
                length_max=model_config['length_max'],
                length_init=model_config['length_init'],
                length_multiplier=model_config['length_multiplier'],
                used_budget=used_budget)
            self.turbo._X = np.array(X, copy=True)
            self.turbo._fX = np.array(y, copy=True)
            self.turbo.X = np.array(X, copy=True)
            self.turbo.fX = np.array(y, copy=True)
            print('Initialized TURBO')
        else:
            idx = (self._get_in_node_region(X, self.node) == 1)
            X = X[idx]
            y = y[idx]
        self.split_used += 1

        length_init_method = self.config['turbo_length_init_method']
        if length_init_method == 'default':
            length = self.turbo.length
        elif length_init_method == 'length_init':
            length = self.turbo.length_init
        elif length_init_method == 'length_max':
            length = self.turbo.length_max
        elif length_init_method == 'infinity':
            length = np.iinfo(np.int32).max
        else:
            raise ValueError(
                f'Unknown init method for turbo\'s length: {length_init_method}.'
            )
        length_retries = self.config['turbo_length_retries']
        for retry in range(length_retries):
            XX = X
            yy = copula_standardize(y.ravel())
            X_cand, y_cand, _ = self.turbo._create_candidates(
                XX,
                yy,
                length=length,
                n_training_steps=self.config['turbo_training_steps'],
                hypers={})
            in_region_predictions = self._get_in_node_region(X_cand, self.node)
            in_region_idx = in_region_predictions == 1
            if DEBUG:
                print(
                    f'In region: {np.sum(in_region_idx)} out of {len(X_cand)}')
            if np.sum(in_region_idx) >= n_suggestions:
                X_cand, y_cand = X_cand[in_region_idx], y_cand[in_region_idx]
                self.turbo.f_var = self.turbo.f_var[in_region_idx]
                if DEBUG:
                    print('Found a suitable set of candidates.')
                break
            else:
                length /= 2
                if DEBUG:
                    print(f'Retry {retry + 1}/{length_retries}')

        X_cand = self.turbo._select_candidates(X_cand,
                                               y_cand)[:n_suggestions, :]
        if DEBUG:
            if X.shape[1] == 3:
                tx = np.arange(0.0, 1.0 + 1e-6, 0.1)
                ty = np.arange(0.0, 1.0 + 1e-6, 0.1)
                tz = np.arange(0.0, 1.0 + 1e-6, 0.1)
                p = np.array([[x, y, z] for x in tx for y in ty for z in tz])
            elif X.shape[1] == 2:
                tx = np.arange(0.0, 1.0 + 1e-6, 0.1)
                ty = np.arange(0.0, 1.0 + 1e-6, 0.1)
                p = np.array([[x, y] for x in tx for y in ty])
            else:
                raise ValueError(
                    'The points for the DEBUG should either be 2D or 3D.')
            p_predictions = self._get_in_node_region(p, self.node)
            in_turbo_bounds = np.logical_and(
                np.all(self.turbo.cand_lb <= p, axis=1),
                np.all(p <= self.turbo.cand_ub, axis=1))
            pcds = []
            _add_pcd(pcds, p[p_predictions == 0], (1.0, 0.0, 0.0))
            _add_pcd(
                pcds, p[np.logical_and(p_predictions == 1,
                                       np.logical_not(in_turbo_bounds))],
                (0.0, 1.0, 0.0))
            _add_pcd(pcds, p[np.logical_and(p_predictions == 1,
                                            in_turbo_bounds)], (0.0, 0.5, 0.0))
            _add_pcd(pcds, X_cand, (0.0, 0.0, 0.0))
            open3d.visualization.draw_geometries(pcds)
        return X_cand

    def suggest(self, n_suggestions=1):
        X_suggestions = np.zeros((n_suggestions, self.dim))
        # Initialize the design if it is the first call
        if self.X_init is None:
            self._init(n_suggestions)
            if self.init_batches:
                print('REUSING INITIALIZATION:')
                for X, Y in self.init_batches:
                    print('Re-observing a batch!')
                    self.observe(X, Y)
                self.X_init = []

        # Pick from the experimental design
        n_init = min(len(self.X_init), n_suggestions)
        if n_init > 0:
            X_suggestions[:n_init] = self.X_init[:n_init]
            self.X_init = self.X_init[n_init:]
            self.is_init_batch = True
        else:
            self.is_init_batch = False

        # Pick from the model based on the already received observations
        n_suggest = n_suggestions - n_init
        if n_suggest > 0:
            X_cand = self._suggest(n_suggest)
            X_suggestions[-n_suggest:] = X_cand

        # Map into the continuous space with the api bounds and unwarp the suggestions
        X_min_bound = 0.0
        X_max_bound = 1.0
        X_suggestions_min = X_suggestions.min()
        X_suggestions_max = X_suggestions.max()
        if X_suggestions_min < X_min_bound or X_suggestions_max > X_max_bound:
            print(
                f'Some suggestions are out of the bounds in suggest(): {X_suggestions_min}, {X_suggestions_max}'
            )
            print('Clipping everything...')
            X_suggestions = np.clip(X_suggestions, X_min_bound, X_max_bound)
        X_suggestions = from_unit_cube(X_suggestions, self.lb, self.ub)
        X_suggestions = self.space_x.unwarp(X_suggestions)
        return X_suggestions

    def observe(self, X_observed, Y_observed):
        if self.is_init_batch:
            self.init_batches.append([X_observed, Y_observed])
        X, Y = [], []
        for x, y in zip(X_observed, Y_observed):
            if np.isfinite(y):
                X.append(x)
                Y.append(y)
            else:
                # Ignore for now; could potentially substitute with an upper bound.
                continue
        if not X:
            return
        X, Y = self.space_x.warp(X), np.array(Y)[:, None]
        self.X = np.vstack((self.X, deepcopy(X)))
        self.y = np.vstack((self.y, deepcopy(Y)))
        self.best_values.append(Y.min())

        if self.turbo:
            if len(self.turbo._X) >= self.turbo.n_init:
                self.turbo._adjust_length(Y)
            print('TURBO length:', self.turbo.length)
            self.turbo._X = np.vstack((self.turbo._X, deepcopy(X)))
            self.turbo._fX = np.vstack((self.turbo._fX, deepcopy(Y)))
            self.turbo.X = np.vstack((self.turbo.X, deepcopy(X)))
            self.turbo.fX = np.vstack((self.turbo.fX, deepcopy(Y)))

        N = self.config['reset_no_improvement']
        if len(self.best_values) > N and np.min(
                self.best_values[:-N]) <= np.min(self.best_values[-N:]):
            print('########## RESETTING COMPLETELY! ##########')
            self.X = np.zeros((0, self.dim))
            self.y = np.zeros((0, 1))
            self.best_values = []
            self.X_init = None
            self.node = None
            self.turbo = None
            self.split_used = 0

        if self.split_used >= self.config['reset_split_after']:
            print('########## REBUILDING THE SPLIT! ##########')
            self.node = None
            self.turbo = None
            self.split_used = 0
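
A minimal sketch of how a wrapper like the one above is typically driven, assuming the suggest/observe interface it implements; run_loop and objective are hypothetical names, not part of the example:

import numpy as np

def run_loop(optimizer, objective, n_batches=10, batch_size=8):
    # Alternate suggest() and observe(), tracking the best value seen so far.
    best = np.inf
    for _ in range(n_batches):
        suggestions = optimizer.suggest(n_suggestions=batch_size)  # list of dicts
        ys = [objective(s) for s in suggestions]
        optimizer.observe(suggestions, ys)  # feed the scores back to the model
        best = min(best, min(ys))
    return best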
Example #9
class BoEI(AbstractOptimizer):
    primary_import = None
    
    def __init__(self, api_config):
        """Build wrapper class to use optimizer in benchmark.

        Parameters
        ----------
        api_config : dict-like of dict-like
            Configuration of the optimization variables. See API description.
        """
        AbstractOptimizer.__init__(self, api_config)
        
        api_space = BoEI.api_manipulator(api_config)  # used for GPyOpt initialization

        self.space_x = JointSpace(api_config) # used for warping & unwarping of new suggestions & observations

        self.hasCat, self.cat_vec = BoEI.is_cat(api_config)
        
        self.dim = len(self.space_x.get_bounds())

        self.objective = GPyOpt.core.task.SingleObjective(None)

        self.space = GPyOpt.Design_space(api_space)
        
        self.model = GPyOpt.models.GPModel(optimize_restarts=5, verbose=False)

        self.aquisition_optimizer = GPyOpt.optimization.AcquisitionOptimizer(self.space)

        self.aquisition = AcquisitionEI(self.model, self.space, optimizer=self.aquisition_optimizer, cost_withGradients=None)

        self.batch_size = None

    # return an array that indicates which variables are categorical
    @staticmethod
    def is_cat(API):
        api_items = sorted(API.items())  # keep positions aligned with JointSpace's sorted parameter order
        cat_vec = []
        counter = 0
        hasCat = False

        for item in api_items:
            if item[1]['type'] == 'cat':
                hasCat = True
                singleCat = [counter, len(item[1]['values'])]
                cat_vec.append(singleCat)
            counter += 1            
        cat_vec = np.array(cat_vec)
        
        return hasCat, cat_vec
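
    # For example (hypothetical config): with sorted parameters
    # ['activation', 'lr'] where 'activation' is
    # {'type': 'cat', 'values': ('relu', 'tanh', 'linear')},
    # is_cat returns hasCat=True and cat_vec=[[0, 3]]
    # (categorical at sorted position 0, with three values).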

        
    @staticmethod
    def api_manipulator(api_config):
        api = deepcopy(api_config)
        api_space = []
        api_items = api.items()
        api_items = sorted(api_items)   # make sure the entries are aligned with warping
        for item in api_items:
            variable = {}
    
            # get name
            variable['name'] = item[0]
            if item[1]['type'] == 'real':                  #real input
                if 'range' in item[1]:
                    variable['type'] = 'continuous'       #continuous domain
                if 'values' in item[1]:
                    variable['type'] = 'discrete'         #discrete domain         
            elif item[1]['type'] == 'int':                # int input
                variable['type'] = 'discrete'
                if 'range' in item[1]:                    # transform into discrete domain
                    lb = item[1]['range'][0]
                    ub = item[1]['range'][1]
                    values_array = np.arange(lb, ub+1)
                    del item[1]['range']
                    item[1]['values'] = values_array            
            elif item[1]['type'] == 'cat':
                variable['type'] = 'categorical'        
            elif item[1]['type'] == 'bool':
                variable['type'] = 'categorical'
         
            # transform space ('space' may be absent; default to 'linear' as in the API)
            if (item[1]['type'] == 'real') or (item[1]['type'] == 'int'):
                item[1].setdefault('space', 'linear')
                if item[1]['space'] == 'log':
                    if 'range' in item[1]:
                        lb = item[1]['range'][0]
                        ub = item[1]['range'][1]
                        assert lb > 0
                        assert ub > 0
                        item[1]['range'] = (math.log(lb), math.log(ub))
                    if 'values' in item[1]:
                        item[1]['values'] = np.log(item[1]['values'])
                if item[1]['space'] == 'logit':
                    if 'range' in item[1]:
                        lb = item[1]['range'][0]
                        ub = item[1]['range'][1] 
                        assert lb > 0 and lb < 1
                        assert ub > 0 and ub < 1
                        lb_new = math.log(lb/(1.0-lb))
                        ub_new = math.log(ub/(1.0-ub))
                        item[1]['range'] = (lb_new, ub_new)
                    if 'values' in item[1]:
                        values_arr = item[1]['values']
                        item[1]['values'] = np.log(values_arr/(1.0-values_arr))
                if item[1]['space'] == 'bilog':
                    if 'range' in item[1]:
                        lb = item[1]['range'][0]
                        ub = item[1]['range'][1] 
                        lb_new = math.log(1.0+lb) if lb >= 0.0 else -math.log(1.0-lb)
                        ub_new = math.log(1.0+ub) if ub >= 0.0 else -math.log(1.0-ub)
                        item[1]['range'] = (lb_new, ub_new)
                    if 'values' in item[1]:
                        values_arr = item[1]['values']
                        item[1]['values'] = np.sign(values_arr) * np.log(1.0 + np.abs(values_arr))
                        
   
            #get domain
            if (item[1]['type'] == 'real') or (item[1]['type'] == 'int'):        
                if 'range' in item[1]:
                    variable['domain'] = item[1]['range']
                if 'values' in item[1]:
                    variable['domain'] = tuple(item[1]['values'])
            
            if item[1]['type'] == 'cat':
                ub = len(item[1]['values'])
                values_array = np.arange(0, ub)
                variable['domain'] = tuple(values_array)
        
            if item[1]['type'] == 'bool':
                variable['domain'] = (0, 1)
    
            api_space.append(variable)
        
        
        return api_space
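
    # For example (hypothetical config): {'lr': {'type': 'real', 'space': 'log',
    # 'range': (1e-4, 1e-1)}} becomes
    # [{'name': 'lr', 'type': 'continuous',
    #   'domain': (math.log(1e-4), math.log(1e-1))}].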
        
        
    def suggest(self, n_suggestions=1):
        """Get suggestions from the optimizer.

        Parameters
        ----------
        n_suggestions : int
            Desired number of parallel suggestions in the output

        Returns
        -------
        next_guess : list of dict
            List of `n_suggestions` suggestions to evaluate the objective
            function. Each suggestion is a dictionary where each key
            corresponds to a parameter being optimized.
        """
       
        if self.batch_size is None:
            next_guess = GPyOpt.experiment_design.initial_design('random', self.space, n_suggestions)
        else:
            next_guess = self.bo._compute_next_evaluations()  # array of shape (n_suggestions, self.dim)

        # preprocess the array from GPyOpt for unwarping
        if self.hasCat:
            new_suggest = []
            cat_vec_pos = self.cat_vec[:,0]
            cat_vec_len = self.cat_vec[:,1]
            
            # for each suggestion in the batch
            for i in range(len(next_guess)):
                index = 0
                single_suggest = []
                # parse the suggestion, expanding each categorical entry into a one-hot block
                for j in range(len(next_guess[0])):
                    if j != cat_vec_pos[index]:
                        single_suggest.append(next_guess[i][j])
                    else:
                        # a categorical variable: one-hot encode its index
                        value = int(next_guess[i][j])
                        vec = [0.] * cat_vec_len[index]
                        vec[value] = 1.
                        single_suggest.extend(vec)
                        index += 1
                        index = min(index, len(cat_vec_pos) - 1)
                # check the expanded suggestion has the expected length
                assert len(single_suggest) == len(next_guess[0]) + sum(cat_vec_len) - len(self.cat_vec)
                new_suggest.append(single_suggest)
            assert len(new_suggest) == len(next_guess)
           
            new_suggest = np.array(new_suggest).reshape(
                len(next_guess), len(next_guess[0]) + sum(cat_vec_len) - len(self.cat_vec))
            next_guess = new_suggest
             

        suggestions = self.space_x.unwarp(next_guess)
        
        return suggestions
    
    
    def observe(self, X, y):
        """Feed an observation back.

        Parameters
        ----------
        X : list of dict-like
            Places where the objective function has already been evaluated.
            Each suggestion is a dictionary where each key corresponds to a
            parameter being optimized.
        y : array-like, shape (n,)
            Corresponding values where objective has been evaluated
        """
        assert len(X) == len(y)
        
        XX = self.space_x.warp(X)
        yy = np.array(y)[:, None]


        # preprocess XX after warping into the integer encoding GPyOpt uses
        if self.hasCat:
            new_XX = np.zeros((len(XX), self.dim))
            cat_vec_pos = self.cat_vec[:,0]
            cat_vec_len = self.cat_vec[:,1]

            for i in range(len(XX)):
                index = 0  # for cat_vec
                traverse = 0 # for XX
                for j in range(self.dim):
                    if j != cat_vec_pos[index]:
                        # non-categorical entries pass through unchanged (no int truncation)
                        new_XX[i][j] = XX[i][traverse]
                        traverse += 1
                    else:
                        for v in range(cat_vec_len[index]):
                            if XX[i][traverse + v] == 1.0:
                                new_XX[i][j] = int(v)
                        traverse += cat_vec_len[index]
                        index += 1
                        index = min(index, len(cat_vec_pos)-1)

            XX = new_XX 
            
        if self.batch_size is None:
            self.X_init = XX
            self.batch_size = len(XX)
            self.Y_init = yy
            # the evaluator is unused here, but is required to instantiate ModularBayesianOptimization
            self.evaluator = GPyOpt.core.evaluators.RandomBatch(acquisition=self.aquisition, batch_size = self.batch_size)
            self.bo = GPyOpt.methods.ModularBayesianOptimization(self.model, self.space, self.objective, self.aquisition, self.evaluator, self.X_init, Y_init=self.Y_init)
            self.X = self.X_init
            self.Y = self.Y_init
        else:
            # update the stack of all the evaluated X's and y's
            self.bo.X = np.vstack((self.bo.X, deepcopy(XX)))
            self.bo.Y = np.vstack((self.bo.Y, deepcopy(yy)))

        # update the GP model with all observations so far
        self.bo._update_model('stats')
        # bo has attribute bo.num_acquisitions        
        self.bo.num_acquisitions += 1  
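
The categorical bookkeeping in BoEI.suggest and BoEI.observe above is a round trip between GPyOpt's integer category encoding and the one-hot blocks that JointSpace warps and unwarps. A standalone sketch of that mapping (encode_onehot and decode_onehot are illustrative names, not part of the class):

import numpy as np

def encode_onehot(index, n_values):
    # GPyOpt integer category -> one-hot block for JointSpace.unwarp
    vec = np.zeros(n_values)
    vec[int(index)] = 1.0
    return vec

def decode_onehot(block):
    # one-hot block from JointSpace.warp -> GPyOpt integer category
    return int(np.argmax(block))

assert decode_onehot(encode_onehot(2, 4)) == 2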
Example #10
class steade(AbstractOptimizer):
    primary_import = "pysot"

    def __init__(self, api_config):
        """Build wrapper class to use an optimizer in benchmark.
        Parameters
        ----------
        api_config : dict-like of dict-like
            Configuration of the optimization variables. See API description.
        """
        AbstractOptimizer.__init__(self, api_config)

        self.search_space = JointSpace(api_config)
        self.bounds = self.search_space.get_bounds()
        self.iter = 0
        # Sets up the optimization problem (needs self.bounds)
        self.create_opt_prob()
        self.max_evals = np.iinfo(np.int32).max  # NOTE: Largest possible int
        self.batch_size = None
        self.history = []
        self.proposals = []
        # Population-based parameters in DE
        self.population = []
        self.fitness = []
        self.F = 0.7
        self.Cr = 0.7
        # For bayes opt
        self.dim = len(self.search_space.param_list)
        self.torch_bounds = torch.from_numpy(self.search_space.get_bounds().T)
        self.min_max_bounds = torch.from_numpy(
            np.stack([np.zeros(self.dim),
                      np.ones(self.dim)]))
        self.archive = []
        self.arc_fitness = []

    def create_opt_prob(self):
        """Create an optimization problem object."""
        opt = OptimizationProblem()
        opt.lb = self.bounds[:, 0]  # In warped space
        opt.ub = self.bounds[:, 1]  # In warped space
        opt.dim = len(self.bounds)
        opt.cont_var = np.arange(len(self.bounds))
        opt.int_var = []
        assert len(opt.cont_var) + len(opt.int_var) == opt.dim
        opt.objfun = None
        self.opt = opt

    def start(self, max_evals):
        """Starts a new pySOT run."""
        self.history = []
        self.proposals = []

        # Symmetric Latin hypercube design
        des_pts = max([self.batch_size, 2 * (self.opt.dim + 1)])
        slhd = SymmetricLatinHypercube(dim=self.opt.dim, num_pts=des_pts)

        # Warped RBF interpolant
        rbf = RBFInterpolant(dim=self.opt.dim,
                             lb=self.opt.lb,
                             ub=self.opt.ub,
                             kernel=CubicKernel(),
                             tail=LinearTail(self.opt.dim),
                             eta=1e-4)

        # Optimization strategy
        self.strategy = DYCORSStrategy(
            max_evals=self.max_evals,
            opt_prob=self.opt,
            exp_design=slhd,
            surrogate=rbf,
            asynchronous=True,
            batch_size=1,
            use_restarts=True,
        )

    def _suggest(self, n_suggestions=1):
        """Get a suggestion from the optimizer.
        Parameters
        ----------
        n_suggestions : int
            Desired number of parallel suggestions in the output
        Returns
        -------
        next_guess : list of dict
            List of `n_suggestions` suggestions to evaluate the objective
            function. Each suggestion is a dictionary where each key
            corresponds to a parameter being optimized.
        """

        if self.batch_size is None:  # First call to suggest
            self.batch_size = n_suggestions
            self.start(self.max_evals)

        # Set the tolerances pretending like we are running batch
        d, p = float(self.opt.dim), float(n_suggestions)
        self.strategy.failtol = p * int(max(np.ceil(d / p), np.ceil(4 / p)))

        # Now we can make suggestions
        x_w = []
        self.proposals = []
        for _ in range(n_suggestions):
            proposal = self.strategy.propose_action()
            record = EvalRecord(proposal.args, status="pending")
            proposal.record = record
            proposal.accept()  # This triggers all the callbacks

            # It is possible that pySOT proposes a previously evaluated point
            # when all variables are integers, so we just abort in this case
            # since we have likely converged anyway. See PySOT issue #30.
            x = list(proposal.record.params)  # From tuple to list
            x_unwarped, = self.search_space.unwarp(x)
            if x_unwarped in self.history:
                warnings.warn("pySOT proposed the same point twice")
                self.start(self.max_evals)
                return self.suggest(n_suggestions=n_suggestions)

            # NOTE: Append unwarped to avoid rounding issues
            self.history.append(copy(x_unwarped))
            self.proposals.append(proposal)
            x_w.append(copy(x_unwarped))

        return x_w

    @staticmethod
    def make_model(train_x, train_y, state_dict=None):
        """
        Define the models based on the observed data
        :param train_x: The design points/ trial solutions
        :param train_y: The objective functional value of the trial solutions used for model fitting
        :param state_dict: Dictionary storing the parameters of the GP model
        :return:
        """
        try:
            model = SingleTaskGP(train_x, train_y)
            mll = ExactMarginalLogLikelihood(model.likelihood, model)
            # load state dict if it is passed
            if state_dict is not None:
                model.load_state_dict(state_dict)
        except Exception as e:
            print('Exception: {} in make_model()'.format(e))
            raise  # re-raise: model/mll would otherwise be unbound below

        return model, mll
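
    # NOTE: warm-starting via state_dict reuses hyperparameters (lengthscales,
    # noise) learned on earlier data, so refitting on the grown archive
    # typically needs fewer marginal-likelihood optimization steps.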

    def get_bayes_pop(self, n_suggestions):
        """
        Parameters
        ----------
        n_suggestions: Number of new suggestions/trial solutions to generate using BO
        Returns
        The new set of trial solutions obtained by optimizing the acquisition function
        -------
        """

        try:
            candidates, _ = optimize_acqf(
                acq_function=self.acquisition,
                bounds=self.min_max_bounds,
                q=n_suggestions,
                num_restarts=10,
                raw_samples=512,  # used for initialization heuristic
                sequential=True)

            bayes_pop = unnormalize(candidates, self.torch_bounds).numpy()
        except Exception as e:
            print('Error in get_bayes_pop(): {}'.format(e))
            raise  # propagate so mutate() can fall back to plain DE/rand/2

        population = self.search_space.unwarp(
            bayes_pop)  # Translate the solution back to the original space

        return population

    def mutate(self, n_suggestions):
        """

        Parameters
        ----------
        n_suggestions
        Returns
        -------
        """

        parents = self.search_space.warp(self.population)
        surrogates = self.search_space.warp(self._suggest(n_suggestions))

        # Pop out 'n_suggestions' number of solutions from the archives since they will be modified
        for _ in range(n_suggestions):
            self.history.pop()
            # self.proposals.pop()

        # Applying DE mutation; for more details refer to https://ieeexplore.ieee.org/abstract/document/5601760
        # NOTE: drawing two distinct shifts from randrange(1, n_suggestions - 1)
        # assumes n_suggestions >= 4; smaller batches would loop forever or raise
        a, b = 0, 0
        while a == b:
            a = random.randrange(1, n_suggestions - 1)
            b = random.randrange(1, n_suggestions - 1)

        rand1 = random.sample(range(0, n_suggestions), n_suggestions)
        rand2 = [(r + a) % n_suggestions for r in rand1]
        rand3 = [(r + b) % n_suggestions for r in rand1]

        try:
            bayes_pop = self.search_space.warp(
                self.get_bayes_pop(n_suggestions))

            # Bayesian mutation inspired by DE/rand/2 mutation
            mutants = bayes_pop[rand1, :] + \
                      self.F * (surrogates[rand2, :] - surrogates[rand3, :]) + \
                      1 / self.iter * np.random.random(parents.shape) * (parents[rand2, :] - parents[rand3, :])

        except Exception as e:
            # fall back to plain DE/rand/2 mutation when BO fails (e.g. a decomposition error)
            mutants = parents[rand1, :] + \
                      self.F * (surrogates[rand2, :] - surrogates[rand3, :]) + \
                      1 / self.iter * np.random.random(parents.shape) * (parents[rand2, :] - parents[rand3, :])

        # Check the bound constraints and do (binomial) crossover to generate the offspring (donor vectors)
        offsprings = deepcopy(parents)
        bounds = self.search_space.get_bounds()
        dims = len(bounds)

        for i in range(n_suggestions):
            j_rand = random.randrange(dims)

            for j in range(dims):
                # Check if the bound-constraints are satisfied or not
                if mutants[i, j] < bounds[j, 0]:
                    mutants[i, j] = bounds[j, 0]  # surrogates[i, j]
                if bounds[j, 1] < mutants[i, j]:
                    mutants[i, j] = bounds[j, 1]  # surrogates[i, j]

                if random.random() <= self.Cr or j == j_rand:
                    offsprings[i, j] = mutants[i, j]

        # Translate the offspring back into the original space
        population = self.search_space.unwarp(offsprings)

        # Now insert the solutions back into the history.
        for i in range(n_suggestions):
            self.history.append(population[i])

        return population

    def suggest(self, n_suggestions=1):
        """Get a suggestion from the optimizer.
        Parameters
        ----------
        n_suggestions : int
            Desired number of parallel suggestions in the output
        Returns
        -------
        next_guess : list of dict
            List of `n_suggestions` suggestions to evaluate the objective
            function. Each suggestion is a dictionary where each key
            corresponds to a parameter being optimized.
        """

        self.iter += 1
        lamda = 10  # defines the transition point in the algorithm

        if self.iter < lamda:
            population = self._suggest(n_suggestions)
        else:
            population = self.mutate(n_suggestions)

        return population

    def _observe(self, x, y):
        # Find the matching proposal and execute its callbacks
        idx = [x == xx for xx in self.history]
        i = np.argwhere(
            idx)[0].item()  # Pick the first index if there are ties
        proposal = self.proposals[i]
        proposal.record.complete(y)
        self.proposals.pop(i)
        self.history.pop(i)

    def observe(self, X, y):
        """Send an observation of a suggestion back to the optimizer.
        Parameters
        ----------
        X : list of dict-like
            Places where the objective function has already been evaluated.
            Each suggestion is a dictionary where each key corresponds to a
            parameter being optimized.
        y : array-like, shape (n,)
            Corresponding values where objective has been evaluated
        """
        try:
            assert len(X) == len(y)
            c = 0

            for x_, y_ in zip(X, y):
                # Archive stores all the solutions
                self.archive.append(x_)
                self.arc_fitness.append(
                    -y_)  # As BoTorch solves a maximization problem

                if self.iter == 1:
                    self.population.append(x_)
                    self.fitness.append(y_)
                else:
                    if y_ <= self.fitness[c]:
                        self.population[c] = x_
                        self.fitness[c] = y_

                    c += 1

                # Just ignore any non-finite observations we got; unclear if this is the right thing
                if np.isfinite(y_):
                    self._observe(x_, y_)

            # Transform the data (seen till now) into tensors and train the model
            train_x = normalize(torch.from_numpy(
                self.search_space.warp(self.archive)),
                                bounds=self.torch_bounds)
            train_y = standardize(
                torch.from_numpy(
                    np.array(self.arc_fitness).reshape(len(self.arc_fitness),
                                                       1)))
            # Fit the GP based on the actual observed values
            if self.iter == 1:
                self.model, mll = self.make_model(train_x, train_y)
            else:
                self.model, mll = self.make_model(train_x, train_y,
                                                  self.model.state_dict())

            # mll.train()
            fit_gpytorch_model(mll)

            # define the sampler
            sampler = SobolQMCNormalSampler(num_samples=512)

            # define the acquisition function
            self.acquisition = qExpectedImprovement(model=self.model,
                                                    best_f=train_y.max(),
                                                    sampler=sampler)

        except Exception as e:
            print('Error: {} in observe()'.format(e))
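
Stripped of the Bayesian population and the 1/iter-decayed difference term, the update in steade.mutate is a DE/rand/2-style mutation. A self-contained sketch under that simplification (de_rand_2_mutation is an illustrative name; pop is a (n, d) array of warped parents with n >= 5):

import numpy as np

def de_rand_2_mutation(pop, F=0.7, rng=np.random):
    # One mutant per row: base + F * (r2 - r3) + F * (r4 - r5)
    n = len(pop)
    assert n >= 5, "DE/rand/2 needs five distinct population members"
    mutants = np.empty_like(pop)
    for i in range(n):
        r1, r2, r3, r4, r5 = rng.choice(n, size=5, replace=False)
        mutants[i] = pop[r1] + F * (pop[r2] - pop[r3]) + F * (pop[r4] - pop[r5])
    return mutants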
Example #11
class tuSOTOptimizer(AbstractOptimizer):
    primary_import = "pysot"

    def __init__(self, api_config):
        """Build wrapper class to use an optimizer in benchmark.

        Parameters
        ----------
        api_config : dict-like of dict-like
            Configuration of the optimization variables. See API description.
        """
        AbstractOptimizer.__init__(self, api_config)

        self.space_x = JointSpace(api_config)
        self.bounds = self.space_x.get_bounds()
        self.create_opt_prob(
        )  # Sets up the optimization problem (needs self.bounds)
        self.max_evals = np.iinfo(np.int32).max  # NOTE: Largest possible int
        self.turbo_batch_size = None
        self.pysot_batch_size = None
        self.history = []
        self.proposals = []
        self.lb, self.ub = self.bounds[:, 0], self.bounds[:, 1]
        self.dim = len(self.bounds)

        self.turbo = Turbo1(
            f=None,
            lb=self.bounds[:, 0],
            ub=self.bounds[:, 1],
            n_init=2 * self.dim + 1,
            max_evals=self.max_evals,
            batch_size=4,  # We need to update this later
            verbose=False,
        )

        # hyperopt
        self.random = np_random

        space, self.round_to_values = tuSOTOptimizer.get_hyperopt_dimensions(
            api_config)
        self.domain = Domain(dummy_f, space, pass_expr_memo_ctrl=None)
        self.trials = Trials()

        # Some bookkeeping, like the opentuner wrapper
        self.trial_id_lookup = {}

        # Store just for data validation
        self.param_set_chk = frozenset(api_config.keys())

    def restart(self):
        self.turbo._restart()
        self.turbo._X = np.zeros((0, self.turbo.dim))
        self.turbo._fX = np.zeros((0, 1))
        X_init = latin_hypercube(self.turbo.n_init, self.dim)
        self.X_init = from_unit_cube(X_init, self.lb, self.ub)

    def create_opt_prob(self):
        """Create an optimization problem object."""
        opt = OptimizationProblem()
        opt.lb = self.bounds[:, 0]  # In warped space
        opt.ub = self.bounds[:, 1]  # In warped space
        opt.dim = len(self.bounds)
        opt.cont_var = np.arange(len(self.bounds))
        opt.int_var = []
        assert len(opt.cont_var) + len(opt.int_var) == opt.dim
        opt.objfun = None
        self.opt = opt

    def start(self):
        """Starts a new pySOT run."""
        self.history = []
        self.proposals = []

        # Symmetric Latin hypercube design
        des_pts = max([self.pysot_batch_size, 2 * (self.opt.dim + 1)])
        slhd = SymmetricLatinHypercube(dim=self.opt.dim, num_pts=des_pts)

        # Warped RBF interpolant
        rbf = RBFInterpolant(dim=self.opt.dim,
                             kernel=CubicKernel(),
                             tail=LinearTail(self.opt.dim),
                             eta=1e-4)
        rbf = SurrogateUnitBox(rbf, lb=self.opt.lb, ub=self.opt.ub)

        # Optimization strategy
        self.strategy = SRBFStrategy(
            max_evals=self.max_evals,
            opt_prob=self.opt,
            exp_design=slhd,
            surrogate=rbf,
            asynchronous=True,
            batch_size=1,
            use_restarts=True,
        )

    @staticmethod
    def hashable_dict(d):
        hashable_object = frozenset(d.items())
        return hashable_object

    @staticmethod
    def get_hyperopt_dimensions(api_config):
        param_list = sorted(api_config.keys())

        space = {}
        round_to_values = {}
        for param_name in param_list:
            param_config = api_config[param_name]

            param_type = param_config["type"]

            param_space = param_config.get("space", None)
            param_range = param_config.get("range", None)
            param_values = param_config.get("values", None)

            # Some setup for the case where a whitelist of values is provided:
            values_only_type = param_type in ("cat", "ordinal")
            if (param_values is not None) and (not values_only_type):
                assert param_range is None
                param_values = np.unique(param_values)
                param_range = (param_values[0], param_values[-1])
                round_to_values[param_name] = interp1d(
                    param_values,
                    param_values,
                    kind="nearest",
                    fill_value="extrapolate")

            if param_type == "int":
                low, high = param_range
                if param_space in ("log", "logit"):
                    space[param_name] = hp.qloguniform(param_name, np.log(low),
                                                       np.log(high), 1)
                else:
                    space[param_name] = hp.quniform(param_name, low, high, 1)
            elif param_type == "bool":
                assert param_range is None
                assert param_values is None
                space[param_name] = hp.choice(param_name, (False, True))
            elif param_type in ("cat", "ordinal"):
                assert param_range is None
                space[param_name] = hp.choice(param_name, param_values)
            elif param_type == "real":
                low, high = param_range
                if param_space in ("log", "logit"):
                    space[param_name] = hp.loguniform(param_name, np.log(low),
                                                      np.log(high))
                else:
                    space[param_name] = hp.uniform(param_name, low, high)
            else:
                assert False, "type %s not handled in API" % param_type

        return space, round_to_values
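
    # For example (hypothetical config): {'lr': {'type': 'real', 'space': 'log',
    # 'range': (1e-4, 1e-1)}} maps to
    # {'lr': hp.loguniform('lr', np.log(1e-4), np.log(1e-1))}
    # and round_to_values stays empty.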

    def get_trial(self, trial_id):
        for trial in self.trials._dynamic_trials:
            if trial["tid"] == trial_id:
                assert isinstance(trial, dict)
                # Make sure it is the right kind of dict
                assert "state" in trial and "result" in trial
                assert trial["state"] == JOB_STATE_NEW
                return trial
        assert False, "No matching trial ID"

    def cleanup_guess(self, x_guess):
        assert isinstance(x_guess, dict)
        # Also, check the keys are only the vars we are searching over:
        assert frozenset(x_guess.keys()) == self.param_set_chk

        # Do the rounding
        # Make a copy to be safe, and also unpack singletons
        # We may also need to consider clip_chk at some point like opentuner
        x_guess = {k: only(x_guess[k]) for k in x_guess}
        for param_name, round_f in self.round_to_values.items():
            x_guess[param_name] = round_f(x_guess[param_name])
        # Also ensure this is correct dtype so sklearn is happy
        x_guess = {
            k: DTYPE_MAP[self.api_config[k]["type"]](x_guess[k])
            for k in x_guess
        }
        return x_guess

    def pysot_suggest(self, n_suggestions=1):
        if self.pysot_batch_size is None:  # First call to suggest
            self.pysot_batch_size = n_suggestions
            self.start()

        # Set the tolerances pretending like we are running batch
        d, p = float(self.opt.dim), float(n_suggestions)
        self.strategy.failtol = p * int(max(np.ceil(d / p), np.ceil(4 / p)))

        # Now we can make suggestions
        x_w = []
        self.proposals = []
        for _ in range(n_suggestions):
            proposal = self.strategy.propose_action()
            record = EvalRecord(proposal.args, status="pending")
            proposal.record = record
            proposal.accept()  # This triggers all the callbacks

            # It is possible that pySOT proposes a previously evaluated point
            # when all variables are integers, so we just abort in this case
            # since we have likely converged anyway. See PySOT issue #30.
            x = list(proposal.record.params)  # From tuple to list
            x_unwarped, = self.space_x.unwarp(x)
            if x_unwarped in self.history:
                warnings.warn("pySOT proposed the same point twice")
                self.start()
                return self.suggest(n_suggestions=n_suggestions)

            # NOTE: Append unwarped to avoid rounding issues
            self.history.append(copy(x_unwarped))
            self.proposals.append(proposal)
            x_w.append(copy(x_unwarped))

        return x_w

    def pysot_get_suggest(self, suggests):
        turbo_suggest_warps = self.space_x.warp(suggests)
        for i, warps in enumerate(turbo_suggest_warps):
            proposal = self.strategy.make_proposal(warps)
            proposal.add_callback(self.strategy.on_initial_proposal)
            record = EvalRecord(proposal.args, status="pending")
            proposal.record = record
            proposal.accept()

            self.history.append(copy(suggests[i]))
            self.proposals.append(proposal)

    def turbo_suggest(self, n_suggestions=1):
        if self.turbo_batch_size is None:  # Remember the batch size on the first call to suggest
            self.turbo_batch_size = n_suggestions
            self.turbo.batch_size = n_suggestions
            self.turbo.failtol = np.ceil(
                np.max([
                    4.0 / self.turbo_batch_size,
                    self.dim / self.turbo_batch_size
                ]))
            self.turbo.n_init = max([self.turbo.n_init, self.turbo_batch_size])
            self.restart()

        X_next = np.zeros((n_suggestions, self.dim))

        # Pick from the initial points
        n_init = min(len(self.X_init), n_suggestions)
        if n_init > 0:
            X_next[:n_init] = deepcopy(self.X_init[:n_init, :])
            self.X_init = self.X_init[
                n_init:, :]  # Remove these pending points

        # Get remaining points from TuRBO
        n_adapt = n_suggestions - n_init
        if n_adapt > 0:
            if len(self.turbo._X
                   ) > 0:  # Use random points if we can't fit a GP
                X = to_unit_cube(deepcopy(self.turbo._X), self.lb, self.ub)
                fX = copula_standardize(deepcopy(
                    self.turbo._fX).ravel())  # Use Copula
                X_cand, y_cand, _ = self.turbo._create_candidates(
                    X,
                    fX,
                    length=self.turbo.length,
                    n_training_steps=100,
                    hypers={})
                X_next[-n_adapt:, :] = self.turbo._select_candidates(
                    X_cand, y_cand)[:n_adapt, :]
                X_next[-n_adapt:, :] = from_unit_cube(X_next[-n_adapt:, :],
                                                      self.lb, self.ub)

        # Unwarp the suggestions
        suggestions = self.space_x.unwarp(X_next)
        return suggestions

    def _hyperopt_suggest(self):
        new_ids = self.trials.new_trial_ids(1)
        assert len(new_ids) == 1
        self.trials.refresh()

        seed = random_seed(self.random)
        new_trials = tpe.suggest(new_ids, self.domain, self.trials, seed)
        assert len(new_trials) == 1

        self.trials.insert_trial_docs(new_trials)
        self.trials.refresh()

        new_trial, = new_trials  # extract singleton
        return new_trial

    def _hyperopt_transform(self, x):
        new_id = self.trials.new_trial_ids(1)[0]

        domain = self.domain
        rng = np.random.RandomState(1)
        idxs, vals = pyll.rec_eval(domain.s_idxs_vals,
                                   memo={
                                       domain.s_new_ids: [new_id],
                                       domain.s_rng: rng,
                                   })
        rval_miscs = [dict(tid=new_id, cmd=domain.cmd, workdir=domain.workdir)]
        rval_results = domain.new_result()
        for (k, _) in vals.items():
            vals[k][0] = x[k]
        miscs_update_idxs_vals(rval_miscs, idxs, vals)
        rval_docs = self.trials.new_trial_docs([new_id], [None], rval_results,
                                               rval_miscs)

        return rval_docs[0]

    def hyperopt_suggest(self, n_suggestions=1):
        assert n_suggestions >= 1, "invalid value for n_suggestions"

        # Get the new trials; it seems hyperopt either uses random search or
        # guesses one at a time anyway, so we might as well call serially.
        new_trials = [self._hyperopt_suggest() for _ in range(n_suggestions)]

        X = []
        for trial in new_trials:
            x_guess = self.cleanup_guess(trial["misc"]["vals"])
            X.append(x_guess)

            # Build lookup to get original trial object
            x_guess_ = tuSOTOptimizer.hashable_dict(x_guess)
            assert x_guess_ not in self.trial_id_lookup, "the suggestions should not already be in the trial dict"
            self.trial_id_lookup[x_guess_] = trial["tid"]

        assert len(X) == n_suggestions
        return X

    def hyperopt_get_suggest(self, suggests):
        trials = [self._hyperopt_transform(x) for x in suggests]
        for trial in trials:
            x_guess = self.cleanup_guess(trial["misc"]["vals"])
            x_guess_ = tuSOTOptimizer.hashable_dict(x_guess)
            assert x_guess_ not in self.trial_id_lookup, "the suggestions should not already be in the trial dict"
            self.trial_id_lookup[x_guess_] = trial["tid"]
        self.trials.insert_trial_docs(trials)
        self.trials.refresh()

    def suggest(self, n_suggestions=1):
        if n_suggestions == 1:
            return self.turbo_suggest(n_suggestions)
        else:
            t_suggestion = n_suggestions // 2
            # p_suggestion = int((n_suggestions - t_suggestion) * 3/4)
            h_suggestion = n_suggestions - t_suggestion
            turbo_suggest = self.turbo_suggest(t_suggestion)
            # pysot_suggest = self.pysot_suggest(p_suggestion)
            hyperopt_suggest = self.hyperopt_suggest(h_suggestion)
            self.hyperopt_get_suggest(turbo_suggest)
            # self.pysot_get_suggest(turbo_suggest + hyperopt_suggest)
            return turbo_suggest + hyperopt_suggest

    def _observe(self, x, y):
        # Find the matching proposal and execute its callbacks
        idx = [x == xx for xx in self.history]
        if np.any(idx):
            i = np.argwhere(
                idx)[0].item()  # Pick the first index if there are ties
            proposal = self.proposals[i]
            proposal.record.complete(y)
            self.proposals.pop(i)
            self.history.pop(i)

    def observe(self, X, y):
        """Send an observation of a suggestion back to the optimizer.

        Parameters
        ----------
        X : list of dict-like
            Places where the objective function has already been evaluated.
            Each suggestion is a dictionary where each key corresponds to a
            parameter being optimized.
        y : array-like, shape (n,)
            Corresponding values where objective has been evaluated
        """
        assert len(X) == len(y)

        # # pysot observe
        # for x_, y_ in zip(X, y):
        #     # Just ignore, any inf observations we got, unclear if right thing
        #     if np.isfinite(y_):
        #         self._observe(x_, y_)

        # turbo observe
        XX, yy = self.space_x.warp(X), np.array(y)[:, None]

        if len(self.turbo._fX) >= self.turbo.n_init:
            self.turbo._adjust_length(yy)

        self.turbo.n_evals += self.turbo_batch_size

        self.turbo._X = np.vstack((self.turbo._X, deepcopy(XX)))
        self.turbo._fX = np.vstack((self.turbo._fX, deepcopy(yy)))
        self.turbo.X = np.vstack((self.turbo.X, deepcopy(XX)))
        self.turbo.fX = np.vstack((self.turbo.fX, deepcopy(yy)))

        # Check for a restart
        if self.turbo.length < self.turbo.length_min:
            self.restart()

        # hyperopt observe
        for x_guess, y_ in zip(X, y):
            x_guess_ = tuSOTOptimizer.hashable_dict(x_guess)
            assert x_guess_ in self.trial_id_lookup, "Appears to be a guess that did not originate from suggest"
            trial_id = self.trial_id_lookup.pop(x_guess_)
            trial = self.get_trial(trial_id)
            assert self.cleanup_guess(
                trial["misc"]["vals"]
            ) == x_guess, "trial ID not consistent with x values stored"

            # Cast to float to ensure native type
            result = {"loss": float(y_), "status": STATUS_OK}
            trial["state"] = JOB_STATE_DONE
            trial["result"] = result
        self.trials.refresh()
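
tuSOTOptimizer.suggest splits each batch between TuRBO and hyperopt and then registers the TuRBO points in hyperopt's trial store, so both models observe every result. A quick standalone illustration of the split arithmetic:

for n_suggestions in (1, 2, 5, 8):
    if n_suggestions == 1:
        t, h = 1, 0  # a single suggestion goes entirely to TuRBO
    else:
        t = n_suggestions // 2  # TuRBO takes the floor half
        h = n_suggestions - t   # hyperopt takes the remainder
    print('{} -> {} turbo + {} hyperopt'.format(n_suggestions, t, h))
# prints: 1 -> 1 turbo + 0 hyperopt, 2 -> 1 + 1, 5 -> 2 + 3, 8 -> 4 + 4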