Example 1
    def start(self, max_evals):
        """Starts a new pySOT run."""
        self.history = []
        self.proposals = []

        # Symmetric Latin hypercube design
        des_pts = max([self.batch_size, 2 * (self.opt.dim + 1)])
        slhd = SymmetricLatinHypercube(dim=self.opt.dim, num_pts=des_pts)

        # Warped RBF interpolant
        rbf = RBFInterpolant(dim=self.opt.dim,
                             lb=self.opt.lb,
                             ub=self.opt.ub,
                             kernel=CubicKernel(),
                             tail=LinearTail(self.opt.dim),
                             eta=1e-4)

        # Optimization strategy
        self.strategy = DYCORSStrategy(
            max_evals=self.max_evals,
            opt_prob=self.opt,
            exp_design=slhd,
            surrogate=rbf,
            asynchronous=True,
            batch_size=1,
            use_restarts=True,
        )
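This fragment assumes the pySOT design, surrogate, and strategy classes are already in scope. A minimal import block (assuming the pySOT 0.2+ module layout) would be:

from pySOT.experimental_design import SymmetricLatinHypercube
from pySOT.strategy import DYCORSStrategy
from pySOT.surrogate import CubicKernel, LinearTail, RBFInterpolant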
Example 2
def test_dycors_async():
    max_evals = 200
    rbf = RBFInterpolant(dim=ackley.dim,
                         lb=ackley.lb,
                         ub=ackley.ub,
                         kernel=CubicKernel(),
                         tail=LinearTail(ackley.dim))
    slhd = SymmetricLatinHypercube(dim=ackley.dim,
                                   num_pts=2 * (ackley.dim + 1))

    # Create a strategy and a controller
    controller = ThreadController()
    controller.strategy = DYCORSStrategy(max_evals=max_evals,
                                         opt_prob=ackley,
                                         exp_design=slhd,
                                         surrogate=rbf,
                                         asynchronous=True,
                                         batch_size=None)

    for _ in range(num_threads):
        worker = BasicWorkerThread(controller, ackley.eval)
        controller.launch_worker(worker)
    controller.run()

    check_strategy(controller)
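The test assumes a few module-level fixtures that are not shown. A plausible setup, sketched under the assumption that the real check_strategy helper in pySOT's test suite performs more thorough checks:

from poap.controller import BasicWorkerThread, ThreadController
from pySOT.optimization_problems import Ackley

ackley = Ackley(dim=10)
num_threads = 4

def check_strategy(controller):
    # Minimal stand-in: the run must respect the evaluation budget
    assert len(controller.fevals) <= controller.strategy.max_evals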
Example 3
def test_dycors_serial():
    max_evals = 200
    rbf = RBFInterpolant(
        dim=ackley.dim, kernel=CubicKernel(),
        tail=LinearTail(ackley.dim))
    slhd = SymmetricLatinHypercube(
        dim=ackley.dim, num_pts=2*(ackley.dim+1))

    # Create a strategy and a controller
    controller = SerialController(ackley.eval)
    controller.strategy = DYCORSStrategy(
        max_evals=max_evals, opt_prob=ackley, exp_design=slhd,
        surrogate=rbf, asynchronous=True)
    controller.run()

    check_strategy(controller)
Example 4
def init_serial():
    rbf = RBFInterpolant(dim=ackley.dim,
                         kernel=CubicKernel(),
                         tail=LinearTail(ackley.dim))
    slhd = SymmetricLatinHypercube(dim=ackley.dim,
                                   num_pts=2 * (ackley.dim + 1))

    # Create a strategy and a controller
    controller = SerialController(ackley.eval)
    controller.strategy = DYCORSStrategy(max_evals=max_evals,
                                         opt_prob=ackley,
                                         exp_design=slhd,
                                         surrogate=rbf,
                                         asynchronous=True)

    # Wrap controller in checkpoint object
    controller = CheckpointController(controller, fname=fname)
    controller.run()
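init_serial relies on module-level max_evals and fname (e.g. max_evals = 200 and fname = "checkpoint.pysot"). A matching resume sketch, modeled on pySOT's checkpointing example (the exact controller wiring here is an assumption):

def resume_serial():
    controller = SerialController(ackley.eval)
    controller = CheckpointController(controller, fname=fname)
    controller.resume()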
Example 5
    def pysot_cube(objective,
                   n_trials,
                   n_dim,
                   with_count=False,
                   method=None,
                   design=None):
        """ Minimize
        :param objective:
        :param n_trials:
        :param n_dim:
        :param with_count:
        :return:
        """
        logging.getLogger('pySOT').setLevel(logging.ERROR)

        num_threads = 1
        asynchronous = True

        max_evals = n_trials
        gp = GenericProblem(dim=n_dim, objective=objective)

        if design == 'latin':
            exp_design = LatinHypercube(dim=n_dim, num_pts=2 * (n_dim + 1))
        elif design == 'symmetric':
            exp_design = SymmetricLatinHypercube(dim=n_dim,
                                                 num_pts=2 * (n_dim + 1))
        elif design == 'factorial':
            exp_design = TwoFactorial(dim=n_dim)
        else:
            raise ValueError('design should be latin, symmetric or factorial')

        # Create a strategy and a controller
        # SRBFStrategy, EIStrategy, DYCORSStrategy, RandomStrategy, LCBStrategy
        controller = ThreadController()
        if method is None:
            raise ValueError("method must be 'srbf', 'ei', 'dycors', 'lcb' or 'random'")
        if method.lower() == 'srbf':
            surrogate = RBFInterpolant(dim=n_dim,
                                       lb=np.array([0.0] * n_dim),
                                       ub=np.array([1.0] * n_dim),
                                       kernel=CubicKernel(),
                                       tail=LinearTail(n_dim))
            controller.strategy = SRBFStrategy(max_evals=max_evals,
                                               opt_prob=gp,
                                               exp_design=exp_design,
                                               surrogate=surrogate,
                                               asynchronous=asynchronous)
        elif method.lower() == 'ei':
            surrogate = GPRegressor(dim=n_dim,
                                    lb=np.array([0.0] * n_dim),
                                    ub=np.array([1.0] * n_dim))
            controller.strategy = EIStrategy(max_evals=max_evals,
                                             opt_prob=gp,
                                             exp_design=exp_design,
                                             surrogate=surrogate,
                                             asynchronous=asynchronous)
        elif method.lower() == 'dycors':
            surrogate = RBFInterpolant(dim=n_dim,
                                       lb=np.array([0.0] * n_dim),
                                       ub=np.array([1.0] * n_dim),
                                       kernel=CubicKernel(),
                                       tail=LinearTail(n_dim))
            controller.strategy = DYCORSStrategy(max_evals=max_evals,
                                                 opt_prob=gp,
                                                 exp_design=exp_design,
                                                 surrogate=surrogate,
                                                 asynchronous=asynchronous)
        elif method.lower() == 'lcb':
            surrogate = GPRegressor(dim=n_dim,
                                    lb=np.array([0.0] * n_dim),
                                    ub=np.array([1.0] * n_dim))
            controller.strategy = LCBStrategy(max_evals=max_evals,
                                              opt_prob=gp,
                                              exp_design=exp_design,
                                              surrogate=surrogate,
                                              asynchronous=asynchronous)
        elif method.lower() == 'random':
            controller.strategy = RandomStrategy(max_evals=max_evals,
                                                 opt_prob=gp)
        else:
            raise ValueError("Didn't recognize method passed to pysot")

        # Launch the threads and give them access to the objective function
        for _ in range(num_threads):
            worker = BasicWorkerThread(controller, gp.eval)
            controller.launch_worker(worker)

        # Run the optimization strategy
        result = controller.run()
        best_x = result.params[0].tolist()
        return (result.value, best_x,
                gp.feval_count) if with_count else (result.value, best_x)
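A hypothetical call, assuming GenericProblem (defined elsewhere in the module) evaluates the objective on the unit cube [0, 1]^n_dim:

def sphere(x):
    return float(sum(xi ** 2 for xi in x))

best_val, best_x = pysot_cube(sphere, n_trials=60, n_dim=3,
                              method='dycors', design='symmetric')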
Example 6
class steade(AbstractOptimizer):
    primary_import = "pysot"

    def __init__(self, api_config):
        """Build wrapper class to use an optimizer in benchmark.
        Parameters
        ----------
        api_config : dict-like of dict-like
            Configuration of the optimization variables. See API description.
        """
        AbstractOptimizer.__init__(self, api_config)

        self.search_space = JointSpace(api_config)
        self.bounds = self.search_space.get_bounds()
        self.iter = 0
        # Sets up the optimization problem (needs self.bounds)
        self.create_opt_prob()
        self.max_evals = np.iinfo(np.int32).max  # NOTE: Largest possible int
        self.batch_size = None
        self.history = []
        self.proposals = []
        # Population-based parameters in DE
        self.population = []
        self.fitness = []
        self.F = 0.7
        self.Cr = 0.7
        # For bayes opt
        self.dim = len(self.search_space.param_list)
        self.torch_bounds = torch.from_numpy(self.search_space.get_bounds().T)
        self.min_max_bounds = torch.from_numpy(
            np.stack([np.zeros(self.dim),
                      np.ones(self.dim)]))
        self.archive = []
        self.arc_fitness = []

    def create_opt_prob(self):
        """Create an optimization problem object."""
        opt = OptimizationProblem()
        opt.lb = self.bounds[:, 0]  # In warped space
        opt.ub = self.bounds[:, 1]  # In warped space
        opt.dim = len(self.bounds)
        opt.cont_var = np.arange(len(self.bounds))
        opt.int_var = []
        assert len(opt.cont_var) + len(opt.int_var) == opt.dim
        opt.objfun = None
        self.opt = opt

    def start(self, max_evals):
        """Starts a new pySOT run."""
        self.history = []
        self.proposals = []

        # Symmetric Latin hypercube design
        des_pts = max([self.batch_size, 2 * (self.opt.dim + 1)])
        slhd = SymmetricLatinHypercube(dim=self.opt.dim, num_pts=des_pts)

        # Warped RBF interpolant
        rbf = RBFInterpolant(dim=self.opt.dim,
                             lb=self.opt.lb,
                             ub=self.opt.ub,
                             kernel=CubicKernel(),
                             tail=LinearTail(self.opt.dim),
                             eta=1e-4)

        # Optimization strategy
        self.strategy = DYCORSStrategy(
            max_evals=self.max_evals,
            opt_prob=self.opt,
            exp_design=slhd,
            surrogate=rbf,
            asynchronous=True,
            batch_size=1,
            use_restarts=True,
        )

    def _suggest(self, n_suggestions=1):
        """Get a suggestion from the optimizer.
        Parameters
        ----------
        n_suggestions : int
            Desired number of parallel suggestions in the output
        Returns
        -------
        next_guess : list of dict
            List of `n_suggestions` suggestions to evaluate the objective
            function. Each suggestion is a dictionary where each key
            corresponds to a parameter being optimized.
        """

        if self.batch_size is None:  # First call to suggest
            self.batch_size = n_suggestions
            self.start(self.max_evals)

        # Set the tolerances pretending like we are running batch
        d, p = float(self.opt.dim), float(n_suggestions)
        self.strategy.failtol = p * int(max(np.ceil(d / p), np.ceil(4 / p)))

        # Now we can make suggestions
        x_w = []
        self.proposals = []
        for _ in range(n_suggestions):
            proposal = self.strategy.propose_action()
            record = EvalRecord(proposal.args, status="pending")
            proposal.record = record
            proposal.accept()  # This triggers all the callbacks

            # It is possible that pySOT proposes a previously evaluated point
            # when all variables are integers, so we just abort in this case
            # since we have likely converged anyway. See PySOT issue #30.
            x = list(proposal.record.params)  # From tuple to list
            x_unwarped, = self.search_space.unwarp(x)
            if x_unwarped in self.history:
                warnings.warn("pySOT proposed the same point twice")
                self.start(self.max_evals)
                # Re-propose from the freshly restarted strategy
                return self._suggest(n_suggestions=n_suggestions)

            # NOTE: Append unwarped to avoid rounding issues
            self.history.append(copy(x_unwarped))
            self.proposals.append(proposal)
            x_w.append(copy(x_unwarped))

        return x_w

    @staticmethod
    def make_model(train_x, train_y, state_dict=None):
        """
        Define the models based on the observed data
        :param train_x: the design points/trial solutions
        :param train_y: the objective values of the trial solutions used for model fitting
        :param state_dict: Dictionary storing the parameters of the GP model
        :return:
        """
        try:
            model = SingleTaskGP(train_x, train_y)
            mll = ExactMarginalLogLikelihood(model.likelihood, model)
            # load state dict if it is passed
            if state_dict is not None:
                model.load_state_dict(state_dict)
        except Exception as e:
            print('Exception: {} in make_model()'.format(e))
            raise  # 'model' and 'mll' would be unbound below, so re-raise

        return model, mll

    def get_bayes_pop(self, n_suggestions):
        """
        Parameters
        ----------
        n_suggestions: Number of new suggestions/trial solutions to generate using BO
        Returns
        The new set of trial solutions obtained by optimizing the acquisition function
        -------
        """

        try:
            candidates, _ = optimize_acqf(
                acq_function=self.acquisition,
                bounds=self.min_max_bounds,
                q=n_suggestions,
                num_restarts=10,
                raw_samples=512,  # used for initialization heuristic
                sequential=True)

            bayes_pop = unnormalize(candidates, self.torch_bounds).numpy()
        except Exception as e:
            print('Error in get_bayes_pop(): {}'.format(e))
            raise  # 'bayes_pop' would be unbound below, so re-raise

        # Translate the solutions back to the original space
        population = self.search_space.unwarp(bayes_pop)

        return population

    def mutate(self, n_suggestions):
        """

        Parameters
        ----------
        n_suggestions
        Returns
        -------
        """

        parents = self.search_space.warp(self.population)
        surrogates = self.search_space.warp(self._suggest(n_suggestions))

        # Pop 'n_suggestions' solutions from the history since they will be replaced
        for _ in range(n_suggestions):
            self.history.pop()
            # self.proposals.pop()

        # Applying DE mutation, for more details refer to https://ieeexplore.ieee.org/abstract/document/5601760
        # Draw two distinct rotation offsets a != b (this assumes
        # n_suggestions >= 4, so that randrange(1, n_suggestions - 1)
        # can produce two different values)
        a, b = 0, 0
        while a == b:
            a = random.randrange(1, n_suggestions - 1)
            b = random.randrange(1, n_suggestions - 1)

        rand1 = random.sample(range(0, n_suggestions), n_suggestions)
        rand2 = [(r + a) % n_suggestions for r in rand1]
        rand3 = [(r + b) % n_suggestions for r in rand1]

        try:
            bayes_pop = self.search_space.warp(
                self.get_bayes_pop(n_suggestions))

            # Bayesian mutation inspired from DE/rand/2 mutation
            mutants = bayes_pop[rand1, :] + \
                      self.F * (surrogates[rand2, :] - surrogates[rand3, :]) + \
                      1 / self.iter * np.random.random(parents.shape) * (parents[rand2, :] - parents[rand3, :])

        except Exception:
            # Fall back to plain DE/rand/2 mutation when BO fails
            # (e.g. a decomposition error inside the GP fit)
            mutants = parents[rand1, :] + \
                      self.F * (surrogates[rand2, :] - surrogates[rand3, :]) + \
                      1 / self.iter * np.random.random(parents.shape) * (parents[rand2, :] - parents[rand3, :])

        # Check the bound constraints and apply (binomial) crossover to generate the offspring/donor vectors
        offsprings = deepcopy(parents)
        bounds = self.search_space.get_bounds()
        dims = len(bounds)

        for i in range(n_suggestions):
            j_rand = random.randrange(dims)

            for j in range(dims):
                # Check if the bound-constraints are satisfied or not
                if mutants[i, j] < bounds[j, 0]:
                    mutants[i, j] = bounds[j, 0]  # surrogates[i, j]
                if bounds[j, 1] < mutants[i, j]:
                    mutants[i, j] = bounds[j, 1]  # surrogates[i, j]

                if random.random() <= self.Cr or j == j_rand:
                    offsprings[i, j] = mutants[i, j]

        # Translate the offspring back into the original space
        population = self.search_space.unwarp(offsprings)

        # Now insert the solutions back to the archive.
        for i in range(n_suggestions):
            self.history.append(population[i])

        return population

    def suggest(self, n_suggestions=1):
        """Get a suggestion from the optimizer.
        Parameters
        ----------
        n_suggestions : int
            Desired number of parallel suggestions in the output
        Returns
        -------
        next_guess : list of dict
            List of `n_suggestions` suggestions to evaluate the objective
            function. Each suggestion is a dictionary where each key
            corresponds to a parameter being optimized.
        """

        self.iter += 1
        lamda = 10  # defines the transition point in the algorithm

        if self.iter < lamda:
            population = self._suggest(n_suggestions)
        else:
            population = self.mutate(n_suggestions)

        return population

    def _observe(self, x, y):
        # Find the matching proposal and execute its callbacks
        idx = [x == xx for xx in self.history]
        i = np.argwhere(idx)[0].item()  # Pick the first index if there are ties
        proposal = self.proposals[i]
        proposal.record.complete(y)
        self.proposals.pop(i)
        self.history.pop(i)

    def observe(self, X, y):
        """Send an observation of a suggestion back to the optimizer.
        Parameters
        ----------
        X : list of dict-like
            Places where the objective function has already been evaluated.
            Each suggestion is a dictionary where each key corresponds to a
            parameter being optimized.
        y : array-like, shape (n,)
            Corresponding values where objective has been evaluated
        """
        try:
            assert len(X) == len(y)
            c = 0

            for x_, y_ in zip(X, y):
                # Archive stores all the solutions
                self.archive.append(x_)
                # Negate because BoTorch solves a maximization problem
                self.arc_fitness.append(-y_)

                if self.iter == 1:
                    self.population.append(x_)
                    self.fitness.append(y_)
                else:
                    if y_ <= self.fitness[c]:
                        self.population[c] = x_
                        self.fitness[c] = y_

                    c += 1

                # Ignore any non-finite observations; unclear if this is the right thing to do
                if np.isfinite(y_):
                    self._observe(x_, y_)

            # Transform the data seen so far into tensors and train the model
            train_x = normalize(
                torch.from_numpy(self.search_space.warp(self.archive)),
                bounds=self.torch_bounds)
            train_y = standardize(
                torch.from_numpy(np.array(self.arc_fitness).reshape(-1, 1)))
            # Fit the GP based on the actual observed values
            if self.iter == 1:
                self.model, mll = self.make_model(train_x, train_y)
            else:
                self.model, mll = self.make_model(train_x, train_y,
                                                  self.model.state_dict())

            # mll.train()
            fit_gpytorch_model(mll)

            # define the sampler
            sampler = SobolQMCNormalSampler(num_samples=512)

            # define the acquisition function
            self.acquisition = qExpectedImprovement(model=self.model,
                                                    best_f=train_y.max(),
                                                    sampler=sampler)

        except Exception as e:
            print('Error: {} in observe()'.format(e))
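A hypothetical smoke test of the wrapper outside the benchmark harness (the api_config schema follows bayesmark; the quadratic objective is made up for illustration):

api_config = {
    "x0": {"type": "real", "space": "linear", "range": (-5.0, 5.0)},
    "x1": {"type": "real", "space": "linear", "range": (-5.0, 5.0)},
}
opt = steade(api_config)
for _ in range(5):
    guesses = opt.suggest(n_suggestions=8)
    opt.observe(guesses, [g["x0"] ** 2 + g["x1"] ** 2 for g in guesses])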
Example 7
class PySOTOptimizer(AbstractOptimizer):
    primary_import = "pysot"

    def __init__(self, api_config):
        """Build wrapper class to use an optimizer in benchmark.

        Parameters
        ----------
        api_config : dict-like of dict-like
            Configuration of the optimization variables. See API description.
        """
        AbstractOptimizer.__init__(self, api_config)

        self.space_x = JointSpace(api_config)
        self.bounds = self.space_x.get_bounds()
        # Set up the optimization problem (needs self.bounds)
        self.create_opt_prob()
        self.max_evals = np.iinfo(np.int32).max  # NOTE: Largest possible int
        self.batch_size = None
        self.history = []
        self.proposals = []

    def create_opt_prob(self):
        """Create an optimization problem object."""
        opt = OptimizationProblem()
        opt.lb = self.bounds[:, 0]  # In warped space
        opt.ub = self.bounds[:, 1]  # In warped space
        opt.dim = len(self.bounds)
        opt.cont_var = np.arange(len(self.bounds))
        opt.int_var = []
        assert len(opt.cont_var) + len(opt.int_var) == opt.dim
        opt.objfun = None
        self.opt = opt

    def start(self, max_evals):
        """Starts a new pySOT run."""
        self.history = []
        self.proposals = []

        # Symmetric Latin hypercube design
        des_pts = max([self.batch_size, 2 * (self.opt.dim + 1)])
        slhd = SymmetricLatinHypercube(dim=self.opt.dim, num_pts=des_pts)

        # Warped RBF interpolant
        rbf = RBFInterpolant(dim=self.opt.dim,
                             lb=self.opt.lb,
                             ub=self.opt.ub,
                             kernel=CubicKernel(),
                             tail=LinearTail(self.opt.dim),
                             eta=1e-4)

        # Optimization strategy. An SRBFStrategy built with the same arguments
        # is a drop-in alternative:
        # self.strategy = SRBFStrategy(
        #     max_evals=self.max_evals, opt_prob=self.opt, exp_design=slhd,
        #     surrogate=rbf, asynchronous=True, batch_size=1, use_restarts=True,
        # )
        self.strategy = DYCORSStrategy(
            max_evals=self.max_evals,
            opt_prob=self.opt,
            exp_design=slhd,
            surrogate=rbf,
            asynchronous=True,
            batch_size=1,
            use_restarts=True,
        )

    def suggest(self, n_suggestions=1):
        """Get a suggestion from the optimizer.

        Parameters
        ----------
        n_suggestions : int
            Desired number of parallel suggestions in the output

        Returns
        -------
        next_guess : list of dict
            List of `n_suggestions` suggestions to evaluate the objective
            function. Each suggestion is a dictionary where each key
            corresponds to a parameter being optimized.
        """

        if self.batch_size is None:  # First call to suggest
            self.batch_size = n_suggestions
            self.start(self.max_evals)

        # Set the tolerances pretending like we are running batch
        d, p = float(self.opt.dim), float(n_suggestions)
        self.strategy.failtol = p * int(max(np.ceil(d / p), np.ceil(4 / p)))

        # Now we can make suggestions
        x_w = []
        self.proposals = []
        for _ in range(n_suggestions):
            proposal = self.strategy.propose_action()
            record = EvalRecord(proposal.args, status="pending")
            proposal.record = record
            proposal.accept()  # This triggers all the callbacks

            # It is possible that pySOT proposes a previously evaluated point
            # when all variables are integers, so we just abort in this case
            # since we have likely converged anyway. See PySOT issue #30.
            x = list(proposal.record.params)  # From tuple to list
            x_unwarped, = self.space_x.unwarp(x)
            if x_unwarped in self.history:
                warnings.warn("pySOT proposed the same point twice")
                self.start(self.max_evals)
                return self.suggest(n_suggestions=n_suggestions)

            # NOTE: Append unwarped to avoid rounding issues
            self.history.append(copy(x_unwarped))
            self.proposals.append(proposal)
            x_w.append(copy(x_unwarped))

        return x_w

    def _observe(self, x, y):
        # Find the matching proposal and execute its callbacks
        idx = [x == xx for xx in self.history]
        i = np.argwhere(idx)[0].item()  # Pick the first index if there are ties
        proposal = self.proposals[i]
        proposal.record.complete(y)
        self.proposals.pop(i)
        self.history.pop(i)

    def observe(self, X, y):
        """Send an observation of a suggestion back to the optimizer.

        Parameters
        ----------
        X : list of dict-like
            Places where the objective function has already been evaluated.
            Each suggestion is a dictionary where each key corresponds to a
            parameter being optimized.
        y : array-like, shape (n,)
            Corresponding values where objective has been evaluated
        """
        assert len(X) == len(y)

        for x_, y_ in zip(X, y):
            # Ignore any non-finite observations; unclear if this is the right thing to do
            if np.isfinite(y_):
                self._observe(x_, y_)
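For reference, this wrapper matches bayesmark's bundled pySOT optimizer; the imports it relies on (assuming that project layout) are:

import warnings
from copy import copy

import numpy as np
from poap.strategy import EvalRecord
from pySOT.experimental_design import SymmetricLatinHypercube
from pySOT.optimization_problems import OptimizationProblem
from pySOT.strategy import DYCORSStrategy
from pySOT.surrogate import CubicKernel, LinearTail, RBFInterpolant

from bayesmark.abstract_optimizer import AbstractOptimizer
from bayesmark.space import JointSpace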
Example 8
File: pysot.py Project: evhub/bbopt
    # Rendered as plain if/elif chains; the original file is Coconut
    # case-match code compiled to Python, with identical behavior.
    def setup_backend(
        self,
        params,
        strategy="SRBF",
        surrogate="RBF",
        design=None,
    ):
        self.opt_problem = BBoptOptimizationProblem(params)

        # Experimental design
        design_kwargs = dict(dim=self.opt_problem.dim)
        if design is None:
            self.exp_design = EmptyExperimentalDesign(**design_kwargs)
        elif design == "latin_hypercube":
            self.exp_design = LatinHypercube(
                num_pts=2 * (self.opt_problem.dim + 1), **design_kwargs)
        elif design == "symmetric_latin_hypercube":
            self.exp_design = SymmetricLatinHypercube(
                num_pts=2 * (self.opt_problem.dim + 1), **design_kwargs)
        elif design == "two_factorial":
            self.exp_design = TwoFactorial(**design_kwargs)
        elif callable(design):
            self.exp_design = design(**design_kwargs)
        else:
            raise TypeError("unknown experimental design {!r}".format(design))

        # Surrogate model
        surrogate_kwargs = dict(dim=self.opt_problem.dim,
                                lb=self.opt_problem.lb,
                                ub=self.opt_problem.ub)
        if surrogate == "RBF":
            self.surrogate = RBFInterpolant(
                kernel=LinearKernel() if design is None else CubicKernel(),
                tail=ConstantTail(self.opt_problem.dim)
                if design is None else LinearTail(self.opt_problem.dim),
                **surrogate_kwargs)
        elif surrogate == "GP":
            self.surrogate = GPRegressor(**surrogate_kwargs)
        elif callable(surrogate):
            self.surrogate = surrogate(**surrogate_kwargs)
        else:
            raise TypeError("unknown surrogate {!r}".format(surrogate))

        # Optimization strategy
        strategy_kwargs = dict(max_evals=sys.maxsize,
                               opt_prob=self.opt_problem,
                               exp_design=self.exp_design,
                               surrogate=self.surrogate,
                               asynchronous=True,
                               batch_size=1)
        if strategy == "SRBF":
            self.strategy = SRBFStrategy(**strategy_kwargs)
        elif strategy == "EI":
            self.strategy = EIStrategy(**strategy_kwargs)
        elif strategy == "DYCORS":
            self.strategy = DYCORSStrategy(**strategy_kwargs)
        elif strategy == "LCB":
            self.strategy = LCBStrategy(**strategy_kwargs)
        elif callable(strategy):
            self.strategy = strategy(**strategy_kwargs)
        else:
            raise TypeError("unknown strategy {!r}".format(strategy))