def test_ei():
    """Check that ei_ga locates the known EI optima on a 1-D Ackley problem."""
    np.random.seed(0)
    problem = Ackley(dim=1)
    points = np.expand_dims([-15, -10, 0, 1, 20], axis=1)
    values = np.array([problem.eval(p) for p in points])
    surrogate = GPRegressor(dim=problem.dim, lb=problem.lb, ub=problem.ub)
    surrogate.add_points(points, values)

    # (distance tolerance, known optimizer) pairs: first the unconstrained EI
    # optimum, then the optimum at least distance 5 from the existing points.
    for dtol, expected in ((0.0, -1.7558), (5.0, 10.6656)):
        proposal = ei_ga(
            X=points, Xpend=None, dtol=dtol, ei_tol=0, fX=values,
            num_pts=1, opt_prob=problem, surrogate=surrogate)
        assert np.isclose(proposal, expected, atol=1e-2)
def test_srbf():
    """Check candidate_uniform and candidate_srbf against known next points."""
    np.random.seed(0)
    problem = Ackley(dim=1)
    points = np.expand_dims([-15, -10, 0, 1, 20], axis=1)
    values = np.array([problem.eval(p) for p in points])
    surrogate = GPRegressor(dim=1)
    surrogate.add_points(points, values)

    common = dict(
        num_pts=1, X=points, Xpend=None, fX=values,
        num_cand=10000, surrogate=surrogate, opt_prob=problem)
    # Expected next point for exploration weights 0.25 and 0.75; both the
    # uniform and the SRBF candidate schemes should agree with it.
    for weight, expected in ((0.25, 10.50), (0.75, -3.395)):
        proposal = candidate_uniform(weights=[weight], **common)
        assert np.isclose(proposal, expected, atol=1e-2)
        proposal = candidate_srbf(weights=[weight], sampling_radius=0.5, **common)
        assert np.isclose(proposal, expected, atol=1e-2)
def test_ei():
    """Check that the GA and uniform EI optimizers find the known optima."""
    np.random.seed(0)
    problem = Ackley(dim=1)
    points = np.expand_dims([-15, -10, 0, 1, 20], axis=1)
    values = np.array([problem.eval(p) for p in points])
    surrogate = GPRegressor(dim=1)
    surrogate.add_points(points, values)

    common = dict(
        X=points, Xpend=None, ei_tol=0, fX=values,
        num_pts=1, opt_prob=problem, surrogate=surrogate)
    # (distance tolerance, known optimizer) pairs: the global EI optimum,
    # then the optimum at least distance 5 from the existing points.
    for dtol, expected in ((0.0, -3.0556), (5.0, 11.14)):
        proposal = expected_improvement_ga(dtol=dtol, **common)
        assert np.isclose(proposal, expected, atol=1e-2)
        proposal = expected_improvement_uniform(dtol=dtol, num_cand=10000, **common)
        assert np.isclose(proposal, expected, atol=1e-2)
def test_gp():
    """Fit a GPRegressor on a 2-D grid, then verify prediction accuracy and reset."""
    grid = make_grid(30)  # uniform 30 x 30 grid of training points
    gp = GPRegressor(dim=2, lb=np.zeros(2), ub=np.ones(2))
    assert isinstance(gp, Surrogate)
    gp.add_points(grid, f(grid))

    # Predictions at random points should closely match the true function.
    np.random.seed(0)
    samples = np.random.rand(10, 2)
    predicted = gp.predict(samples)
    actual = f(samples)
    assert np.max(np.abs(actual - predicted)) < 1e-2

    # Resetting discards all data but keeps the dimension.
    gp.reset()
    assert gp.num_pts == 0 and gp.dim == 2
class PySOTBackend(StandardBackend):
    """The pySOT backend uses pySOT for black box optimization."""
    backend_name = "pySOT"
    implemented_funcs = ("choice", "randrange", "uniform")

    # The active pySOT strategy; None until setup_backend runs.
    strategy = None

    @override
    def setup_backend(
        self,
        params,
        strategy="SRBF",
        surrogate="RBF",
        design=None,
    ):
        """Build the optimization problem, experimental design, surrogate, and strategy.

        Parameters:
            params: parameter definitions forwarded to BBoptOptimizationProblem.
            strategy: "SRBF", "EI", "DYCORS", "LCB", or a callable strategy class.
            surrogate: "RBF", "GP", or a callable surrogate class.
            design: None, "latin_hypercube", "symmetric_latin_hypercube",
                "two_factorial", or a callable experimental design class.

        Raises:
            TypeError: if strategy, surrogate, or design is none of the above.
        """
        self.opt_problem = BBoptOptimizationProblem(params)

        # Select the experimental design.
        design_kwargs = dict(dim=self.opt_problem.dim)
        if design is None:
            self.exp_design = EmptyExperimentalDesign(**design_kwargs)
        elif design == "latin_hypercube":
            self.exp_design = LatinHypercube(
                num_pts=2 * (self.opt_problem.dim + 1),
                **design_kwargs)
        elif design == "symmetric_latin_hypercube":
            self.exp_design = SymmetricLatinHypercube(
                num_pts=2 * (self.opt_problem.dim + 1),
                **design_kwargs)
        elif design == "two_factorial":
            self.exp_design = TwoFactorial(**design_kwargs)
        elif callable(design):
            # Any callable is treated as a design class/factory.
            self.exp_design = design(**design_kwargs)
        else:
            raise TypeError("unknown experimental design {!r}".format(design))

        # Select the surrogate model.
        surrogate_kwargs = dict(
            dim=self.opt_problem.dim,
            lb=self.opt_problem.lb,
            ub=self.opt_problem.ub,
        )
        if surrogate == "RBF":
            # Without an explicit design, use the simplest kernel/tail pair;
            # otherwise the richer cubic kernel with a linear tail.
            self.surrogate = RBFInterpolant(
                kernel=LinearKernel() if design is None else CubicKernel(),
                tail=ConstantTail(self.opt_problem.dim) if design is None
                else LinearTail(self.opt_problem.dim),
                **surrogate_kwargs)
        elif surrogate == "GP":
            self.surrogate = GPRegressor(**surrogate_kwargs)
        elif callable(surrogate):
            # Any callable is treated as a surrogate class/factory.
            self.surrogate = surrogate(**surrogate_kwargs)
        else:
            raise TypeError("unknown surrogate {!r}".format(surrogate))

        # Select the optimization strategy.
        strategy_kwargs = dict(
            max_evals=sys.maxsize,
            opt_prob=self.opt_problem,
            exp_design=self.exp_design,
            surrogate=self.surrogate,
            asynchronous=True,
            batch_size=1,
        )
        if strategy == "SRBF":
            self.strategy = SRBFStrategy(**strategy_kwargs)
        elif strategy == "EI":
            self.strategy = EIStrategy(**strategy_kwargs)
        elif strategy == "DYCORS":
            self.strategy = DYCORSStrategy(**strategy_kwargs)
        elif strategy == "LCB":
            self.strategy = LCBStrategy(**strategy_kwargs)
        elif callable(strategy):
            # Any callable is treated as a strategy class/factory.
            self.strategy = strategy(**strategy_kwargs)
        else:
            raise TypeError("unknown strategy {!r}".format(strategy))

    @override
    def tell_data(self, new_data, new_losses):
        """Special method that allows fast updating of the backend with new examples.

        Appends each (point, value) pair to the strategy's bookkeeping arrays
        and to the surrogate model, bypassing pySOT's normal proposal flow.
        """
        points, values = self.opt_problem.get_points_values(new_data, new_losses)
        for i in range(points.shape[0]):
            X = np.copy(points[i, :])
            # Mirror the point into both the public and internal strategy arrays.
            self.strategy.X = np.vstack((self.strategy.X, X))
            self.strategy._X = np.vstack((self.strategy._X, X))
            self.strategy.fX = np.vstack((self.strategy.fX, values[i]))
            self.strategy._fX = np.vstack((self.strategy._fX, values[i]))
            assert self.surrogate is self.strategy.surrogate, (
                self.surrogate, self.strategy.surrogate)
            self.surrogate.add_points(X, values[i])

    @override
    def get_next_values(self):
        """Special method to get the next set of values to evaluate.

        Keeps asking the strategy for proposals until it proposes an
        evaluation, then evaluates it against the optimization problem and
        returns the resulting values.
        """
        # Assume tell_data has already seeded the strategy and surrogate.
        assert self.strategy._X.shape[0] > 0, self.strategy._X
        assert self.surrogate.num_pts > 0, self.surrogate.num_pts
        while True:
            proposal = self.strategy.propose_action()
            assert proposal, proposal
            if proposal.action == "terminate":
                proposal.accept()
            elif proposal.action == "eval":
                self.opt_problem.eval(*proposal.args)
                # Undo pySOT's pending-evaluation bookkeeping since we
                # evaluated the point synchronously ourselves.
                self.strategy.pending_evals -= 1
                self.strategy.remove_pending(proposal.args[0])
                break
            else:
                proposal.reject()
        assert self.opt_problem.got_values is not None, "pySOT optimization produced no values"
        return self.opt_problem.got_values