def test_trust_region_constrained_no_context_with_gradient(
        trust_region_constr_linear_constraint, objective, gradient, space):
    """Trust-region optimizer where f and df are supplied as separate handles."""
    start = np.array([1, 1])
    opt_x, _ = apply_optimizer(trust_region_constr_linear_constraint, start, space,
                               f=objective, df=gradient, f_df=None, context_manager=None)
    # The linear constraint should push the minimizer to (0, 0.5).
    assert np.all(np.isclose(opt_x, np.array([0, 0.5])))
def test_lbfgs_with_gradient_and_context(lbfgs_context, objective, gradient, space):
    """L-BFGS with gradients while one variable is pinned by a context."""
    context = ContextManager(space, {'x': 0.5})
    start = np.array([1, 1])
    opt_x, _ = apply_optimizer(lbfgs_context, start, space,
                               f=objective, df=gradient, f_df=None,
                               context_manager=context)
    # 'x' stays fixed at 0.5; the free variable is driven to 0.
    assert np.all(np.isclose(opt_x, np.array([0.5, 0])))
def test_trust_region_constrained_nonlinear_constraint(
        trust_region_constr_nonlinear_constraint, objective, gradient, space):
    """Trust-region optimizer under a nonlinear constraint, f and df separate."""
    start = np.array([1, 1])
    opt_x, _ = apply_optimizer(trust_region_constr_nonlinear_constraint, start, space,
                               f=objective, df=gradient, f_df=None, context_manager=None)
    # Expected optimum sits on the constraint boundary at (√2/2, √2/2).
    expected = np.array([np.sqrt(2) / 2, np.sqrt(2) / 2])
    assert np.all(np.isclose(opt_x, expected, atol=1e-3))
def test_trust_region_constrained_no_context_with_f_df(
        trust_region_constr_linear_constraint, objective, gradient, space):
    """Trust-region optimizer where f and df come in as a single combined handle."""
    def value_and_gradient(x):
        # Single handle returning (f(x), df(x)) as apply_optimizer's f_df expects.
        return objective(x), gradient(x)

    start = np.array([1, 1])
    opt_x, _ = apply_optimizer(trust_region_constr_linear_constraint, start, space,
                               f=None, df=None, f_df=value_and_gradient,
                               context_manager=None)
    assert np.all(np.isclose(opt_x, np.array([0, 0.5]), atol=1e-3))
def _optimize(self, acquisition: Acquisition, context_manager: ContextManager) -> Tuple[np.ndarray, np.ndarray]:
    """
    Implementation of abstract method.
    Taking into account gradients if acquisition supports them.
    See AcquisitionOptimizerBase._optimizer for parameter descriptions.
    See class docstring for implementation details.
    """
    # The underlying optimizers minimise while acquisitions are maximised,
    # so work with the negated acquisition throughout.
    def negated_acquisition(x):
        return -acquisition.evaluate(x)

    # Guard clause: if the context pins every parameter there is nothing to optimize.
    if not context_manager.contextfree_space.parameters:
        _log.warning("All parameters are fixed through context")
        x = np.array(context_manager.context_values)[None, :]
        return x, negated_acquisition(x)

    # Combined value/gradient handle, only when the acquisition provides gradients.
    if acquisition.has_gradients:
        def f_df(x):
            value, grad = acquisition.evaluate_with_gradients(x)
            return -value, -grad
    else:
        f_df = None

    optimizer = self._get_optimizer(context_manager)
    anchor_points_generator = ObjectiveAnchorPointsGenerator(self.space, acquisition, self.num_anchor_points)

    # Select the anchor points (with context)
    anchor_points = anchor_points_generator.get(num_anchor=1, context_manager=context_manager)

    _log.info("Starting gradient-based optimization of acquisition function {}".format(type(acquisition)))
    optimized_points = [
        apply_optimizer(optimizer, anchor, space=self.space, f=negated_acquisition,
                        df=None, f_df=f_df, context_manager=context_manager)
        for anchor in anchor_points
    ]

    # Keep the best local optimum and undo the earlier negation for the caller.
    x_min, fx_min = min(optimized_points, key=lambda pair: pair[1])
    return x_min, -fx_min
def test_lbfgs_no_gradient_no_context(lbfgs, objective, space):
    """L-BFGS with only an objective handle: no gradients, no context.

    Consistency fix: every sibling test passes all seven arguments to
    apply_optimizer (f, df, f_df, context_manager); this one silently
    relied on the context_manager default. Pass the trailing None
    explicitly so the calls read uniformly.
    """
    x0 = np.array([1, 1])
    x, f = apply_optimizer(lbfgs, x0, space, objective, None, None, None)
    assert np.all(np.isclose(x, np.array([0, 0])))
def test_trust_region_constrained_no_context(
        trust_region_constr_linear_constraint, objective, space):
    """Trust-region optimizer with a plain objective: no gradients, no context."""
    start = np.array([1, 1])
    opt_x, _ = apply_optimizer(trust_region_constr_linear_constraint, start, space,
                               f=objective, df=None, f_df=None, context_manager=None)
    # The linear constraint should push the minimizer to (0, 0.5).
    assert np.all(np.isclose(opt_x, np.array([0, 0.5])))