def calculate_bounds(
    self,
    objectives: Callable,
    n_objectives: int,
    x0: np.ndarray,
    epsilons: np.ndarray,
    bounds: Union[np.ndarray, None],
    constraints: Optional[Callable],
    method: Union[ScalarMethod, str, None],
) -> np.ndarray:
    """Calculate the new bounds using the epsilon constraint method.

    Args:
        objectives (Callable): The objective functions, returning the objective values for each input vector.
        n_objectives (int): Total number of objectives.
        x0 (np.ndarray): Initial values for decision variables.
        epsilons (np.ndarray): Previous iteration point.
        bounds (Union[np.ndarray, None]): Bounds for decision variables.
        constraints (Optional[Callable]): Constraints of the problem.
        method (Union[ScalarMethod, str, None]): The optimization method the scalarizer should be minimized with.

    Returns:
        np.ndarray: New lower bounds for the objective functions.
    """
    new_lower_bounds = [None] * n_objectives

    # use differential evolution with polishing disabled
    method_e: ScalarMethod = ScalarMethod(
        lambda x, _, **y: differential_evolution(x, **y),
        method_args={"disp": False, "polish": False, "tol": 0.000001, "popsize": 10, "maxiter": 50000},
        use_scipy=True,
    )

    # solve new lower bounds for each objective
    for i in range(n_objectives):
        eps = ECM.EpsilonConstraintMethod(
            objectives,
            i,
            # take out the objective to be minimized
            np.array([val for ind, val in enumerate(epsilons) if ind != i]),
            constraints=constraints,
        )
        cons_evaluate = eps.evaluate_constraints
        scalarized_objective = Scalarizer(objectives, eps)

        minimizer = ScalarMinimizer(
            scalarized_objective, bounds, constraint_evaluator=cons_evaluate, method=method_e
        )
        res = minimizer.minimize(x0)

        # store the objective function value as the new lower bound
        new_lower_bounds[i] = objectives(res["x"])[0][i]

    return new_lower_bounds
def test_scipy_minimize_cons():
    solver = ScalarMinimizer(
        simple_problem,
        np.array([[0.0, 0.0, 0.0], [1.0, 1.0, 1.0]]).T,
        simple_constr,
        "scipy_minimize",
    )
    res = solver.minimize(np.array([0.21, 0.999, 0.001]))

    assert not res["success"]
def test_dummy_cons():
    method = ScalarMethod(dummy_minimizer)
    solver = ScalarMinimizer(simple_problem, np.array([[0, 0, 0], [1, 1, 1]]), simple_constr, method)

    res = solver.minimize(np.array([0.5, 0.5, 0.1]))
    assert res["success"]

    res = solver.minimize(np.array([0.5, 0.5, 0.5]))
    assert not res["success"]
def test_dummy_no_cons():
    method = ScalarMethod(dummy_minimizer)
    solver = ScalarMinimizer(simple_problem, np.array([[0, 0, 0], [1, 1, 1]]), None, method)

    x0 = np.array([0.5, 0.5, 0.5])
    res = solver.minimize(x0)

    assert np.array_equal(res["x"], x0)
    assert res["success"]
    assert res["message"] == "I just retruned the initial guess as the optimal solution."
def solve_asf(
    self,
    problem: Union[MOProblem, DiscreteDataProblem],
    ref_point: np.ndarray,
    method: Optional[ScalarMethod] = None,
):
    """Solve the achievement scalarizing function.

    Args:
        problem (Union[MOProblem, DiscreteDataProblem]): The problem.
        ref_point (np.ndarray): A reference point.
        method (Optional[ScalarMethod], optional): A method provided to the scalar minimizer.

    Returns:
        np.ndarray: The decision vector which solves the achievement scalarizing function.
    """
    asf = SimpleASF(np.ones(ref_point.shape))

    if isinstance(problem, MOProblem):
        scalarizer = Scalarizer(
            lambda x: problem.evaluate(x).objectives,
            asf,
            scalarizer_args={"reference_point": np.atleast_2d(ref_point)},
        )

        if problem.n_of_constraints > 0:
            _con_eval = lambda x: problem.evaluate(x).constraints.squeeze()
        else:
            _con_eval = None

        solver = ScalarMinimizer(
            scalarizer,
            problem.get_variable_bounds(),
            constraint_evaluator=_con_eval,
            method=method,
        )

        res = solver.minimize(problem.get_variable_upper_bounds() / 2)

        if res["success"]:
            return res["x"]
        else:
            raise ParetoNavigatorException("Could not solve the achievement scalarizing function.")
    else:  # discrete case
        # find the objective vector closest to the reference point
        scalarizer = DiscreteScalarizer(asf, {"reference_point": ref_point})
        solver = DiscreteMinimizer(scalarizer)

        res = solver.minimize(problem.objectives)
        return res["x"]
def test_scipy_de_cons():
    solver = ScalarMinimizer(
        simple_problem,
        np.array([[0.0, 0.0, 0.0], [1.0, 1.0, 1.0]]).T,
        simple_constr,
        "scipy_de",
    )
    res = solver.minimize(None)

    assert res["success"]
    assert np.all(np.array(res["constr"]) >= 0)
def __init__(self, problem: MOProblem, scalar_method: Optional[ScalarMethod] = None):
    # check if ideal and nadir are defined
    if problem.ideal is None or problem.nadir is None:
        # TODO: use same method as defined in scalar_method
        ideal, nadir = payoff_table_method(problem)
        self._ideal = ideal
        self._nadir = nadir
    else:
        self._ideal = problem.ideal
        self._nadir = problem.nadir

    self._scalar_method = scalar_method

    # generate a Pareto optimal starting point
    asf = SimpleASF(np.ones(self._ideal.shape))
    scalarizer = Scalarizer(
        lambda x: problem.evaluate(x).objectives,
        asf,
        scalarizer_args={"reference_point": np.atleast_2d(self._ideal)},
    )

    if problem.n_of_constraints > 0:
        _con_eval = lambda x: problem.evaluate(x).constraints.squeeze()
    else:
        _con_eval = None

    solver = ScalarMinimizer(
        scalarizer,
        problem.get_variable_bounds(),
        constraint_evaluator=_con_eval,
        method=self._scalar_method,
    )
    # TODO: fix tools to check for scipy methods in general and delete me!
    solver._use_scipy = True

    res = solver.minimize(problem.get_variable_upper_bounds() / 2)

    if res["success"]:
        self._current_solution = res["x"]
        self._current_objectives = problem.evaluate(self._current_solution).objectives.squeeze()

    self._archive_solutions = []
    self._archive_objectives = []
    self._state = "classify"

    super().__init__(problem)
def solve_asf(
    self,
    ref_point: np.ndarray,
    x0: np.ndarray,
    preferential_factors: np.ndarray,
    nadir: np.ndarray,
    utopian: np.ndarray,
    objectives: Callable,
    variable_bounds: Optional[np.ndarray] = None,
    method: Union[ScalarMethod, str, None] = None,
) -> dict:
    """Solve the achievement scalarizing function.

    Args:
        ref_point (np.ndarray): Reference point.
        x0 (np.ndarray): Initial values for decision variables.
        preferential_factors (np.ndarray): Preferential factors indicating how much the decision
            maker wishes to improve the value of each objective function.
        nadir (np.ndarray): Nadir vector.
        utopian (np.ndarray): Utopian vector.
        objectives (Callable): The objective functions, returning the objective values for each input vector.
        variable_bounds (Optional[np.ndarray]): Lower and upper bounds of each variable as a 2D numpy array.
            If None, the variables are treated as unbounded.
        method (Union[ScalarMethod, str, None]): The optimization method the scalarizer should be minimized with.

    Returns:
        dict: A dictionary with at least the following entries: 'x' indicating the optimal variables found,
            'fun' the optimal value of the optimized function, and 'success' a boolean indicating whether
            the optimization was conducted successfully.
    """
    if variable_bounds is None:
        # set all bounds to [-inf, inf]
        variable_bounds = np.array([[-np.inf, np.inf]] * x0.shape[0])

    # scalarize the problem using the reference point
    asf = ReferencePointASF(preferential_factors, nadir, utopian, rho=1e-4)
    asf_scalarizer = Scalarizer(
        evaluator=objectives,
        scalarizer=asf,
        scalarizer_args={"reference_point": ref_point},
    )

    # minimize
    minimizer = ScalarMinimizer(asf_scalarizer, variable_bounds, method=method)
    return minimizer.minimize(x0)
def calculate_new_solutions(
    self,
    number_of_solutions: int,
    levels: np.ndarray,
    improve_inds: np.ndarray,
    improve_until_inds: np.ndarray,
    acceptable_inds: np.ndarray,
    impaire_until_inds: np.ndarray,
    free_inds: np.ndarray,
) -> Tuple[NimbusSaveRequest, SimplePlotRequest]:
    """Calculates new solutions based on the classifications supplied by the decision maker
    by solving ASF problems.

    Args:
        number_of_solutions (int): Number of solutions, should be between 1 and 4.
        levels (np.ndarray): Aspiration levels and upper bounds relevant to some of the classifications.
        improve_inds (np.ndarray): Indices corresponding to the objectives which should be improved.
        improve_until_inds (np.ndarray): Like above, but improved until an aspiration level is reached.
        acceptable_inds (np.ndarray): Indices of objectives which are acceptable as they are now.
        impaire_until_inds (np.ndarray): Indices of objectives which may be impaired until an upper
            limit is reached.
        free_inds (np.ndarray): Indices of objectives which may change freely.

    Returns:
        Tuple[NimbusSaveRequest, SimplePlotRequest]: A save request with the newly computed solutions,
            and a plot request to visualize said solutions.
    """
    results = []

    # the first ASF is always computed
    asf_1 = MaxOfTwoASF(self._nadir, self._ideal, improve_inds, improve_until_inds)

    def cons_1(
        x: np.ndarray,
        f_current: np.ndarray = self._current_objectives,
        levels: np.ndarray = levels,
        improve_until_inds: np.ndarray = improve_until_inds,
        improve_inds: np.ndarray = improve_inds,
        impaire_until_inds: np.ndarray = impaire_until_inds,
    ):
        f = self._problem.evaluate(x).objectives.squeeze()

        res_1 = f_current[improve_inds] - f[improve_inds]
        res_2 = f_current[improve_until_inds] - f[improve_until_inds]
        res_3 = levels[impaire_until_inds] - f_current[impaire_until_inds]

        res = np.hstack((res_1, res_2, res_3))

        if self._problem.n_of_constraints > 0:
            res_prob = self._problem.evaluate(x).constraints.squeeze()
            return np.hstack((res_prob, res))
        else:
            return res

    scalarizer_1 = Scalarizer(
        lambda x: self._problem.evaluate(x).objectives,
        asf_1,
        scalarizer_args={"reference_point": levels},
    )

    solver_1 = ScalarMinimizer(
        scalarizer_1,
        self._problem.get_variable_bounds(),
        cons_1,
        method=self._scalar_method,
    )

    res_1 = solver_1.minimize(self._current_solution)
    results.append(res_1)

    if number_of_solutions > 1:
        # create the reference point needed in the rest of the ASFs
        z_bar = np.zeros(self._problem.n_of_objectives)
        z_bar[improve_inds] = self._ideal[improve_inds]
        z_bar[improve_until_inds] = levels[improve_until_inds]
        z_bar[acceptable_inds] = self._current_objectives[acceptable_inds]
        z_bar[impaire_until_inds] = levels[impaire_until_inds]
        z_bar[free_inds] = self._nadir[free_inds]

        # second ASF
        asf_2 = StomASF(self._ideal)

        # cons_2 can be used in the rest of the ASF scalarizations, it's not a bug!
        if self._problem.n_of_constraints > 0:
            cons_2 = lambda x: self._problem.evaluate(x).constraints.squeeze()
        else:
            cons_2 = None

        scalarizer_2 = Scalarizer(
            lambda x: self._problem.evaluate(x).objectives,
            asf_2,
            scalarizer_args={"reference_point": z_bar},
        )

        solver_2 = ScalarMinimizer(
            scalarizer_2,
            self._problem.get_variable_bounds(),
            cons_2,
            method=self._scalar_method,
        )

        res_2 = solver_2.minimize(self._current_solution)
        results.append(res_2)

    if number_of_solutions > 2:
        # third ASF
        asf_3 = PointMethodASF(self._nadir, self._ideal)

        scalarizer_3 = Scalarizer(
            lambda x: self._problem.evaluate(x).objectives,
            asf_3,
            scalarizer_args={"reference_point": z_bar},
        )

        solver_3 = ScalarMinimizer(
            scalarizer_3,
            self._problem.get_variable_bounds(),
            cons_2,
            method=self._scalar_method,
        )

        res_3 = solver_3.minimize(self._current_solution)
        results.append(res_3)

    if number_of_solutions > 3:
        # fourth ASF
        asf_4 = AugmentedGuessASF(self._nadir, self._ideal, free_inds)

        scalarizer_4 = Scalarizer(
            lambda x: self._problem.evaluate(x).objectives,
            asf_4,
            scalarizer_args={"reference_point": z_bar},
        )

        solver_4 = ScalarMinimizer(
            scalarizer_4,
            self._problem.get_variable_bounds(),
            cons_2,
            method=self._scalar_method,
        )

        res_4 = solver_4.minimize(self._current_solution)
        results.append(res_4)

    # create the save request
    solutions = [res["x"] for res in results]
    objectives = [self._problem.evaluate(x).objectives.squeeze() for x in solutions]

    save_request = NimbusSaveRequest(solutions, objectives)

    msg = "Computed new solutions."
    plot_request = self.create_plot_request(objectives, msg)

    return save_request, plot_request
def compute_intermediate_solutions(
    self,
    solutions: np.ndarray,
    n_desired: int,
) -> Tuple[NimbusSaveRequest, SimplePlotRequest]:
    """Computes intermediate solutions between two solutions computed earlier.

    Args:
        solutions (np.ndarray): The solutions between which the intermediate solutions should be computed.
        n_desired (int): The number of intermediate solutions desired.

    Raises:
        NimbusException

    Returns:
        Tuple[NimbusSaveRequest, SimplePlotRequest]: A save request with the computed intermediate
            points, and a plot request to visualize said points.
    """
    # vector between the two solutions
    between = solutions[0] - solutions[1]
    norm = np.linalg.norm(between)
    between_norm = between / norm

    # the plus 2 assumes we are interested only in n_desired points BETWEEN the
    # two supplied solutions
    step_size = norm / (2 + n_desired)

    intermediate_points = np.array(
        [solutions[1] + i * step_size * between_norm for i in range(1, n_desired + 1)]
    )

    # project each of the intermediate solutions to the Pareto front
    intermediate_solutions = np.zeros(intermediate_points.shape)
    intermediate_objectives = np.zeros((n_desired, self._problem.n_of_objectives))
    asf = PointMethodASF(self._nadir, self._ideal)

    for i in range(n_desired):
        scalarizer = Scalarizer(
            lambda x: self._problem.evaluate(x).objectives,
            asf,
            scalarizer_args={
                "reference_point": self._problem.evaluate(intermediate_points[i]).objectives
            },
        )

        if self._problem.n_of_constraints > 0:
            cons = lambda x: self._problem.evaluate(x).constraints.squeeze()
        else:
            cons = None

        solver = ScalarMinimizer(
            scalarizer,
            self._problem.get_variable_bounds(),
            cons,
            method=self._scalar_method,
        )

        res = solver.minimize(self._current_solution)
        intermediate_solutions[i] = res["x"]
        intermediate_objectives[i] = self._problem.evaluate(res["x"]).objectives

    # create the appropriate requests
    save_request = NimbusSaveRequest(list(intermediate_solutions), list(intermediate_objectives))

    msg = "Computed intermediate solutions"
    plot_request = self.create_plot_request(intermediate_objectives, msg)

    return save_request, plot_request
def payoff_table_method_general(
    objective_evaluator: Callable[[np.ndarray], np.ndarray],
    n_of_objectives: int,
    variable_bounds: np.ndarray,
    constraint_evaluator: Optional[Callable[[np.ndarray], np.ndarray]] = None,
    initial_guess: Optional[np.ndarray] = None,
    solver_method: Optional[Union[ScalarMethod, str]] = "scipy_de",
) -> Tuple[np.ndarray, np.ndarray]:
    """Solves a representation for the nadir and ideal points for a multiobjective minimization
    problem with objectives defined as the result of some objective evaluator.

    Args:
        objective_evaluator (Callable[[np.ndarray], np.ndarray]): The evaluator which returns the
            objective values given a set of variables.
        n_of_objectives (int): Number of objectives returned by calling objective_evaluator.
        variable_bounds (np.ndarray): The lower and upper bounds of the variables passed as argument
            to objective_evaluator. Should be a 2D numpy array with the limits for each variable
            being on each row. The first column should contain the lower bounds, and the second
            column the upper bounds. Use np.inf to indicate no bounds.
        constraint_evaluator (Optional[Callable[[np.ndarray], np.ndarray]], optional): An evaluator
            accepting the same arguments as objective_evaluator, which returns the constraint values
            of the multiobjective minimization problem being solved. A negative constraint value
            indicates a broken constraint. Defaults to None.
        initial_guess (Optional[np.ndarray], optional): The initial guess used for the variable
            values while solving the payoff table. The relevancy of this parameter depends on the
            solver_method being used. Defaults to None.
        solver_method (Optional[Union[ScalarMethod, str]], optional): The method to solve the
            scalarized problems in the payoff table method. Defaults to "scipy_de", which ignores
            initial_guess.

    Returns:
        Tuple[np.ndarray, np.ndarray]: The representations computed using the payoff table for the
            ideal and nadir points respectively.
    """
    scalarizer = Scalarizer(
        objective_evaluator,
        weighted_scalarizer,
        scalarizer_args={"ws": None},
    )

    solver = ScalarMinimizer(
        scalarizer,
        variable_bounds,
        constraint_evaluator,
        solver_method,
    )

    ws = np.eye(n_of_objectives)
    po_table = np.zeros((n_of_objectives, n_of_objectives))

    if initial_guess is None:
        initial_guess = variable_bounds[:, 0]

    for i in range(n_of_objectives):
        scalarizer._scalarizer_args = {"ws": ws[i]}
        opt_res = solver.minimize(initial_guess)

        if not opt_res["success"]:
            print("Unsuccessful optimization result encountered while computing a payoff table!")

        po_table[i] = objective_evaluator(opt_res["x"])

    ideal = np.diag(po_table)
    nadir = np.max(po_table, axis=0)

    return ideal, nadir
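# Usage sketch for payoff_table_method_general (illustrative only, not part of the library):
# the bi-objective evaluator, bounds, and helper name below are hypothetical. The evaluator
# accepts either a single decision vector or a 2D batch, mirroring the np.atleast_2d handling
# used by the evaluators elsewhere in this module.
def _example_payoff_table():
    def example_evaluator(x: np.ndarray) -> np.ndarray:
        x = np.atleast_2d(x)
        f1 = np.sum(x ** 2, axis=1)
        f2 = np.sum((x - 1.0) ** 2, axis=1)
        return np.stack((f1, f2), axis=1)

    # one row per variable: [lower bound, upper bound]
    example_bounds = np.array([[-2.0, 2.0], [-2.0, 2.0]])

    # returns approximate ideal and nadir points for the two objectives
    ideal, nadir = payoff_table_method_general(example_evaluator, 2, example_bounds)
    return ideal, nadir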
def solve_pareto_front_representation_general(
    objective_evaluator: Callable[[np.ndarray], np.ndarray],
    n_of_objectives: int,
    variable_bounds: np.ndarray,
    step: Optional[Union[np.ndarray, float]] = 0.1,
    eps: Optional[float] = 1e-6,
    ideal: Optional[np.ndarray] = None,
    nadir: Optional[np.ndarray] = None,
    constraint_evaluator: Optional[Callable[[np.ndarray], np.ndarray]] = None,
    solver_method: Optional[Union[ScalarMethod, str]] = "scipy_de",
) -> Tuple[np.ndarray, np.ndarray]:
    """Computes a representation of a Pareto efficient front for a multiobjective minimization
    problem. Does so by generating an evenly spaced set of reference points (in the objective
    space) in the space spanned by the supplied ideal and nadir points. The generated reference
    points are then used to formulate achievement scalarization problems, which, when solved,
    yield Pareto efficient solutions. The result is guaranteed to contain only non-dominated
    solutions.

    Args:
        objective_evaluator (Callable[[np.ndarray], np.ndarray]): A vector valued function
            returning objective values given an array of decision variables.
        n_of_objectives (int): Number of objectives returned by objective_evaluator.
        variable_bounds (np.ndarray): The upper and lower bounds of the decision variables.
            Bounds for each variable should be on the rows, with the first column containing
            lower bounds, and the second column upper bounds. Use np.inf to indicate no bounds.
        step (Optional[Union[np.ndarray, float]], optional): Either a float or an array of floats.
            If a single float is given, generates reference points with the objectives having
            values a step apart between the ideal and nadir points. If an array of floats is
            given, uses the steps defined in the array for each objective's values. Defaults to 0.1.
        eps (Optional[float], optional): An offset to be added to the nadir value to keep the
            nadir inside the range when generating reference points. Defaults to 1e-6.
        ideal (Optional[np.ndarray], optional): The ideal point of the problem being solved.
            Defaults to None.
        nadir (Optional[np.ndarray], optional): The nadir point of the problem being solved.
            Defaults to None.
        constraint_evaluator (Optional[Callable[[np.ndarray], np.ndarray]], optional): An evaluator
            returning values for the constraints defined for the problem. A negative value for a
            constraint indicates a breach of that constraint. Defaults to None.
        solver_method (Optional[Union[ScalarMethod, str]], optional): The method used to minimize
            the achievement scalarization problems arising when calculating Pareto efficient
            solutions. Defaults to "scipy_de".

    Raises:
        MCDMUtilityException: The shapes of the supplied ideal and nadir points do not match the
            shape of step when step is an array, or step is of a type other than np.ndarray or float.

    Returns:
        Tuple[np.ndarray, np.ndarray]: A tuple containing representations of the Pareto optimal
            variable values, and the corresponding objective values.

    Note:
        The objective evaluator should be defined such that minimization is expected in each of
        the objectives.
    """
    if ideal is None or nadir is None:
        # compute ideal and nadir using the payoff table method
        ideal, nadir = payoff_table_method_general(
            objective_evaluator,
            n_of_objectives,
            variable_bounds,
            constraint_evaluator,
        )

    # use an ASF to (almost) guarantee Pareto optimality
    asf = PointMethodASF(nadir, ideal)
    scalarizer = Scalarizer(objective_evaluator, asf, scalarizer_args={"reference_point": None})
    solver = ScalarMinimizer(scalarizer, bounds=variable_bounds, method=solver_method)

    # bounds to be used to compute slices
    stacked = np.stack((ideal, nadir)).T
    lower_slice_b, upper_slice_b = np.min(stacked, axis=1), np.max(stacked, axis=1)

    if type(step) is float:
        slices = [
            slice(start, stop + eps, step)
            for (start, stop) in zip(lower_slice_b, upper_slice_b)
        ]
    elif type(step) is np.ndarray:
        if not ideal.shape == nadir.shape == step.shape:
            raise MCDMUtilityException(
                "The shape of the supplied step array does not match the "
                "shape of the ideal and nadir points."
            )
        slices = [
            slice(start, stop + eps, s)
            for (start, stop, s) in zip(lower_slice_b, upper_slice_b, step)
        ]
    else:
        raise MCDMUtilityException("step must be either a numpy array or a float.")

    z_mesh = np.mgrid[slices].reshape(len(ideal), -1).T

    p_front_objectives = np.zeros(z_mesh.shape)
    p_front_variables = np.zeros((len(p_front_objectives), len(variable_bounds.squeeze())))

    for i, z in enumerate(z_mesh):
        scalarizer._scalarizer_args = {"reference_point": z}
        res = solver.minimize(None)

        if not res["success"]:
            print("Unsuccessful optimization result encountered.")
            p_front_objectives[i] = np.nan
            p_front_variables[i] = np.nan
            continue

        # check for dominance, accept only non-dominated solutions
        f_i = objective_evaluator(res["x"])

        if not np.all(
            f_i > p_front_objectives[:i][~np.all(np.isnan(p_front_objectives[:i]), axis=1)]
        ):
            p_front_objectives[i] = f_i
            p_front_variables[i] = res["x"]
        elif i < 1:
            p_front_objectives[i] = f_i
            p_front_variables[i] = res["x"]
        else:
            p_front_objectives[i] = np.nan
            p_front_variables[i] = np.nan

    return (
        p_front_variables[~np.all(np.isnan(p_front_variables), axis=1)],
        p_front_objectives[~np.all(np.isnan(p_front_objectives), axis=1)],
    )
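# Usage sketch for solve_pareto_front_representation_general (illustrative only, not part of
# the library): the evaluator, bounds, and step value below are hypothetical. A coarser step
# keeps the number of generated reference points, and thus scalarized solves, small.
def _example_pareto_front_representation():
    def example_evaluator(x: np.ndarray) -> np.ndarray:
        x = np.atleast_2d(x)
        f1 = x[:, 0]
        f2 = (1.0 - x[:, 0]) ** 2 + x[:, 1] ** 2
        return np.stack((f1, f2), axis=1)

    example_bounds = np.array([[0.0, 1.0], [0.0, 1.0]])

    # returns non-dominated decision vectors and their objective values
    variables, objectives = solve_pareto_front_representation_general(
        example_evaluator, 2, example_bounds, step=0.25
    )
    return variables, objectives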
    res_ind = dminimizer.minimize(non_dominated_points)
    assert res_ind == 1

    # first point is closest, but invalid
    dscalarizer._scalarizer_args = {"reference_point": np.array([0.2, 0.4, 0.6, 0.8])}
    res_ind = dminimizer.minimize(non_dominated_points)
    assert res_ind == 1

    # all points invalid
    dminimizer._constraint_evaluator = lambda x: x[:, 0] > 1.0
    with pytest.raises(ScalarSolverException):
        _ = dminimizer.minimize(non_dominated_points)


if __name__ == "__main__":
    solver = ScalarMinimizer(
        simple_problem,
        np.array([[0.0, 0.0, 0.0], [1.0, 1.0, 1.0]]).T,
        simple_constr,
        "scipy_de",
    )
    res = solver.minimize(np.array([0.21, 0.999, 0.001]))
    print(res)