def solve_nautilus_asf_problem(
        self,
        pareto_f: np.ndarray,
        subset_indices: List[int],
        ref_point: np.ndarray,
        ideal: np.ndarray,
        nadir: np.ndarray,
    ) -> int:
        """Forms and solves the achievement scalarizing function to find the
        closest point on the Pareto optimal front to the given reference
        point.

        Args:
            pareto_f (np.ndarray): The whole Pareto optimal front.
            subset_indices (List[int]): Indices of the currently reachable solutions.
            ref_point (np.ndarray): The reference point indicating a decision
            maker's preference.
            ideal (np.ndarray): Ideal point.
            nadir (np.ndarray): Nadir point.

        Returns:
            int: Index of the closest point according to the minimized value of the ASF.
        """
        asf = PointMethodASF(nadir, ideal)
        scalarizer = DiscreteScalarizer(asf, {"reference_point": ref_point})
        solver = DiscreteMinimizer(scalarizer)

        tmp = np.copy(pareto_f)
        mask = np.zeros(tmp.shape[0], dtype=bool)
        mask[subset_indices] = True
        tmp[~mask] = np.nan

        res = solver.minimize(tmp)

        return res
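
A minimal standalone sketch of the masking step used above, with hypothetical data; rows outside subset_indices are replaced by NaN so the solver never selects them:

import numpy as np

pareto_f = np.array([[0.2, 0.8], [0.5, 0.5], [0.8, 0.2]])
subset_indices = [1, 2]  # only these rows are currently reachable

mask = np.zeros(pareto_f.shape[0], dtype=bool)
mask[subset_indices] = True

tmp = np.copy(pareto_f)
tmp[~mask] = np.nan  # row 0 becomes [nan, nan] and is ignored by the solver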
Example #2
def test_discrete_solver_with_con():
    ideal = np.array([0, 0, 0, 0])
    nadir = np.array([1, 1, 1, 1])

    asf = PointMethodASF(nadir, ideal)
    con = lambda x: x[:, 0] > 0.2
    dscalarizer = DiscreteScalarizer(asf, {"reference_point": nadir})
    dminimizer = DiscreteMinimizer(dscalarizer, constraint_evaluator=con)

    non_dominated_points = np.array([
        [0.2, 0.4, 0.6, 0.8],
        [0.4, 0.2, 0.6, 0.8],
        [0.6, 0.4, 0.2, 0.8],
        [0.4, 0.8, 0.6, 0.2],
    ])

    # first occurrence with first point invalid
    res_ind = dminimizer.minimize(non_dominated_points)

    assert res_ind == 1

    # first point as closest, but invalid
    dscalarizer._scalarizer_args = {
        "reference_point": np.array([0.2, 0.4, 0.6, 0.8])
    }
    res_ind = dminimizer.minimize(non_dominated_points)

    assert res_ind == 1

    # all points invalid
    dminimizer._constraint_evaluator = lambda x: x[:, 0] > 1.0

    with pytest.raises(ScalarSolverException):
        _ = dminimizer.minimize(non_dominated_points)
Example #3
def test_discrete_solver():
    ideal = np.array([0, 0, 0, 0])
    nadir = np.array([1, 1, 1, 1])

    asf = PointMethodASF(nadir, ideal)
    dscalarizer = DiscreteScalarizer(asf, {"reference_point": nadir})
    dminimizer = DiscreteMinimizer(dscalarizer)

    non_dominated_points = np.array([
        [0.2, 0.4, 0.6, 0.8],
        [0.4, 0.2, 0.6, 0.8],
        [0.6, 0.4, 0.2, 0.8],
        [0.4, 0.8, 0.6, 0.2],
    ])

    # first occurrence
    res_ind = dminimizer.minimize(non_dominated_points)
    assert res_ind == 0

    dscalarizer._scalarizer_args = {
        "reference_point": np.array([0.6, 0.4, 0.2, 0.8])
    }
    res_ind = dminimizer.minimize(non_dominated_points)

    assert res_ind == 2
Example #4
def test_scalarizer_asf():
    asf = PointMethodASF(np.array([10, 10, 10]), np.array([-10, -10, -10]))
    ref = np.atleast_2d([1, 5, 2.5])
    scalarizer = Scalarizer(
        simple_vector_valued_fun, asf, scalarizer_args={"reference_point": ref}
    )

    res = scalarizer.evaluate(np.atleast_2d([2, 1, 1, 1]))

    assert np.allclose(res, 0.1000002)
Example #5
    def solve_nautilus_asf_problem(
        pareto_f: np.ndarray,
        subset_indices: List[int],
        ref_point: np.ndarray,
        ideal: np.ndarray,
        nadir: np.ndarray,
        user_bounds: np.ndarray,
    ) -> int:
        """Forms and solves the achievement scalarizing function to find the
        closest point on the Pareto optimal front to the given reference
        point.

        Args:
            pareto_f (np.ndarray): The whole Pareto optimal front.
            subset_indices (List[int]): Indices of the currently reachable solutions.
            ref_point (np.ndarray): The reference point indicating a decision
                maker's preference.
            ideal (np.ndarray): Ideal point.
            nadir (np.ndarray): Nadir point.
            user_bounds (np.ndarray): Bounds given by the user (the DM) for each objective, which should not be
                exceeded. A 1D array where NaNs indicate 'no bound is given' for the respective objective value.

        Returns:
            int: Index of the closest point according to the minimized value of the ASF.
        """
        asf = PointMethodASF(nadir, ideal)
        scalarizer = DiscreteScalarizer(asf, {"reference_point": ref_point})
        solver = DiscreteMinimizer(scalarizer)

        # Copy the front and keep only the currently reachable solutions.
        # If user bounds are given, also filter out solutions outside those bounds.
        # Infeasible solutions on the Pareto front are set to NaN.
        tmp = np.copy(pareto_f)
        mask = np.zeros(tmp.shape[0], dtype=bool)
        mask[subset_indices] = True
        tmp[~mask] = np.nan

        # indices of solutions with one or more objective values exceeding the user bounds.
        bound_mask = np.any(tmp > user_bounds, axis=1)
        tmp[bound_mask] = np.nan

        res = solver.minimize(tmp)

        return res["x"]
Example #6
            if np.all(bad_con_mask):
                raise ScalarSolverException(
                    "None of the supplied vectors adhere to the given "
                    "constraint function.")
            tmp = np.copy(vectors)
            tmp[bad_con_mask] = np.nan
            return np.nanargmin(self._scalarizer(tmp))


if __name__ == "__main__":
    from desdeo_tools.scalarization.ASF import PointMethodASF

    ideal = np.array([0, 0, 0, 0])
    nadir = np.array([1, 1, 1, 1])

    asf = PointMethodASF(nadir, ideal)
    dscalarizer = DiscreteScalarizer(asf, {"reference_point": None})
    dminimizer = DiscreteMinimizer(dscalarizer)

    non_dominated_points = np.array([
        [0.2, 0.4, 0.6, 0.8],
        [0.4, 0.2, 0.6, 0.8],
        [0.6, 0.4, 0.2, 0.8],
        [0.4, 0.8, 0.6, 0.2],
    ])

    z = np.array([0.4, 0.2, 0.6, 0.8])

    dscalarizer._scalarizer_args = {"reference_point": z}

    print(asf(non_dominated_points, reference_point=z))

    res = dminimizer.minimize(non_dominated_points)
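
The fragment above hides constraint-violating rows by setting them to NaN and then picks the best remaining row with np.nanargmin. A simplified, self-contained sketch of that pattern, applied directly to hypothetical scalarized values:

import numpy as np

scalarized = np.array([0.3, 0.1, 0.5, 0.2])            # scalarizer value per row
bad_con_mask = np.array([False, True, False, False])   # row 1 violates a constraint

tmp = np.copy(scalarized)
tmp[bad_con_mask] = np.nan
best = np.nanargmin(tmp)  # 3: smallest value among the feasible rows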
Example #7
    def calculate_new_solutions(
        self,
        number_of_solutions: int,
        levels: np.ndarray,
        improve_inds: np.ndarray,
        improve_until_inds: np.ndarray,
        acceptable_inds: np.ndarray,
        impaire_until_inds: np.ndarray,
        free_inds: np.ndarray,
    ) -> Tuple[NimbusSaveRequest, SimplePlotRequest]:
        """Calcualtes new solutions based on classifications supplied by the decision maker by
            solving ASF problems.
        
        Args:
            number_of_solutions (int): Number of solutions, should be between 1 and 4.
            levels (np.ndarray): Aspiration levels and upper bounds relevant to some of the classifications.
            improve_inds (np.ndarray): Indices corresponding to the objectives which should be improved.
            improve_until_inds (np.ndarray): Like above, but improved until an aspiration level is reached.
            acceptable_inds (np.ndarray): Indices of objectives which are acceptable as they are now.
            impaire_until_inds (np.ndarray): Indices of objectives which may be impaired until an upper limit is
                reached.
            free_inds (np.ndarray): Indices of objectives which may change freely.
        
        Returns:
            Tuple[NimbusSaveRequest, SimplePlotRequest]: A save request with the newly computed solutions, and
            a plot request to visualize said solutions.
        """
        results = []

        # always computed
        asf_1 = MaxOfTwoASF(self._nadir, self._ideal, improve_inds,
                            improve_until_inds)

        def cons_1(
            x: np.ndarray,
            f_current: np.ndarray = self._current_objectives,
            levels: np.ndarray = levels,
            improve_until_inds: np.ndarray = improve_until_inds,
            improve_inds: np.ndarray = improve_inds,
            impaire_until_inds: np.ndarray = impaire_until_inds,
        ):
            f = self._problem.evaluate(x).objectives.squeeze()

            res_1 = f_current[improve_inds] - f[improve_inds]
            res_2 = f_current[improve_until_inds] - f[improve_until_inds]
            res_3 = levels[impaire_until_inds] - f_current[impaire_until_inds]

            res = np.hstack((res_1, res_2, res_3))

            if self._problem.n_of_constraints > 0:
                res_prob = self._problem.evaluate(x).constraints.squeeze()

                return np.hstack((res_prob, res))

            else:
                return res

        scalarizer_1 = Scalarizer(
            lambda x: self._problem.evaluate(x).objectives,
            asf_1,
            scalarizer_args={"reference_point": levels},
        )

        solver_1 = ScalarMinimizer(
            scalarizer_1,
            self._problem.get_variable_bounds(),
            cons_1,
            method=self._scalar_method,
        )

        res_1 = solver_1.minimize(self._current_solution)
        results.append(res_1)

        if number_of_solutions > 1:
            # create the reference point needed in the rest of the ASFs
            z_bar = np.zeros(self._problem.n_of_objectives)
            z_bar[improve_inds] = self._ideal[improve_inds]
            z_bar[improve_until_inds] = levels[improve_until_inds]
            z_bar[acceptable_inds] = self._current_objectives[acceptable_inds]
            z_bar[impaire_until_inds] = levels[impaire_until_inds]
            z_bar[free_inds] = self._nadir[free_inds]

            # second ASF
            asf_2 = StomASF(self._ideal)

            # cons_2 can be used in the rest of the ASF scalarizations, it's not a bug!
            if self._problem.n_of_constraints > 0:
                cons_2 = lambda x: self._problem.evaluate(x).constraints.squeeze()
            else:
                cons_2 = None

            scalarizer_2 = Scalarizer(
                lambda x: self._problem.evaluate(x).objectives,
                asf_2,
                scalarizer_args={"reference_point": z_bar},
            )

            solver_2 = ScalarMinimizer(
                scalarizer_2,
                self._problem.get_variable_bounds(),
                cons_2,
                method=self._scalar_method,
            )

            res_2 = solver_2.minimize(self._current_solution)
            results.append(res_2)

        if number_of_solutions > 2:
            # asf 3
            asf_3 = PointMethodASF(self._nadir, self._ideal)

            scalarizer_3 = Scalarizer(
                lambda x: self._problem.evaluate(x).objectives,
                asf_3,
                scalarizer_args={"reference_point": z_bar},
            )

            solver_3 = ScalarMinimizer(
                scalarizer_3,
                self._problem.get_variable_bounds(),
                cons_2,
                method=self._scalar_method,
            )

            res_3 = solver_3.minimize(self._current_solution)
            results.append(res_3)

        if number_of_solutions > 3:
            # asf 4
            asf_4 = AugmentedGuessASF(self._nadir, self._ideal, free_inds)

            scalarizer_4 = Scalarizer(
                lambda x: self._problem.evaluate(x).objectives,
                asf_4,
                scalarizer_args={"reference_point": z_bar},
            )

            solver_4 = ScalarMinimizer(
                scalarizer_4,
                self._problem.get_variable_bounds(),
                cons_2,
                method=self._scalar_method,
            )

            res_4 = solver_4.minimize(self._current_solution)
            results.append(res_4)

        # create the save request
        solutions = [res["x"] for res in results]
        objectives = [
            self._problem.evaluate(x).objectives.squeeze() for x in solutions
        ]

        save_request = NimbusSaveRequest(solutions, objectives)

        msg = "Computed new solutions."
        plot_request = self.create_plot_request(objectives, msg)

        return save_request, plot_request
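
The reference point z_bar above is assembled objective by objective from the decision maker's classification. A standalone sketch with hypothetical index arrays and values (names and numbers chosen for illustration only):

import numpy as np

ideal = np.array([0.0, 0.0, 0.0, 0.0])
nadir = np.array([1.0, 1.0, 1.0, 1.0])
current = np.array([0.4, 0.6, 0.5, 0.7])
levels = np.array([np.nan, 0.3, np.nan, 0.9])  # aspiration/upper bounds where relevant

improve_inds = np.array([0])         # improve as much as possible
improve_until_inds = np.array([1])   # improve until the aspiration level
acceptable_inds = np.array([2])      # acceptable as is
impaire_until_inds = np.array([3])   # may be impaired up to the upper bound
free_inds = np.array([], dtype=int)  # may change freely

z_bar = np.zeros(4)
z_bar[improve_inds] = ideal[improve_inds]
z_bar[improve_until_inds] = levels[improve_until_inds]
z_bar[acceptable_inds] = current[acceptable_inds]
z_bar[impaire_until_inds] = levels[impaire_until_inds]
z_bar[free_inds] = nadir[free_inds]
# z_bar == [0.0, 0.3, 0.5, 0.9]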
Example #8
    def compute_intermediate_solutions(
        self,
        solutions: np.ndarray,
        n_desired: int,
    ) -> Tuple[NimbusSaveRequest, SimplePlotRequest]:
        """Computs intermediate solution between two solutions computed earlier.

        Args:
            solutions (np.ndarray): The solutions between which the intermediate solutions should
                be computed.
            n_desired (int): The number of intermediate solutions desired.
        
        Raises:
            NimbusException
        
        Returns:
            Tuple[NimbusSaveRequest, SimplePlotRequest]: A save request with the computed intermediate
            points, and a plot request to visualize said points.
        """
        # vector between the two solutions
        between = solutions[0] - solutions[1]
        norm = np.linalg.norm(between)
        between_norm = between / norm

        # the plus 2 assumes we are interested only in n_desired points BETWEEN the
        # two supplied solutions
        step_size = norm / (2 + n_desired)

        intermediate_points = np.array([
            solutions[1] + i * step_size * between_norm
            for i in range(1, n_desired + 1)
        ])

        # project each of the intermediate solutions to the Pareto front
        intermediate_solutions = np.zeros(intermediate_points.shape)
        intermediate_objectives = np.zeros(
            (n_desired, self._problem.n_of_objectives))
        asf = PointMethodASF(self._nadir, self._ideal)

        for i in range(n_desired):
            scalarizer = Scalarizer(
                lambda x: self._problem.evaluate(x).objectives,
                asf,
                scalarizer_args={
                    "reference_point":
                    self._problem.evaluate(intermediate_points[i]).objectives
                },
            )

            if self._problem.n_of_constraints > 0:
                cons = lambda x: self._problem.evaluate(x).constraints.squeeze()
            else:
                cons = None

            solver = ScalarMinimizer(
                scalarizer,
                self._problem.get_variable_bounds(),
                cons,
                method=self._scalar_method,
            )

            res = solver.minimize(self._current_solution)
            intermediate_solutions[i] = res["x"]
            intermediate_objectives[i] = self._problem.evaluate(
                res["x"]).objectives

        # create appropriate requests
        save_request = NimbusSaveRequest(list(intermediate_solutions),
                                         list(intermediate_objectives))

        msg = "Computed intermediate solutions"
        plot_request = self.create_plot_request(intermediate_objectives, msg)

        return save_request, plot_request
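
The interpolation above places n_desired evenly spaced points strictly between the two supplied solutions before projecting each onto the Pareto front. A short sketch of just that arithmetic with made-up endpoints:

import numpy as np

solutions = np.array([[1.0, 0.0], [0.0, 1.0]])
n_desired = 3

between = solutions[0] - solutions[1]
norm = np.linalg.norm(between)
between_norm = between / norm
step_size = norm / (2 + n_desired)

intermediate_points = np.array([
    solutions[1] + i * step_size * between_norm for i in range(1, n_desired + 1)
])
# [[0.2, 0.8], [0.4, 0.6], [0.6, 0.4]]: three points between the endpoints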
Example #9
def solve_pareto_front_representation_general(
    objective_evaluator: Callable[[np.ndarray], np.ndarray],
    n_of_objectives: int,
    variable_bounds: np.ndarray,
    step: Optional[Union[np.ndarray, float]] = 0.1,
    eps: Optional[float] = 1e-6,
    ideal: Optional[np.ndarray] = None,
    nadir: Optional[np.ndarray] = None,
    constraint_evaluator: Optional[Callable[[np.ndarray], np.ndarray]] = None,
    solver_method: Optional[Union[ScalarMethod, str]] = "scipy_de",
) -> Tuple[np.ndarray, np.ndarray]:
    """Computes a representation of a Pareto efficient front from a
    multiobjective minimizatino problem. Does so by generating an evenly spaced
    set of reference points (in the objective space), in the space spanned by
    the supplied ideal and nadir points. The generated reference points are
    then used to formulate achievement scalaraization problems, which when
    solved, yield a representation of a Pareto efficient solution. The result
    is guaranteed to contain only non-dominated solutions.
    
    Args:
        objective_evaluator (Callable[[np.ndarray], np.ndarray]): A vector
            valued function returning objective values given an array of decision
            variables.
        n_of_objectives (int): Number of objectives returned by
            objective_evaluator.
        variable_bounds (np.ndarray): The upper and lower bounds of the
            decision variables. Bound for each variable should be on the rows,
            with the first column containing lower bounds, and the second column
            upper bounds. Use np.inf to indicate no bounds.
        step (Optional[Union[np.ndarray, float]], optional): Either a float
            or an array of floats. If a single float is given, generates
            reference points with the objectives having values a step apart
            between the ideal and nadir points. If an array of floats is given,
            the steps defined in the array are used for each objective's
            values. Defaults to 0.1.
        eps (Optional[float], optional): An offset to be added to the nadir
            value to keep the nadir inside the range when generating reference
            points. Defaults to 1e-6.
        ideal (Optional[np.ndarray], optional): The ideal point of the
            problem being solved. Defaults to None.
        nadir (Optional[np.ndarray], optional): The nadir point of the
            problem being solved. Defaults to None.
        constraint_evaluator (Optional[Callable[[np.ndarray], np.ndarray]], optional):
            An evaluator returning values for the constraints defined
            for the problem. A negative value for a constraint indicates a breach
            of that constraint. Defaults to None.
        solver_method (Optional[Union[ScalarMethod, str]], optional): The
            method used to minimize the achievement scalarization problems
            arising when calculating Pareto efficient solutions. Defaults to
            "scipy_de".

    Raises:
        MCDMUtilityException: The shape of the supplied step array does not
        match the shapes of the ideal and nadir points (when step is an
        array), or step is neither an np.ndarray nor a float.
    
    Returns:
        Tuple[np.ndarray, np.ndarray]: A tuple containing representations of
        the Pareto optimal variable values, and the corresponding objective
        values.

    Note:
        The objective evaluator should be defined such that minimization is
        expected in each of the objectives.
    """
    if ideal is None or nadir is None:
        # compute ideal and nadir using a payoff table
        ideal, nadir = payoff_table_method_general(
            objective_evaluator,
            n_of_objectives,
            variable_bounds,
            constraint_evaluator,
        )

    # use ASF to (almost) guarantee Pareto optimality.
    asf = PointMethodASF(nadir, ideal)

    scalarizer = Scalarizer(objective_evaluator,
                            asf,
                            scalarizer_args={"reference_point": None})
    solver = ScalarMinimizer(scalarizer,
                             bounds=variable_bounds,
                             method=solver_method)

    # bounds to be used to compute slices
    stacked = np.stack((ideal, nadir)).T
    lower_slice_b, upper_slice_b = np.min(stacked, axis=1), np.max(stacked, axis=1)

    if type(step) is float:
        slices = [
            slice(start, stop + eps, step)
            for (start, stop) in zip(lower_slice_b, upper_slice_b)
        ]

    elif type(step) is np.ndarray:
        if not ideal.shape == nadir.shape == step.shape:
            raise MCDMUtilityException(
                "The shape of the supplied step array does not match the "
                "shapes of the ideal and nadir points.")
        slices = [
            slice(start, stop + eps, s)
            for (start, stop, s) in zip(lower_slice_b, upper_slice_b, step)
        ]

    else:
        raise MCDMUtilityException(
            "step must be either a numpy array or a float.")

    z_mesh = np.mgrid[slices].reshape(len(ideal), -1).T

    p_front_objectives = np.zeros(z_mesh.shape)
    p_front_variables = np.zeros(
        (len(p_front_objectives), len(variable_bounds.squeeze())))

    for i, z in enumerate(z_mesh):
        scalarizer._scalarizer_args = {"reference_point": z}
        res = solver.minimize(None)

        if not res["success"]:
            print("Non successfull optimization")
            p_front_objectives[i] = np.nan
            p_front_variables[i] = np.nan
            continue

        # check for dominance, accept only non-dominated solutions
        f_i = objective_evaluator(res["x"])
        if not np.all(f_i > p_front_objectives[:i]
                      [~np.all(np.isnan(p_front_objectives[:i]), axis=1)]):
            p_front_objectives[i] = f_i
            p_front_variables[i] = res["x"]
        elif i < 1:
            p_front_objectives[i] = f_i
            p_front_variables[i] = res["x"]
        else:
            p_front_objectives[i] = np.nan
            p_front_variables[i] = np.nan

    return (
        p_front_variables[~np.all(np.isnan(p_front_variables), axis=1)],
        p_front_objectives[~np.all(np.isnan(p_front_objectives), axis=1)],
    )
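
The reference-point grid above is built by feeding per-objective slice objects to np.mgrid. A short sketch of just that grid construction for a hypothetical two-objective case:

import numpy as np

ideal = np.array([0.0, 0.0])
nadir = np.array([1.0, 1.0])
step, eps = 0.5, 1e-6

slices = [slice(start, stop + eps, step) for (start, stop) in zip(ideal, nadir)]
z_mesh = np.mgrid[slices].reshape(len(ideal), -1).T
# 9 reference points: every combination of {0.0, 0.5, 1.0} per objective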
Example #10
    dscalarizer = DiscreteScalarizer(lambda x: np.sum(x, axis=1))
    res_1d = dscalarizer(vector)

    assert np.array_equal(res_1d, [6.0])


def test_discrete_args():
    vectors = np.array([[1, 1, 1], [2, 2, 2], [4, 5, 6.0]])
    dscalarizer = DiscreteScalarizer(
        lambda x, a=1: a * np.sum(x, axis=1), scalarizer_args={"a": 2}
    )
    res = dscalarizer(vectors)

    assert np.array_equal(res, [6, 12, 30])


if __name__ == "__main__":
    asf = PointMethodASF(np.array([10, 10, 10]), np.array([-10, -10, -10]))
    ref = np.atleast_2d([2.5, 2.5, 2.5])
    scalarizer = Scalarizer(
        simple_vector_valued_fun, asf, scalarizer_args={"reference_point": ref}
    )

    res = scalarizer.evaluate(np.atleast_2d([2, 1, 1, 1]))
    print(res)

    asf.nadir = np.array([9, 9, 9])

    res = scalarizer.evaluate(np.atleast_2d([2, 1, 1, 1]))
    print(res)