Example #1
    def test_local_upper_bounds_utils(self):
        for dtype in (torch.float, torch.double):
            U = self.U.to(dtype=dtype)
            Z = self.Z.to(dtype=dtype)
            pareto_Y = self.pareto_Y.to(dtype=dtype)
            expected_U = self.expected_U_after_update.to(dtype=dtype)
            expected_Z = self.expected_Z_after_update.to(dtype=dtype)

            # test that a z that is dominated by the negated reference point
            # leaves U and Z unchanged
            U_new, Z_new = compute_local_upper_bounds(
                U=U, Z=Z, z=-self.ref_point + 1
            )
            self.assertTrue(torch.equal(U_new, U))
            self.assertTrue(torch.equal(Z_new, Z))

            # test compute_local_upper_bounds by adding the negated Pareto
            # points one at a time
            for i in range(pareto_Y.shape[0]):
                U, Z = compute_local_upper_bounds(U=U, Z=Z, z=-pareto_Y[i])
            self.assertTrue(torch.equal(U, expected_U))
            self.assertTrue(torch.equal(Z, expected_Z))

            # test that calling update_local_upper_bounds_incremental once with
            # the entire Pareto set yields the same result as the point-by-point
            # updates above
            U2, Z2 = update_local_upper_bounds_incremental(
                new_pareto_Y=-pareto_Y,
                U=self.U.to(dtype=dtype),
                Z=self.Z.to(dtype=dtype),
            )
            self.assertTrue(torch.equal(U2, expected_U))
            self.assertTrue(torch.equal(Z2, expected_Z))
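
The test above relies on fixtures (self.U, self.Z, self.pareto_Y, self.ref_point) defined elsewhere in the test class. As a minimal standalone sketch (not taken from the test suite), the same utilities can be driven directly for a hypothetical 2-outcome problem; the helper init_bounds, the import path, and the concrete tensor values below are illustrative assumptions.

import torch
from botorch.utils.multi_objective.box_decompositions.utils import (
    compute_local_upper_bounds,
    update_local_upper_bounds_incremental,
)


def init_bounds(neg_ref_point):
    # one local upper bound at the negated reference point, plus the m x m block
    # of dummy defining points from Sec 2.1 of [Lacour17]_ (hypothetical helper)
    m = neg_ref_point.shape[-1]
    U = neg_ref_point.unsqueeze(0).clone()
    Z = torch.full((1, m, m), float("-inf"), dtype=U.dtype)
    for j in range(m):
        Z[0, j, j] = U[0, j]
    return U, Z


# hypothetical maximization problem with m=2 outcomes (values are made up)
ref_point = torch.tensor([0.0, 0.0])
pareto_Y = torch.tensor([[1.0, 2.0], [2.0, 1.0]])

# add the negated Pareto points one at a time ...
U, Z = init_bounds(-ref_point)
for y in pareto_Y:
    U, Z = compute_local_upper_bounds(U=U, Z=Z, z=-y)

# ... or all at once; both routes should yield the same U and Z
U2, Z2 = init_bounds(-ref_point)
U2, Z2 = update_local_upper_bounds_incremental(new_pareto_Y=-pareto_Y, U=U2, Z=Z2)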
Example #2
    def _get_partitioning(self) -> None:
        r"""Compute non-dominated partitioning.

        Given the local upper bounds for the minimization problem (self._U), this
        computes the non-dominated partitioning for the maximization problem. Note
        that -self._U contains the local lower bounds for the maximization problem.
        Following [Yang2019]_, this treats -self._U as a *new* Pareto frontier for a
        minimization problem with a reference point of [inf]^m and computes the
        dominated partitioning for this second minimization problem.
        """
        new_ref_point = torch.full(
            torch.Size([1]) + self._neg_ref_point.shape,
            float("inf"),
            dtype=self._neg_ref_point.dtype,
            device=self._neg_ref_point.device,
        )
        # initialize local upper bounds for the second minimization problem
        self.register_buffer("_U2", new_ref_point)
        # initialize defining points for the second minimization problem
        # use ref point for maximization as the ideal point for minimization.
        self._Z2 = self.ref_point.expand(
            1, self.num_outcomes, self.num_outcomes
        ).clone()
        for j in range(self._neg_ref_point.shape[-1]):
            self._Z2[0, j, j] = self._U2[0, j]
        # incrementally update local upper bounds and defining points
        # for each new Pareto point
        self._U2, self._Z2 = update_local_upper_bounds_incremental(
            new_pareto_Y=-self._U,
            U=self._U2,
            Z=self._Z2,
        )
        cell_bounds = get_partition_bounds(
            Z=self._Z2, U=self._U2, ref_point=new_ref_point.view(-1)
        )
        self.register_buffer("hypercell_bounds", cell_bounds)
Example #3
    def _partition_space(self):
        r"""Partition the non-dominated space into disjoint hypercells.

        This method supports an arbitrary number of outcomes, but is
        less efficient than `_partition_space_2d` for the 2-outcome case.
        """
        if len(self.batch_shape) > 0:
            # this can be triggered when there are m=2 outcomes and
            # BoxDecomposition._partition_space_2d is not overridden.
            raise NotImplementedError(
                "_partition_space does not support batch dimensions."
            )
        # this assumes minimization
        # initialize local upper bounds
        self.register_buffer("_U", self._neg_ref_point.unsqueeze(-2).clone())
        # initialize defining points to be the dummy points \hat{z} that are
        # defined in Sec 2.1 in [Lacour17]_. Note that in [Lacour17]_, outcomes
        # are assumed to be between [0,1], so they used 0 rather than -inf.
        self._Z = torch.zeros(
            1,
            self.num_outcomes,
            self.num_outcomes,
            dtype=self.Y.dtype,
            device=self.Y.device,
        )
        for j in range(self.ref_point.shape[-1]):
            # the j-th dummy point is -inf in every outcome except outcome j,
            # where it equals the initial local upper bound (the negated
            # reference point for maximization).
            self._Z[0, j] = float("-inf")
            self._Z[0, j, j] = self._U[0, j]
        # incrementally update local upper bounds and defining points
        # for each new Pareto point
        self._U, self._Z = update_local_upper_bounds_incremental(
            new_pareto_Y=self._neg_pareto_Y,
            U=self._U,
            Z=self._Z,
        )
        self._get_partitioning()
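
Examples #2 and #3 are methods that communicate through buffers on self. A standalone sketch that stitches the same two passes together for an unbatched maximization problem might look like the function below; the function name is hypothetical, the import path is an assumption, and the code is not taken verbatim from the library.

import torch
from torch import Tensor
from botorch.utils.multi_objective.box_decompositions.utils import (
    get_partition_bounds,
    update_local_upper_bounds_incremental,
)


def nondominated_cell_bounds(pareto_Y: Tensor, ref_point: Tensor) -> Tensor:
    # returns a 2 x num_cells x m tensor bounding the non-dominated cells of an
    # unbatched maximization problem (a sketch combining Examples #2 and #3)
    m = ref_point.shape[-1]
    # first pass: local upper bounds for the equivalent minimization problem
    U = -ref_point.to(pareto_Y).unsqueeze(0)
    Z = torch.full((1, m, m), float("-inf"), dtype=U.dtype, device=U.device)
    for j in range(m):
        Z[0, j, j] = U[0, j]
    U, Z = update_local_upper_bounds_incremental(new_pareto_Y=-pareto_Y, U=U, Z=Z)
    # second pass ([Yang2019]_): treat -U as a new Pareto frontier for a
    # minimization problem with reference point [inf]^m
    new_ref_point = torch.full((1, m), float("inf"), dtype=U.dtype, device=U.device)
    U2 = new_ref_point.clone()
    Z2 = ref_point.to(pareto_Y).expand(1, m, m).clone()
    for j in range(m):
        Z2[0, j, j] = U2[0, j]
    U2, Z2 = update_local_upper_bounds_incremental(new_pareto_Y=-U, U=U2, Z=Z2)
    return get_partition_bounds(Z=Z2, U=U2, ref_point=new_ref_point.view(-1))

With the hypothetical ref_point and pareto_Y from the sketch after Example #1, the returned tensor should match the hypercell_bounds buffer that the class-based code registers.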
Example #4
    def update(self, Y: Tensor) -> None:
        r"""Update non-dominated front and decomposition.

        Args:
            Y: A `(batch_shape) x n x m`-dim tensor of new, incremental outcomes.
        """
        if self._update_neg_Y(Y=Y):
            self.reset()
        else:
            if self.num_outcomes == 2 or self._neg_pareto_Y.shape[-2] == 0:
                # If there are two objectives, recompute the box decomposition
                # because the partitions can be computed analytically.
                # If the current Pareto set has no points, also recompute the
                # box decomposition from scratch.
                self.reset()
            else:
                # only include points that are better than the reference point
                better_than_ref = (Y > self.ref_point).all(dim=-1)
                Y = Y[better_than_ref]
                Y_all = torch.cat([self._neg_pareto_Y, -Y], dim=-2)
                pareto_mask = is_non_dominated(-Y_all)
                # determine the number of points in Y that are Pareto optimal
                num_new_pareto = pareto_mask[-Y.shape[-2] :].sum()
                self._neg_pareto_Y = Y_all[pareto_mask]
                if num_new_pareto > 0:
                    # update local upper bounds for the minimization problem
                    self._U, self._Z = update_local_upper_bounds_incremental(
                        # this assumes minimization
                        new_pareto_Y=self._neg_pareto_Y[-num_new_pareto:],
                        U=self._U,
                        Z=self._Z,
                    )
                    # use the negated local upper bounds as the new Pareto
                    # frontier for a second minimization problem and perform
                    # the box decomposition of its dominated space.
                    self._get_partitioning()
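
For context, here is a hedged usage sketch of the incremental branch above. It assumes that these methods belong to BoTorch's NondominatedPartitioning class in botorch.utils.multi_objective.box_decompositions.non_dominated and that its constructor accepts ref_point and Y as in recent releases; the tensor values are made up.

import torch
from botorch.utils.multi_objective.box_decompositions.non_dominated import (
    NondominatedPartitioning,
)

ref_point = torch.tensor([0.0, 0.0, 0.0])
Y_init = torch.tensor([[1.0, 2.0, 3.0], [3.0, 2.0, 1.0]])
partitioning = NondominatedPartitioning(ref_point=ref_point, Y=Y_init)

# With m=3 outcomes and a non-empty Pareto set, this call takes the incremental
# branch: only the new non-dominated points are used to update _U and _Z before
# the partitioning is recomputed via _get_partitioning.
partitioning.update(Y=torch.tensor([[2.0, 2.0, 2.0]]))
cell_bounds = partitioning.hypercell_bounds  # 2 x num_cells x 3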