Example #1
    def _validate_optimization_config(
        objective: Objective,
        outcome_constraints: Optional[List[OutcomeConstraint]] = None,
    ) -> None:
        """Ensure outcome constraints are valid.

        Either one or two outcome constraints can reference one metric.
        If there are two constraints, they must have different 'ops': one
            LEQ and one GEQ.
        If there are two constraints, the bound of the GEQ op must be less
            than the bound of the LEQ op.

        Args:
            outcome_constraints: Constraints to validate.
        """
        if type(objective) == MultiObjective:
            # Raise error on exact type equality only; ScalarizedObjective is OK
            raise ValueError(
                (
                    "OptimizationConfig does not support MultiObjective. "
                    "Use MultiObjectiveOptimizationConfig instead."
                )
            )
        outcome_constraints = outcome_constraints or []
        # Only validate plain OutcomeConstraints; skip ScalarizedOutcomeConstraints.
        outcome_constraints = [
            constraint
            for constraint in outcome_constraints
            if not isinstance(constraint, ScalarizedOutcomeConstraint)
        ]
        unconstrainable_metrics = objective.get_unconstrainable_metrics()
        OptimizationConfig._validate_outcome_constraints(
            unconstrainable_metrics=unconstrainable_metrics,
            outcome_constraints=outcome_constraints,
        )
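A minimal usage sketch of the check in Example #1. The import paths below assume these snippets come from an Ax-style `ax.core` package and that the `OptimizationConfig` constructor invokes `_validate_optimization_config`; treat both as assumptions, not as the snippet's own API.

# Hedged sketch: ax.core import paths and constructor-time validation are assumed.
from ax.core.metric import Metric
from ax.core.objective import MultiObjective, ScalarizedObjective
from ax.core.optimization_config import OptimizationConfig

m1, m2 = Metric(name="m1"), Metric(name="m2")

# The exact type comparison rejects a plain MultiObjective...
try:
    OptimizationConfig(objective=MultiObjective(metrics=[m1, m2]))
except ValueError as err:
    print(err)  # points the caller at MultiObjectiveOptimizationConfig

# ...but a ScalarizedObjective passes, since only exact equality is rejected.
OptimizationConfig(objective=ScalarizedObjective(metrics=[m1, m2]))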
Example #2
    def _validate_optimization_config(
        objective: Objective,
        outcome_constraints: Optional[List[OutcomeConstraint]] = None,
        objective_thresholds: Optional[List[ObjectiveThreshold]] = None,
    ) -> None:
        """Ensure outcome constraints are valid.

        Either one or two outcome constraints can reference one metric.
        If there are two constraints, they must have different 'ops': one
            LEQ and one GEQ.
        If there are two constraints, the bound of the GEQ op must be less
            than the bound of the LEQ op.

        Args:
            outcome_constraints: Constraints to validate.
        """
        if not isinstance(objective, MultiObjective):
            raise ValueError(
                (
                    "MultiObjectiveOptimizationConfig only not supports "
                    " MultiObjective. Use OptimizationConfig instead."
                )
            )
        outcome_constraints = outcome_constraints or []
        objective_thresholds = objective_thresholds or []

        # Verify we aren't optimizing too many objectives.
        objective_metrics_by_name = {
            metric.name: metric for metric in objective.metrics
        }
        if len(objective_metrics_by_name) > MAX_OBJECTIVES:
            raise ValueError(
                f"Objective: {objective} optimizes more than the maximum allowed "
                f"{MAX_OBJECTIVES} metrics."
            )
        # Raise if a threshold bounds an objective metric from the wrong direction.
        for threshold in objective_thresholds:
            metric_name = threshold.metric.name
            if metric_name in objective_metrics_by_name:
                lower_is_better = threshold.metric.lower_is_better
                bounded_above = threshold.op == ComparisonOp.LEQ
                is_aligned = lower_is_better == bounded_above
                if not (is_aligned or lower_is_better is None):
                    raise ValueError(
                        make_wrong_direction_warning(
                            metric_name=metric_name,
                            bounded_above=bounded_above,
                            lower_is_better=lower_is_better,
                        )
                    )

        unconstrainable_metrics = objective.get_unconstrainable_metrics()
        OptimizationConfig._validate_outcome_constraints(
            unconstrainable_metrics=unconstrainable_metrics,
            outcome_constraints=outcome_constraints,
        )
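The mirror case for Example #2 (and Example #3 below, which raises TypeError rather than ValueError): handing MultiObjectiveOptimizationConfig a single-metric Objective should trip the type check. Same assumptions as the sketch above, namely Ax-style imports and constructor-time validation.

from ax.core.metric import Metric
from ax.core.objective import Objective
from ax.core.optimization_config import MultiObjectiveOptimizationConfig

# A single-metric Objective is not a MultiObjective, so validation should fail.
try:
    MultiObjectiveOptimizationConfig(
        objective=Objective(metric=Metric(name="m1"), minimize=False)
    )
except (TypeError, ValueError) as err:
    print(err)  # suggests using OptimizationConfig instead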
Example #3
    def _validate_optimization_config(
        objective: Objective,
        outcome_constraints: Optional[List[OutcomeConstraint]] = None,
        objective_thresholds: Optional[List[ObjectiveThreshold]] = None,
    ) -> None:
        """Ensure outcome constraints are valid.

        Either one or two outcome constraints can reference one metric.
        If there are two constraints, they must have different 'ops': one
            LEQ and one GEQ.
        If there are two constraints, the bound of the GEQ op must be less
            than the bound of the LEQ op.

        Args:
            outcome_constraints: Constraints to validate.
        """
        if not isinstance(objective, MultiObjective):
            raise TypeError(
                (
                    "`MultiObjectiveOptimizationConfig` requires an objective "
                    "of type `MultiObjective`. Use `OptimizationConfig` instead "
                    "if using a single-metric objective."
                )
            )
        outcome_constraints = outcome_constraints or []
        objective_thresholds = objective_thresholds or []

        # Collect objective metrics by name for the threshold checks below.
        objective_metrics_by_name = {
            metric.name: metric for metric in objective.metrics
        }
        # Raise if a threshold bounds an objective metric from the wrong direction.
        for threshold in objective_thresholds:
            metric_name = threshold.metric.name
            if metric_name in objective_metrics_by_name:
                lower_is_better = threshold.metric.lower_is_better
                bounded_above = threshold.op == ComparisonOp.LEQ
                is_aligned = lower_is_better == bounded_above
                if not (is_aligned or lower_is_better is None):
                    raise ValueError(
                        make_wrong_direction_warning(
                            metric_name=metric_name,
                            bounded_above=bounded_above,
                            lower_is_better=lower_is_better,
                        )
                    )

        unconstrainable_metrics = objective.get_unconstrainable_metrics()
        OptimizationConfig._validate_outcome_constraints(
            unconstrainable_metrics=unconstrainable_metrics,
            outcome_constraints=outcome_constraints,
        )
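To make the wrong-direction check in Examples #2 and #3 concrete, here is a hedged sketch under the same assumptions: a lower_is_better metric has to be thresholded from above (LEQ), so a GEQ threshold on it is expected to raise via make_wrong_direction_warning.

from ax.core.metric import Metric
from ax.core.objective import MultiObjective
from ax.core.optimization_config import MultiObjectiveOptimizationConfig
from ax.core.outcome_constraint import ObjectiveThreshold
from ax.core.types import ComparisonOp

cost = Metric(name="cost", lower_is_better=True)
quality = Metric(name="quality", lower_is_better=False)

# "cost" should be bounded from above; a GEQ threshold points the wrong way.
bad_threshold = ObjectiveThreshold(
    metric=cost, bound=10.0, op=ComparisonOp.GEQ, relative=False
)
try:
    MultiObjectiveOptimizationConfig(
        objective=MultiObjective(metrics=[cost, quality]),
        objective_thresholds=[bad_threshold],
    )
except ValueError as err:
    print(err)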
Example #4
    def _validate_optimization_config(
            objective: Objective,
            outcome_constraints: List[OutcomeConstraint]) -> None:
        """Ensure outcome constraints are valid.

        Either one or two outcome constraints can reference one metric.
        If there are two constraints, they must have different 'ops': one
            LEQ and one GEQ.
        If there are two constraints, the bound of the GEQ op must be less
            than the bound of the LEQ op.

        Args:
            outcome_constraints: Constraints to validate.
        """
        constraint_metrics = [
            constraint.metric.name for constraint in outcome_constraints
        ]
        unconstrainable_metrics = objective.get_unconstrainable_metrics()
        for metric in unconstrainable_metrics:
            if metric.name in constraint_metrics:
                raise ValueError("Cannot constrain on objective metric.")

        def get_metric_name(oc: OutcomeConstraint) -> str:
            return oc.metric.name

        sorted_constraints = sorted(outcome_constraints, key=get_metric_name)
        for metric_name, constraints_itr in groupby(sorted_constraints,
                                                    get_metric_name):
            constraints: List[OutcomeConstraint] = list(constraints_itr)
            constraints_len = len(constraints)
            if constraints_len == 2:
                if constraints[0].op == constraints[1].op:
                    raise ValueError(
                        f"Duplicate outcome constraints {metric_name}")
                lower_bound_idx = (
                    0 if constraints[0].op == ComparisonOp.GEQ else 1)
                upper_bound_idx = 1 - lower_bound_idx
                lower_bound = constraints[lower_bound_idx].bound
                upper_bound = constraints[upper_bound_idx].bound
                if lower_bound >= upper_bound:
                    raise ValueError(
                        f"Lower bound {lower_bound} is >= upper bound " +
                        f"{upper_bound} for {metric_name}")
            elif constraints_len > 2:
                raise ValueError(
                    f"Duplicate outcome constraints {metric_name}")
Example #5
    def _validate_optimization_config(
        objective: Objective,
        outcome_constraints: Optional[List[OutcomeConstraint]] = None,
        objective_thresholds: Optional[List[ObjectiveThreshold]] = None,
    ) -> None:
        """Ensure outcome constraints are valid.

        Either one or two outcome constraints can reference one metric.
        If there are two constraints, they must have different 'ops': one
            LEQ and one GEQ.
        If there are two constraints, the bound of the GEQ op must be less
            than the bound of the LEQ op.

        Args:
            outcome_constraints: Constraints to validate.
        """
        if not isinstance(objective, (MultiObjective, ScalarizedObjective)):
            raise TypeError(
                ("`MultiObjectiveOptimizationConfig` requires an objective "
                 "of type `MultiObjective` or `ScalarizedObjective`. "
                 "Use `OptimizationConfig` instead if using a "
                 "single-metric objective."))
        outcome_constraints = outcome_constraints or []
        objective_thresholds = objective_thresholds or []
        if isinstance(objective, MultiObjective):
            objectives_by_name = {
                obj.metric.name: obj
                for obj in objective.objectives
            }
            check_objective_thresholds_match_objectives(
                objectives_by_name=objectives_by_name,
                objective_thresholds=objective_thresholds,
            )

        unconstrainable_metrics = objective.get_unconstrainable_metrics()
        OptimizationConfig._validate_outcome_constraints(
            unconstrainable_metrics=unconstrainable_metrics,
            outcome_constraints=outcome_constraints,
        )
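A happy-path sketch for Example #5 under the same assumptions: each threshold references a metric of the MultiObjective and points in the constraining direction, so check_objective_thresholds_match_objectives (and the wrong-direction check from Examples #2 and #3) has nothing to reject.

from ax.core.metric import Metric
from ax.core.objective import MultiObjective
from ax.core.optimization_config import MultiObjectiveOptimizationConfig
from ax.core.outcome_constraint import ObjectiveThreshold
from ax.core.types import ComparisonOp

cost = Metric(name="cost", lower_is_better=True)
quality = Metric(name="quality", lower_is_better=False)

# Both thresholds name metrics that belong to the objective, so the matching
# check passes; a threshold on an unrelated metric is what it guards against.
config = MultiObjectiveOptimizationConfig(
    objective=MultiObjective(metrics=[cost, quality]),
    objective_thresholds=[
        ObjectiveThreshold(
            metric=cost, bound=10.0, op=ComparisonOp.LEQ, relative=False
        ),
        ObjectiveThreshold(
            metric=quality, bound=0.8, op=ComparisonOp.GEQ, relative=False
        ),
    ],
)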
Example #6
class ObjectiveTest(TestCase):
    def setUp(self):
        self.metrics = {
            "m1": Metric(name="m1"),
            "m2": Metric(name="m2", lower_is_better=True),
            "m3": Metric(name="m3", lower_is_better=False),
        }
        self.objective = Objective(metric=self.metrics["m1"], minimize=False)
        self.multi_objective = MultiObjective(metrics=[
            self.metrics["m1"], self.metrics["m2"], self.metrics["m3"]
        ])
        self.scalarized_objective = ScalarizedObjective(
            metrics=[self.metrics["m1"], self.metrics["m2"]])

    def testInit(self):
        with self.assertRaises(ValueError):
            ScalarizedObjective(
                metrics=[self.metrics["m1"], self.metrics["m2"]],
                weights=[1.0])
        warnings.resetwarnings()
        warnings.simplefilter("always", append=True)
        with warnings.catch_warnings(record=True) as ws:
            Objective(metric=self.metrics["m1"])
            self.assertTrue(
                any(issubclass(w.category, DeprecationWarning) for w in ws))
            self.assertTrue(
                any("Defaulting to `minimize=False`" in str(w.message)
                    for w in ws))
        with warnings.catch_warnings(record=True) as ws:
            Objective(Metric(name="m4", lower_is_better=True), minimize=False)
            self.assertTrue(
                any("Attempting to maximize" in str(w.message) for w in ws))
        with warnings.catch_warnings(record=True) as ws:
            Objective(Metric(name="m4", lower_is_better=False), minimize=True)
            self.assertTrue(
                any("Attempting to minimize" in str(w.message) for w in ws))
        self.assertEqual(self.objective.get_unconstrainable_metrics(),
                         [self.metrics["m1"]])

    def testMultiObjective(self):
        with self.assertRaises(NotImplementedError):
            return self.multi_objective.metric

        self.assertEqual(self.multi_objective.metrics,
                         list(self.metrics.values()))
        weights = [mw[1] for mw in self.multi_objective.metric_weights]
        self.assertEqual(weights, [1.0, -1.0, 1.0])
        self.assertEqual(self.multi_objective.clone(), self.multi_objective)
        self.assertEqual(
            str(self.multi_objective),
            "MultiObjective(metric_names=['m1', 'm2', 'm3'], minimize=False)",
        )
        self.assertEqual(self.multi_objective.get_unconstrainable_metrics(),
                         [])

    def testScalarizedObjective(self):
        with self.assertRaises(NotImplementedError):
            return self.scalarized_objective.metric

        self.assertEqual(self.scalarized_objective.metrics,
                         [self.metrics["m1"], self.metrics["m2"]])
        weights = [mw[1] for mw in self.scalarized_objective.metric_weights]
        self.assertEqual(weights, [1.0, 1.0])
        self.assertEqual(self.scalarized_objective.clone(),
                         self.scalarized_objective)
        self.assertEqual(
            str(self.scalarized_objective),
            ("ScalarizedObjective(metric_names=['m1', 'm2'], weights=[1.0, 1.0], "
             "minimize=False)"),
        )
        self.assertEqual(
            self.scalarized_objective.get_unconstrainable_metrics(), [])