Example #1
 def setUp(self):
     self.metrics = {
         "m1": Metric(name="m1", lower_is_better=True),
         "m2": Metric(name="m2", lower_is_better=False),
         "m3": Metric(name="m3", lower_is_better=False),
     }
     self.objective = Objective(metric=self.metrics["m1"], minimize=False)
     self.multi_objective = MultiObjective(
         metrics=[self.metrics["m1"], self.metrics["m2"]])
     self.multi_objective_just_m2 = MultiObjective(
         metrics=[self.metrics["m2"]])
     self.outcome_constraint = OutcomeConstraint(metric=self.metrics["m2"],
                                                 op=ComparisonOp.GEQ,
                                                 bound=-0.25)
     self.additional_outcome_constraint = OutcomeConstraint(
         metric=self.metrics["m2"], op=ComparisonOp.LEQ, bound=0.25)
     self.outcome_constraints = [
         self.outcome_constraint,
         self.additional_outcome_constraint,
     ]
     self.objective_thresholds = [
         ObjectiveThreshold(metric=self.metrics["m2"],
                            bound=-1.0,
                            relative=False)
     ]
     self.m1_constraint = OutcomeConstraint(metric=self.metrics["m1"],
                                            op=ComparisonOp.LEQ,
                                            bound=0.1,
                                            relative=True)
     self.m3_constraint = OutcomeConstraint(metric=self.metrics["m3"],
                                            op=ComparisonOp.GEQ,
                                            bound=0.1,
                                            relative=True)
Example #2
 def setUp(self):
     self.metrics = {
         "m1": Metric(name="m1"),
         "m2": Metric(name="m2", lower_is_better=True),
         "m3": Metric(name="m3", lower_is_better=False),
     }
     self.objective = Objective(metric=self.metrics["m1"], minimize=False)
     self.multi_objective = MultiObjective(metrics=[
         self.metrics["m1"], self.metrics["m2"], self.metrics["m3"]
     ])
     self.scalarized_objective = ScalarizedObjective(
         metrics=[self.metrics["m1"], self.metrics["m2"]])
Example #3
    def test_extract_objective_thresholds(self):
        outcomes = ["m1", "m2", "m3", "m4"]
        objective = MultiObjective(metrics=[Metric(name) for name in outcomes[:3]])
        objective_thresholds = [
            ObjectiveThreshold(
                metric=Metric(name), op=ComparisonOp.LEQ, bound=float(i + 2)
            )
            for i, name in enumerate(outcomes[:3])
        ]

        # Returns None when no thresholds are provided
        self.assertIsNone(
            extract_objective_thresholds(
                objective_thresholds=[], objective=objective, outcomes=outcomes
            )
        )

        # Working case
        obj_t = extract_objective_thresholds(
            objective_thresholds=objective_thresholds,
            objective=objective,
            outcomes=outcomes,
        )
        self.assertTrue(np.array_equal(obj_t, np.array([2.0, 3.0, 4.0, 0.0])))

        # Fails if threshold not provided for all objective metrics
        with self.assertRaises(ValueError):
            extract_objective_thresholds(
                objective_thresholds=objective_thresholds[:2],
                objective=objective,
                outcomes=outcomes,
            )

        # Fails if number of thresholds doesn't equal number of objectives
        objective2 = Objective(Metric("m1"))
        with self.assertRaises(ValueError):
            extract_objective_thresholds(
                objective_thresholds=objective_thresholds,
                objective=objective2,
                outcomes=outcomes,
            )

        # Works with a single objective, single threshold
        obj_t = extract_objective_thresholds(
            objective_thresholds=objective_thresholds[:1],
            objective=objective2,
            outcomes=outcomes,
        )
        self.assertTrue(np.array_equal(obj_t, np.array([2.0, 0.0, 0.0, 0.0])))

        # Fails if relative
        objective_thresholds[2] = ObjectiveThreshold(
            metric=Metric("m3"), op=ComparisonOp.LEQ, bound=3, relative=True
        )
        with self.assertRaises(ValueError):
            extract_objective_thresholds(
                objective_thresholds=objective_thresholds,
                objective=objective,
                outcomes=outcomes,
            )
Example #4
def optimization_config_from_objectives(
    objectives: List[Objective],
    objective_thresholds: List[ObjectiveThreshold],
    outcome_constraints: List[OutcomeConstraint],
) -> OptimizationConfig:
    """Parse objectives and constraints to define optimization config.

    The resulting optimization config will be a regular single-objective
    config if `objectives` contains a single element, and a multi-objective
    config otherwise.

    NOTE: If multiple objectives are passed without `objective_thresholds`,
    a heuristic will be used to select the thresholds.
    """
    if len(objectives) == 1:
        if objective_thresholds:
            raise ValueError(
                "Single-objective optimizations must not specify objective thresholds."
            )
        return OptimizationConfig(
            objective=objectives[0],
            outcome_constraints=outcome_constraints,
        )

    if not objective_thresholds:
        logger.info(
            "Due to non-specification, we will use the heuristic for selecting "
            "objective thresholds.")

    return MultiObjectiveOptimizationConfig(
        objective=MultiObjective(objectives=objectives),
        outcome_constraints=outcome_constraints,
        objective_thresholds=objective_thresholds,
    )
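
A minimal usage sketch for the function above (assuming the Ax core types
`Metric` and `Objective` seen in the other examples; names are illustrative):

    # Single objective: objective_thresholds must stay empty.
    single = optimization_config_from_objectives(
        objectives=[Objective(metric=Metric(name="m1"), minimize=True)],
        objective_thresholds=[],
        outcome_constraints=[],
    )  # -> OptimizationConfig

    # Multiple objectives without thresholds: the heuristic path is taken.
    multi = optimization_config_from_objectives(
        objectives=[
            Objective(metric=Metric(name="m1"), minimize=True),
            Objective(metric=Metric(name="m2"), minimize=False),
        ],
        objective_thresholds=[],
        outcome_constraints=[],
    )  # -> MultiObjectiveOptimizationConfig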
Example #5
    def testMultiObjectiveBackwardsCompatibility(self):
        multi_objective = MultiObjective(metrics=[
            self.metrics["m1"], self.metrics["m2"], self.metrics["m3"]
        ])
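     # Legacy `metrics=` path: each objective's `minimize` is inferred from
     # the metric's `lower_is_better` flag (None defaults to False).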
        minimizes = [obj.minimize for obj in multi_objective.objectives]
        self.assertEqual(multi_objective.metrics, list(self.metrics.values()))
        self.assertEqual(minimizes, [False, True, False])

        multi_objective_min = MultiObjective(
            metrics=[
                self.metrics["m1"], self.metrics["m2"], self.metrics["m3"]
            ],
            minimize=True,
        )
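     # An explicit `minimize=True` negates each inferred direction.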
        minimizes = [obj.minimize for obj in multi_objective_min.objectives]
        self.assertEqual(minimizes, [True, False, True])
Example #6
 def setUp(self):
     self.metrics = {"m1": Metric(name="m1"), "m2": Metric(name="m2")}
     self.objective = Objective(metric=self.metrics["m1"], minimize=False)
     self.alt_objective = Objective(metric=self.metrics["m2"],
                                    minimize=False)
     self.multi_objective = MultiObjective(
         metrics=[self.metrics["m1"], self.metrics["m2"]])
     self.m2_objective = ScalarizedObjective(
         metrics=[self.metrics["m1"], self.metrics["m2"]])
     self.outcome_constraint = OutcomeConstraint(metric=self.metrics["m2"],
                                                 op=ComparisonOp.GEQ,
                                                 bound=-0.25)
     self.additional_outcome_constraint = OutcomeConstraint(
         metric=self.metrics["m2"], op=ComparisonOp.LEQ, bound=0.25)
     self.scalarized_outcome_constraint = ScalarizedOutcomeConstraint(
         metrics=[self.metrics["m1"], self.metrics["m2"]],
         weights=[0.5, 0.5],
         op=ComparisonOp.GEQ,
         bound=-0.25,
     )
     self.outcome_constraints = [
         self.outcome_constraint,
         self.additional_outcome_constraint,
         self.scalarized_outcome_constraint,
     ]
Example #7
 def testTransformOptimizationConfigMOO(self):
     m1 = Metric(name="m1", lower_is_better=False)
     m2 = Metric(name="m2", lower_is_better=True)
     mo = MultiObjective(
         objectives=[
             Objective(metric=m1, minimize=False),
             Objective(metric=m2, minimize=True),
         ],
     )
     objective_thresholds = [
         ObjectiveThreshold(metric=m1, bound=1.234, relative=False),
         ObjectiveThreshold(metric=m2, bound=3.456, relative=False),
     ]
     oc = MultiObjectiveOptimizationConfig(
         objective=mo,
         objective_thresholds=objective_thresholds,
     )
     tf = LogY(
         search_space=None,
         observation_features=None,
         observation_data=[self.obsd1, self.obsd2],
         config={"metrics": ["m1"]},
     )
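     # Only m1 appears in the transform config, so only its objective
     # threshold bound should be mapped to log-space.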
     oc_tf = tf.transform_optimization_config(deepcopy(oc), None, None)
     oc.objective_thresholds[0].bound = math.log(1.234)
     self.assertEqual(oc_tf, oc)
Example #8
def get_branin_multi_objective() -> Objective:
    return MultiObjective(
        objectives=[
            Objective(metric=get_branin_metric(name="branin_a")),
            Objective(metric=get_branin_metric(name="branin_b")),
        ],
    )
Example #9
def get_multi_objective() -> Objective:
    return MultiObjective(
        objectives=[
            Objective(metric=Metric(name="m1")),
            Objective(metric=Metric(name="m3", lower_is_better=True), minimize=True),
        ],
    )
Example #10
def get_branin_multi_objective() -> Objective:
    return MultiObjective(
        metrics=[
            get_branin_metric(name="branin_a"),
            get_branin_metric(name="branin_b"),
        ],
        minimize=False,
    )
Example #11
def get_branin_multi_objective(num_objectives: int = 2) -> Objective:
    _validate_num_objectives(num_objectives=num_objectives)
    objectives = [
        Objective(metric=get_branin_metric(name="branin_a")),
        Objective(metric=get_branin_metric(name="branin_b")),
    ]
    if num_objectives == 3:
        objectives.append(Objective(metric=get_branin_metric(name="branin_c")))
    return MultiObjective(objectives=objectives)
Example #12
 def test_feasible_hypervolume(self):
     ma = Metric(name="a", lower_is_better=False)
     mb = Metric(name="b", lower_is_better=True)
     mc = Metric(name="c", lower_is_better=False)
     optimization_config = MultiObjectiveOptimizationConfig(
         objective=MultiObjective(metrics=[ma, mb]),
         outcome_constraints=[
             OutcomeConstraint(
                 mc,
                 op=ComparisonOp.GEQ,
                 bound=0,
                 relative=False,
             )
         ],
         objective_thresholds=[
             ObjectiveThreshold(
                 ma,
                 bound=1.0,
             ),
             ObjectiveThreshold(
                 mb,
                 bound=1.0,
             ),
         ],
     )
     feas_hv = feasible_hypervolume(
         optimization_config,
         values={
             "a": np.array(
                 [
                     1.0,
                     3.0,
                     2.0,
                     2.0,
                 ]
             ),
             "b": np.array(
                 [
                     0.0,
                     1.0,
                     0.0,
                     0.0,
                 ]
             ),
             "c": np.array(
                 [
                     0.0,
                     -0.0,
                     1.0,
                     -2.0,
                 ]
             ),
         },
     )
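     # The first two arms sit exactly on an objective threshold (zero added
     # hypervolume); the third dominates the reference point; the fourth
     # violates the c >= 0 constraint and adds nothing.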
     self.assertEqual(list(feas_hv), [0.0, 0.0, 1.0, 1.0])
Example #13
 def test_MOO_with_more_outcomes_than_thresholds(self):
     experiment = get_branin_experiment_with_multi_objective(
         has_optimization_config=False)
     metric_c = Metric(name="c", lower_is_better=False)
     metric_a = Metric(name="a", lower_is_better=False)
     objective_thresholds = [
         ObjectiveThreshold(
             metric=metric_c,
             bound=2.0,
             relative=False,
         ),
         ObjectiveThreshold(
             metric=metric_a,
             bound=1.0,
             relative=False,
         ),
     ]
     experiment.optimization_config = MultiObjectiveOptimizationConfig(
         objective=MultiObjective(objectives=[
             Objective(metric=metric_a),
             Objective(metric=metric_c),
         ]),
         objective_thresholds=objective_thresholds,
     )
     experiment.add_tracking_metric(Metric(name="b", lower_is_better=False))
     sobol = get_sobol(search_space=experiment.search_space)
     sobol_run = sobol.gen(1)
     experiment.new_batch_trial().add_generator_run(
         sobol_run).run().mark_completed()
     data = Data(
         pd.DataFrame(
             data={
                 "arm_name": ["0_0", "0_0", "0_0"],
                 "metric_name": ["a", "b", "c"],
                 "mean": [1.0, 2.0, 3.0],
                 "trial_index": [0, 0, 0],
                 "sem": [0, 0, 0],
             }))
     test_names_to_fns = {
         "MOO_NEHVI": get_MOO_NEHVI,
         "MOO_EHVI": get_MOO_NEHVI,
         "MOO_PAREGO": get_MOO_PAREGO,
         "MOO_RS": get_MOO_RS,
     }
     for test_name, factory_fn in test_names_to_fns.items():
         with self.subTest(test_name):
             moo_model = factory_fn(
                 experiment=experiment,
                 data=data,
             )
             moo_gr = moo_model.gen(n=1)
             obj_t = moo_gr.gen_metadata["objective_thresholds"]
             self.assertEqual(obj_t[0], objective_thresholds[1])
             self.assertEqual(obj_t[1], objective_thresholds[0])
             self.assertEqual(len(obj_t), 2)
Example #14
    def test_transform_ref_point(self, _mock_fit, _mock_predict, _mock_unwrap):
        exp = get_branin_experiment_with_multi_objective(
            has_optimization_config=True, with_batch=False)
        metrics = exp.optimization_config.objective.metrics
        ref_point = {metrics[0].name: 0.0, metrics[1].name: 0.0}
        modelbridge = MultiObjectiveTorchModelBridge(
            search_space=exp.search_space,
            model=MultiObjectiveBotorchModel(),
            optimization_config=exp.optimization_config,
            transforms=[t1, t2],
            experiment=exp,
            data=exp.fetch_data(),
            ref_point=ref_point,
        )
        self.assertIsNone(modelbridge._transformed_ref_point)
        exp = get_branin_experiment_with_multi_objective(
            has_optimization_config=True, with_batch=True)
        exp.attach_data(
            get_branin_data_multi_objective(trial_indices=exp.trials))
        modelbridge = MultiObjectiveTorchModelBridge(
            search_space=exp.search_space,
            model=MultiObjectiveBotorchModel(),
            optimization_config=exp.optimization_config,
            transforms=[t1, t2],
            experiment=exp,
            data=exp.fetch_data(),
            ref_point=ref_point,
        )
        self.assertIsNotNone(modelbridge._transformed_ref_point)
        self.assertEqual(2, len(modelbridge._transformed_ref_point))

        mixed_objective_constraints_optimization_config = OptimizationConfig(
            objective=MultiObjective(
                metrics=[get_branin_metric(name="branin_b")], minimize=False),
            outcome_constraints=[
                OutcomeConstraint(metric=Metric(name="branin_a"),
                                  op=ComparisonOp.LEQ,
                                  bound=1)
            ],
        )
        modelbridge = MultiObjectiveTorchModelBridge(
            search_space=exp.search_space,
            model=MultiObjectiveBotorchModel(),
            optimization_config=mixed_objective_constraints_optimization_config,
            transforms=[t1, t2],
            experiment=exp,
            data=exp.fetch_data(),
            ref_point={"branin_b": 0.0},
        )
        self.assertEqual({"branin_a", "branin_b"}, modelbridge._metric_names)
        self.assertEqual(["branin_b"], modelbridge._objective_metric_names)
        self.assertIsNotNone(modelbridge._transformed_ref_point)
        self.assertEqual(1, len(modelbridge._transformed_ref_point))
Example #15
    def get_experiment(self):
        """ Creates the experiment defining the metrics and the configuration"""
        metric_list = [
            AccuracyMetric(self.epochs,
                           name="error",
                           pruning=self.pruning,
                           datasets=self.datasets,
                           classes=self.classes,
                           net=self.net,
                           quant_scheme=self.quant_scheme,
                           quant_params=self.quant_params,
                           collate_fn=self.collate_fn,
                           splitter=self.splitter,
                           models_path=self.models_path,
                           cuda=self.cuda,
                           trainer=self.trainer),
            WeightMetric(name="weight",
                         datasets=self.datasets,
                         classes=self.classes,
                         net=self.net,
                         collate_fn=self.collate_fn,
                         splitter=self.splitter,
                         trainer=self.trainer),
            FeatureMapMetric(name="ram",
                             datasets=self.datasets,
                             classes=self.classes,
                             net=self.net,
                             collate_fn=self.collate_fn,
                             splitter=self.splitter,
                             trainer=self.trainer),
            LatencyMetric(name="latency",
                          datasets=self.datasets,
                          classes=self.classes,
                          net=self.net,
                          flops_capacity=self.flops,
                          collate_fn=self.collate_fn,
                          splitter=self.splitter,
                          trainer=self.trainer),
        ]
        experiment = Experiment(name="experiment_building_blocks",
                                search_space=self.search_space)
        selected = itemgetter(*self.objectives)(metric_list)
        # itemgetter returns a bare item, not a tuple, for a single index.
        metrics = list(selected) if isinstance(selected, tuple) else [selected]
        if len(self.objectives) > 1:
            objective = MultiObjective(metrics=metrics, minimize=True)
        else:
            objective = Objective(metric=metrics[0], minimize=True)

        optimization_config = OptimizationConfig(objective=objective)
        experiment.optimization_config = optimization_config
        experiment.runner = MyRunner()
        return experiment
Example #16
class ObjectiveTest(TestCase):
    def setUp(self):
        self.metrics = {
            "m1": Metric(name="m1"),
            "m2": Metric(name="m2", lower_is_better=True),
            "m3": Metric(name="m3", lower_is_better=False),
        }
        self.objective = Objective(metric=self.metrics["m1"], minimize=False)
        self.multi_objective = MultiObjective(metrics=[
            self.metrics["m1"], self.metrics["m2"], self.metrics["m3"]
        ])
        self.scalarized_objective = ScalarizedObjective(
            metrics=[self.metrics["m1"], self.metrics["m2"]])

    def testBadInit(self):
        with self.assertRaises(ValueError):
            self.scalarized_objective_weighted = ScalarizedObjective(
                metrics=[self.metrics["m1"], self.metrics["m2"]],
                weights=[1.0])

    def testMultiObjective(self):
        with self.assertRaises(NotImplementedError):
            return self.multi_objective.metric

        self.assertEqual(self.multi_objective.metrics,
                         list(self.metrics.values()))
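        # m2 was created with lower_is_better=True, so its weight is negated.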
        weights = [mw[1] for mw in self.multi_objective.metric_weights]
        self.assertEqual(weights, [1.0, -1.0, 1.0])
        self.assertEqual(self.multi_objective.clone(), self.multi_objective)
        self.assertEqual(
            str(self.multi_objective),
            "MultiObjective(metric_names=['m1', 'm2', 'm3'], minimize=False)",
        )

    def testScalarizedObjective(self):
        with self.assertRaises(NotImplementedError):
            return self.scalarized_objective.metric

        self.assertEqual(self.scalarized_objective.metrics,
                         [self.metrics["m1"], self.metrics["m2"]])
        weights = [mw[1] for mw in self.scalarized_objective.metric_weights]
        self.assertEqual(weights, [1.0, 1.0])
        self.assertEqual(self.scalarized_objective.clone(),
                         self.scalarized_objective)
        self.assertEqual(
            str(self.scalarized_objective),
            ("ScalarizedObjective(metric_names=['m1', 'm2'], weights=[1.0, 1.0], "
             "minimize=False)"),
        )
Example #17
 def test_choose_generation_strategy(self):
     with self.subTest("GPEI"):
         sobol_gpei = choose_generation_strategy(
             search_space=get_branin_search_space()
         )
         self.assertEqual(sobol_gpei._steps[0].model.value, "Sobol")
         self.assertEqual(sobol_gpei._steps[0].num_trials, 5)
         self.assertEqual(sobol_gpei._steps[1].model.value, "GPEI")
     with self.subTest("MOO"):
         sobol_gpei = choose_generation_strategy(
             search_space=get_branin_search_space(),
             optimization_config=MultiObjectiveOptimizationConfig(
                 objective=MultiObjective(objectives=[])
             ),
         )
         self.assertEqual(sobol_gpei._steps[0].model.value, "Sobol")
         self.assertEqual(sobol_gpei._steps[0].num_trials, 5)
         self.assertEqual(sobol_gpei._steps[1].model.value, "MOO")
     with self.subTest("Sobol (we can try every option)"):
         sobol = choose_generation_strategy(
             search_space=get_factorial_search_space(), num_trials=1000
         )
         self.assertEqual(sobol._steps[0].model.value, "Sobol")
         self.assertEqual(len(sobol._steps), 1)
     with self.subTest("Sobol (because of too many categories)"):
         sobol_large = choose_generation_strategy(
             search_space=get_large_factorial_search_space()
         )
         self.assertEqual(sobol_large._steps[0].model.value, "Sobol")
         self.assertEqual(len(sobol_large._steps), 1)
     with self.subTest("GPEI-Batched"):
         sobol_gpei_batched = choose_generation_strategy(
             search_space=get_branin_search_space(), use_batch_trials=3
         )
         self.assertEqual(sobol_gpei_batched._steps[0].num_trials, 1)
     with self.subTest("BO_MIXED (purely categorical)"):
         bo_mixed = choose_generation_strategy(
             search_space=get_factorial_search_space()
         )
         self.assertEqual(bo_mixed._steps[0].model.value, "Sobol")
         self.assertEqual(bo_mixed._steps[0].num_trials, 5)
         self.assertEqual(bo_mixed._steps[1].model.value, "BO_MIXED")
     with self.subTest("BO_MIXED (mixed search space)"):
         bo_mixed_2 = choose_generation_strategy(
             search_space=get_branin_search_space(with_choice_parameter=True)
         )
         self.assertEqual(bo_mixed_2._steps[0].model.value, "Sobol")
         self.assertEqual(bo_mixed_2._steps[0].num_trials, 5)
         self.assertEqual(bo_mixed_2._steps[1].model.value, "BO_MIXED")
Example #18
    def testHasGoodOptConfigModelFit(self):
        # Construct diagnostics
        result = []
        for i, obs in enumerate(self.training_data):
            result.append(
                CVResult(observed=obs, predicted=self.observation_data[i]))
        diag = compute_diagnostics(result=result)
        assess_model_fit_result = assess_model_fit(
            diagnostics=diag,
            significance_level=0.05,
        )

        # Test single objective
        optimization_config = OptimizationConfig(objective=Objective(
            metric=Metric("a")))
        has_good_fit = has_good_opt_config_model_fit(
            optimization_config=optimization_config,
            assess_model_fit_result=assess_model_fit_result,
        )
        self.assertFalse(has_good_fit)

        # Test multi objective
        optimization_config = MultiObjectiveOptimizationConfig(
            objective=MultiObjective(
                metrics=[Metric("a"), Metric("b")]))
        has_good_fit = has_good_opt_config_model_fit(
            optimization_config=optimization_config,
            assess_model_fit_result=assess_model_fit_result,
        )
        self.assertFalse(has_good_fit)

        # Test constraints
        optimization_config = OptimizationConfig(
            objective=Objective(metric=Metric("a")),
            outcome_constraints=[
                OutcomeConstraint(metric=Metric("b"),
                                  op=ComparisonOp.GEQ,
                                  bound=0.1)
            ],
        )
        has_good_fit = has_good_opt_config_model_fit(
            optimization_config=optimization_config,
            assess_model_fit_result=assess_model_fit_result,
        )
        self.assertFalse(has_good_fit)
Example #19
def optimization_config_from_objectives(
    objectives: List[Metric],
    objective_thresholds: List[ObjectiveThreshold],
    outcome_constraints: List[OutcomeConstraint],
) -> OptimizationConfig:
    """Parse objectives and constraints to define optimization config.

    The resulting optimization config will be a regular single-objective
    config if `objectives` contains a single element, and a multi-objective
    config otherwise.

    NOTE: If passing in multiple objectives, `objective_thresholds` must be a
    non-empty list defining one threshold per objective metric.
    """
    if len(objectives) == 1:
        if objective_thresholds:
            raise ValueError(
                "Single-objective optimizations must not specify objective thresholds."
            )
        return OptimizationConfig(
            objective=Objective(
                metric=objectives[0],
            ),
            outcome_constraints=outcome_constraints,
        )
    else:
        objective_names = {m.name for m in objectives}
        threshold_names = {oc.metric.name for oc in objective_thresholds}
        if objective_names != threshold_names:
            diff = objective_names.symmetric_difference(threshold_names)
            raise ValueError(
                "Multi-objective optimization requires one objective threshold "
                f"per objective metric; unmatched names are {diff}"
            )

        return MultiObjectiveOptimizationConfig(
            objective=MultiObjective(metrics=objectives),
            outcome_constraints=outcome_constraints,
            objective_thresholds=objective_thresholds,
        )
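
This variant takes raw `Metric`s and requires exactly one threshold per
objective metric. A hedged sketch of a matching call (imports as in the
other examples; names are illustrative):

    metrics = [Metric(name="m1"), Metric(name="m2")]
    thresholds = [
        ObjectiveThreshold(metric=Metric(name="m1"), bound=1.0, relative=False),
        ObjectiveThreshold(metric=Metric(name="m2"), bound=2.0, relative=False),
    ]
    config = optimization_config_from_objectives(
        objectives=metrics,
        objective_thresholds=thresholds,
        outcome_constraints=[],
    )
    # Dropping either threshold would raise ValueError naming the mismatch.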
Example #20
    def __init__(self, serialized_filepath=None):
        # Give ourselves the ability to resume this experiment later.
        self.serialized_filepath = serialized_filepath
        if serialized_filepath is not None and os.path.exists(
                serialized_filepath):
            with open(serialized_filepath, "r") as f:
                serialized = json.load(f)
            self.initialize_from_json_snapshot(serialized)
        else:
            # Create a CoreAxClient.
            search_space = SearchSpace(parameters=[
                RangeParameter(
                    "x", ParameterType.FLOAT, lower=12.2, upper=602.2),
            ])

            optimization_config = OptimizationConfig(
                objective=MultiObjective(
                    metrics=[
                        # Currently MultiObjective doesn't work with
                        # lower_is_better=True.
                        # https://github.com/facebook/Ax/issues/289
                        Metric(name="neg_distance17", lower_is_better=False),
                        Metric(name="neg_distance33", lower_is_better=False)
                    ],
                    minimize=False,
                ), )

            generation_strategy = choose_generation_strategy(
                search_space,
                enforce_sequential_optimization=False,
                no_max_parallelism=True,
                num_trials=NUM_TRIALS,
                num_initialization_trials=NUM_RANDOM)

            super().__init__(experiment=Experiment(
                search_space=search_space,
                optimization_config=optimization_config),
                             generation_strategy=generation_strategy,
                             verbose=True)
Example #21
    def testObservedParetoFrontiers(self):
        experiment = get_branin_experiment(
            with_batch=True, has_optimization_config=False, with_status_quo=True
        )

        # Optimization config is not optional
        with self.assertRaises(ValueError):
            get_observed_pareto_frontiers(experiment=experiment, data=Data())

        metrics = [
            BraninMetric(name="m1", param_names=["x1", "x2"], lower_is_better=True),
            NegativeBraninMetric(
                name="m2", param_names=["x1", "x2"], lower_is_better=True
            ),
            BraninMetric(name="m3", param_names=["x1", "x2"], lower_is_better=True),
        ]
        bounds = [0, -100, 0]
        objective_thresholds = [
            ObjectiveThreshold(
                metric=metric,
                bound=bounds[i],
                relative=True,
                op=ComparisonOp.LEQ,
            )
            for i, metric in enumerate(metrics)
        ]
        objective = MultiObjective(metrics=metrics, minimize=True)
        optimization_config = MultiObjectiveOptimizationConfig(
            objective=objective,
            objective_thresholds=objective_thresholds,
        )
        experiment.optimization_config = optimization_config
        experiment.trials[0].run()

        # For the check below, compute which arms are better than SQ
        df = experiment.fetch_data().df
        df["sem"] = np.nan
        data = Data(df)
        sq_val = df[(df["arm_name"] == "status_quo") & (df["metric_name"] == "m1")][
            "mean"
        ].values[0]
        pareto_arms = sorted(
            df[(df["mean"] <= sq_val) & (df["metric_name"] == "m1")]["arm_name"]
            .unique()
            .tolist()
        )

        pfrs = get_observed_pareto_frontiers(experiment=experiment, data=data)
        # We have all pairs of metrics
        self.assertEqual(len(pfrs), 3)
        true_pairs = [("m1", "m2"), ("m1", "m3"), ("m2", "m3")]
        for i, pfr in enumerate(pfrs):
            self.assertEqual(pfr.primary_metric, true_pairs[i][0])
            self.assertEqual(pfr.secondary_metric, true_pairs[i][1])
            self.assertEqual(pfr.absolute_metrics, [])
            self.assertEqual(list(pfr.means.keys()), ["m1", "m2", "m3"])
            self.assertEqual(len(pfr.means["m1"]), len(pareto_arms))
            self.assertTrue(np.isnan(pfr.sems["m1"]).all())
            self.assertEqual(len(pfr.arm_names), len(pareto_arms))
            arm_idx = np.argsort(pfr.arm_names)
            for j, idx in enumerate(arm_idx):
                name = pareto_arms[j]
                self.assertEqual(pfr.arm_names[idx], name)
                self.assertEqual(
                    pfr.param_dicts[idx], experiment.arms_by_name[name].parameters
                )
Example #22
def get_multi_objective() -> Objective:
    return MultiObjective(
        metrics=[Metric(name="m1"), Metric(name="m3", lower_is_better=True)],
        minimize=False,
    )
Example #23
def get_branin_multi_objective() -> Objective:
    return MultiObjective(
        metrics=[get_branin_metric(), get_branin_metric()], minimize=False
    )
Example #24
def get_multi_objective() -> Objective:
    return MultiObjective(metrics=[Metric(name="m1"),
                                   Metric(name="m2")],
                          minimize=False)
Example #25
    def metric_from_sqa(
            self, metric_sqa: SQAMetric
    ) -> Union[Metric, Objective, OutcomeConstraint]:
        """Convert SQLAlchemy Metric to Ax Metric, Objective, or OutcomeConstraint."""

        metric = self.metric_from_sqa_util(metric_sqa)

        if metric_sqa.intent == MetricIntent.TRACKING:
            return metric
        elif metric_sqa.intent == MetricIntent.OBJECTIVE:
            if metric_sqa.minimize is None:
                raise SQADecodeError(  # pragma: no cover
                    "Cannot decode SQAMetric to Objective because minimize is None."
                )
            if metric_sqa.scalarized_objective_weight is not None:
                raise SQADecodeError(  # pragma: no cover
                    "The metric corresponding to regular objective does not \
                    have weight attribute")
            return Objective(metric=metric, minimize=metric_sqa.minimize)
        elif metric_sqa.intent == MetricIntent.MULTI_OBJECTIVE:
            # metric_sqa is a parent whose children are the individual
            # metrics in a MultiObjective.
            if metric_sqa.minimize is None:
                raise SQADecodeError(  # pragma: no cover
                    "Cannot decode SQAMetric to MultiObjective \
                    because minimize is None.")
            metrics_sqa_children = metric_sqa.scalarized_objective_children_metrics
            if metrics_sqa_children is None:
                raise SQADecodeError(  # pragma: no cover
                    "Cannot decode SQAMetric to MultiObjective \
                    because the parent metric has no children metrics.")

            # Extracting metric and weight for each child
            metrics = [
                self.metric_from_sqa_util(child)
                for child in metrics_sqa_children
            ]

            return MultiObjective(
                metrics=list(metrics),
                # pyre-fixme[6]: Expected `bool` for 2nd param but got `Optional[bool]`.
                minimize=metric_sqa.minimize,
            )
        elif metric_sqa.intent == MetricIntent.SCALARIZED_OBJECTIVE:
            # metric_sqa is a parent whose children are the individual
            # metrics in a ScalarizedObjective.
            if metric_sqa.minimize is None:
                raise SQADecodeError(  # pragma: no cover
                    "Cannot decode SQAMetric to Scalarized Objective \
                    because minimize is None.")
            metrics_sqa_children = metric_sqa.scalarized_objective_children_metrics
            if metrics_sqa_children is None:
                raise SQADecodeError(  # pragma: no cover
                    "Cannot decode SQAMetric to Scalarized Objective \
                    because the parent metric has no children metrics.")

            # Extracting metric and weight for each child
            metrics, weights = zip(*[(
                self.metric_from_sqa_util(child),
                child.scalarized_objective_weight,
            ) for child in metrics_sqa_children])
            return ScalarizedObjective(
                metrics=list(metrics),
                weights=list(weights),
                # pyre-fixme[6]: Expected `bool` for 3rd param but got `Optional[bool]`.
                minimize=metric_sqa.minimize,
            )
        elif metric_sqa.intent == MetricIntent.OUTCOME_CONSTRAINT:
            if (metric_sqa.bound is None or metric_sqa.op is None
                    or metric_sqa.relative is None):
                raise SQADecodeError(  # pragma: no cover
                    "Cannot decode SQAMetric to OutcomeConstraint because "
                    "bound, op, or relative is None.")
            return OutcomeConstraint(
                metric=metric,
                # pyre-fixme[6]: Expected `float` for 2nd param but got
                #  `Optional[float]`.
                bound=metric_sqa.bound,
                op=metric_sqa.op,
                relative=metric_sqa.relative,
            )
        elif metric_sqa.intent == MetricIntent.SCALARIZED_OUTCOME_CONSTRAINT:
            if (metric_sqa.bound is None or metric_sqa.op is None
                    or metric_sqa.relative is None):
                raise SQADecodeError(  # pragma: no cover
                    "Cannot decode SQAMetric to Scalarized OutcomeConstraint because "
                    "bound, op, or relative is None.")
            metrics_sqa_children = (
                metric_sqa.scalarized_outcome_constraint_children_metrics)
            if metrics_sqa_children is None:
                raise SQADecodeError(  # pragma: no cover
                    "Cannot decode SQAMetric to Scalarized OutcomeConstraint \
                    because the parent metric has no children metrics.")

            # Extracting metric and weight for each child
            metrics, weights = zip(*[(
                self.metric_from_sqa_util(child),
                child.scalarized_outcome_constraint_weight,
            ) for child in metrics_sqa_children])
            return ScalarizedOutcomeConstraint(
                metrics=list(metrics),
                weights=list(weights),
                # pyre-fixme[6]: Expected `float` for 2nd param but got
                #  `Optional[float]`.
                bound=metric_sqa.bound,
                op=metric_sqa.op,
                relative=metric_sqa.relative,
            )

        elif metric_sqa.intent == MetricIntent.OBJECTIVE_THRESHOLD:
            if metric_sqa.bound is None or metric_sqa.relative is None:
                raise SQADecodeError(  # pragma: no cover
                    "Cannot decode SQAMetric to ObjectiveThreshold because "
                    "bound, op, or relative is None.")
            return ObjectiveThreshold(
                metric=metric,
                # pyre-fixme[6]: Expected `float` for 2nd param but got
                #  `Optional[float]`.
                bound=metric_sqa.bound,
                relative=metric_sqa.relative,
                op=metric_sqa.op,
            )
        else:
            raise SQADecodeError(
                f"Cannot decode SQAMetric because {metric_sqa.intent} "
                f"is an invalid intent.")
Example #26
    def testGen(self, mock_init, mock_best_point, mock_gen):
        # Test with constraints
        optimization_config = OptimizationConfig(
            objective=Objective(Metric("a"), minimize=True),
            outcome_constraints=[
                OutcomeConstraint(Metric("b"), ComparisonOp.GEQ, 2, False)
            ],
        )
        ma = NumpyModelBridge()
        ma.parameters = ["x", "y", "z"]
        ma.outcomes = ["a", "b"]
        ma.transforms = OrderedDict()
        observation_features, weights, best_obsf, _ = ma._gen(
            n=3,
            search_space=self.search_space,
            optimization_config=optimization_config,
            pending_observations=self.pending_observations,
            fixed_features=ObservationFeatures({"z": 3.0}),
            model_gen_options=self.model_gen_options,
        )
        gen_args = mock_gen.mock_calls[0][2]
        self.assertEqual(gen_args["n"], 3)
        self.assertEqual(gen_args["bounds"], [(0.0, 1.0), (1.0, 2.0),
                                              (0.0, 5.0)])
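        # "a" is minimized, so its weight is -1.0; "b" is only a constraint
        # metric, so its objective weight is 0.0.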
        self.assertTrue(
            np.array_equal(gen_args["objective_weights"], np.array([-1.0,
                                                                    0.0])))
        self.assertTrue(
            np.array_equal(gen_args["outcome_constraints"][0],
                           np.array([[0.0, -1.0]])))
        self.assertTrue(
            np.array_equal(gen_args["outcome_constraints"][1],
                           np.array([[-2]])))
        self.assertTrue(
            np.array_equal(
                gen_args["linear_constraints"][0],
                np.array([[1.0, -1, 0.0], [-1.0, 0.0, -1.0]]),
            ))
        self.assertTrue(
            np.array_equal(gen_args["linear_constraints"][1],
                           np.array([[0.0], [-3.5]])))
        self.assertEqual(gen_args["fixed_features"], {2: 3.0})
        self.assertTrue(
            np.array_equal(gen_args["pending_observations"][0], np.array([])))
        self.assertTrue(
            np.array_equal(gen_args["pending_observations"][1],
                           np.array([[0.6, 1.6, 3.0]])))
        self.assertEqual(gen_args["model_gen_options"], {"option": "yes"})
        self.assertEqual(observation_features[0].parameters, {
            "x": 1.0,
            "y": 2.0,
            "z": 3.0
        })
        self.assertEqual(observation_features[1].parameters, {
            "x": 3.0,
            "y": 4.0,
            "z": 3.0
        })
        self.assertTrue(np.array_equal(weights, np.array([1.0, 2.0])))

        # Test with a scalarized objective over multiple metrics.
        oc2 = OptimizationConfig(objective=ScalarizedObjective(
            metrics=[Metric(name="a"), Metric(name="b")], minimize=True))
        observation_features, weights, best_obsf, _ = ma._gen(
            n=3,
            search_space=self.search_space,
            optimization_config=oc2,
            pending_observations=self.pending_observations,
            fixed_features=ObservationFeatures({"z": 3.0}),
            model_gen_options=self.model_gen_options,
        )
        gen_args = mock_gen.mock_calls[1][2]
        self.assertEqual(gen_args["bounds"], [(0.0, 1.0), (1.0, 2.0),
                                              (0.0, 5.0)])
        self.assertIsNone(gen_args["outcome_constraints"])
        self.assertTrue(
            np.array_equal(gen_args["objective_weights"],
                           np.array([-1.0, -1.0])))

        # Test with MultiObjective (unweighted multiple objectives)
        oc3 = MultiObjectiveOptimizationConfig(objective=MultiObjective(
            metrics=[Metric(name="a"),
                     Metric(name="b", lower_is_better=True)],
            minimize=True,
        ))
        search_space = SearchSpace(self.parameters)  # Unconstrained
        observation_features, weights, best_obsf, _ = ma._gen(
            n=3,
            search_space=search_space,
            optimization_config=oc3,
            pending_observations=self.pending_observations,
            fixed_features=ObservationFeatures({"z": 3.0}),
            model_gen_options=self.model_gen_options,
        )
        gen_args = mock_gen.mock_calls[2][2]
        self.assertEqual(gen_args["bounds"], [(0.0, 1.0), (1.0, 2.0),
                                              (0.0, 5.0)])
        self.assertIsNone(gen_args["outcome_constraints"])
        self.assertTrue(
            np.array_equal(gen_args["objective_weights"], np.array([1.0,
                                                                    -1.0])))

        # Test with no constraints, no fixed feature, no pending observations
        search_space = SearchSpace(self.parameters[:2])
        optimization_config.outcome_constraints = []
        ma.parameters = ["x", "y"]
        ma._gen(3, search_space, {}, ObservationFeatures({}), None,
                optimization_config)
        gen_args = mock_gen.mock_calls[3][2]
        self.assertEqual(gen_args["bounds"], [(0.0, 1.0), (1.0, 2.0)])
        self.assertIsNone(gen_args["outcome_constraints"])
        self.assertIsNone(gen_args["linear_constraints"])
        self.assertIsNone(gen_args["fixed_features"])
        self.assertIsNone(gen_args["pending_observations"])

        # Test validation
        optimization_config = OptimizationConfig(
            objective=Objective(Metric("a"), minimize=False),
            outcome_constraints=[
                OutcomeConstraint(Metric("b"), ComparisonOp.GEQ, 2, False)
            ],
        )
        with self.assertRaises(ValueError):
            ma._gen(
                n=3,
                search_space=self.search_space,
                optimization_config=optimization_config,
                pending_observations={},
                fixed_features=ObservationFeatures({}),
            )
        optimization_config.objective.minimize = True
        optimization_config.outcome_constraints[0].relative = True
        with self.assertRaises(ValueError):
            ma._gen(
                n=3,
                search_space=self.search_space,
                optimization_config=optimization_config,
                pending_observations={},
                fixed_features=ObservationFeatures({}),
            )
Example #27
 def test_choose_generation_strategy(self):
     with self.subTest("GPEI"):
         sobol_gpei = choose_generation_strategy(
             search_space=get_branin_search_space())
         self.assertEqual(sobol_gpei._steps[0].model.value, "Sobol")
         self.assertEqual(sobol_gpei._steps[0].num_trials, 5)
         self.assertEqual(sobol_gpei._steps[1].model.value, "GPEI")
         self.assertIsNone(sobol_gpei._steps[1].model_kwargs)
         sobol_gpei = choose_generation_strategy(
             search_space=get_branin_search_space(), verbose=True)
         self.assertIsNone(sobol_gpei._steps[1].model_kwargs)
     with self.subTest("MOO"):
         optimization_config = MultiObjectiveOptimizationConfig(
             objective=MultiObjective(objectives=[]))
         sobol_gpei = choose_generation_strategy(
             search_space=get_branin_search_space(),
             optimization_config=optimization_config,
         )
         self.assertEqual(sobol_gpei._steps[0].model.value, "Sobol")
         self.assertEqual(sobol_gpei._steps[0].num_trials, 5)
         self.assertEqual(sobol_gpei._steps[1].model.value, "MOO")
         model_kwargs = sobol_gpei._steps[1].model_kwargs
         self.assertEqual(list(model_kwargs.keys()),
                          ["transforms", "transform_configs"])
         self.assertGreater(len(model_kwargs["transforms"]), 0)
         transform_config_dict = {
             "Winsorize": {
                 "optimization_config": optimization_config
             }
         }
         self.assertEqual(model_kwargs["transform_configs"],
                          transform_config_dict)
     with self.subTest("Sobol (we can try every option)"):
         sobol = choose_generation_strategy(
             search_space=get_factorial_search_space(), num_trials=1000)
         self.assertEqual(sobol._steps[0].model.value, "Sobol")
         self.assertEqual(len(sobol._steps), 1)
     with self.subTest("Sobol (because of too many categories)"):
         ss = get_large_factorial_search_space()
          sobol_large = choose_generation_strategy(
              search_space=ss, verbose=True)
         self.assertEqual(sobol_large._steps[0].model.value, "Sobol")
         self.assertEqual(len(sobol_large._steps), 1)
     with self.subTest("GPEI-Batched"):
         sobol_gpei_batched = choose_generation_strategy(
             search_space=get_branin_search_space(), use_batch_trials=3)
         self.assertEqual(sobol_gpei_batched._steps[0].num_trials, 1)
     with self.subTest("BO_MIXED (purely categorical)"):
         bo_mixed = choose_generation_strategy(
             search_space=get_factorial_search_space())
         self.assertEqual(bo_mixed._steps[0].model.value, "Sobol")
         self.assertEqual(bo_mixed._steps[0].num_trials, 6)
         self.assertEqual(bo_mixed._steps[1].model.value, "BO_MIXED")
         self.assertIsNone(bo_mixed._steps[1].model_kwargs)
     with self.subTest("BO_MIXED (mixed search space)"):
         ss = get_branin_search_space(with_choice_parameter=True)
         ss.parameters["x2"]._is_ordered = False
         bo_mixed_2 = choose_generation_strategy(search_space=ss)
         self.assertEqual(bo_mixed_2._steps[0].model.value, "Sobol")
         self.assertEqual(bo_mixed_2._steps[0].num_trials, 5)
         self.assertEqual(bo_mixed_2._steps[1].model.value, "BO_MIXED")
         self.assertIsNone(bo_mixed_2._steps[1].model_kwargs)
     with self.subTest("BO_MIXED (mixed multi-objective optimization)"):
         search_space = get_branin_search_space(with_choice_parameter=True)
         search_space.parameters["x2"]._is_ordered = False
         optimization_config = MultiObjectiveOptimizationConfig(
             objective=MultiObjective(objectives=[]))
         moo_mixed = choose_generation_strategy(
             search_space=search_space,
             optimization_config=optimization_config)
         self.assertEqual(moo_mixed._steps[0].model.value, "Sobol")
         self.assertEqual(moo_mixed._steps[0].num_trials, 5)
         self.assertEqual(moo_mixed._steps[1].model.value, "BO_MIXED")
         model_kwargs = moo_mixed._steps[1].model_kwargs
         self.assertEqual(list(model_kwargs.keys()),
                          ["transforms", "transform_configs"])
         self.assertGreater(len(model_kwargs["transforms"]), 0)
         transform_config_dict = {
             "Winsorize": {
                 "optimization_config": optimization_config
             }
         }
         self.assertEqual(model_kwargs["transform_configs"],
                          transform_config_dict)
     with self.subTest("SAASBO"):
         sobol_fullybayesian = choose_generation_strategy(
             search_space=get_branin_search_space(),
             use_batch_trials=True,
             num_initialization_trials=3,
             use_saasbo=True,
         )
         self.assertEqual(sobol_fullybayesian._steps[0].model.value,
                          "Sobol")
         self.assertEqual(sobol_fullybayesian._steps[0].num_trials, 3)
         self.assertEqual(sobol_fullybayesian._steps[1].model.value,
                          "FullyBayesian")
         self.assertTrue(
             sobol_fullybayesian._steps[1].model_kwargs["verbose"])
     with self.subTest("SAASBO MOO"):
         sobol_fullybayesianmoo = choose_generation_strategy(
             search_space=get_branin_search_space(),
             use_batch_trials=True,
             num_initialization_trials=3,
             use_saasbo=True,
             optimization_config=MultiObjectiveOptimizationConfig(
                 objective=MultiObjective(objectives=[])),
         )
         self.assertEqual(sobol_fullybayesianmoo._steps[0].model.value,
                          "Sobol")
         self.assertEqual(sobol_fullybayesianmoo._steps[0].num_trials, 3)
         self.assertEqual(sobol_fullybayesianmoo._steps[1].model.value,
                          "FullyBayesianMOO")
         self.assertTrue(
             sobol_fullybayesianmoo._steps[1].model_kwargs["verbose"])
     with self.subTest("SAASBO"):
         sobol_fullybayesian_large = choose_generation_strategy(
             search_space=get_large_ordinal_search_space(
                 n_ordinal_choice_parameters=5,
                 n_continuous_range_parameters=10),
             use_saasbo=True,
         )
         self.assertEqual(sobol_fullybayesian_large._steps[0].model.value,
                          "Sobol")
         self.assertEqual(sobol_fullybayesian_large._steps[0].num_trials,
                          30)
         self.assertEqual(sobol_fullybayesian_large._steps[1].model.value,
                          "FullyBayesian")
         self.assertTrue(
             sobol_fullybayesian_large._steps[1].model_kwargs["verbose"])
     with self.subTest("num_initialization_trials"):
         ss = get_large_factorial_search_space()
         for _, param in ss.parameters.items():
             param._is_ordered = True
         # 2 * len(ss.parameters) init trials are performed if num_trials is large
         gs_12_init_trials = choose_generation_strategy(search_space=ss,
                                                        num_trials=100)
         self.assertEqual(gs_12_init_trials._steps[0].model.value, "Sobol")
         self.assertEqual(gs_12_init_trials._steps[0].num_trials, 12)
         self.assertEqual(gs_12_init_trials._steps[1].model.value, "GPEI")
         # at least 5 initialization trials are performed
         gs_5_init_trials = choose_generation_strategy(search_space=ss,
                                                       num_trials=0)
         self.assertEqual(gs_5_init_trials._steps[0].model.value, "Sobol")
         self.assertEqual(gs_5_init_trials._steps[0].num_trials, 5)
         self.assertEqual(gs_5_init_trials._steps[1].model.value, "GPEI")
         # avoid spending >20% of budget on initialization trials if there are
         # more than 5 initialization trials
         gs_6_init_trials = choose_generation_strategy(search_space=ss,
                                                       num_trials=30)
         self.assertEqual(gs_6_init_trials._steps[0].model.value, "Sobol")
         self.assertEqual(gs_6_init_trials._steps[0].num_trials, 6)
         self.assertEqual(gs_6_init_trials._steps[1].model.value, "GPEI")
Example #28
class ObjectiveTest(TestCase):
    def setUp(self):
        self.metrics = {
            "m1": Metric(name="m1"),
            "m2": Metric(name="m2", lower_is_better=True),
            "m3": Metric(name="m3", lower_is_better=False),
        }
        self.objective = Objective(metric=self.metrics["m1"], minimize=False)
        self.multi_objective = MultiObjective(metrics=[
            self.metrics["m1"], self.metrics["m2"], self.metrics["m3"]
        ])
        self.scalarized_objective = ScalarizedObjective(
            metrics=[self.metrics["m1"], self.metrics["m2"]])

    def testInit(self):
        with self.assertRaises(ValueError):
            ScalarizedObjective(
                metrics=[self.metrics["m1"], self.metrics["m2"]],
                weights=[1.0])
        warnings.resetwarnings()
        warnings.simplefilter("always", append=True)
        with warnings.catch_warnings(record=True) as ws:
            Objective(metric=self.metrics["m1"])
            self.assertTrue(
                any(issubclass(w.category, DeprecationWarning) for w in ws))
            self.assertTrue(
                any("Defaulting to `minimize=False`" in str(w.message)
                    for w in ws))
        with warnings.catch_warnings(record=True) as ws:
            Objective(Metric(name="m4", lower_is_better=True), minimize=False)
            self.assertTrue(
                any("Attempting to maximize" in str(w.message) for w in ws))
        with warnings.catch_warnings(record=True) as ws:
            Objective(Metric(name="m4", lower_is_better=False), minimize=True)
            self.assertTrue(
                any("Attempting to minimize" in str(w.message) for w in ws))
        self.assertEqual(self.objective.get_unconstrainable_metrics(),
                         [self.metrics["m1"]])

    def testMultiObjective(self):
        with self.assertRaises(NotImplementedError):
            return self.multi_objective.metric

        self.assertEqual(self.multi_objective.metrics,
                         list(self.metrics.values()))
        weights = [mw[1] for mw in self.multi_objective.metric_weights]
        self.assertEqual(weights, [1.0, -1.0, 1.0])
        self.assertEqual(self.multi_objective.clone(), self.multi_objective)
        self.assertEqual(
            str(self.multi_objective),
            "MultiObjective(metric_names=['m1', 'm2', 'm3'], minimize=False)",
        )
        self.assertEqual(self.multi_objective.get_unconstrainable_metrics(),
                         [])

    def testScalarizedObjective(self):
        with self.assertRaises(NotImplementedError):
            return self.scalarized_objective.metric

        self.assertEqual(self.scalarized_objective.metrics,
                         [self.metrics["m1"], self.metrics["m2"]])
        weights = [mw[1] for mw in self.scalarized_objective.metric_weights]
        self.assertEqual(weights, [1.0, 1.0])
        self.assertEqual(self.scalarized_objective.clone(),
                         self.scalarized_objective)
        self.assertEqual(
            str(self.scalarized_objective),
            ("ScalarizedObjective(metric_names=['m1', 'm2'], weights=[1.0, 1.0], "
             "minimize=False)"),
        )
        self.assertEqual(
            self.scalarized_objective.get_unconstrainable_metrics(), [])
Example #29
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import warnings

from ax.core.metric import Metric
from ax.core.objective import MultiObjective, Objective, ScalarizedObjective
from ax.utils.common.testutils import TestCase

MULTI_OBJECTIVE_REPR = """
MultiObjective(objectives=
[Objective(metric_name="m1", minimize=False),
Objective(metric_name="m2", minimize=True),
Objective(metric_name="m3", minimize=False)])
"""


class ObjectiveTest(TestCase):
    def setUp(self):
        self.metrics = {
            "m1": Metric(name="m1"),
            "m2": Metric(name="m2", lower_is_better=True),
            "m3": Metric(name="m3", lower_is_better=False),
        }
        self.objectives = {
            "o1": Objective(metric=self.metrics["m1"]),
            "o2": Objective(metric=self.metrics["m2"], minimize=True),
            "o3": Objective(metric=self.metrics["m3"], minimize=False),
Example #30
    def metric_from_sqa(
            self, metric_sqa: SQAMetric
    ) -> Union[Metric, Objective, OutcomeConstraint]:
        """Convert SQLAlchemy Metric to Ax Metric, Objective, or OutcomeConstraint."""

        metric = self.metric_from_sqa_util(metric_sqa)

        if metric_sqa.intent == MetricIntent.TRACKING:
            return metric
        elif metric_sqa.intent == MetricIntent.OBJECTIVE:
            if metric_sqa.minimize is None:
                raise SQADecodeError(  # pragma: no cover
                    "Cannot decode SQAMetric to Objective because minimize is None."
                )
            if metric_sqa.scalarized_objective_weight is not None:
                raise SQADecodeError(  # pragma: no cover
                    "The metric corresponding to regular objective does not \
                    have weight attribute")
            return Objective(metric=metric, minimize=metric_sqa.minimize)
        elif metric_sqa.intent == MetricIntent.MULTI_OBJECTIVE:
            # metric_sqa is a parent whose children are the individual
            # metrics in a MultiObjective.
            try:
                metrics_sqa_children = metric_sqa.scalarized_objective_children_metrics
            except DetachedInstanceError:
                metrics_sqa_children = _get_scalarized_objective_children_metrics(
                    metric_id=metric_sqa.id, decoder=self)

            if metrics_sqa_children is None:
                raise SQADecodeError(  # pragma: no cover
                    "Cannot decode SQAMetric to MultiObjective \
                    because the parent metric has no children metrics.")

            # Extracting metric and weight for each child
            objectives = [
                Objective(
                    metric=self.metric_from_sqa_util(child),
                    minimize=child.minimize,
                ) for child in metrics_sqa_children
            ]

            multi_objective = MultiObjective(objectives=objectives)
            multi_objective.db_id = metric_sqa.id
            return multi_objective
        elif metric_sqa.intent == MetricIntent.SCALARIZED_OBJECTIVE:
            # metric_sqa is a parent whose children are the individual
            # metrics in a ScalarizedObjective.
            if metric_sqa.minimize is None:
                raise SQADecodeError(  # pragma: no cover
                    "Cannot decode SQAMetric to Scalarized Objective \
                    because minimize is None.")

            try:
                metrics_sqa_children = metric_sqa.scalarized_objective_children_metrics
            except DetachedInstanceError:
                metrics_sqa_children = _get_scalarized_objective_children_metrics(
                    metric_id=metric_sqa.id, decoder=self)

            if metrics_sqa_children is None:
                raise SQADecodeError(  # pragma: no cover
                    "Cannot decode SQAMetric to Scalarized Objective \
                    because the parent metric has no children metrics.")

            # Extracting metric and weight for each child
            metrics, weights = zip(*[(
                self.metric_from_sqa_util(child),
                child.scalarized_objective_weight,
            ) for child in metrics_sqa_children])
            scalarized_objective = ScalarizedObjective(
                metrics=list(metrics),
                weights=list(weights),
                minimize=not_none(metric_sqa.minimize),
            )
            scalarized_objective.db_id = metric_sqa.id
            return scalarized_objective
        elif metric_sqa.intent == MetricIntent.OUTCOME_CONSTRAINT:
            if (metric_sqa.bound is None or metric_sqa.op is None
                    or metric_sqa.relative is None):
                raise SQADecodeError(  # pragma: no cover
                    "Cannot decode SQAMetric to OutcomeConstraint because "
                    "bound, op, or relative is None.")
            return OutcomeConstraint(
                metric=metric,
                bound=metric_sqa.bound,
                op=metric_sqa.op,
                relative=metric_sqa.relative,
            )
        elif metric_sqa.intent == MetricIntent.SCALARIZED_OUTCOME_CONSTRAINT:
            if (metric_sqa.bound is None or metric_sqa.op is None
                    or metric_sqa.relative is None):
                raise SQADecodeError(  # pragma: no cover
                    "Cannot decode SQAMetric to Scalarized OutcomeConstraint because "
                    "bound, op, or relative is None.")

            try:
                metrics_sqa_children = (
                    metric_sqa.scalarized_outcome_constraint_children_metrics)
            except DetachedInstanceError:
                metrics_sqa_children = (
                    _get_scalarized_outcome_constraint_children_metrics(
                        metric_id=metric_sqa.id, decoder=self))

            if metrics_sqa_children is None:
                raise SQADecodeError(  # pragma: no cover
                    "Cannot decode SQAMetric to Scalarized OutcomeConstraint \
                    because the parent metric has no children metrics.")

            # Extracting metric and weight for each child
            metrics, weights = zip(*[(
                self.metric_from_sqa_util(child),
                child.scalarized_outcome_constraint_weight,
            ) for child in metrics_sqa_children])
            scalarized_outcome_constraint = ScalarizedOutcomeConstraint(
                metrics=list(metrics),
                weights=list(weights),
                bound=not_none(metric_sqa.bound),
                op=not_none(metric_sqa.op),
                relative=not_none(metric_sqa.relative),
            )
            scalarized_outcome_constraint.db_id = metric_sqa.id
            return scalarized_outcome_constraint
        elif metric_sqa.intent == MetricIntent.OBJECTIVE_THRESHOLD:
            if metric_sqa.bound is None or metric_sqa.relative is None:
                raise SQADecodeError(  # pragma: no cover
                    "Cannot decode SQAMetric to ObjectiveThreshold because "
                    "bound, op, or relative is None.")
            ot = ObjectiveThreshold(
                metric=metric,
                bound=metric_sqa.bound,
                relative=metric_sqa.relative,
                op=metric_sqa.op,
            )
            # ObjectiveThreshold constructor clones the passed-in metric, which means
            # the db id gets lost and so we need to reset it
            ot.metric._db_id = metric.db_id
            return ot
        else:
            raise SQADecodeError(
                f"Cannot decode SQAMetric because {metric_sqa.intent} "
                f"is an invalid intent.")