def test_best_raw_objective_point_scalarized_multi(self):
    """Scalarized multi-metric objective: no best point until a trial runs."""
    experiment = get_branin_experiment()
    experiment.optimization_config = OptimizationConfig(
        ScalarizedObjective(
            metrics=[get_branin_metric(), get_branin_metric()],
            weights=[0.1, -0.9],
            minimize=False,
        )
    )
    # With no trials attached there is nothing to identify as "best".
    with self.assertRaisesRegex(ValueError, "Cannot identify best "):
        get_best_raw_objective_point(experiment)
    self.assertEqual(get_best_parameters(experiment, Models), None)
    # After running a single-arm trial, that arm's parameters are the best point.
    experiment.new_trial(
        generator_run=GeneratorRun(arms=[Arm(parameters={"x1": 5.0, "x2": 5.0})])
    ).run()
    self.assertEqual(
        get_best_raw_objective_point(experiment)[0], {"x1": 5.0, "x2": 5.0}
    )
def test_best_raw_objective_point_unsatisfiable_relative(self):
    """A relative constraint warns without a status quo, then fails with one."""
    experiment = get_branin_experiment()
    # Clone the config and add a relative constraint no observation can meet.
    config = experiment.optimization_config.clone()
    config.outcome_constraints.append(
        OutcomeConstraint(
            metric=get_branin_metric(),
            op=ComparisonOp.GEQ,
            bound=9999,
            relative=True,
        )
    )
    completed_trial = experiment.new_trial(
        generator_run=GeneratorRun(arms=[Arm(parameters={"x1": 5.0, "x2": 5.0})])
    ).run()
    completed_trial.mark_completed()
    # No status quo yet: the relative bound cannot be applied, so only a
    # warning is emitted.
    with self.assertLogs(
        logger="ax.service.utils.best_point", level="WARN"
    ) as captured:
        get_best_raw_objective_point(experiment, config)
    self.assertTrue(
        any("No status quo provided" in line for line in captured.output),
        msg=captured.output,
    )
    # With a status quo trial completed, the constraint takes effect and
    # rejects every observed point.
    experiment.status_quo = Arm(parameters={"x1": 0, "x2": 0}, name="status_quo")
    status_quo_trial = experiment.new_trial(
        generator_run=GeneratorRun(arms=[experiment.status_quo])
    ).run()
    status_quo_trial.mark_completed()
    with self.assertRaisesRegex(ValueError, "No points satisfied"):
        get_best_raw_objective_point(experiment, config)
def testClone(self):
    """Each metric variant compares equal to its own clone."""
    for original in (
        Metric(name="m1", lower_is_better=False),
        get_branin_metric(name="branin"),
        get_factorial_metric(name="factorial"),
    ):
        self.assertEqual(original, original.clone())
def test_transform_ref_point(self, _mock_fit, _mock_predict, _mock_unwrap):
    """Ref point transformation depends on having data for the objectives."""

    def build_bridge(experiment, config, point):
        # Shared construction path for the bridge under test; fetches the
        # experiment's current data at call time, as in each original setup.
        return MultiObjectiveTorchModelBridge(
            search_space=experiment.search_space,
            model=MultiObjectiveBotorchModel(),
            optimization_config=config,
            transforms=[t1, t2],
            experiment=experiment,
            data=experiment.fetch_data(),
            ref_point=point,
        )

    experiment = get_branin_experiment_with_multi_objective(
        has_optimization_config=True, with_batch=False
    )
    objective_metrics = experiment.optimization_config.objective.metrics
    ref_point = {objective_metrics[0].name: 0.0, objective_metrics[1].name: 0.0}
    # Without a batch (and hence without data) the ref point stays untransformed.
    bridge = build_bridge(experiment, experiment.optimization_config, ref_point)
    self.assertIsNone(bridge._transformed_ref_point)

    experiment = get_branin_experiment_with_multi_objective(
        has_optimization_config=True, with_batch=True
    )
    experiment.attach_data(
        get_branin_data_multi_objective(trial_indices=experiment.trials)
    )
    # With data attached, both objective entries get transformed.
    bridge = build_bridge(experiment, experiment.optimization_config, ref_point)
    self.assertIsNotNone(bridge._transformed_ref_point)
    self.assertEqual(2, len(bridge._transformed_ref_point))

    # Mixed config: one objective metric plus a constraint-only metric; the
    # ref point covers just the objective.
    mixed_config = OptimizationConfig(
        objective=MultiObjective(
            metrics=[get_branin_metric(name="branin_b")], minimize=False
        ),
        outcome_constraints=[
            OutcomeConstraint(
                metric=Metric(name="branin_a"), op=ComparisonOp.LEQ, bound=1
            )
        ],
    )
    bridge = build_bridge(experiment, mixed_config, {"branin_b": 0.0})
    self.assertEqual({"branin_a", "branin_b"}, bridge._metric_names)
    self.assertEqual(["branin_b"], bridge._objective_metric_names)
    self.assertIsNotNone(bridge._transformed_ref_point)
    self.assertEqual(1, len(bridge._transformed_ref_point))
def testMetricDecodeFailure(self):
    """Decoding fails on unknown type, unknown intent, and empty properties."""
    sqa_metric = self.encoder.metric_to_sqa(get_branin_metric())
    # Unrecognized metric type string.
    sqa_metric.metric_type = "foobar"
    with self.assertRaises(SQADecodeError):
        self.decoder.metric_from_sqa(sqa_metric)
    # Valid type, but unrecognized intent string.
    sqa_metric.metric_type = METRIC_REGISTRY[BraninMetric]
    sqa_metric.intent = "foobar"
    with self.assertRaises(SQADecodeError):
        self.decoder.metric_from_sqa(sqa_metric)
    # Valid intent, but properties stripped of required entries.
    sqa_metric.intent = MetricIntent.TRACKING
    sqa_metric.properties = {}
    with self.assertRaises(ValueError):
        self.decoder.metric_from_sqa(sqa_metric)
def test_best_raw_objective_point_unsatisfiable(self):
    """An absolute constraint that excludes every observation raises."""
    experiment = get_branin_experiment()
    completed_trial = experiment.new_trial(
        generator_run=GeneratorRun(arms=[Arm(parameters={"x1": 5.0, "x2": 5.0})])
    ).run()
    completed_trial.mark_completed()
    # Add a bound the sole observed point cannot meet.
    config = experiment.optimization_config.clone()
    config.outcome_constraints.append(
        OutcomeConstraint(
            metric=get_branin_metric(),
            op=ComparisonOp.LEQ,
            bound=0,
            relative=False,
        )
    )
    with self.assertRaisesRegex(ValueError, "No points satisfied"):
        get_best_raw_objective_point(experiment, config)
def testMetricEncodeFailure(self):
    """Encoding surfaces AttributeError for a metric stripped of a field."""
    broken_metric = get_branin_metric()
    # Remove an instance attribute the encoder reads during serialization.
    broken_metric.__dict__.pop("param_names")
    with self.assertRaises(AttributeError):
        self.encoder.metric_to_sqa(broken_metric)