Example #1
    def _validate_optimization_config(
        objective: Objective,
        outcome_constraints: Optional[List[OutcomeConstraint]] = None,
        objective_thresholds: Optional[List[ObjectiveThreshold]] = None,
    ) -> None:
        """Ensure outcome constraints are valid.

        Either one or two outcome constraints can reference one metric.
        If there are two constraints, they must have different 'ops': one
            LEQ and one GEQ.
        If there are two constraints, the bound of the GEQ op must be less
            than the bound of the LEQ op.

        Args:
            outcome_constraints: Constraints to validate.
        """
        if not isinstance(objective, MultiObjective):
            raise TypeError(
                (
                    "`MultiObjectiveOptimizationConfig` requires an objective "
                    "of type `MultiObjective`. Use `OptimizationConfig` instead "
                    "if using a single-metric objective."
                )
            )
        outcome_constraints = outcome_constraints or []
        objective_thresholds = objective_thresholds or []

        # Collect objective metrics by name so thresholds can be checked against them.
        objective_metrics_by_name = {
            metric.name: metric for metric in objective.metrics
        }
        # Raise if a threshold bounds an objective metric from the wrong direction.
        for threshold in objective_thresholds:
            metric_name = threshold.metric.name
            if metric_name in objective_metrics_by_name:
                lower_is_better = threshold.metric.lower_is_better
                bounded_above = threshold.op == ComparisonOp.LEQ
                is_aligned = lower_is_better == bounded_above
                if not (is_aligned or lower_is_better is None):
                    raise ValueError(
                        make_wrong_direction_warning(
                            metric_name=metric_name,
                            bounded_above=bounded_above,
                            lower_is_better=lower_is_better,
                        )
                    )

        unconstrainable_metrics = objective.get_unconstrainable_metrics()
        OptimizationConfig._validate_outcome_constraints(
            unconstrainable_metrics=unconstrainable_metrics,
            outcome_constraints=outcome_constraints,
        )
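The docstring above encodes the pairing rule for constraints on a single metric: at most one GEQ and one LEQ, with the GEQ bound strictly below the LEQ bound. A minimal sketch of a valid pair, with import paths assumed for a recent Ax release:

from ax.core.metric import Metric
from ax.core.outcome_constraint import OutcomeConstraint
from ax.core.types import ComparisonOp

m = Metric(name="latency")
# Valid: the GEQ bound (0.0) sits below the LEQ bound (1.0).
valid_pair = [
    OutcomeConstraint(metric=m, op=ComparisonOp.GEQ, bound=0.0, relative=False),
    OutcomeConstraint(metric=m, op=ComparisonOp.LEQ, bound=1.0, relative=False),
]
# Swapping the two bounds, or adding a third constraint on the same metric,
# is exactly what the validator above rejects.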
Example #2
 def testInit(self):
     with self.assertRaises(ValueError):
         ScalarizedObjective(
             metrics=[self.metrics["m1"], self.metrics["m2"]],
             weights=[1.0])
     warnings.resetwarnings()
     warnings.simplefilter("always", append=True)
     with warnings.catch_warnings(record=True) as ws:
         Objective(metric=self.metrics["m1"])
         self.assertTrue(
             any(issubclass(w.category, DeprecationWarning) for w in ws))
         self.assertTrue(
             any("Defaulting to `minimize=False`" in str(w.message)
                 for w in ws))
     with warnings.catch_warnings(record=True) as ws:
         Objective(Metric(name="m4", lower_is_better=True), minimize=False)
         self.assertTrue(
             any("Attempting to maximize" in str(w.message) for w in ws))
     with warnings.catch_warnings(record=True) as ws:
         Objective(Metric(name="m4", lower_is_better=False), minimize=True)
         self.assertTrue(
             any("Attempting to minimize" in str(w.message) for w in ws))
Example #3
    def get_experiment(self):
        """Create the experiment, defining its metrics and optimization configuration."""
        metric_list = [
            AccuracyMetric(self.epochs,
                           name="error",
                           pruning=self.pruning,
                           datasets=self.datasets,
                           classes=self.classes,
                           net=self.net,
                           quant_scheme=self.quant_scheme,
                           quant_params=self.quant_params,
                           collate_fn=self.collate_fn,
                           splitter=self.splitter,
                           models_path=self.models_path,
                           cuda=self.cuda,
                           trainer=self.trainer),
            WeightMetric(name="weight",
                         datasets=self.datasets,
                         classes=self.classes,
                         net=self.net,
                         collate_fn=self.collate_fn,
                         splitter=self.splitter,
                         trainer=self.trainer),
            FeatureMapMetric(name="ram",
                             datasets=self.datasets,
                             classes=self.classes,
                             net=self.net,
                             collate_fn=self.collate_fn,
                             splitter=self.splitter,
                             trainer=self.trainer),
            LatencyMetric(name="latency",
                          datasets=self.datasets,
                          classes=self.classes,
                          net=self.net,
                          flops_capacity=self.flops,
                          collate_fn=self.collate_fn,
                          splitter=self.splitter,
                          trainer=self.trainer),
        ]
        experiment = Experiment(name="experiment_building_blocks",
                                search_space=self.search_space)
        # Select the requested metrics by index; a comprehension also handles
        # the single-objective case, where itemgetter would return a bare Metric.
        metrics = [metric_list[i] for i in self.objectives]
        if len(self.objectives) > 1:
            objective = MultiObjective(metrics=metrics, minimize=True)
        else:
            objective = Objective(metric=metrics[0], minimize=True)

        optimization_config = OptimizationConfig(objective=objective)
        experiment.optimization_config = optimization_config
        experiment.runner = MyRunner()
        return experiment
Example #4
 def setUp(self):
     self.metrics = {
         "m1": Metric(name="m1", lower_is_better=True),
         "m2": Metric(name="m2", lower_is_better=False),
         "m3": Metric(name="m3", lower_is_better=False),
     }
     self.objectives = {
         "o1": Objective(metric=self.metrics["m1"]),
         "o2": Objective(metric=self.metrics["m2"], minimize=False),
         "o3": Objective(metric=self.metrics["m3"], minimize=False),
     }
     self.objective = Objective(metric=self.metrics["m1"], minimize=False)
     self.multi_objective = MultiObjective(
         objectives=[self.objectives["o1"], self.objectives["o2"]])
     self.multi_objective_just_m2 = MultiObjective(
         objectives=[self.objectives["o2"]])
     self.outcome_constraint = OutcomeConstraint(metric=self.metrics["m2"],
                                                 op=ComparisonOp.GEQ,
                                                 bound=-0.25)
     self.additional_outcome_constraint = OutcomeConstraint(
         metric=self.metrics["m2"], op=ComparisonOp.LEQ, bound=0.25)
     self.outcome_constraints = [
         self.outcome_constraint,
         self.additional_outcome_constraint,
     ]
     self.objective_thresholds = [
         ObjectiveThreshold(metric=self.metrics["m2"],
                            bound=-1.0,
                            relative=False)
     ]
     self.m1_constraint = OutcomeConstraint(metric=self.metrics["m1"],
                                            op=ComparisonOp.LEQ,
                                            bound=0.1,
                                            relative=True)
     self.m3_constraint = OutcomeConstraint(metric=self.metrics["m3"],
                                            op=ComparisonOp.GEQ,
                                            bound=0.1,
                                            relative=True)
Example #5
def make_objectives(objectives: Dict[str, str]) -> List[Objective]:
    try:
        return [
            Objective(
                metric=Metric(name=metric_name),
                minimize=(MetricObjective[min_or_max.upper()] ==
                          MetricObjective.MINIMIZE),
            ) for metric_name, min_or_max in objectives.items()
        ]
    except KeyError as k:
        raise ValueError(
            f"Objective values should specify '{MetricObjective.MINIMIZE.name.lower()}'"
            f" or '{MetricObjective.MAXIMIZE.name.lower()}', got {k} in"
            f" objectives({objectives})")
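A hedged usage sketch for `make_objectives`; it assumes `MetricObjective` is an enum with `MINIMIZE` and `MAXIMIZE` members, as the `min_or_max.upper()` lookup implies, and the metric names are illustrative:

objs = make_objectives({"error": "minimize", "accuracy": "maximize"})
for obj in objs:
    print(obj.metric.name, "minimize" if obj.minimize else "maximize")
# An unrecognized direction such as {"error": "smallest"} falls into the
# KeyError branch and is re-raised as a ValueError listing the accepted values.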
Example #6
File: decoder.py Project: HaysS/Ax
    def metric_from_sqa(
        self, metric_sqa: SQAMetric
    ) -> Union[Metric, Objective, OutcomeConstraint]:
        """Convert SQLAlchemy Metric to Ax Metric, Objective, or OutcomeConstraint."""
        metric_class = REVERSE_METRIC_REGISTRY.get(metric_sqa.metric_type)
        if metric_class is None:
            raise SQADecodeError(
                f"Cannot decode SQAMetric because {metric_sqa.metric_type} "
                f"is an invalid type."
            )

        args = self.get_init_args_from_properties(
            # pyre-fixme[6]: Expected `SQABase` for ...es` but got `SQAMetric`.
            object_sqa=metric_sqa,
            class_=metric_class,
        )
        metric = metric_class(**args)

        if metric_sqa.intent == MetricIntent.TRACKING:
            return metric
        elif metric_sqa.intent == MetricIntent.OBJECTIVE:
            if metric_sqa.minimize is None:
                raise SQADecodeError(  # pragma: no cover
                    "Cannot decode SQAMetric to Objective because minimize is None."
                )
            # pyre-fixme[6]: Expected `bool` for 2nd param but got `Optional[bool]`.
            return Objective(metric=metric, minimize=metric_sqa.minimize)
        elif metric_sqa.intent == MetricIntent.OUTCOME_CONSTRAINT:
            if (
                metric_sqa.bound is None
                or metric_sqa.op is None
                or metric_sqa.relative is None
            ):
                raise SQADecodeError(  # pragma: no cover
                    "Cannot decode SQAMetric to OutcomeConstraint because "
                    "bound, op, or relative is None."
                )
            return OutcomeConstraint(
                metric=metric,
                # pyre-fixme[6]: Expected `float` for 2nd param but got
                #  `Optional[float]`.
                bound=metric_sqa.bound,
                op=metric_sqa.op,
                relative=metric_sqa.relative,
            )
        else:
            raise SQADecodeError(
                f"Cannot decode SQAMetric because {metric_sqa.intent} "
                f"is an invalid intent."
            )
Example #7
 def setUp(self):
     self.metrics = {"m1": Metric(name="m1"), "m2": Metric(name="m2")}
     self.objective = Objective(metric=self.metrics["m1"], minimize=False)
     self.m2_objective = ScalarizedObjective(
         metrics=[self.metrics["m1"], self.metrics["m2"]])
     self.outcome_constraint = OutcomeConstraint(metric=self.metrics["m2"],
                                                 op=ComparisonOp.GEQ,
                                                 bound=-0.25)
     self.additional_outcome_constraint = OutcomeConstraint(
         metric=self.metrics["m2"], op=ComparisonOp.LEQ, bound=0.25)
     self.outcome_constraints = [
         self.outcome_constraint,
         self.additional_outcome_constraint,
     ]
Example #8
    def testTransformOptimizationConfig(self):
        m1 = Metric(name="m1")
        m2 = Metric(name="m2")
        m3 = Metric(name="m3")
        objective = Objective(metric=m3, minimize=False)
        cons = [
            OutcomeConstraint(
                metric=m1, op=ComparisonOp.GEQ, bound=2.0, relative=False
            ),
            OutcomeConstraint(
                metric=m2, op=ComparisonOp.LEQ, bound=3.5, relative=False
            ),
            ScalarizedOutcomeConstraint(
                metrics=[m1, m2],
                weights=[0.5, 0.5],
                op=ComparisonOp.LEQ,
                bound=3.5,
                relative=False,
            ),
        ]
        oc = OptimizationConfig(objective=objective, outcome_constraints=cons)
        oc = self.t.transform_optimization_config(oc, None, None)
        cons_t = [
            OutcomeConstraint(
                metric=m1, op=ComparisonOp.GEQ, bound=1.0, relative=False
            ),
            OutcomeConstraint(
                metric=m2,
                op=ComparisonOp.LEQ,
                bound=2.0 * sqrt(3),  # (3.5 - 1.5) / sqrt(1/3)
                relative=False,
            ),
            ScalarizedOutcomeConstraint(
                metrics=[m1, m2],
                weights=[0.5 * 1.0, 0.5 * sqrt(1 / 3)],
                op=ComparisonOp.LEQ,
                bound=2.25,  # 3.5 - (0.5 * 1.0 + 0.5 * 1.5)
                relative=False,
            ),
        ]
        self.assertTrue(oc.outcome_constraints == cons_t)
        self.assertTrue(oc.objective == objective)

        # Check fail with relative
        con = OutcomeConstraint(
            metric=m1, op=ComparisonOp.GEQ, bound=2.0, relative=True
        )
        oc = OptimizationConfig(objective=objective, outcome_constraints=[con])
        with self.assertRaises(ValueError):
            oc = self.t.transform_optimization_config(oc, None, None)
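The inline comments in the transformed constraints above carry the expected arithmetic (the mean of 1.5 and variance of 1/3 for m2 come from those comments, not from data shown here); a quick check of both identities:

from math import isclose, sqrt

# m2 bound: (3.5 - 1.5) / sqrt(1/3) equals 2.0 * sqrt(3)
assert isclose((3.5 - 1.5) / sqrt(1 / 3), 2.0 * sqrt(3))
# Scalarized bound: 3.5 - (0.5 * 1.0 + 0.5 * 1.5) equals 2.25
assert isclose(3.5 - (0.5 * 1.0 + 0.5 * 1.5), 2.25)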
Example #9
    def testTransformOptimizationConfig(self):
        m1 = Metric(name="m1")
        m2 = Metric(name="m2")
        m3 = Metric(name="m3")
        objective = Objective(metric=m3, minimize=False)
        cons = [
            OutcomeConstraint(metric=m1,
                              op=ComparisonOp.GEQ,
                              bound=2.0,
                              relative=False),
            OutcomeConstraint(metric=m2,
                              op=ComparisonOp.LEQ,
                              bound=3.5,
                              relative=False),
        ]
        oc = OptimizationConfig(objective=objective, outcome_constraints=cons)
        fixed_features = ObservationFeatures({"z": "a"})
        oc = self.t.transform_optimization_config(oc, None, fixed_features)
        cons_t = [
            OutcomeConstraint(metric=m1,
                              op=ComparisonOp.GEQ,
                              bound=1.0,
                              relative=False),
            OutcomeConstraint(metric=m2,
                              op=ComparisonOp.LEQ,
                              bound=-0.5,
                              relative=False),
        ]
        self.assertTrue(oc.outcome_constraints == cons_t)
        self.assertTrue(oc.objective == objective)

        # No constraints
        oc2 = OptimizationConfig(objective=objective)
        oc3 = deepcopy(oc2)
        oc3 = self.t.transform_optimization_config(oc3, None, fixed_features)
        self.assertTrue(oc2 == oc3)

        # Check fail with relative
        con = OutcomeConstraint(metric=m1,
                                op=ComparisonOp.GEQ,
                                bound=2.0,
                                relative=True)
        oc = OptimizationConfig(objective=objective, outcome_constraints=[con])
        with self.assertRaises(ValueError):
            oc = self.t.transform_optimization_config(oc, None, fixed_features)
        # Fail without strat param fixed
        fixed_features = ObservationFeatures({"x": 2.0})
        with self.assertRaises(ValueError):
            oc = self.t.transform_optimization_config(oc, None, fixed_features)
Example #10
 def testTransformOptimizationConfigMOO(self):
     m1 = Metric(name="m1", lower_is_better=False)
     m2 = Metric(name="m2", lower_is_better=True)
     mo = MultiObjective(objectives=[
         Objective(metric=m1, minimize=False),
         Objective(metric=m2, minimize=True),
      ])
     objective_thresholds = [
         ObjectiveThreshold(metric=m1, bound=1.234, relative=False),
         ObjectiveThreshold(metric=m2, bound=3.456, relative=False),
     ]
     oc = MultiObjectiveOptimizationConfig(
         objective=mo,
         objective_thresholds=objective_thresholds,
     )
     tf = LogY(
         search_space=None,
         observation_features=None,
         observation_data=[self.obsd1, self.obsd2],
         config={"metrics": ["m1"]},
     )
     oc_tf = tf.transform_optimization_config(deepcopy(oc), None, None)
     oc.objective_thresholds[0].bound = math.log(1.234)
     self.assertEqual(oc_tf, oc)
Example #11
    def _validate_optimization_config(
            objective: Objective,
            outcome_constraints: List[OutcomeConstraint]) -> None:
        """Ensure outcome constraints are valid.

        Either one or two outcome constraints can reference one metric.
        If there are two constraints, they must have different 'ops': one
            LEQ and one GEQ.
        If there are two constraints, the bound of the GEQ op must be less
            than the bound of the LEQ op.

        Args:
            outcome_constraints: Constraints to validate.
        """
        constraint_metrics = [
            constraint.metric.name for constraint in outcome_constraints
        ]
        unconstrainable_metrics = objective.get_unconstrainable_metrics()
        for metric in unconstrainable_metrics:
            if metric.name in constraint_metrics:
                raise ValueError("Cannot constrain on objective metric.")

        def get_metric_name(oc: OutcomeConstraint) -> str:
            return oc.metric.name

        sorted_constraints = sorted(outcome_constraints, key=get_metric_name)
        for metric_name, constraints_itr in groupby(sorted_constraints,
                                                    get_metric_name):
            constraints: List[OutcomeConstraint] = list(constraints_itr)
            constraints_len = len(constraints)
            if constraints_len == 2:
                if constraints[0].op == constraints[1].op:
                    raise ValueError(
                        f"Duplicate outcome constraints {metric_name}")
                lower_bound_idx = (
                    0 if constraints[0].op == ComparisonOp.GEQ else 1
                )
                upper_bound_idx = 1 - lower_bound_idx
                lower_bound = constraints[lower_bound_idx].bound
                upper_bound = constraints[upper_bound_idx].bound
                if lower_bound >= upper_bound:
                    raise ValueError(
                        f"Lower bound {lower_bound} is >= upper bound " +
                        f"{upper_bound} for {metric_name}")
            elif constraints_len > 2:
                raise ValueError(
                    f"Duplicate outcome constraints {metric_name}")
Example #12
 def testGenWithDefaults(self, _, mock_gen):
     exp = get_experiment()
     exp.optimization_config = get_optimization_config()
     ss = search_space_for_range_value()
     modelbridge = ModelBridge(ss, None, [], exp)
     modelbridge.gen(1)
     mock_gen.assert_called_with(
         modelbridge,
         n=1,
         search_space=ss,
         fixed_features=ObservationFeatures(parameters={}),
         model_gen_options=None,
         optimization_config=OptimizationConfig(
             objective=Objective(metric=Metric("test_metric"), minimize=False),
             outcome_constraints=[],
         ),
         pending_observations={},
     )
Example #13
def make_experiment(
    parameters: List[TParameterRepresentation],
    name: Optional[str] = None,
    objective_name: Optional[str] = None,
    minimize: bool = False,
    parameter_constraints: Optional[List[str]] = None,
    outcome_constraints: Optional[List[str]] = None,
    status_quo: Optional[TParameterization] = None,
    experiment_type: Optional[str] = None,
) -> Experiment:
    """Instantiation wrapper that allows for creation of SimpleExperiment
    without importing or instantiating any Ax classes."""

    exp_parameters: List[Parameter] = [
        parameter_from_json(p) for p in parameters
    ]
    status_quo_arm = None if status_quo is None else Arm(parameters=status_quo)
    parameter_map = {p.name: p for p in exp_parameters}
    ocs = [outcome_constraint_from_str(c) for c in (outcome_constraints or [])]
    if status_quo_arm is None and any(oc.relative for oc in ocs):
        raise ValueError(
            "Must set status_quo to have relative outcome constraints.")
    return Experiment(
        name=name,
        search_space=SearchSpace(
            parameters=exp_parameters,
            parameter_constraints=None if parameter_constraints is None else [
                constraint_from_str(c, parameter_map)
                for c in parameter_constraints
            ],
        ),
        optimization_config=OptimizationConfig(
            objective=Objective(
                metric=Metric(
                    name=objective_name or DEFAULT_OBJECTIVE_NAME,
                    lower_is_better=minimize,
                ),
                minimize=minimize,
            ),
            outcome_constraints=ocs,
        ),
        status_quo=status_quo_arm,
        experiment_type=experiment_type,
    )
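A usage sketch for this wrapper with a hypothetical search space; the parameter dictionaries follow the JSON-style format these instantiation helpers accept (see the docstring in the later `make_experiment` example), and all names and bounds are illustrative:

exp = make_experiment(
    parameters=[
        {"name": "lr", "type": "range", "bounds": [1e-4, 1e-1], "log_scale": True},
        {"name": "batch_size", "type": "choice", "values": [32, 64, 128]},
    ],
    name="tuning_demo",
    objective_name="val_error",
    minimize=True,
    outcome_constraints=["train_time <= 3600"],
)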
Example #14
 def testErrors(self):
     t = Derelativize(search_space=None,
                      observation_features=None,
                      observation_data=None)
     oc = OptimizationConfig(
         objective=Objective(Metric("c")),
         outcome_constraints=[
             OutcomeConstraint(Metric("a"),
                               ComparisonOp.LEQ,
                               bound=2,
                               relative=True)
         ],
     )
     search_space = SearchSpace(
         parameters=[RangeParameter("x", ParameterType.FLOAT, 0, 20)])
     g = ModelBridge(search_space, None, [])
     with self.assertRaises(ValueError):
         t.transform_optimization_config(oc, None, None)
     with self.assertRaises(ValueError):
         t.transform_optimization_config(oc, g, None)
Example #15
def optimization_config_from_objectives(
    objectives: List[Metric],
    objective_thresholds: List[ObjectiveThreshold],
    outcome_constraints: List[OutcomeConstraint],
) -> OptimizationConfig:
    """Parse objectives and constraints to define optimization config.

    The resulting optimization config will be regular single-objective config
    if `objectives` is a list of one element and a multi-objective config
    otherwise.

    NOTE: If passing in multiple objectives, `objective_thresholds` must be a
    non-empty list defining constraints for each objective.
    """
    if len(objectives) == 1:
        if objective_thresholds:
            raise ValueError(
                "Single-objective optimizations must not specify objective thresholds."
            )
        return OptimizationConfig(
            objective=Objective(
                metric=objectives[0],
            ),
            outcome_constraints=outcome_constraints,
        )
    else:
        objective_names = {m.name for m in objectives}
        threshold_names = {oc.metric.name for oc in objective_thresholds}
        if objective_names != threshold_names:
            diff = objective_names.symmetric_difference(threshold_names)
            raise ValueError(
                "Multi-objective optimization requires one objective threshold "
                f"per objective metric; unmatched names are {diff}"
            )

        return MultiObjectiveOptimizationConfig(
            objective=MultiObjective(metrics=objectives),
            outcome_constraints=outcome_constraints,
            objective_thresholds=objective_thresholds,
        )
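A hedged sketch of the multi-objective branch of this parser; the `Metric` and `ObjectiveThreshold` constructors follow the signatures used elsewhere on this page, and the bounds are illustrative:

m1 = Metric(name="accuracy", lower_is_better=False)
m2 = Metric(name="latency", lower_is_better=True)
config = optimization_config_from_objectives(
    objectives=[m1, m2],
    objective_thresholds=[
        ObjectiveThreshold(metric=m1, bound=0.8, relative=False),
        ObjectiveThreshold(metric=m2, bound=120.0, relative=False),
    ],
    outcome_constraints=[],
)
# A single-element `objectives` list with no thresholds instead yields a plain
# single-objective OptimizationConfig.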
Example #16
 def testTransformOptimizationConfig(self):
     # basic test
     m1 = Metric(name="m1")
     objective = Objective(metric=m1, minimize=False)
     oc = OptimizationConfig(objective=objective, outcome_constraints=[])
     tf = LogY(
         search_space=None,
         observation_features=None,
         observation_data=[self.obsd1, self.obsd2],
         config={"metrics": ["m1"]},
     )
     oc_tf = tf.transform_optimization_config(oc, None, None)
     self.assertTrue(oc_tf == oc)
     # test error if transformed metric appears in outcome constraints
     m2 = Metric(name="m2")
     cons = [
         OutcomeConstraint(metric=m2,
                           op=ComparisonOp.GEQ,
                           bound=0.0,
                           relative=False)
     ]
     oc = OptimizationConfig(objective=objective, outcome_constraints=cons)
     oc_tf = tf.transform_optimization_config(oc, None, None)
     self.assertTrue(oc_tf == oc)
     m2 = Metric(name="m2")
     cons = [
         OutcomeConstraint(metric=m2,
                           op=ComparisonOp.GEQ,
                           bound=0.0,
                           relative=False)
     ]
     oc = OptimizationConfig(objective=objective, outcome_constraints=cons)
     tf2 = LogY(
         search_space=None,
         observation_features=None,
         observation_data=[self.obsd1, self.obsd2],
         config={"metrics": ["m2"]},
     )
     with self.assertRaises(ValueError):
         tf2.transform_optimization_config(oc, None, None)
Example #17
def get_branin_experiment_with_timestamp_map_metric(
    rate: Optional[float] = None,
    incremental: Optional[bool] = False,
):
    metric_cls = (
        BraninTimestampMapMetric
        if not incremental
        else BraninIncrementalTimestampMapMetric
    )
    return Experiment(
        name="branin_with_timestamp_map_metric",
        search_space=get_branin_search_space(),
        optimization_config=OptimizationConfig(
            objective=Objective(
                metric=metric_cls(name="branin", param_names=["x1", "x2"], rate=rate),
                minimize=True,
            )
        ),
        tracking_metrics=[metric_cls(name="b", param_names=["x1", "x2"])],
        runner=SyntheticRunner(),
        default_data_type=DataType.MAP_DATA,
    )
Example #18
    def _validate_optimization_config(
        objective: Objective,
        outcome_constraints: Optional[List[OutcomeConstraint]] = None,
        objective_thresholds: Optional[List[ObjectiveThreshold]] = None,
    ) -> None:
        """Ensure outcome constraints are valid.

        Either one or two outcome constraints can reference one metric.
        If there are two constraints, they must have different 'ops': one
            LEQ and one GEQ.
        If there are two constraints, the bound of the GEQ op must be less
            than the bound of the LEQ op.

        Args:
            outcome_constraints: Constraints to validate.
        """
        if not isinstance(objective, (MultiObjective, ScalarizedObjective)):
            raise TypeError(
                ("`MultiObjectiveOptimizationConfig` requires an objective "
                 "of type `MultiObjective` or `ScalarizedObjective`. "
                 "Use `OptimizationConfig` instead if using a "
                 "single-metric objective."))
        outcome_constraints = outcome_constraints or []
        objective_thresholds = objective_thresholds or []
        if isinstance(objective, MultiObjective):
            objectives_by_name = {
                obj.metric.name: obj
                for obj in objective.objectives
            }
            check_objective_thresholds_match_objectives(
                objectives_by_name=objectives_by_name,
                objective_thresholds=objective_thresholds,
            )

        unconstrainable_metrics = objective.get_unconstrainable_metrics()
        OptimizationConfig._validate_outcome_constraints(
            unconstrainable_metrics=unconstrainable_metrics,
            outcome_constraints=outcome_constraints,
        )
Example #19
 def testRelativeConstraint(self):
     branin_rel = BenchmarkProblem(
         name="constrained_branin",
         fbest=0.397887,
         optimization_config=OptimizationConfig(
             objective=Objective(
                 metric=BraninMetric(name="branin_objective",
                                     param_names=["x1", "x2"],
                                     noise_sd=5.0),
                 minimize=True,
             ),
             outcome_constraints=[
                 OutcomeConstraint(
                     metric=L2NormMetric(
                         name="branin_constraint",
                         param_names=["x1", "x2"],
                         noise_sd=5.0,
                     ),
                     op=ComparisonOp.LEQ,
                     bound=5.0,
                     relative=True,
                 )
             ],
         ),
         search_space=get_branin_search_space(),
     )
     suite = BOBenchmarkingSuite()
     suite.run(
         num_runs=1,
         total_iterations=5,
         bo_strategies=[
             GenerationStrategy(
                 [GenerationStep(model=Models.SOBOL, num_arms=5)])
         ],
         bo_problems=[branin_rel],
     )
     with self.assertRaises(ValueError):
         suite.generate_report()
Example #20
 def testLowerBound(self):
     branin_lb = BenchmarkProblem(
         name="constrained_branin",
         fbest=0.397887,
         optimization_config=OptimizationConfig(
             objective=Objective(
                 metric=BraninMetric(name="branin_objective",
                                     param_names=["x1", "x2"],
                                     noise_sd=5.0),
                 minimize=True,
             ),
             outcome_constraints=[
                 OutcomeConstraint(
                     metric=L2NormMetric(
                         name="branin_constraint",
                         param_names=["x1", "x2"],
                         noise_sd=5.0,
                     ),
                     op=ComparisonOp.GEQ,
                     bound=5.0,
                     relative=False,
                 )
             ],
         ),
         search_space=get_branin_search_space(),
     )
     suite = BOBenchmarkingSuite()
     suite.run(
         num_runs=1,
         batch_size=2,
         total_iterations=4,
         bo_strategies=[
             GenerationStrategy(
                 [GenerationStep(model=Models.SOBOL, num_arms=5)])
         ],
         bo_problems=[branin_lb],
     )
     suite.generate_report(include_individual=True)
Example #21
    def testTransformOptimizationConfig(self):
        m1 = Metric(name="m1")
        m2 = Metric(name="m2")
        m3 = Metric(name="m3")
        objective = Objective(metric=m3, minimize=False)
        cons = [
            OutcomeConstraint(metric=m1,
                              op=ComparisonOp.GEQ,
                              bound=2.0,
                              relative=False),
            OutcomeConstraint(metric=m2,
                              op=ComparisonOp.LEQ,
                              bound=3.5,
                              relative=False),
        ]
        oc = OptimizationConfig(objective=objective, outcome_constraints=cons)
        oc = self.t.transform_optimization_config(oc, None, None)
        cons_t = [
            OutcomeConstraint(metric=m1,
                              op=ComparisonOp.GEQ,
                              bound=1.0,
                              relative=False),
            OutcomeConstraint(metric=m2,
                              op=ComparisonOp.LEQ,
                              bound=4.0,
                              relative=False),
        ]
        self.assertTrue(oc.outcome_constraints == cons_t)
        self.assertTrue(oc.objective == objective)

        # Check fail with relative
        con = OutcomeConstraint(metric=m1,
                                op=ComparisonOp.GEQ,
                                bound=2.0,
                                relative=True)
        oc = OptimizationConfig(objective=objective, outcome_constraints=[con])
        with self.assertRaises(ValueError):
            oc = self.t.transform_optimization_config(oc, None, None)
Example #22
def get_branin_experiment_with_timestamp_map_metric(
    with_status_quo: bool = False,
    rate: Optional[float] = None,
) -> Experiment:
    exp = Experiment(
        name="branin_with_timestamp_map_metric",
        search_space=get_branin_search_space(),
        optimization_config=OptimizationConfig(objective=Objective(
            metric=BraninTimestampMapMetric(
                name="branin_map", param_names=["x1", "x2"], rate=rate),
            minimize=True,
        )),
        tracking_metrics=[
            BraninMetric(name="branin", param_names=["x1", "x2"])
        ],
        runner=SyntheticRunner(),
        default_data_type=DataType.MAP_DATA,
    )

    if with_status_quo:
        exp.status_quo = Arm(parameters={"x1": 0.0, "x2": 0.0})

    return exp
Example #23
 def test_best_point(
     self,
     _mock_gen,
     _mock_best_point,
     _mock_fit,
     _mock_predict,
     _mock_gen_arms,
     _mock_unwrap,
     _mock_obs_from_data,
 ):
     exp = Experiment(search_space=get_search_space_for_range_value(),
                      name="test")
     modelbridge = ArrayModelBridge(
         search_space=get_search_space_for_range_value(),
         model=NumpyModel(),
         transforms=[t1, t2],
         experiment=exp,
         data=Data(),
     )
     self.assertEqual(list(modelbridge.transforms.keys()),
                      ["Cast", "t1", "t2"])
     # _fit is mocked, which typically sets this.
     modelbridge.outcomes = ["a"]
     run = modelbridge.gen(
         n=1,
         optimization_config=OptimizationConfig(
             objective=Objective(metric=Metric("a"), minimize=False),
             outcome_constraints=[],
         ),
     )
     arm, predictions = run.best_arm_predictions
     self.assertEqual(arm.parameters, {})
     self.assertEqual(predictions[0], {"m": 1.0})
     self.assertEqual(predictions[1], {"m": {"m": 2.0}})
      # Check that an optimization config is required.
     with self.assertRaises(ValueError):
         run = modelbridge.gen(n=1, optimization_config=None)
Example #24
def get_factorial_experiment(
    has_optimization_config: bool = True,
    with_batch: bool = False,
    with_status_quo: bool = False,
) -> Experiment:
    exp = Experiment(
        name="factorial_test_experiment",
        search_space=get_factorial_search_space(),
        optimization_config=OptimizationConfig(
            objective=Objective(metric=get_factorial_metric())
        )
        if has_optimization_config
        else None,
        runner=SyntheticRunner(),
        is_test=True,
        # pyre-fixme[6]: Expected `typing.Option...`List[FactorialMetric]`.
        tracking_metrics=[get_factorial_metric("secondary_metric")],
    )

    if with_status_quo:
        exp.status_quo = Arm(
            parameters={
                "factor1": "level11",
                "factor2": "level21",
                "factor3": "level31",
            }
        )

    if with_batch:
        factorial_generator = get_factorial(search_space=exp.search_space)
        factorial_run = factorial_generator.gen(n=-1)
        exp.new_batch_trial(optimize_for_power=with_status_quo).add_generator_run(
            factorial_run
        )

    return exp
Example #25
 def __init__(
     self,
     search_space: SearchSpace,
     name: Optional[str] = None,
     objective_name: Optional[str] = None,
     evaluation_function: TEvaluationFunction = unimplemented_evaluation_function,
     minimize: bool = False,
     outcome_constraints: Optional[List[OutcomeConstraint]] = None,
     status_quo: Optional[Arm] = None,
 ) -> None:
     optimization_config = OptimizationConfig(
         objective=Objective(
             metric=Metric(name=objective_name or DEFAULT_OBJECTIVE_NAME),
             minimize=minimize,
         ),
         outcome_constraints=outcome_constraints,
     )
     super().__init__(
         name=name,
         search_space=search_space,
         optimization_config=optimization_config,
         status_quo=status_quo,
     )
     self._evaluation_function = evaluation_function
Example #26
def make_experiment(
    parameters: List[TParameterRepresentation],
    name: Optional[str] = None,
    objective_name: Optional[str] = None,
    minimize: bool = False,
    parameter_constraints: Optional[List[str]] = None,
    outcome_constraints: Optional[List[str]] = None,
    status_quo: Optional[TParameterization] = None,
) -> Experiment:
    """Instantiation wrapper that allows for creation of SimpleExperiment without
    importing or instantiating any Ax classes."""

    exp_parameters: List[Parameter] = [
        parameter_from_json(p) for p in parameters
    ]
    status_quo_arm = None if status_quo is None else Arm(parameters=status_quo)
    parameter_map = {p.name: p for p in exp_parameters}
    return Experiment(
        name=name,
        search_space=SearchSpace(
            parameters=exp_parameters,
            parameter_constraints=None if parameter_constraints is None else [
                constraint_from_str(c, parameter_map)
                for c in parameter_constraints
            ],
        ),
        optimization_config=OptimizationConfig(
            objective=Objective(
                metric=Metric(name=objective_name or DEFAULT_OBJECTIVE_NAME),
                minimize=minimize,
            ),
            outcome_constraints=None if outcome_constraints is None else
            [outcome_constraint_from_str(c) for c in outcome_constraints],
        ),
        status_quo=status_quo_arm,
    )
Example #27
    def testModelBridge(self, mock_fit, mock_gen_arms,
                        mock_observations_from_data):
        # Test that on init transforms are stored and applied in the correct order
        transforms = [transform_1, transform_2]
        exp = get_experiment_for_value()
        ss = get_search_space_for_value()
        modelbridge = ModelBridge(ss, 0, transforms, exp, 0)
        self.assertEqual(list(modelbridge.transforms.keys()),
                         ["transform_1", "transform_2"])
        fit_args = mock_fit.mock_calls[0][2]
        self.assertTrue(
            fit_args["search_space"] == get_search_space_for_value(8.0))
        self.assertTrue(fit_args["observation_features"] == [])
        self.assertTrue(fit_args["observation_data"] == [])
        self.assertTrue(mock_observations_from_data.called)

        # Test prediction on out of design features.
        modelbridge._predict = mock.MagicMock(
            "ax.modelbridge.base.ModelBridge._predict",
            autospec=True,
            side_effect=ValueError("Out of Design"),
        )
        # This point is in design, and thus failures in predict are legitimate.
        with mock.patch.object(ModelBridge,
                               "model_space",
                               return_value=get_search_space_for_range_values):
            with self.assertRaises(ValueError):
                modelbridge.predict([get_observation2().features])

        # This point is out of design, and not in training data.
        with self.assertRaises(ValueError):
            modelbridge.predict([get_observation_status_quo0().features])

        # Now it's in the training data.
        with mock.patch.object(
                ModelBridge,
                "get_training_data",
                return_value=[get_observation_status_quo0()],
        ):
            # Return raw training value.
            self.assertEqual(
                modelbridge.predict([get_observation_status_quo0().features]),
                unwrap_observation_data([get_observation_status_quo0().data]),
            )

        # Test that transforms are applied correctly on predict
        modelbridge._predict = mock.MagicMock(
            "ax.modelbridge.base.ModelBridge._predict",
            autospec=True,
            return_value=[get_observation2trans().data],
        )
        modelbridge.predict([get_observation2().features])
        # Observation features sent to _predict are un-transformed afterwards
        modelbridge._predict.assert_called_with([get_observation2().features])

        # Check that _single_predict is equivalent here.
        modelbridge._single_predict([get_observation2().features])
        # Observation features sent to _predict are un-transformed afterwards
        modelbridge._predict.assert_called_with([get_observation2().features])

        # Test transforms applied on gen
        modelbridge._gen = mock.MagicMock(
            "ax.modelbridge.base.ModelBridge._gen",
            autospec=True,
            return_value=([get_observation1trans().features], [2], None, {}),
        )
        oc = OptimizationConfig(objective=Objective(metric=Metric(
            name="test_metric")))
        modelbridge._set_kwargs_to_save(model_key="TestModel",
                                        model_kwargs={},
                                        bridge_kwargs={})
        gr = modelbridge.gen(
            n=1,
            search_space=get_search_space_for_value(),
            optimization_config=oc,
            pending_observations={"a": [get_observation2().features]},
            fixed_features=ObservationFeatures({"x": 5}),
        )
        self.assertEqual(gr._model_key, "TestModel")
        modelbridge._gen.assert_called_with(
            n=1,
            search_space=SearchSpace(
                [FixedParameter("x", ParameterType.FLOAT, 8.0)]),
            optimization_config=oc,
            pending_observations={"a": [get_observation2trans().features]},
            fixed_features=ObservationFeatures({"x": 36}),
            model_gen_options=None,
        )
        mock_gen_arms.assert_called_with(
            arms_by_signature={},
            observation_features=[get_observation1().features])

        # Gen with no pending observations and no fixed features
        modelbridge.gen(n=1,
                        search_space=get_search_space_for_value(),
                        optimization_config=None)
        modelbridge._gen.assert_called_with(
            n=1,
            search_space=SearchSpace(
                [FixedParameter("x", ParameterType.FLOAT, 8.0)]),
            optimization_config=None,
            pending_observations={},
            fixed_features=ObservationFeatures({}),
            model_gen_options=None,
        )

        # Gen with multi-objective optimization config.
        oc2 = OptimizationConfig(objective=ScalarizedObjective(
            metrics=[Metric(name="test_metric"),
                     Metric(name="test_metric_2")]))
        modelbridge.gen(n=1,
                        search_space=get_search_space_for_value(),
                        optimization_config=oc2)
        modelbridge._gen.assert_called_with(
            n=1,
            search_space=SearchSpace(
                [FixedParameter("x", ParameterType.FLOAT, 8.0)]),
            optimization_config=oc2,
            pending_observations={},
            fixed_features=ObservationFeatures({}),
            model_gen_options=None,
        )

        # Test transforms applied on cross_validate
        modelbridge._cross_validate = mock.MagicMock(
            "ax.modelbridge.base.ModelBridge._cross_validate",
            autospec=True,
            return_value=[get_observation1trans().data],
        )
        cv_training_data = [get_observation2()]
        cv_test_points = [get_observation1().features]
        cv_predictions = modelbridge.cross_validate(
            cv_training_data=cv_training_data, cv_test_points=cv_test_points)
        modelbridge._cross_validate.assert_called_with(
            obs_feats=[get_observation2trans().features],
            obs_data=[get_observation2trans().data],
            cv_test_points=[get_observation1().features],  # untransformed after
        )
        self.assertTrue(cv_predictions == [get_observation1().data])

        # Test stored training data
        obs = modelbridge.get_training_data()
        self.assertTrue(obs == [get_observation1(), get_observation2()])
        self.assertEqual(modelbridge.metric_names, {"a", "b"})
        self.assertIsNone(modelbridge.status_quo)
        self.assertTrue(
            modelbridge.model_space == get_search_space_for_value())
        self.assertEqual(modelbridge.training_in_design, [False, False])

        with self.assertRaises(ValueError):
            modelbridge.training_in_design = [True, True, False]

        with self.assertRaises(ValueError):
            modelbridge.training_in_design = [True, True, False]

        # Test feature_importances
        with self.assertRaises(NotImplementedError):
            modelbridge.feature_importances("a")
Example #28
def make_experiment(
    parameters: List[TParameterRepresentation],
    name: Optional[str] = None,
    parameter_constraints: Optional[List[str]] = None,
    outcome_constraints: Optional[List[str]] = None,
    status_quo: Optional[TParameterization] = None,
    experiment_type: Optional[str] = None,
    tracking_metric_names: Optional[List[str]] = None,
    # Single-objective optimization arguments:
    objective_name: Optional[str] = None,
    minimize: bool = False,
    # Multi-objective optimization arguments:
    objectives: Optional[Dict[str, str]] = None,
    objective_thresholds: Optional[List[str]] = None,
    support_intermediate_data: bool = False,
    immutable_search_space_and_opt_config: bool = True,
    is_test: bool = False,
) -> Experiment:
    """Instantiation wrapper that allows for Ax `Experiment` creation
    without importing or instantiating any Ax classes.

    Args:
        parameters: List of dictionaries representing parameters in the
            experiment search space.
            Required elements in the dictionaries are:
            1. "name" (name of parameter, string),
            2. "type" (type of parameter: "range", "fixed", or "choice", string),
            and one of the following:
            3a. "bounds" for range parameters (list of two values, lower bound
            first),
            3b. "values" for choice parameters (list of values), or
            3c. "value" for fixed parameters (single value).
            Optional elements are:
            1. "log_scale" (for float-valued range parameters, bool),
            2. "value_type" (to specify type that values of this parameter should
            take; expects "float", "int", "bool" or "str"),
            3. "is_fidelity" (bool) and "target_value" (float) for fidelity
            parameters,
            4. "is_ordered" (bool) for choice parameters,
            5. "is_task" (bool) for task parameters, and
            6. "digits" (int) for float-valued range parameters.
        name: Name of the experiment to be created.
        parameter_constraints: List of string representation of parameter
            constraints, such as "x3 >= x4" or "-x3 + 2*x4 - 3.5*x5 >= 2". For
            the latter constraints, any number of arguments is accepted, and
            acceptable operators are "<=" and ">=".
        outcome_constraints: List of string representation of outcome
            constraints of form "metric_name >= bound", like "m1 <= 3."
        status_quo: Parameterization of the current state of the system.
            If set, this will be added to each trial to be evaluated alongside
            test configurations.
        experiment_type: String indicating type of the experiment (e.g. name of
            a product in which it is used), if any.
        tracking_metric_names: Names of additional tracking metrics not used for
            optimization.
        objective_name: Name of the metric used as objective in this experiment,
            if experiment is single-objective optimization.
        minimize: Whether this experiment represents a minimization problem, if
            experiment is a single-objective optimization.
        objectives: Mapping from an objective name to "minimize" or "maximize"
            representing the direction for that objective. Used only for
            multi-objective optimization experiments.
        objective_thresholds: A list of objective threshold constraints for multi-
            objective optimization, in the same string format as `outcome_constraints`
            argument.
        support_intermediate_data: Whether trials may report metrics results for
            incomplete runs.
        immutable_search_space_and_opt_config: Whether it's possible to update the
            search space and optimization config on this experiment after creation.
            Defaults to True. If set to True, we won't store or load copies of the
            search space and optimization config on each generator run, which will
            improve storage performance.
        is_test: Whether this experiment will be a test experiment (useful for
            marking test experiments in storage etc). Defaults to False.
    """
    if objective_name is not None and (
        objectives is not None or objective_thresholds is not None
    ):
        raise UnsupportedError(
            "Ambiguous objective definition: for single-objective optimization "
            "`objective_name` and `minimize` arguments expected. For multi-objective "
            "optimization `objectives` and `objective_thresholds` arguments expected."
        )

    status_quo_arm = None if status_quo is None else Arm(parameters=status_quo)

    # TODO(jej): Needs to be decided per-metric when supporting heterogeneous data.
    metric_cls = MapMetric if support_intermediate_data else Metric
    if objectives is None:
        optimization_config = OptimizationConfig(
            objective=Objective(
                metric=metric_cls(
                    name=objective_name or DEFAULT_OBJECTIVE_NAME,
                    lower_is_better=minimize,
                ),
                minimize=minimize,
            ),
            outcome_constraints=make_outcome_constraints(
                outcome_constraints or [], status_quo_arm is not None
            ),
        )
    else:
        optimization_config = make_optimization_config(
            objectives,
            objective_thresholds or [],
            outcome_constraints or [],
            status_quo_arm is not None,
        )

    tracking_metrics = (
        None
        if tracking_metric_names is None
        else [Metric(name=metric_name) for metric_name in tracking_metric_names]
    )

    default_data_type = (
        DataType.MAP_DATA if support_intermediate_data else DataType.DATA
    )

    immutable_ss_and_oc = immutable_search_space_and_opt_config
    properties = (
        {}
        if not immutable_search_space_and_opt_config
        else {Keys.IMMUTABLE_SEARCH_SPACE_AND_OPT_CONF.value: immutable_ss_and_oc}
    )

    return Experiment(
        name=name,
        search_space=make_search_space(parameters, parameter_constraints or []),
        optimization_config=optimization_config,
        status_quo=status_quo_arm,
        experiment_type=experiment_type,
        tracking_metrics=tracking_metrics,
        default_data_type=default_data_type,
        properties=properties,
        is_test=is_test,
    )
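A sketch of the multi-objective path through this wrapper, using the string formats the docstring describes (objective directions as "minimize"/"maximize", thresholds in the same syntax as outcome constraints); names and bounds are illustrative:

exp = make_experiment(
    parameters=[
        {"name": "x1", "type": "range", "bounds": [0.0, 1.0]},
        {"name": "x2", "type": "range", "bounds": [0.0, 1.0]},
    ],
    name="moo_demo",
    objectives={"accuracy": "maximize", "model_size": "minimize"},
    objective_thresholds=["accuracy >= 0.7", "model_size <= 50"],
)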
Example #29
def get_optimization_config_no_constraints() -> OptimizationConfig:
    return OptimizationConfig(objective=Objective(metric=Metric("test_metric")))
Example #30
def get_augmented_hartmann_objective() -> Objective:
    return Objective(metric=get_augmented_hartmann_metric(), minimize=False)