Code example #1
    def test_best_point(
        self,
        _mock_gen,
        _mock_best_point,
        _mock_fit,
        _mock_predict,
        _mock_gen_arms,
        _mock_unwrap,
        _mock_obs_from_data,
    ):
        exp = Experiment(search_space=get_search_space_for_range_value(),
                         name="test")
        modelbridge = ArrayModelBridge(
            search_space=get_search_space_for_range_value(),
            model=NumpyModel(),
            transforms=[t1, t2],
            experiment=exp,
            data=Data(),
        )
        self.assertEqual(list(modelbridge.transforms.keys()),
                         ["Cast", "t1", "t2"])
        # _fit is mocked, which typically sets this.
        modelbridge.outcomes = ["a"]
        run = modelbridge.gen(
            n=1,
            optimization_config=OptimizationConfig(
                objective=Objective(metric=Metric("a"), minimize=False),
                outcome_constraints=[],
            ),
        )
        arm, predictions = run.best_arm_predictions
        self.assertEqual(arm.parameters, {})
        self.assertEqual(predictions[0], {"m": 1.0})
        self.assertEqual(predictions[1], {"m": {"m": 2.0}})
        # Check that an optimization config is required.
        with self.assertRaises(ValueError):
            run = modelbridge.gen(n=1, optimization_config=None)

        # Test optimization config validation: an error is raised when a
        # ScalarizedOutcomeConstraint contains a metric that is not in the outcomes.
        with self.assertRaises(ValueError):
            run = modelbridge.gen(
                n=1,
                optimization_config=OptimizationConfig(
                    objective=Objective(metric=Metric("a"), minimize=False),
                    outcome_constraints=[
                        ScalarizedOutcomeConstraint(
                            metrics=[Metric("wrong_metric_name")],
                            weights=[1.0],
                            op=ComparisonOp.LEQ,
                            bound=0,
                        )
                    ],
                ),
            )
Code example #2
    def testTransformOptimizationConfig(self):
        m1 = Metric(name="m1")
        m2 = Metric(name="m2")
        m3 = Metric(name="m3")
        objective = Objective(metric=m3, minimize=False)
        cons = [
            OutcomeConstraint(metric=m1,
                              op=ComparisonOp.GEQ,
                              bound=2.0,
                              relative=False),
            OutcomeConstraint(metric=m2,
                              op=ComparisonOp.LEQ,
                              bound=3.5,
                              relative=False),
            ScalarizedOutcomeConstraint(
                metrics=[m1, m2],
                weights=[0.5, 0.5],
                op=ComparisonOp.LEQ,
                bound=3.5,
                relative=False,
            ),
        ]
        oc = OptimizationConfig(objective=objective, outcome_constraints=cons)
        oc = self.t.transform_optimization_config(oc, None, None)
        cons_t = [
            OutcomeConstraint(metric=m1,
                              op=ComparisonOp.GEQ,
                              bound=1.0,
                              relative=False),
            OutcomeConstraint(
                metric=m2,
                op=ComparisonOp.LEQ,
                bound=2.0 * sqrt(3),  # (3.5 - 1.5) / sqrt(1/3)
                relative=False,
            ),
            ScalarizedOutcomeConstraint(
                metrics=[m1, m2],
                weights=[0.5 * 1.0, 0.5 * sqrt(1 / 3)],
                op=ComparisonOp.LEQ,
                bound=2.25,  # 3.5 - (0.5 * 1.0 + 0.5 * 1.5)
                relative=False,
            ),
        ]
        self.assertTrue(oc.outcome_constraints == cons_t)
        self.assertTrue(oc.objective == objective)

        # Check fail with relative
        con = OutcomeConstraint(metric=m1,
                                op=ComparisonOp.GEQ,
                                bound=2.0,
                                relative=True)
        oc = OptimizationConfig(objective=objective, outcome_constraints=[con])
        with self.assertRaises(ValueError):
            oc = self.t.transform_optimization_config(oc, None, None)
Code example #3
    def testTransformOptimizationConfig(self):
        m1 = Metric(name="m1")
        m2 = Metric(name="m2")
        m3 = Metric(name="m3")
        objective = Objective(metric=m3, minimize=False)
        cons = [
            OutcomeConstraint(metric=m1,
                              op=ComparisonOp.GEQ,
                              bound=2.0,
                              relative=False),
            OutcomeConstraint(metric=m2,
                              op=ComparisonOp.LEQ,
                              bound=3.5,
                              relative=False),
        ]
        oc = OptimizationConfig(objective=objective, outcome_constraints=cons)
        fixed_features = ObservationFeatures({"z": "a"})
        oc = self.t.transform_optimization_config(oc, None, fixed_features)
        cons_t = [
            OutcomeConstraint(metric=m1,
                              op=ComparisonOp.GEQ,
                              bound=1.0,
                              relative=False),
            OutcomeConstraint(metric=m2,
                              op=ComparisonOp.LEQ,
                              bound=-0.5,
                              relative=False),
        ]
        self.assertTrue(oc.outcome_constraints == cons_t)
        self.assertTrue(oc.objective == objective)

        # No constraints
        oc2 = OptimizationConfig(objective=objective)
        oc3 = deepcopy(oc2)
        oc3 = self.t.transform_optimization_config(oc3, None, fixed_features)
        self.assertTrue(oc2 == oc3)

        # Check fail with relative
        con = OutcomeConstraint(metric=m1,
                                op=ComparisonOp.GEQ,
                                bound=2.0,
                                relative=True)
        oc = OptimizationConfig(objective=objective, outcome_constraints=[con])
        with self.assertRaises(ValueError):
            oc = self.t.transform_optimization_config(oc, None, fixed_features)
        # Fail without strat param fixed
        fixed_features = ObservationFeatures({"x": 2.0})
        with self.assertRaises(ValueError):
            oc = self.t.transform_optimization_config(oc, None, fixed_features)
Code example #4
    def opt_config_and_tracking_metrics_from_sqa(
        self, metrics_sqa: List[SQAMetric]
    ) -> Tuple[Optional[OptimizationConfig], List[Metric]]:
        """Convert a list of SQLAlchemy Metrics to a a tuple of Ax OptimizationConfig
        and tracking metrics.
        """
        objective = None
        objective_thresholds = []
        outcome_constraints = []
        tracking_metrics = []
        for metric_sqa in metrics_sqa:
            metric = self.metric_from_sqa(metric_sqa=metric_sqa)
            if isinstance(metric, Objective):
                objective = metric
            elif isinstance(metric, ObjectiveThreshold):
                objective_thresholds.append(metric)
            elif isinstance(metric, OutcomeConstraint):
                outcome_constraints.append(metric)
            else:
                tracking_metrics.append(metric)

        if objective is None:
            return None, tracking_metrics

        if objective_thresholds or type(objective) == MultiObjective:
            optimization_config = MultiObjectiveOptimizationConfig(
                objective=objective,
                outcome_constraints=outcome_constraints,
                objective_thresholds=objective_thresholds,
            )
        else:
            optimization_config = OptimizationConfig(
                objective=objective, outcome_constraints=outcome_constraints)
        return (optimization_config, tracking_metrics)
Code example #5
 def test_best_point(
     self,
     _mock_gen,
     _mock_best_point,
     _mock_fit,
     _mock_predict,
     _mock_gen_arms,
     _mock_unwrap,
     _mock_obs_from_data,
 ):
     exp = Experiment(get_search_space_for_range_value(), "test")
     modelbridge = ArrayModelBridge(get_search_space_for_range_value(),
                                    NumpyModel(), [t1, t2], exp, 0)
     self.assertEqual(list(modelbridge.transforms.keys()),
                      ["Cast", "t1", "t2"])
     # _fit is mocked, which typically sets this.
     modelbridge.outcomes = ["a"]
     run = modelbridge.gen(
         n=1,
         optimization_config=OptimizationConfig(
             objective=Objective(metric=Metric("a"), minimize=False),
             outcome_constraints=[],
         ),
     )
     arm, predictions = run.best_arm_predictions
     self.assertEqual(arm.parameters, {})
     self.assertEqual(predictions[0], {"m": 1.0})
     self.assertEqual(predictions[1], {"m": {"m": 2.0}})
      # Check that an optimization config is required.
     with self.assertRaises(ValueError):
         run = modelbridge.gen(n=1, optimization_config=None)
Code example #6
File: simple_experiment.py  Project: proteanblank/Ax
 def __init__(
     self,
     search_space: SearchSpace,
     name: Optional[str] = None,
     objective_name: Optional[str] = None,
     evaluation_function: TEvaluationFunction = unimplemented_evaluation_function,
     minimize: bool = False,
     outcome_constraints: Optional[List[OutcomeConstraint]] = None,
     status_quo: Optional[Arm] = None,
     properties: Optional[Dict[str, Any]] = None,
     default_data_type: Optional[DataType] = None,
 ) -> None:
     warnings.warn(
         "`SimpleExperiment` is deprecated.  Use `Experiment` instead.",
         DeprecationWarning,
     )
     optimization_config = OptimizationConfig(
         objective=Objective(
             metric=Metric(name=objective_name or DEFAULT_OBJECTIVE_NAME),
             minimize=minimize,
         ),
         outcome_constraints=outcome_constraints,
     )
     super().__init__(
         name=name,
         search_space=search_space,
         optimization_config=optimization_config,
         status_quo=status_quo,
         properties=properties,
         default_data_type=default_data_type,
     )
     self._evaluation_function = evaluation_function
Code example #7
File: instantiation.py  Project: yashpatel5400/Ax
def make_experiment(
    parameters: List[TParameterRepresentation],
    name: Optional[str] = None,
    objective_name: Optional[str] = None,
    minimize: bool = False,
    parameter_constraints: Optional[List[str]] = None,
    outcome_constraints: Optional[List[str]] = None,
    status_quo: Optional[TParameterization] = None,
) -> Experiment:
    """Instantiation wrapper that allows for creation of SimpleExperiment without
    importing or instantiating any Ax classes."""

    exp_parameters: List[Parameter] = [parameter_from_json(p) for p in parameters]
    status_quo_arm = None if status_quo is None else Arm(parameters=status_quo)
    parameter_map = {p.name: p for p in exp_parameters}
    ocs = [outcome_constraint_from_str(c) for c in (outcome_constraints or [])]
    if status_quo_arm is None and any(oc.relative for oc in ocs):
        raise ValueError("Must set status_quo to have relative outcome constraints.")
    return Experiment(
        name=name,
        search_space=SearchSpace(
            parameters=exp_parameters,
            parameter_constraints=None
            if parameter_constraints is None
            else [constraint_from_str(c, parameter_map) for c in parameter_constraints],
        ),
        optimization_config=OptimizationConfig(
            objective=Objective(
                metric=Metric(name=objective_name or DEFAULT_OBJECTIVE_NAME),
                minimize=minimize,
            ),
            outcome_constraints=ocs,
        ),
        status_quo=status_quo_arm,
    )
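For reference, a hypothetical call to the make_experiment wrapper above could look like the sketch below. The parameter dictionaries follow the JSON-style representation that parameter_from_json consumes; the names, bounds, and objective name are illustrative assumptions, not values from the Ax source.

# Hypothetical usage sketch of the make_experiment wrapper defined above;
# parameter names and bounds are illustrative.
exp = make_experiment(
    parameters=[
        {"name": "x1", "type": "range", "bounds": [0.0, 10.0]},
        {"name": "x2", "type": "range", "bounds": [0.0, 15.0]},
    ],
    name="demo_experiment",
    objective_name="objective",
    minimize=True,
)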
Code example #8
def optimization_config_from_objectives(
    objectives: List[Objective],
    objective_thresholds: List[ObjectiveThreshold],
    outcome_constraints: List[OutcomeConstraint],
) -> OptimizationConfig:
    """Parse objectives and constraints to define optimization config.

    The resulting optimization config will be regular single-objective config
    if `objectives` is a list of one element and a multi-objective config
    otherwise.

    NOTE: If passing in multiple objectives, `objective_thresholds` must be a
    non-empty list defining constraints for each objective.
    """
    if len(objectives) == 1:
        if objective_thresholds:
            raise ValueError(
                "Single-objective optimizations must not specify objective thresholds."
            )
        return OptimizationConfig(
            objective=objectives[0],
            outcome_constraints=outcome_constraints,
        )

    if not objective_thresholds:
        logger.info(
            "Due to non-specification, we will use the heuristic for selecting "
            "objective thresholds.")

    return MultiObjectiveOptimizationConfig(
        objective=MultiObjective(objectives=objectives),
        outcome_constraints=outcome_constraints,
        objective_thresholds=objective_thresholds,
    )
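As a minimal usage sketch of the helper above (assuming the standard Ax imports shown below), a single-objective call with no thresholds or constraints simply wraps the objective in a plain OptimizationConfig; the metric name is a placeholder.

# Minimal sketch: single-objective call to the helper defined above.
# The metric name "accuracy" is illustrative.
from ax.core.metric import Metric
from ax.core.objective import Objective

opt_config = optimization_config_from_objectives(
    objectives=[Objective(metric=Metric(name="accuracy"), minimize=False)],
    objective_thresholds=[],
    outcome_constraints=[],
)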
Code example #9
File: test_array_modelbridge.py  Project: jshuadvd/Ax
 def test_best_point(
     self,
     _mock_gen,
     _mock_best_point,
     _mock_fit,
     _mock_predict,
     _mock_gen_arms,
     _mock_unwrap,
     _mock_obs_from_data,
 ):
     exp = Experiment(get_search_space_for_range_value(), "test")
     modelbridge = ArrayModelBridge(get_search_space_for_range_value(),
                                    NumpyModel(), [t1, t2], exp, 0)
     self.assertEqual(list(modelbridge.transforms.keys()), ["t1", "t2"])
     run = modelbridge.gen(
         n=1,
         optimization_config=OptimizationConfig(
             objective=Objective(metric=Metric("a"), minimize=False),
             outcome_constraints=[],
         ),
     )
     arm, predictions = run.best_arm_predictions
     self.assertEqual(arm.parameters, {})
     self.assertEqual(predictions[0], {"m": 1.0})
     self.assertEqual(predictions[1], {"m": {"m": 2.0}})
Code example #10
 def setUp(self):
     self.branin_experiment = get_branin_experiment()
     self.branin_experiment._properties[
         Keys.IMMUTABLE_SEARCH_SPACE_AND_OPT_CONF] = True
     self.branin_experiment_no_impl_metrics = Experiment(
         search_space=get_branin_search_space(),
         optimization_config=OptimizationConfig(objective=Objective(
             metric=Metric(name="branin"))),
     )
     self.sobol_GPEI_GS = choose_generation_strategy(
         search_space=get_branin_search_space())
     self.two_sobol_steps_GS = GenerationStrategy(
         # Contrived GS to ensure that `DataRequiredError` is properly
         # handled in the scheduler. This error is raised when not enough
         # trials have been observed to proceed to the next generation step.
         steps=[
             GenerationStep(
                 model=Models.SOBOL,
                 num_trials=5,
                 min_trials_observed=3,
                 max_parallelism=2,
             ),
             GenerationStep(model=Models.SOBOL,
                            num_trials=-1,
                            max_parallelism=3),
         ])
     # GS to force the scheduler to poll completed trials after each ran trial.
     self.sobol_GS_no_parallelism = GenerationStrategy(steps=[
         GenerationStep(
             model=Models.SOBOL, num_trials=-1, max_parallelism=1)
     ])
Code example #11
File: instantiation.py  Project: zorrock/Ax
def make_experiment(
    parameters: List[TParameterRepresentation],
    name: Optional[str] = None,
    objective_name: Optional[str] = None,
    minimize: bool = False,
    parameter_constraints: Optional[List[str]] = None,
    outcome_constraints: Optional[List[str]] = None,
) -> Experiment:
    """Instantiation wrapper that allows for creation of SimpleExperiment without
    importing or instantiating any Ax classes."""

    exp_parameters: List[Parameter] = [
        parameter_from_json(p) for p in parameters
    ]
    parameter_map = {p.name: p for p in exp_parameters}
    return Experiment(
        name=name,
        search_space=SearchSpace(
            parameters=exp_parameters,
            parameter_constraints=None if parameter_constraints is None else [
                constraint_from_str(c, parameter_map)
                for c in parameter_constraints
            ],
        ),
        optimization_config=OptimizationConfig(
            objective=Objective(
                metric=Metric(name=objective_name or DEFAULT_OBJECTIVE_NAME),
                minimize=minimize,
            ),
            outcome_constraints=None if outcome_constraints is None else
            [outcome_constraint_from_str(c) for c in outcome_constraints],
        ),
    )
Code example #12
 def __init__(
     self,
     search_space: SearchSpace,
     name: Optional[str] = None,
     objective_name: Optional[str] = None,
     evaluation_function: TEvaluationFunction = unimplemented_evaluation_function,
     minimize: bool = False,
     outcome_constraints: Optional[List[OutcomeConstraint]] = None,
     status_quo: Optional[Arm] = None,
     properties: Optional[Dict[str, Any]] = None,
 ) -> None:
     optimization_config = OptimizationConfig(
         objective=Objective(
             metric=Metric(name=objective_name or DEFAULT_OBJECTIVE_NAME),
             minimize=minimize,
         ),
         outcome_constraints=outcome_constraints,
     )
     super().__init__(
         name=name,
         search_space=search_space,
         optimization_config=optimization_config,
         status_quo=status_quo,
         properties=properties,
     )
     self._evaluation_function = evaluation_function
Code example #13
File: core_stubs.py  Project: tangzhenyu/ax
def get_multi_type_experiment(
    add_trial_type: bool = True, add_trials: bool = False
) -> MultiTypeExperiment:
    oc = OptimizationConfig(Objective(BraninMetric("m1", ["x1", "x2"])))
    experiment = MultiTypeExperiment(
        name="test_exp",
        search_space=get_branin_search_space(),
        default_trial_type="type1",
        default_runner=SyntheticRunner(dummy_metadata="dummy1"),
        optimization_config=oc,
    )
    experiment.add_trial_type(
        trial_type="type2", runner=SyntheticRunner(dummy_metadata="dummy2")
    )
    # Switch the order of variables so metric gives different results
    experiment.add_tracking_metric(
        BraninMetric("m2", ["x2", "x1"]), trial_type="type2", canonical_name="m1"
    )

    if add_trials and add_trial_type:
        generator = get_sobol(experiment.search_space)
        gr = generator.gen(10)
        t1 = experiment.new_batch_trial(generator_run=gr, trial_type="type1")
        t2 = experiment.new_batch_trial(generator_run=gr, trial_type="type2")
        t1.set_status_quo_with_weight(status_quo=t1.arms[0], weight=0.5)
        t2.set_status_quo_with_weight(status_quo=t2.arms[0], weight=0.5)
        t1.run()
        t2.run()

    return experiment
Code example #14
File: core_stubs.py  Project: tangzhenyu/ax
def get_factorial_experiment(
    has_optimization_config: bool = True,
    with_batch: bool = False,
    with_status_quo: bool = False,
) -> Experiment:
    exp = Experiment(
        name="factorial_test_experiment",
        search_space=get_factorial_search_space(),
        optimization_config=OptimizationConfig(
            objective=Objective(metric=get_factorial_metric())
        )
        if has_optimization_config
        else None,
        runner=SyntheticRunner(),
        is_test=True,
        tracking_metrics=[get_factorial_metric("secondary_metric")],
    )

    if with_status_quo:
        exp.status_quo = Arm(
            parameters={
                "factor1": "level11",
                "factor2": "level21",
                "factor3": "level31",
            }
        )

    if with_batch:
        factorial_generator = get_factorial(search_space=exp.search_space)
        factorial_run = factorial_generator.gen(n=-1)
        exp.new_batch_trial(optimize_for_power=with_status_quo).add_generator_run(
            factorial_run
        )

    return exp
Code example #15
    def testEq(self):
        config1 = OptimizationConfig(
            objective=self.objective, outcome_constraints=self.outcome_constraints
        )
        config2 = OptimizationConfig(
            objective=self.objective, outcome_constraints=self.outcome_constraints
        )
        self.assertEqual(config1, config2)

        new_outcome_constraint = OutcomeConstraint(
            metric=self.metrics["m2"], op=ComparisonOp.LEQ, bound=0.5
        )
        config3 = OptimizationConfig(
            objective=self.objective,
            outcome_constraints=[self.outcome_constraint, new_outcome_constraint],
        )
        self.assertNotEqual(config1, config3)
Code example #16
    def testHasGoodOptConfigModelFit(self):
        # Construct diagnostics
        result = []
        for i, obs in enumerate(self.training_data):
            result.append(
                CVResult(observed=obs, predicted=self.observation_data[i]))
        diag = compute_diagnostics(result=result)
        assess_model_fit_result = assess_model_fit(
            diagnostics=diag,
            significance_level=0.05,
        )

        # Test single objective
        optimization_config = OptimizationConfig(objective=Objective(
            metric=Metric("a")))
        has_good_fit = has_good_opt_config_model_fit(
            optimization_config=optimization_config,
            assess_model_fit_result=assess_model_fit_result,
        )
        self.assertFalse(has_good_fit)

        # Test multi objective
        optimization_config = MultiObjectiveOptimizationConfig(
            objective=MultiObjective(
                metrics=[Metric("a"), Metric("b")]))
        has_good_fit = has_good_opt_config_model_fit(
            optimization_config=optimization_config,
            assess_model_fit_result=assess_model_fit_result,
        )
        self.assertFalse(has_good_fit)

        # Test constraints
        optimization_config = OptimizationConfig(
            objective=Objective(metric=Metric("a")),
            outcome_constraints=[
                OutcomeConstraint(metric=Metric("b"),
                                  op=ComparisonOp.GEQ,
                                  bound=0.1)
            ],
        )
        has_good_fit = has_good_opt_config_model_fit(
            optimization_config=optimization_config,
            assess_model_fit_result=assess_model_fit_result,
        )
        self.assertFalse(has_good_fit)
Code example #17
    def transform_optimization_config(
        self,
        optimization_config: OptimizationConfig,
        modelbridge: Optional[modelbridge_module.base.ModelBridge],
        fixed_features: ObservationFeatures,
    ) -> OptimizationConfig:
        r"""
        Change the relative flag of the given relative optimization configuration
        to False. This is needed in order for the new opt config to pass ModelBridge
        that requires non-relativized opt config.

        Args:
            opt_config: Optimization configuaration relative to status quo.

        Returns:
            Optimization configuration relative to status quo with relative flag
            equal to false.

        """
        # Getting constraints
        constraints = [
            constraint.clone()
            for constraint in optimization_config.outcome_constraints
        ]
        if not all(constraint.relative
                   for constraint in optimization_config.outcome_constraints):
            raise ValueError(
                "All constraints must be relative to use the Relativize transform."
            )
        for constraint in constraints:
            constraint.relative = False

        if isinstance(optimization_config, MultiObjectiveOptimizationConfig):
            # Getting objective thresholds
            obj_thresholds = [
                obj_threshold.clone()
                for obj_threshold in optimization_config.objective_thresholds
            ]
            for obj_threshold in obj_thresholds:
                if not obj_threshold.relative:
                    raise ValueError(
                        "All objective thresholds must be relative to use "
                        "the Relativize transform.")
                obj_threshold.relative = False

            new_optimization_config = MultiObjectiveOptimizationConfig(
                objective=optimization_config.objective,
                outcome_constraints=constraints,
                objective_thresholds=obj_thresholds,
            )
        else:
            new_optimization_config = OptimizationConfig(
                objective=optimization_config.objective,
                outcome_constraints=constraints,
            )

        return new_optimization_config
Code example #18
    def testInit(self):
        config1 = OptimizationConfig(
            objective=self.objective,
            outcome_constraints=self.outcome_constraints)
        self.assertEqual(str(config1), CONFIG_STR)
        with self.assertRaises(ValueError):
            config1.objective = self.m2_objective
        # updating constraints is fine.
        config1.outcome_constraints = [self.outcome_constraint]
        self.assertEqual(len(config1.metrics), 2)

        # objective without outcome_constraints is also supported
        config2 = OptimizationConfig(objective=self.objective)
        self.assertEqual(config2.outcome_constraints, [])

        # setting objective is fine too, if it's compatible with constraints.
        config2.objective = self.m2_objective
        # setting incompatible constraints is not fine.
        with self.assertRaises(ValueError):
            config2.outcome_constraints = self.outcome_constraints
Code example #19
    def test_transform_ref_point(self, _mock_fit, _mock_predict, _mock_unwrap):
        exp = get_branin_experiment_with_multi_objective(
            has_optimization_config=True, with_batch=False)
        metrics = exp.optimization_config.objective.metrics
        ref_point = {metrics[0].name: 0.0, metrics[1].name: 0.0}
        modelbridge = MultiObjectiveTorchModelBridge(
            search_space=exp.search_space,
            model=MultiObjectiveBotorchModel(),
            optimization_config=exp.optimization_config,
            transforms=[t1, t2],
            experiment=exp,
            data=exp.fetch_data(),
            ref_point=ref_point,
        )
        self.assertIsNone(modelbridge._transformed_ref_point)
        exp = get_branin_experiment_with_multi_objective(
            has_optimization_config=True, with_batch=True)
        exp.attach_data(
            get_branin_data_multi_objective(trial_indices=exp.trials))
        modelbridge = MultiObjectiveTorchModelBridge(
            search_space=exp.search_space,
            model=MultiObjectiveBotorchModel(),
            optimization_config=exp.optimization_config,
            transforms=[t1, t2],
            experiment=exp,
            data=exp.fetch_data(),
            ref_point=ref_point,
        )
        self.assertIsNotNone(modelbridge._transformed_ref_point)
        self.assertEqual(2, len(modelbridge._transformed_ref_point))

        mixed_objective_constraints_optimization_config = OptimizationConfig(
            objective=MultiObjective(
                metrics=[get_branin_metric(name="branin_b")], minimize=False),
            outcome_constraints=[
                OutcomeConstraint(metric=Metric(name="branin_a"),
                                  op=ComparisonOp.LEQ,
                                  bound=1)
            ],
        )
        modelbridge = MultiObjectiveTorchModelBridge(
            search_space=exp.search_space,
            model=MultiObjectiveBotorchModel(),
            optimization_config=mixed_objective_constraints_optimization_config,
            transforms=[t1, t2],
            experiment=exp,
            data=exp.fetch_data(),
            ref_point={"branin_b": 0.0},
        )
        self.assertEqual({"branin_a", "branin_b"}, modelbridge._metric_names)
        self.assertEqual(["branin_b"], modelbridge._objective_metric_names)
        self.assertIsNotNone(modelbridge._transformed_ref_point)
        self.assertEqual(1, len(modelbridge._transformed_ref_point))
Code example #20
    def testInit(self):
        config1 = OptimizationConfig(
            objective=self.objective,
            outcome_constraints=self.outcome_constraints)
        self.assertEqual(str(config1), OC_STR)
        with self.assertRaises(ValueError):
            config1.objective = self.alt_objective  # constrained Objective.
        # updating constraints is fine.
        config1.outcome_constraints = [self.outcome_constraint]
        self.assertEqual(len(config1.metrics), 2)

        # objective without outcome_constraints is also supported
        config2 = OptimizationConfig(objective=self.objective)
        self.assertEqual(config2.outcome_constraints, [])

        # setting objective is fine too, if it's compatible with constraints.
        config2.objective = self.m2_objective
        # setting constraints on objectives is fine for MultiObjective components.

        config2.outcome_constraints = self.outcome_constraints
        self.assertEqual(config2.outcome_constraints, self.outcome_constraints)
Code example #21
File: pareto_utils.py  Project: pr0d33p/Ax
def _build_new_optimization_config(weights,
                                   primary_objective,
                                   secondary_objective,
                                   outcome_constraints=None):
    obj = ScalarizedObjective(
        metrics=[primary_objective, secondary_objective],
        weights=weights,
        minimize=False,
    )
    optimization_config = OptimizationConfig(
        objective=obj, outcome_constraints=outcome_constraints)
    return optimization_config
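A short usage sketch of the helper above, assuming Metric is imported from Ax's core module; the metric names and the equal weighting are placeholders chosen for illustration.

# Illustrative call to the helper defined above; metric names and weights
# are placeholders, not values from the Ax source.
from ax.core.metric import Metric

oc = _build_new_optimization_config(
    weights=[0.5, 0.5],
    primary_objective=Metric(name="latency"),
    secondary_objective=Metric(name="throughput"),
)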
Code example #22
 def test_best_raw_objective_point_scalarized(self):
     exp = get_branin_experiment()
     exp.optimization_config = OptimizationConfig(
         ScalarizedObjective(metrics=[get_branin_metric()], minimize=False)
     )
     with self.assertRaisesRegex(ValueError, "Cannot identify best "):
         get_best_raw_objective_point(exp)
     self.assertEqual(get_best_parameters(exp, Models), None)
     exp.new_trial(
         generator_run=GeneratorRun(arms=[Arm(parameters={"x1": 5.0, "x2": 5.0})])
     ).run()
     self.assertEqual(get_best_raw_objective_point(exp)[0], {"x1": 5.0, "x2": 5.0})
Code example #23
File: test_log_y_transform.py  Project: liangshi7/Ax
 def testTransformOptimizationConfig(self):
     # basic test
     m1 = Metric(name="m1")
     objective = Objective(metric=m1, minimize=False)
     oc = OptimizationConfig(objective=objective, outcome_constraints=[])
     tf = LogY(
         search_space=None,
         observation_features=None,
         observation_data=[self.obsd1, self.obsd2],
         config={"metrics": ["m1"]},
     )
     oc_tf = tf.transform_optimization_config(oc, None, None)
     self.assertTrue(oc_tf == oc)
     # test error if transformed metric appears in outcome constraints
     m2 = Metric(name="m2")
     cons = [
         OutcomeConstraint(metric=m2,
                           op=ComparisonOp.GEQ,
                           bound=0.0,
                           relative=False)
     ]
     oc = OptimizationConfig(objective=objective, outcome_constraints=cons)
     oc_tf = tf.transform_optimization_config(oc, None, None)
     self.assertTrue(oc_tf == oc)
     m2 = Metric(name="m2")
     cons = [
         OutcomeConstraint(metric=m2,
                           op=ComparisonOp.GEQ,
                           bound=0.0,
                           relative=False)
     ]
     oc = OptimizationConfig(objective=objective, outcome_constraints=cons)
     tf2 = LogY(
         search_space=None,
         observation_features=None,
         observation_data=[self.obsd1, self.obsd2],
         config={"metrics": ["m2"]},
     )
     with self.assertRaises(ValueError):
         tf2.transform_optimization_config(oc, None, None)
Code example #24
    def testTransformOptimizationConfig(self):
        m1 = Metric(name="m1")
        m2 = Metric(name="m2")
        m3 = Metric(name="m3")
        objective = Objective(metric=m3, minimize=False)
        cons = [
            OutcomeConstraint(metric=m1,
                              op=ComparisonOp.GEQ,
                              bound=2.0,
                              relative=False),
            OutcomeConstraint(metric=m2,
                              op=ComparisonOp.LEQ,
                              bound=3.5,
                              relative=False),
        ]
        oc = OptimizationConfig(objective=objective, outcome_constraints=cons)
        oc = self.t.transform_optimization_config(oc, None, None)
        cons_t = [
            OutcomeConstraint(metric=m1,
                              op=ComparisonOp.GEQ,
                              bound=1.0,
                              relative=False),
            OutcomeConstraint(metric=m2,
                              op=ComparisonOp.LEQ,
                              bound=4.0,
                              relative=False),
        ]
        self.assertTrue(oc.outcome_constraints == cons_t)
        self.assertTrue(oc.objective == objective)

        # Check fail with relative
        con = OutcomeConstraint(metric=m1,
                                op=ComparisonOp.GEQ,
                                bound=2.0,
                                relative=True)
        oc = OptimizationConfig(objective=objective, outcome_constraints=[con])
        with self.assertRaises(ValueError):
            oc = self.t.transform_optimization_config(oc, None, None)
Code example #25
def get_experiment_with_scalarized_objective() -> Experiment:
    objective = get_scalarized_objective()
    outcome_constraints = [get_outcome_constraint()]
    optimization_config = OptimizationConfig(
        objective=objective, outcome_constraints=outcome_constraints)
    return Experiment(
        name="test_experiment_scalarized_objective",
        search_space=get_search_space(),
        optimization_config=optimization_config,
        status_quo=get_status_quo(),
        description="test experiment with scalarized objective",
        tracking_metrics=[Metric(name="tracking")],
        is_test=True,
    )
Code example #26
def get_experiment_with_multi_objective() -> Experiment:
    objective = get_multi_objective()
    outcome_constraints = [get_outcome_constraint()]
    optimization_config = OptimizationConfig(
        objective=objective, outcome_constraints=outcome_constraints)

    exp = Experiment(
        name="test_experiment_multi_objective",
        search_space=get_branin_search_space(),
        optimization_config=optimization_config,
        description="test experiment with multi objective",
        runner=SyntheticRunner(),
        tracking_metrics=[Metric(name="tracking")],
        is_test=True,
    )

    return exp
Code example #27
File: test_base_modelbridge.py  Project: jlin27/Ax
 def testGenWithDefaults(self, _, mock_gen):
     exp = get_experiment()
     exp.optimization_config = get_optimization_config()
     ss = search_space_for_range_value()
     modelbridge = ModelBridge(ss, None, [], exp)
     modelbridge.gen(1)
     mock_gen.assert_called_with(
         modelbridge,
         n=1,
         search_space=ss,
         fixed_features=ObservationFeatures(parameters={}),
         model_gen_options=None,
         optimization_config=OptimizationConfig(
             objective=Objective(metric=Metric("test_metric"), minimize=False),
             outcome_constraints=[],
         ),
         pending_observations={},
     )
Code example #28
 def testErrors(self):
     t = Derelativize(search_space=None,
                      observation_features=None,
                      observation_data=None)
     oc = OptimizationConfig(
         objective=Objective(Metric("c")),
         outcome_constraints=[
             OutcomeConstraint(Metric("a"),
                               ComparisonOp.LEQ,
                               bound=2,
                               relative=True)
         ],
     )
     search_space = SearchSpace(
         parameters=[RangeParameter("x", ParameterType.FLOAT, 0, 20)])
     g = ModelBridge(search_space, None, [])
     with self.assertRaises(ValueError):
         t.transform_optimization_config(oc, None, None)
     with self.assertRaises(ValueError):
         t.transform_optimization_config(oc, g, None)
Code example #29
def optimization_config_from_objectives(
    objectives: List[Metric],
    objective_thresholds: List[ObjectiveThreshold],
    outcome_constraints: List[OutcomeConstraint],
) -> OptimizationConfig:
    """Parse objectives and constraints to define optimization config.

    The resulting optimization config will be regular single-objective config
    if `objectives` is a list of one element and a multi-objective config
    otherwise.

    NOTE: If passing in multiple objectives, `objective_thresholds` must be a
    non-empty list defining constraints for each objective.
    """
    if len(objectives) == 1:
        if objective_thresholds:
            raise ValueError(
                "Single-objective optimizations must not specify objective thresholds."
            )
        return OptimizationConfig(
            objective=Objective(
                metric=objectives[0],
            ),
            outcome_constraints=outcome_constraints,
        )
    else:
        objective_names = {m.name for m in objectives}
        threshold_names = {oc.metric.name for oc in objective_thresholds}
        if objective_names != threshold_names:
            diff = objective_names.symmetric_difference(threshold_names)
            raise ValueError(
                "Multi-objective optimization requires one objective threshold "
                f"per objective metric; unmatched names are {diff}"
            )

        return MultiObjectiveOptimizationConfig(
            objective=MultiObjective(metrics=objectives),
            outcome_constraints=outcome_constraints,
            objective_thresholds=objective_thresholds,
        )
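By contrast with the single-objective branch, a multi-objective call to this variant must supply one ObjectiveThreshold per objective metric to satisfy the name check above. The sketch below uses made-up metric names and bounds and assumes the usual Ax import locations.

# Sketch of a multi-objective call to the helper defined above; metric
# names and bounds are illustrative. Each objective metric has a matching
# ObjectiveThreshold, as required by the name check in the helper.
from ax.core.metric import Metric
from ax.core.outcome_constraint import ObjectiveThreshold

m_a, m_b = Metric(name="metric_a"), Metric(name="metric_b")
mo_config = optimization_config_from_objectives(
    objectives=[m_a, m_b],
    objective_thresholds=[
        ObjectiveThreshold(metric=m_a, bound=0.1, relative=False),
        ObjectiveThreshold(metric=m_b, bound=0.2, relative=False),
    ],
    outcome_constraints=[],
)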
Code example #30
File: core_stubs.py  Project: proteanblank/Ax
def get_branin_experiment_with_timestamp_map_metric(
    rate: Optional[float] = None,
    incremental: Optional[bool] = False,
):
    metric_cls = (
        BraninTimestampMapMetric
        if not incremental
        else BraninIncrementalTimestampMapMetric
    )
    return Experiment(
        name="branin_with_timestamp_map_metric",
        search_space=get_branin_search_space(),
        optimization_config=OptimizationConfig(
            objective=Objective(
                metric=metric_cls(name="branin", param_names=["x1", "x2"], rate=rate),
                minimize=True,
            )
        ),
        tracking_metrics=[metric_cls(name="b", param_names=["x1", "x2"])],
        runner=SyntheticRunner(),
        default_data_type=DataType.MAP_DATA,
    )