Code example #1
    def test_best_point(
        self,
        _mock_gen,
        _mock_best_point,
        _mock_fit,
        _mock_predict,
        _mock_gen_arms,
        _mock_unwrap,
        _mock_obs_from_data,
    ):
        exp = Experiment(get_search_space_for_range_value(), "test")
        modelbridge = ArrayModelBridge(
            get_search_space_for_range_value(), NumpyModel(), [t1, t2], exp, 0
        )
        self.assertEqual(list(modelbridge.transforms.keys()), ["Cast", "t1", "t2"])
        # _fit is mocked, which typically sets this.
        modelbridge.outcomes = ["a"]
        run = modelbridge.gen(
            n=1,
            optimization_config=OptimizationConfig(
                objective=Objective(metric=Metric("a"), minimize=False),
                outcome_constraints=[],
            ),
        )
        arm, predictions = run.best_arm_predictions
        self.assertEqual(arm.parameters, {})
        self.assertEqual(predictions[0], {"m": 1.0})
        self.assertEqual(predictions[1], {"m": {"m": 2.0}})
        # Check that an optimization config is required.
        with self.assertRaises(ValueError):
            run = modelbridge.gen(n=1, optimization_config=None)
Code example #2
    def test_best_point(
        self,
        _mock_gen,
        _mock_best_point,
        _mock_fit,
        _mock_predict,
        _mock_gen_arms,
        _mock_unwrap,
        _mock_obs_from_data,
    ):
        exp = Experiment(search_space=get_search_space_for_range_value(),
                         name="test")
        modelbridge = ArrayModelBridge(
            search_space=get_search_space_for_range_value(),
            model=NumpyModel(),
            transforms=[t1, t2],
            experiment=exp,
            data=Data(),
        )
        self.assertEqual(list(modelbridge.transforms.keys()),
                         ["Cast", "t1", "t2"])
        # _fit is mocked, which typically sets this.
        modelbridge.outcomes = ["a"]
        run = modelbridge.gen(
            n=1,
            optimization_config=OptimizationConfig(
                objective=Objective(metric=Metric("a"), minimize=False),
                outcome_constraints=[],
            ),
        )
        arm, predictions = run.best_arm_predictions
        self.assertEqual(arm.parameters, {})
        self.assertEqual(predictions[0], {"m": 1.0})
        self.assertEqual(predictions[1], {"m": {"m": 2.0}})
        # Check that an optimization config is required.
        with self.assertRaises(ValueError):
            run = modelbridge.gen(n=1, optimization_config=None)

        # Optimization config validation: gen should raise a ValueError when a
        # ScalarizedOutcomeConstraint references a metric that is not in the outcomes.
        with self.assertRaises(ValueError):
            run = modelbridge.gen(
                n=1,
                optimization_config=OptimizationConfig(
                    objective=Objective(metric=Metric("a"), minimize=False),
                    outcome_constraints=[
                        ScalarizedOutcomeConstraint(
                            metrics=[Metric("wrong_metric_name")],
                            weights=[1.0],
                            op=ComparisonOp.LEQ,
                            bound=0,
                        )
                    ],
                ),
            )
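
The snippets above are method bodies from a unittest.TestCase in Ax's model-bridge tests, and they rely on surrounding context that is not shown: the mock.patch decorators that supply the _mock_* arguments, two toy transforms t1 and t2, and the imports. The following is a minimal sketch of that assumed context; the import paths, the test-class name, and the transform stubs are assumptions inferred from the identifiers used above and may differ between Ax versions.

from unittest import TestCase, mock

from ax.core.data import Data
from ax.core.experiment import Experiment
from ax.core.metric import Metric
from ax.core.objective import Objective
from ax.core.optimization_config import OptimizationConfig
from ax.core.outcome_constraint import ScalarizedOutcomeConstraint
from ax.core.types import ComparisonOp
from ax.modelbridge.array import ArrayModelBridge
from ax.modelbridge.transforms.base import Transform
from ax.models.numpy_base import NumpyModel
from ax.utils.testing.core_stubs import get_search_space_for_range_value


class t1(Transform):  # placeholder transform referenced as [t1, t2] above
    pass


class t2(Transform):  # placeholder transform referenced as [t1, t2] above
    pass


class ArrayModelBridgeTest(TestCase):  # hypothetical class name
    # Each test method above is wrapped in a stack of mock.patch decorators
    # that replace the model-bridge internals (fit, predict, gen, best point,
    # etc.); the _mock_* parameters receive those patched objects in
    # bottom-up decorator order.
    ...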
Code example #3
    def test_importances(
        self,
        _mock_feature_importances,
        _mock_fit,
        _mock_predict,
        _mock_gen_arms,
        _mock_unwrap,
        _mock_obs_from_data,
    ):
        exp = Experiment(get_search_space_for_range_value(), "test")
        modelbridge = ArrayModelBridge(
            get_search_space_for_range_value(), NumpyModel(), [t1, t2], exp, 0
        )
        # _fit is mocked, which typically sets this.
        modelbridge.outcomes = ["a", "b"]
        self.assertEqual(modelbridge.feature_importances("a"), {"x": [1.0]})
        self.assertEqual(modelbridge.feature_importances("b"), {"x": [2.0]})