Example #1
 def test_best_point(
     self,
     _mock_gen,
     _mock_best_point,
     _mock_fit,
     _mock_predict,
     _mock_gen_arms,
     _mock_unwrap,
     _mock_obs_from_data,
 ):
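     # Verifies best-point handling with the model internals mocked out: the
     # "Cast" transform is added automatically ahead of the user-supplied
     # transforms, `gen` returns the mocked best-arm predictions, and omitting
     # the optimization config raises a ValueError.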
     exp = Experiment(get_search_space_for_range_value(), "test")
     modelbridge = ArrayModelBridge(get_search_space_for_range_value(),
                                    NumpyModel(), [t1, t2], exp, 0)
     self.assertEqual(list(modelbridge.transforms.keys()),
                      ["Cast", "t1", "t2"])
     # _fit is mocked, which typically sets this.
     modelbridge.outcomes = ["a"]
     run = modelbridge.gen(
         n=1,
         optimization_config=OptimizationConfig(
             objective=Objective(metric=Metric("a"), minimize=False),
             outcome_constraints=[],
         ),
     )
     arm, predictions = run.best_arm_predictions
     self.assertEqual(arm.parameters, {})
     self.assertEqual(predictions[0], {"m": 1.0})
     self.assertEqual(predictions[1], {"m": {"m": 2.0}})
     # Check that an optimization config is required.
     with self.assertRaises(ValueError):
         run = modelbridge.gen(n=1, optimization_config=None)
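
The `_mock_*` parameters in this and the later best-point/importances examples come from stacked `unittest.mock.patch` decorators that are not shown in these excerpts. The exact patch targets are therefore unknown here; the snippet below only illustrates the mechanics with a hypothetical `_Widget` class: decorators are applied bottom-up, so the bottom-most patch arrives as the first mock argument (which is why `_mock_gen` comes first and `_mock_obs_from_data` last).

    import unittest
    from unittest import mock


    class _Widget:
        """Hypothetical stand-in for the internals that the real tests patch."""

        def fit(self) -> None:
            raise NotImplementedError

        def gen(self) -> None:
            raise NotImplementedError


    class PatchOrderExample(unittest.TestCase):
        # Stacked decorators are applied bottom-up, so the bottom-most patch
        # is passed to the test method first.
        @mock.patch.object(_Widget, "fit")  # top decorator    -> last mock argument
        @mock.patch.object(_Widget, "gen")  # bottom decorator -> first mock argument
        def test_order(self, mock_gen, mock_fit):
            _Widget().gen()
            mock_gen.assert_called_once()
            mock_fit.assert_not_called()
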
Example #2
 def test_transform_callback_int(self, *_):
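     # IntToFloat relaxes the integer parameters to floats; `_transform_callback`
     # should round generated candidates back onto the integer grid
     # (5.4 -> 5, 7.6 -> 8) for both TorchModelBridge and ArrayModelBridge.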
     exp = get_branin_experiment(with_batch=True)
     data = get_branin_data(trial_indices=exp.trials)
     parameters = [
         RangeParameter(name="x1",
                        parameter_type=ParameterType.INT,
                        lower=1,
                        upper=10),
         RangeParameter(name="x2",
                        parameter_type=ParameterType.INT,
                        lower=5,
                        upper=15),
     ]
     gpei = TorchModelBridge(
         experiment=exp,
         data=data,
         search_space=SearchSpace(parameters=parameters),
         model=BotorchModel(),
         transforms=[IntToFloat],
         torch_dtype=torch.double,
         fit_out_of_design=True,
     )
     transformed = gpei._transform_callback([5.4, 7.6])
     self.assertTrue(np.allclose(transformed, [5, 8]))
     np_mb = ArrayModelBridge(
         experiment=exp,
         data=exp.fetch_data(),
         search_space=SearchSpace(parameters=parameters),
         model=NumpyModel(),
         transforms=[IntToFloat],
     )
     transformed = np_mb._transform_callback(np.array([5.4, 7.6]))
     self.assertTrue(np.allclose(transformed, [5, 8]))
Example #3
 def test_best_point(
     self,
     _mock_gen,
     _mock_best_point,
     _mock_fit,
     _mock_predict,
     _mock_gen_arms,
     _mock_unwrap,
     _mock_obs_from_data,
 ):
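     # Variant of the best-point test using positional constructor arguments;
     # only the two user-supplied transforms are registered here, and `gen`
     # returns the mocked best-arm predictions.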
     exp = Experiment(get_search_space_for_range_value(), "test")
     modelbridge = ArrayModelBridge(get_search_space_for_range_value(),
                                    NumpyModel(), [t1, t2], exp, 0)
     self.assertEqual(list(modelbridge.transforms.keys()), ["t1", "t2"])
     run = modelbridge.gen(
         n=1,
         optimization_config=OptimizationConfig(
             objective=Objective(metric=Metric("a"), minimize=False),
             outcome_constraints=[],
         ),
     )
     arm, predictions = run.best_arm_predictions
     self.assertEqual(arm.parameters, {})
     self.assertEqual(predictions[0], {"m": 1.0})
     self.assertEqual(predictions[1], {"m": {"m": 2.0}})
Example #4
 def test_importances(
     self,
     _mock_feature_importances,
     _mock_fit,
     _mock_predict,
     _mock_gen_arms,
     _mock_unwrap,
     _mock_obs_from_data,
 ):
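     # With the model's feature importances mocked, checks that
     # `feature_importances` returns the importances for the requested
     # outcome ("a" vs. "b"), keyed by parameter name.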
     exp = Experiment(get_search_space_for_range_value(), "test")
     modelbridge = ArrayModelBridge(get_search_space_for_range_value(),
                                    NumpyModel(), [t1, t2], exp, 0)
     modelbridge.outcomes = ["a", "b"]
     self.assertEqual(modelbridge.feature_importances("a"), {"x": [1.0]})
     self.assertEqual(modelbridge.feature_importances("b"), {"x": [2.0]})
Example #5
    def test_best_point(
        self,
        _mock_gen,
        _mock_best_point,
        _mock_fit,
        _mock_predict,
        _mock_gen_arms,
        _mock_unwrap,
        _mock_obs_from_data,
    ):
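        # Keyword-argument variant of the best-point test; additionally checks
        # that `gen` rejects an optimization config whose
        # ScalarizedOutcomeConstraint references a metric not in the outcomes.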
        exp = Experiment(search_space=get_search_space_for_range_value(),
                         name="test")
        modelbridge = ArrayModelBridge(
            search_space=get_search_space_for_range_value(),
            model=NumpyModel(),
            transforms=[t1, t2],
            experiment=exp,
            data=Data(),
        )
        self.assertEqual(list(modelbridge.transforms.keys()),
                         ["Cast", "t1", "t2"])
        # _fit is mocked, which typically sets this.
        modelbridge.outcomes = ["a"]
        run = modelbridge.gen(
            n=1,
            optimization_config=OptimizationConfig(
                objective=Objective(metric=Metric("a"), minimize=False),
                outcome_constraints=[],
            ),
        )
        arm, predictions = run.best_arm_predictions
        self.assertEqual(arm.parameters, {})
        self.assertEqual(predictions[0], {"m": 1.0})
        self.assertEqual(predictions[1], {"m": {"m": 2.0}})
        # Check that an optimization config is required.
        with self.assertRaises(ValueError):
            run = modelbridge.gen(n=1, optimization_config=None)

        # Optimization config validation: an error should be raised when a
        # ScalarizedOutcomeConstraint references a metric that is not in the outcomes.
        with self.assertRaises(ValueError):
            run = modelbridge.gen(
                n=1,
                optimization_config=OptimizationConfig(
                    objective=Objective(metric=Metric("a"), minimize=False),
                    outcome_constraints=[
                        ScalarizedOutcomeConstraint(
                            metrics=[Metric("wrong_metric_name")],
                            weights=[1.0],
                            op=ComparisonOp.LEQ,
                            bound=0,
                        )
                    ],
                ),
            )
Example #6
    def test_candidate_metadata_propagation(self, mock_model_fit,
                                            mock_model_update, mock_model_gen):
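        # End-to-end check that candidate metadata attached to generator runs
        # is passed to the model during `fit`, propagated by `gen`, merged
        # correctly on `update`, and that `None` or absent metadata is handled
        # gracefully.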
        exp = get_branin_experiment(with_status_quo=True, with_batch=True)
        # Check that the metadata is correctly re-added to observation
        # features during `fit`.
        preexisting_batch_gr = exp.trials[0]._generator_run_structs[
            0].generator_run
        preexisting_batch_gr._candidate_metadata_by_arm_signature = {
            preexisting_batch_gr.arms[0].signature: {
                "preexisting_batch_cand_metadata": "some_value"
            }
        }
        modelbridge = ArrayModelBridge(
            search_space=exp.search_space,
            experiment=exp,
            model=NumpyModel(),
            data=get_branin_data(),
        )
        self.assertTrue(
            np.array_equal(
                mock_model_fit.call_args[1].get("Xs"),
                np.array([[list(exp.trials[0].arms[0].parameters.values())]]),
            ))
        self.assertEqual(
            mock_model_fit.call_args[1].get("candidate_metadata"),
            [[{
                "preexisting_batch_cand_metadata": "some_value"
            }]],
        )

        # Check that `gen` correctly propagates the metadata to the GR.
        gr = modelbridge.gen(n=1)
        self.assertEqual(
            gr.candidate_metadata_by_arm_signature,
            {
                gr.arms[0].signature: {
                    "some_key": "some_value_0"
                },
                gr.arms[1].signature: {
                    "some_key": "some_value_1"
                },
            },
        )
        # Check that the metadata is correctly re-added to observation
        # features during `update`.
        batch = exp.new_batch_trial(generator_run=gr)
        modelbridge.update(
            experiment=exp,
            new_data=get_branin_data(trial_indices=[batch.index]))
        self.assertTrue(
            np.array_equal(
                mock_model_update.call_args[1].get("Xs"),
                np.array(
                    [[list(exp.trials[0].arms[0].parameters.values()), [1,
                                                                        2]]]),
            ))
        self.assertEqual(
            mock_model_update.call_args[1].get("candidate_metadata"),
            [[
                {
                    "preexisting_batch_cand_metadata": "some_value"
                },
                # The new data covers only arm '1_0', not '1_1', so we don't
                # expect to see '{"some_key": "some_value_1"}' in the
                # candidate metadata.
                {
                    "some_key": "some_value_0"
                },
            ]],
        )

        # Check that `None` candidate metadata is handled correctly.
        mock_model_gen.return_value = (
            np.array([[2, 4], [3, 5]]),
            np.array([1, 2]),
            None,
            {},
        )
        gr = modelbridge.gen(n=1)
        self.assertIsNone(gr.candidate_metadata_by_arm_signature)
        # Check that the metadata is correctly re-added to observation
        # features during `update`.
        batch = exp.new_batch_trial(generator_run=gr)
        modelbridge.update(
            experiment=exp,
            new_data=get_branin_data(trial_indices=[batch.index]))
        self.assertTrue(
            np.array_equal(
                mock_model_update.call_args[1].get("Xs"),
                np.array([[
                    list(exp.trials[0].arms[0].parameters.values()), [1, 2],
                    [2, 4]
                ]]),
            ))
        self.assertEqual(
            mock_model_update.call_args[1].get("candidate_metadata"),
            [[
                {
                    "preexisting_batch_cand_metadata": "some_value"
                },
                {
                    "some_key": "some_value_0"
                },
                {},
            ]],
        )

        # Check that the absence of candidate metadata is handled correctly.
        exp = get_branin_experiment(with_status_quo=True)
        modelbridge = ArrayModelBridge(search_space=exp.search_space,
                                       experiment=exp,
                                       model=NumpyModel())
        # Hack in outcome names to bypass validation (since we instantiated the
        # model without data).
        modelbridge.outcomes = modelbridge._metric_names = next(
            iter(exp.metrics))
        gr = modelbridge.gen(n=1)
        self.assertIsNone(
            mock_model_fit.call_args[1].get("candidate_metadata"))
        self.assertIsNone(gr.candidate_metadata_by_arm_signature)
        batch = exp.new_batch_trial(generator_run=gr)
        modelbridge.update(
            experiment=exp,
            new_data=get_branin_data(trial_indices=[batch.index]))
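
These methods are excerpted from a larger test module, so the imports, the `@mock.patch` decorators, and the `t1`/`t2` transforms they reference are not shown. Below is a minimal sketch of the module-level scaffolding they appear to assume; the import paths correspond to older Ax releases that still shipped `ArrayModelBridge` and `NumpyModel`, and `t1`/`t2` are written as bare `Transform` subclasses, so treat the exact paths and class bodies as assumptions rather than the original source.

    # Assumed scaffolding for the examples above -- verify against your Ax version.
    import numpy as np
    import torch
    from unittest import mock  # for the @mock.patch decorators omitted above

    from ax.core.data import Data
    from ax.core.experiment import Experiment
    from ax.core.metric import Metric
    from ax.core.objective import Objective
    from ax.core.optimization_config import OptimizationConfig
    from ax.core.outcome_constraint import ScalarizedOutcomeConstraint
    from ax.core.parameter import ParameterType, RangeParameter
    from ax.core.search_space import SearchSpace
    from ax.core.types import ComparisonOp
    from ax.modelbridge.array import ArrayModelBridge
    from ax.modelbridge.torch import TorchModelBridge
    from ax.modelbridge.transforms.base import Transform
    from ax.modelbridge.transforms.int_to_float import IntToFloat
    from ax.models.numpy_base import NumpyModel
    from ax.models.torch.botorch import BotorchModel
    from ax.utils.testing.core_stubs import (
        get_branin_data,
        get_branin_experiment,
        get_search_space_for_range_value,
    )


    # Stand-ins for the two transforms registered as "t1" and "t2"; the base
    # Transform acts as an identity by default, which is enough for these tests.
    class t1(Transform):
        pass


    class t2(Transform):
        pass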