Example #1
    def testBadConstruction(self):
        # Duplicate parameter
        with self.assertRaises(ValueError):
            p1 = self.parameters + [self.parameters[0]]
            SearchSpace(parameters=p1, parameter_constraints=[])

        # Constraint on non-existent parameter
        with self.assertRaises(ValueError):
            SearchSpace(
                parameters=self.parameters,
                parameter_constraints=[
                    OrderConstraint(lower_parameter=self.a,
                                    upper_parameter=self.g)
                ],
            )

        # Vanilla Constraint on non-existent parameter
        with self.assertRaises(ValueError):
            SearchSpace(
                parameters=self.parameters,
                parameter_constraints=[
                    ParameterConstraint(constraint_dict={"g": 1}, bound=0)
                ],
            )

        # Constraint on non-numeric parameter
        with self.assertRaises(ValueError):
            SearchSpace(
                parameters=self.parameters,
                parameter_constraints=[
                    OrderConstraint(lower_parameter=self.a,
                                    upper_parameter=self.d)
                ],
            )

        # Constraint on choice parameter
        with self.assertRaises(ValueError):
            SearchSpace(
                parameters=self.parameters,
                parameter_constraints=[
                    OrderConstraint(lower_parameter=self.a,
                                    upper_parameter=self.e)
                ],
            )

        # Constraint on logscale parameter
        with self.assertRaises(ValueError):
            SearchSpace(
                parameters=self.parameters,
                parameter_constraints=[
                    OrderConstraint(lower_parameter=self.a,
                                    upper_parameter=self.f)
                ],
            )

        # Constraint on mismatched parameter
        with self.assertRaises(ValueError):
            wrong_a = self.a.clone()
            wrong_a.update_range(upper=10)
            SearchSpace(
                parameters=self.parameters,
                parameter_constraints=[
                    OrderConstraint(lower_parameter=wrong_a,
                                    upper_parameter=self.b)
                ],
            )

Example #2
    def setUp(self):
        x = RangeParameter("x", ParameterType.FLOAT, lower=0, upper=1)
        y = RangeParameter("y",
                           ParameterType.FLOAT,
                           lower=1,
                           upper=2,
                           is_fidelity=True,
                           target_value=2)
        z = RangeParameter("z", ParameterType.FLOAT, lower=0, upper=5)
        self.parameters = [x, y, z]
        parameter_constraints = [
            OrderConstraint(x, y),
            SumConstraint([x, z], False, 3.5),
        ]

        self.search_space = SearchSpace(self.parameters, parameter_constraints)

        self.observation_features = [
            ObservationFeatures(parameters={
                "x": 0.2,
                "y": 1.2,
                "z": 3
            }),
            ObservationFeatures(parameters={
                "x": 0.4,
                "y": 1.4,
                "z": 3
            }),
            ObservationFeatures(parameters={
                "x": 0.6,
                "y": 1.6,
                "z": 3
            }),
        ]
        self.observation_data = [
            ObservationData(
                metric_names=["a", "b"],
                means=np.array([1.0, -1.0]),
                covariance=np.array([[1.0, 4.0], [4.0, 6.0]]),
            ),
            ObservationData(
                metric_names=["a", "b"],
                means=np.array([2.0, -2.0]),
                covariance=np.array([[2.0, 5.0], [5.0, 7.0]]),
            ),
            ObservationData(metric_names=["a"],
                            means=np.array([3.0]),
                            covariance=np.array([[3.0]])),
        ]
        self.observations = [
            Observation(
                features=self.observation_features[i],
                data=self.observation_data[i],
                arm_name=str(i),
            ) for i in range(3)
        ]
        self.pending_observations = {
            "b":
            [ObservationFeatures(parameters={
                "x": 0.6,
                "y": 1.6,
                "z": 3
            })]
        }
        self.model_gen_options = {"option": "yes"}

Example #3
class SearchSpaceToChoiceTest(TestCase):
    def setUp(self):
        self.search_space = SearchSpace(parameters=[
            RangeParameter(
                "a", lower=1, upper=3, parameter_type=ParameterType.FLOAT),
            ChoiceParameter("b",
                            parameter_type=ParameterType.STRING,
                            values=["a", "b", "c"]),
        ])
        self.observation_features = [
            ObservationFeatures(parameters={
                "a": 2,
                "b": "a"
            }),
            ObservationFeatures(parameters={
                "a": 3,
                "b": "b"
            }),
            ObservationFeatures(parameters={
                "a": 3,
                "b": "c"
            }),
        ]
        self.signature_to_parameterization = {
            Arm(parameters=obsf.parameters).signature: obsf.parameters
            for obsf in self.observation_features
        }
        self.transformed_features = [
            ObservationFeatures(
                parameters={
                    "arms": Arm(parameters={
                        "a": 2,
                        "b": "a"
                    }).signature
                }),
            ObservationFeatures(
                parameters={
                    "arms": Arm(parameters={
                        "a": 3,
                        "b": "b"
                    }).signature
                }),
            ObservationFeatures(
                parameters={
                    "arms": Arm(parameters={
                        "a": 3,
                        "b": "c"
                    }).signature
                }),
        ]
        self.t = SearchSpaceToChoice(
            search_space=self.search_space,
            observation_features=self.observation_features,
            observation_data=None,
        )
        self.t2 = SearchSpaceToChoice(
            search_space=self.search_space,
            observation_features=[self.observation_features[0]],
            observation_data=None,
        )

    def testTransformSearchSpace(self):
        ss2 = self.search_space.clone()
        ss2 = self.t.transform_search_space(ss2)
        self.assertEqual(len(ss2.parameters), 1)
        expected_parameter = ChoiceParameter(
            name="arms",
            parameter_type=ParameterType.STRING,
            values=list(self.t.signature_to_parameterization.keys()),
        )
        self.assertEqual(ss2.parameters.get("arms"), expected_parameter)

        # Test error if there are fidelities
        ss3 = SearchSpace(parameters=[
            RangeParameter(
                "a",
                lower=1,
                upper=3,
                parameter_type=ParameterType.FLOAT,
                is_fidelity=True,
                target_value=3,
            )
        ])
        with self.assertRaises(ValueError):
            SearchSpaceToChoice(
                search_space=ss3,
                observation_features=self.observation_features,
                observation_data=None,
            )

    def testTransformSearchSpaceWithFixedParam(self):
        ss2 = self.search_space.clone()
        ss2 = self.t2.transform_search_space(ss2)
        self.assertEqual(len(ss2.parameters), 1)
        expected_parameter = FixedParameter(
            name="arms",
            parameter_type=ParameterType.STRING,
            value=list(self.t2.signature_to_parameterization.keys())[0],
        )
        self.assertEqual(ss2.parameters.get("arms"), expected_parameter)

    def testTransformObservationFeatures(self):
        obs_ft2 = deepcopy(self.observation_features)
        obs_ft2 = self.t.transform_observation_features(obs_ft2)
        self.assertEqual(obs_ft2, self.transformed_features)
        obs_ft2 = self.t.untransform_observation_features(obs_ft2)
        self.assertEqual(obs_ft2, self.observation_features)
Example #4
def get_search_space_for_value(val: float = 3.0) -> SearchSpace:
    return SearchSpace([FixedParameter("x", ParameterType.FLOAT, val)])
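
A minimal usage sketch, assuming the same Ax imports the factory above uses: a space built from a single FixedParameter accepts exactly one parameterization, which check_membership confirms.

# Hedged sketch, not part of the original snippet.
space = get_search_space_for_value(5.0)
assert space.check_membership({"x": 5.0})      # the fixed value is in-design
assert not space.check_membership({"x": 3.0})  # any other value is not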
Example #5
    name="Hartmann6, D=100",
    optimal_value=-3.32237,
    optimization_config=OptimizationConfig(
        objective=Objective(
            metric=Hartmann6Metric(
                name="objective",
                param_names=["x19", "x14", "x43", "x37", "x66", "x3"],
                noise_sd=0.0,
            ),
            minimize=True,
        )
    ),
    search_space=SearchSpace(
        parameters=[
            RangeParameter(
                name=f"x{i}", parameter_type=ParameterType.FLOAT, lower=0.0, upper=1.0
            )
            for i in range(100)
        ]
    ),
)

hartmann6_1000 = BenchmarkProblem(
    name="Hartmann6, D=1000",
    optimal_value=-3.32237,
    optimization_config=OptimizationConfig(
        objective=Objective(
            metric=Hartmann6Metric(
                name="objective",
                param_names=["x190", "x140", "x430", "x370", "x660", "x30"],
                noise_sd=0.0,
Example #6
 def transform_search_space(self, search_space: SearchSpace) -> SearchSpace:
     new_ss = search_space.clone()
     new_ss.parameters["x"]._value *= 2.0  # pyre-ignore[16]: testing hack.
     return new_ss
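
This transform leans on SearchSpace.clone() returning an independent copy, so the caller's space is never mutated. A sketch of that contract, with hypothetical names:

# Sketch only: `ss` is assumed to hold a FixedParameter "x", and
# `transform` an instance of the class defining the method above.
before = ss.parameters["x"].value
new_ss = transform.transform_search_space(ss)  # mutates the clone, not `ss`
assert ss.parameters["x"].value == before
assert new_ss.parameters["x"].value == before * 2.0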
Example #7
def get_small_discrete_search_space() -> SearchSpace:
    return SearchSpace([
        RangeParameter("x", ParameterType.INT, 0, 1),
        ChoiceParameter("y", ParameterType.STRING, ["red", "panda"]),
    ])
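
This space contains just four points (two integer values times two string choices). A quick enumeration sketch, assuming the factory above is importable:

import itertools

space = get_small_discrete_search_space()
x_vals = range(int(space.parameters["x"].lower),
               int(space.parameters["x"].upper) + 1)
y_vals = space.parameters["y"].values
points = [{"x": x, "y": y} for x, y in itertools.product(x_vals, y_vals)]
assert len(points) == 4
assert all(space.check_membership(p) for p in points)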
Example #8
def define_problem(function_name='exp_sin_squared',
                   n_dimensions=None,
                   noise_std=0.001,
                   observer=None,
                   visualise=False,
                   vis_density=250,
                   log_dir=Path('./')):
    def get_botorch_test_function(override_name=None):
        """Gets function defined in botorch test_functions.

        All parameters are obtained from the define_problem scope.
        """
        if override_name is None:
            function_class = getattr(test_functions,
                                     function_name.capitalize())
        else:
            function_class = getattr(test_functions, override_name)
        if n_dimensions is not None:
            function = function_class(dim=n_dimensions,
                                      noise_std=noise_std,
                                      negate=True)
        else:
            function = function_class(noise_std=noise_std, negate=True)
        return function

    if function_name == 'exp_sin_squared':

        def function(x):
            return torch.exp(-(torch.sin(3 * x)**2 + x**2))

        function.dim = 1
        function._bounds = [(-5, 5)]
        function._optimizers = [(0, )]
        function._optimal_value = 1
    elif function_name in ('ackley', 'griewank', 'levy', 'michalewicz',
                           'rastrigin', 'rosenbrock'):
        function = get_botorch_test_function()
    elif function_name == 'dixonprice':
        function = get_botorch_test_function(override_name='DixonPrice')
    elif function_name == 'threehumpcamel':
        function = get_botorch_test_function(override_name='ThreeHumpCamel')
    else:
        raise ValueError(f'Unknown function_name: {function_name}')

    if observer is not None:
        observer.current['optimum']['x'] = torch.tensor(
            function._optimizers[0])
        observer.current['optimum']['function'] = function._optimal_value

    if visualise:
        observer.current['function'] = function
        vis.function(function,
                     vis_start=function._bounds[0][0],
                     vis_end=function._bounds[0][1],
                     vis_density=vis_density,
                     function_name=function_name,
                     log_dir=log_dir.parents[2])

    def evaluation_function(parameterisation):
        return {
            'function':
            (function(torch.tensor(list(parameterisation.values()))).item(),
             noise_std)
        }

    search_space = SearchSpace(parameters=[
        RangeParameter(name=f'x{i}',
                       parameter_type=ParameterType.FLOAT,
                       lower=function._bounds[i][0],
                       upper=function._bounds[i][1])
        for i in range(function.dim)
    ])

    return evaluation_function, search_space
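
One plausible way to consume the returned pair, using the legacy SimpleExperiment API seen in other examples on this page; the experiment name and settings here are illustrative assumptions, not taken from the snippet:

from ax import SimpleExperiment

evaluation_function, search_space = define_problem('ackley', n_dimensions=2)
exp = SimpleExperiment(
    name='ackley_demo',                # hypothetical experiment name
    search_space=search_space,
    evaluation_function=evaluation_function,
    objective_name='function',         # matches the dict key returned above
    minimize=False,                    # the test functions are negated above
)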
Example #9
    def testDerelativizeTransform(self, mock_predict, mock_fit,
                                  mock_observations_from_data):
        t = Derelativize(search_space=None,
                         observation_features=None,
                         observation_data=None)

        # ModelBridge with in-design status quo
        search_space = SearchSpace(parameters=[
            RangeParameter("x", ParameterType.FLOAT, 0, 20),
            RangeParameter("y", ParameterType.FLOAT, 0, 20),
        ])
        g = ModelBridge(
            search_space=search_space,
            model=None,
            transforms=[],
            experiment=Experiment(search_space, "test"),
            data=Data(),
            status_quo_name="1_1",
        )

        # Test with no relative constraints
        objective = Objective(Metric("c"))
        oc = OptimizationConfig(
            objective=objective,
            outcome_constraints=[
                OutcomeConstraint(Metric("a"),
                                  ComparisonOp.LEQ,
                                  bound=2,
                                  relative=False)
            ],
        )
        oc2 = t.transform_optimization_config(oc, g, None)
        self.assertTrue(oc == oc2)

        # Test with relative constraint, in-design status quo
        oc = OptimizationConfig(
            objective=objective,
            outcome_constraints=[
                OutcomeConstraint(Metric("a"),
                                  ComparisonOp.LEQ,
                                  bound=2,
                                  relative=False),
                OutcomeConstraint(Metric("b"),
                                  ComparisonOp.LEQ,
                                  bound=-10,
                                  relative=True),
            ],
        )
        oc = t.transform_optimization_config(oc, g, None)
        self.assertTrue(oc.outcome_constraints == [
            OutcomeConstraint(
                Metric("a"), ComparisonOp.LEQ, bound=2, relative=False),
            OutcomeConstraint(
                Metric("b"), ComparisonOp.LEQ, bound=4.5, relative=False),
        ])
        obsf = mock_predict.mock_calls[0][1][1][0]
        obsf2 = ObservationFeatures(parameters={"x": 2.0, "y": 10.0})
        self.assertTrue(obsf == obsf2)

        # Test with relative constraint, out-of-design status quo
        mock_predict.side_effect = Exception()
        g = ModelBridge(
            search_space=search_space,
            model=None,
            transforms=[],
            experiment=Experiment(search_space, "test"),
            data=Data(),
            status_quo_name="1_2",
        )
        oc = OptimizationConfig(
            objective=objective,
            outcome_constraints=[
                OutcomeConstraint(Metric("a"),
                                  ComparisonOp.LEQ,
                                  bound=2,
                                  relative=False),
                OutcomeConstraint(Metric("b"),
                                  ComparisonOp.LEQ,
                                  bound=-10,
                                  relative=True),
            ],
        )
        oc = t.transform_optimization_config(oc, g, None)
        self.assertTrue(oc.outcome_constraints == [
            OutcomeConstraint(
                Metric("a"), ComparisonOp.LEQ, bound=2, relative=False),
            OutcomeConstraint(
                Metric("b"), ComparisonOp.LEQ, bound=3.6, relative=False),
        ])
        self.assertEqual(mock_predict.call_count, 2)

        # Raises error if predict fails with in-design status quo
        g = ModelBridge(search_space, None, [], status_quo_name="1_1")
        oc = OptimizationConfig(
            objective=objective,
            outcome_constraints=[
                OutcomeConstraint(Metric("a"),
                                  ComparisonOp.LEQ,
                                  bound=2,
                                  relative=False),
                OutcomeConstraint(Metric("b"),
                                  ComparisonOp.LEQ,
                                  bound=-10,
                                  relative=True),
            ],
        )
        with self.assertRaises(Exception):
            oc = t.transform_optimization_config(oc, g, None)

        # Raises error with relative constraint, no status quo
        exp = Experiment(search_space, "name")
        g = ModelBridge(search_space, None, [], exp)
        with self.assertRaises(ValueError):
            oc = t.transform_optimization_config(oc, g, None)

        # Raises error with relative constraint, no modelbridge
        with self.assertRaises(ValueError):
            oc = t.transform_optimization_config(oc, None, None)
Example #10
 def setUp(self):
     self.search_space = SearchSpace(parameters=[
         RangeParameter(
             "a", lower=1, upper=3, parameter_type=ParameterType.FLOAT),
         ChoiceParameter("b",
                         parameter_type=ParameterType.STRING,
                         values=["a", "b", "c"]),
     ])
     self.observation_features = [
         ObservationFeatures(parameters={
             "a": 2,
             "b": "a"
         }),
         ObservationFeatures(parameters={
             "a": 3,
             "b": "b"
         }),
         ObservationFeatures(parameters={
             "a": 3,
             "b": "c"
         }),
     ]
     self.signature_to_parameterization = {
         Arm(parameters=obsf.parameters).signature: obsf.parameters
         for obsf in self.observation_features
     }
     self.transformed_features = [
         ObservationFeatures(
             parameters={
                 "arms": Arm(parameters={
                     "a": 2,
                     "b": "a"
                 }).signature
             }),
         ObservationFeatures(
             parameters={
                 "arms": Arm(parameters={
                     "a": 3,
                     "b": "b"
                 }).signature
             }),
         ObservationFeatures(
             parameters={
                 "arms": Arm(parameters={
                     "a": 3,
                     "b": "c"
                 }).signature
             }),
     ]
     self.t = SearchSpaceToChoice(
         search_space=self.search_space,
         observation_features=self.observation_features,
         observation_data=None,
     )
     self.t2 = SearchSpaceToChoice(
         search_space=self.search_space,
         observation_features=[self.observation_features[0]],
         observation_data=None,
     )
     self.t3 = SearchSpaceToChoice(
         search_space=self.search_space,
         observation_features=self.observation_features,
         observation_data=None,
         config={"use_ordered": True},
     )
Example #11
    def setUp(self):
        self.search_space = SearchSpace(
            parameters=[
                RangeParameter("x",
                               lower=1,
                               upper=3,
                               parameter_type=ParameterType.FLOAT),
                RangeParameter("a",
                               lower=1,
                               upper=2,
                               parameter_type=ParameterType.INT),
                ChoiceParameter("b",
                                parameter_type=ParameterType.STRING,
                                values=["a", "b", "c"]),
                ChoiceParameter("c",
                                parameter_type=ParameterType.BOOL,
                                values=[True, False]),
                ChoiceParameter(
                    "d",
                    parameter_type=ParameterType.FLOAT,
                    values=[1.0, 10.0, 100.0],
                    is_ordered=True,
                ),
            ],
            parameter_constraints=[
                ParameterConstraint(constraint_dict={
                    "x": -0.5,
                    "a": 1
                },
                                    bound=0.5)
            ],
        )
        self.t = OneHot(
            search_space=self.search_space,
            observation_features=None,
            observation_data=None,
        )
        self.t2 = OneHot(
            search_space=self.search_space,
            observation_features=None,
            observation_data=None,
            config={"rounding": "randomized"},
        )

        self.transformed_features = ObservationFeatures(
            parameters={
                "x": 2.2,
                "a": 2,
                "b" + OH_PARAM_INFIX + "_0": 0,
                "b" + OH_PARAM_INFIX + "_1": 1,
                "b" + OH_PARAM_INFIX + "_2": 0,
                # Only two choices => one parameter.
                "c" + OH_PARAM_INFIX: 0,
                "d": 10.0,
            })
        self.observation_features = ObservationFeatures(parameters={
            "x": 2.2,
            "a": 2,
            "b": "b",
            "c": False,
            "d": 10.0
        })
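
As the fixtures above show, the three-valued "b" expands to three 0/1 columns while the two-valued "c" collapses to a single column. A hypothetical companion test sketching the round trip these fixtures support:

    def testRoundTrip(self):  # sketch, not part of the original suite
        obs = self.t.transform_observation_features(
            [self.observation_features])
        self.assertEqual(obs, [self.transformed_features])
        obs = self.t.untransform_observation_features(obs)
        self.assertEqual(obs, [self.observation_features])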
Example #12
hartmann6 = BenchmarkProblem(
    name=hartmann6_function.name,
    fbest=hartmann6_function.fmin,
    optimization_config=OptimizationConfig(objective=Objective(
        metric=Hartmann6Metric(
            name=hartmann6_function.name,
            param_names=[f"x{i}" for i in range(6)],
            noise_sd=0.01,
        ),
        minimize=True,
    )),
    search_space=SearchSpace(parameters=[
        RangeParameter(
            name=f"x{i}",
            parameter_type=ParameterType.FLOAT,
            lower=param_domain[0],
            upper=param_domain[1],
        ) for i, param_domain in enumerate(hartmann6_function.domain)
    ]),
)

hartmann6_constrained = BenchmarkProblem(
    name=hartmann6_function.name,
    fbest=hartmann6_function.fmin,
    optimization_config=OptimizationConfig(
        objective=Objective(
            metric=Hartmann6Metric(name="hartmann6",
                                   param_names=[f"x{i}" for i in range(6)],
                                   noise_sd=0.2),
            minimize=True,
        ),
Example #13
    def testModelBridge(self, mock_fit, mock_gen_arms, mock_observations_from_data):
        # Test that on init transforms are stored and applied in the correct order
        transforms = [t1, t2]
        exp = get_experiment()
        modelbridge = ModelBridge(search_space_for_value(), 0, transforms, exp, 0)
        self.assertEqual(list(modelbridge.transforms.keys()), ["t1", "t2"])
        fit_args = mock_fit.mock_calls[0][2]
        self.assertTrue(fit_args["search_space"] == search_space_for_value(8.0))
        self.assertTrue(
            fit_args["observation_features"]
            == [observation1trans().features, observation2trans().features]
        )
        self.assertTrue(
            fit_args["observation_data"]
            == [observation1trans().data, observation2trans().data]
        )
        self.assertTrue(mock_observations_from_data.called)

        # Test that transforms are applied correctly on predict
        modelbridge._predict = mock.MagicMock(
            "ax.modelbridge.base.ModelBridge._predict",
            autospec=True,
            return_value=[observation2trans().data],
        )

        modelbridge.predict([observation2().features])
        # Observation features sent to _predict are un-transformed afterwards
        modelbridge._predict.assert_called_with([observation2().features])

        # Test transforms applied on gen
        modelbridge._gen = mock.MagicMock(
            "ax.modelbridge.base.ModelBridge._gen",
            autospec=True,
            return_value=([observation1trans().features], [2], None),
        )
        oc = OptimizationConfig(objective=Objective(metric=Metric(name="test_metric")))
        modelbridge._set_kwargs_to_save(
            model_key="TestModel", model_kwargs={}, bridge_kwargs={}
        )
        gr = modelbridge.gen(
            n=1,
            search_space=search_space_for_value(),
            optimization_config=oc,
            pending_observations={"a": [observation2().features]},
            fixed_features=ObservationFeatures({"x": 5}),
        )
        self.assertEqual(gr._model_key, "TestModel")
        modelbridge._gen.assert_called_with(
            n=1,
            search_space=SearchSpace([FixedParameter("x", ParameterType.FLOAT, 8.0)]),
            optimization_config=oc,
            pending_observations={"a": [observation2trans().features]},
            fixed_features=ObservationFeatures({"x": 36}),
            model_gen_options=None,
        )
        mock_gen_arms.assert_called_with(
            arms_by_signature={}, observation_features=[observation1().features]
        )

        # Gen with no pending observations and no fixed features
        modelbridge.gen(
            n=1, search_space=search_space_for_value(), optimization_config=None
        )
        modelbridge._gen.assert_called_with(
            n=1,
            search_space=SearchSpace([FixedParameter("x", ParameterType.FLOAT, 8.0)]),
            optimization_config=None,
            pending_observations={},
            fixed_features=ObservationFeatures({}),
            model_gen_options=None,
        )

        # Gen with multi-objective optimization config.
        oc2 = OptimizationConfig(
            objective=ScalarizedObjective(
                metrics=[Metric(name="test_metric"), Metric(name="test_metric_2")]
            )
        )
        modelbridge.gen(
            n=1, search_space=search_space_for_value(), optimization_config=oc2
        )
        modelbridge._gen.assert_called_with(
            n=1,
            search_space=SearchSpace([FixedParameter("x", ParameterType.FLOAT, 8.0)]),
            optimization_config=oc2,
            pending_observations={},
            fixed_features=ObservationFeatures({}),
            model_gen_options=None,
        )

        # Test transforms applied on cross_validate
        modelbridge._cross_validate = mock.MagicMock(
            "ax.modelbridge.base.ModelBridge._cross_validate",
            autospec=True,
            return_value=[observation1trans().data],
        )
        cv_training_data = [observation2()]
        cv_test_points = [observation1().features]
        cv_predictions = modelbridge.cross_validate(
            cv_training_data=cv_training_data, cv_test_points=cv_test_points
        )
        modelbridge._cross_validate.assert_called_with(
            obs_feats=[observation2trans().features],
            obs_data=[observation2trans().data],
            cv_test_points=[observation1().features],  # untransformed after
        )
        self.assertTrue(cv_predictions == [observation1().data])

        # Test stored training data
        obs = modelbridge.get_training_data()
        self.assertTrue(obs == [observation1(), observation2()])
        self.assertEqual(modelbridge.metric_names, {"a", "b"})
        self.assertIsNone(modelbridge.status_quo)
        self.assertTrue(modelbridge.model_space == search_space_for_value())
        self.assertEqual(modelbridge.training_in_design, [True, True])

        modelbridge.training_in_design = [True, False]
        with self.assertRaises(ValueError):
            modelbridge.training_in_design = [True, True, False]

        ood_obs = modelbridge.out_of_design_data()
        self.assertTrue(ood_obs == unwrap_observation_data([observation2().data]))
Example #14
    def test_REMBOStrategy(self, mock_fit_gpytorch_model, mock_optimize_acqf):
        # Construct a high-D test experiment with multiple metrics
        hartmann_search_space = SearchSpace(parameters=[
            RangeParameter(
                name=f"x{i}",
                parameter_type=ParameterType.FLOAT,
                lower=0.0,
                upper=1.0,
            ) for i in range(20)
        ])

        exp = SimpleExperiment(
            name="test",
            search_space=hartmann_search_space,
            evaluation_function=hartmann_evaluation_function,
            objective_name="hartmann6",
            minimize=True,
            outcome_constraints=[
                OutcomeConstraint(
                    metric=L2NormMetric(
                        name="l2norm",
                        param_names=[f"x{i}" for i in range(6)],
                        noise_sd=0.2,
                    ),
                    op=ComparisonOp.LEQ,
                    bound=1.25,
                    relative=False,
                )
            ],
        )

        # Instantiate the strategy
        gs = REMBOStrategy(D=20, d=6, k=4, init_per_proj=4)

        # Check that arms and data are correctly segmented by projection
        exp.new_batch_trial(generator_run=gs.gen(experiment=exp, n=2))
        self.assertEqual(len(gs.arms_by_proj[0]), 2)
        self.assertEqual(len(gs.arms_by_proj[1]), 0)

        exp.new_batch_trial(generator_run=gs.gen(experiment=exp, n=2))

        self.assertEqual(len(gs.arms_by_proj[0]), 2)
        self.assertEqual(len(gs.arms_by_proj[1]), 2)

        # Iterate until the first projection fits a GP
        for _ in range(4):
            exp.new_batch_trial(generator_run=gs.gen(experiment=exp, n=2))
            mock_fit_gpytorch_model.assert_not_called()

        self.assertEqual(len(gs.arms_by_proj[0]), 4)
        self.assertEqual(len(gs.arms_by_proj[1]), 4)
        self.assertEqual(len(gs.arms_by_proj[2]), 2)
        self.assertEqual(len(gs.arms_by_proj[3]), 2)

        # Keep iterating until GP is used for gen
        for i in range(4):
            # The first two trials go toward the 3rd and 4th projections
            # getting enough data for a GP.
            if i < 1:
                self.assertLess(len(gs.arms_by_proj[2]), 4)
            if i < 2:
                self.assertLess(len(gs.arms_by_proj[3]), 4)

            exp.new_batch_trial(generator_run=gs.gen(experiment=exp, n=2))
            if i < 2:
                mock_fit_gpytorch_model.assert_not_called()
            else:
                # After all proj. have > 4 arms' worth of data, GP can be fit.
                self.assertFalse(
                    any(len(x) < 4 for x in gs.arms_by_proj.values()))
                mock_fit_gpytorch_model.assert_called()

        self.assertTrue(len(gs.model_transitions) > 0)
        gs2 = gs.clone_reset()
        self.assertEqual(gs2.D, 20)
        self.assertEqual(gs2.d, 6)
Example #15
class SearchSpaceTest(TestCase):
    def setUp(self):
        self.a = RangeParameter(name="a",
                                parameter_type=ParameterType.FLOAT,
                                lower=0.5,
                                upper=5.5)
        self.b = RangeParameter(name="b",
                                parameter_type=ParameterType.INT,
                                lower=2,
                                upper=10)
        self.c = ChoiceParameter(name="c",
                                 parameter_type=ParameterType.STRING,
                                 values=["foo", "bar", "baz"])
        self.d = FixedParameter(name="d",
                                parameter_type=ParameterType.BOOL,
                                value=True)
        self.e = ChoiceParameter(name="e",
                                 parameter_type=ParameterType.FLOAT,
                                 values=[0.0, 0.1, 0.2, 0.5])
        self.f = RangeParameter(
            name="f",
            parameter_type=ParameterType.INT,
            lower=2,
            upper=10,
            log_scale=True,
        )
        self.g = RangeParameter(name="g",
                                parameter_type=ParameterType.FLOAT,
                                lower=0.0,
                                upper=1.0)
        self.parameters = [self.a, self.b, self.c, self.d, self.e, self.f]
        self.ss1 = SearchSpace(parameters=self.parameters)
        self.ss2 = SearchSpace(
            parameters=self.parameters,
            parameter_constraints=[
                OrderConstraint(lower_parameter=self.a, upper_parameter=self.b)
            ],
        )
        self.ss1_repr = (
            "SearchSpace("
            "parameters=["
            "RangeParameter(name='a', parameter_type=FLOAT, range=[0.5, 5.5]), "
            "RangeParameter(name='b', parameter_type=INT, range=[2, 10]), "
            "ChoiceParameter(name='c', parameter_type=STRING, "
            "values=['foo', 'bar', 'baz']), "
            "FixedParameter(name='d', parameter_type=BOOL, value=True), "
            "ChoiceParameter(name='e', parameter_type=FLOAT, "
            "values=[0.0, 0.1, 0.2, 0.5]), "
            "RangeParameter(name='f', parameter_type=INT, range=[2, 10], "
            "log_scale=True)], "
            "parameter_constraints=[])")
        self.ss2_repr = (
            "SearchSpace("
            "parameters=["
            "RangeParameter(name='a', parameter_type=FLOAT, range=[0.5, 5.5]), "
            "RangeParameter(name='b', parameter_type=INT, range=[2, 10]), "
            "ChoiceParameter(name='c', parameter_type=STRING, "
            "values=['foo', 'bar', 'baz']), "
            "FixedParameter(name='d', parameter_type=BOOL, value=True), "
            "ChoiceParameter(name='e', parameter_type=FLOAT, "
            "values=[0.0, 0.1, 0.2, 0.5]), "
            "RangeParameter(name='f', parameter_type=INT, range=[2, 10], "
            "log_scale=True)], "
            "parameter_constraints=[OrderConstraint(a <= b)])")

    def testEq(self):
        ss2 = SearchSpace(
            parameters=self.parameters,
            parameter_constraints=[
                OrderConstraint(lower_parameter=self.a, upper_parameter=self.b)
            ],
        )
        self.assertEqual(self.ss2, ss2)
        self.assertNotEqual(self.ss1, self.ss2)

    def testProperties(self):
        self.assertEqual(len(self.ss1.parameters), TOTAL_PARAMS)
        self.assertTrue("a" in self.ss1.parameters)
        self.assertEqual(len(self.ss1.tunable_parameters), TUNABLE_PARAMS)
        self.assertFalse("d" in self.ss1.tunable_parameters)
        self.assertEqual(len(self.ss1.range_parameters), RANGE_PARAMS)
        self.assertFalse("c" in self.ss1.range_parameters)
        self.assertTrue(len(self.ss1.parameter_constraints) == 0)
        self.assertTrue(len(self.ss2.parameter_constraints) == 1)

    def testRepr(self):
        self.assertEqual(str(self.ss2), self.ss2_repr)
        self.assertEqual(str(self.ss1), self.ss1_repr)

    def testSetter(self):
        new_c = SumConstraint(parameters=[self.a, self.b],
                              is_upper_bound=True,
                              bound=10)
        self.ss2.add_parameter_constraints([new_c])
        self.assertEqual(len(self.ss2.parameter_constraints), 2)

        self.ss2.set_parameter_constraints([])
        self.assertEqual(len(self.ss2.parameter_constraints), 0)

        update_p = RangeParameter(name="b",
                                  parameter_type=ParameterType.INT,
                                  lower=10,
                                  upper=20)
        self.ss2.add_parameter(self.g)
        self.assertEqual(len(self.ss2.parameters), TOTAL_PARAMS + 1)

        self.ss2.update_parameter(update_p)
        self.assertEqual(self.ss2.parameters["b"].lower, 10)

    def testBadConstruction(self):
        # Duplicate parameter
        with self.assertRaises(ValueError):
            p1 = self.parameters + [self.parameters[0]]
            SearchSpace(parameters=p1, parameter_constraints=[])

        # Constraint on non-existent parameter
        with self.assertRaises(ValueError):
            SearchSpace(
                parameters=self.parameters,
                parameter_constraints=[
                    OrderConstraint(lower_parameter=self.a,
                                    upper_parameter=self.g)
                ],
            )

        # Vanilla Constraint on non-existent parameter
        with self.assertRaises(ValueError):
            SearchSpace(
                parameters=self.parameters,
                parameter_constraints=[
                    ParameterConstraint(constraint_dict={"g": 1}, bound=0)
                ],
            )

        # Constraint on non-numeric parameter
        with self.assertRaises(ValueError):
            SearchSpace(
                parameters=self.parameters,
                parameter_constraints=[
                    OrderConstraint(lower_parameter=self.a,
                                    upper_parameter=self.d)
                ],
            )

        # Constraint on choice parameter
        with self.assertRaises(ValueError):
            SearchSpace(
                parameters=self.parameters,
                parameter_constraints=[
                    OrderConstraint(lower_parameter=self.a,
                                    upper_parameter=self.e)
                ],
            )

        # Constraint on logscale parameter
        with self.assertRaises(ValueError):
            SearchSpace(
                parameters=self.parameters,
                parameter_constraints=[
                    OrderConstraint(lower_parameter=self.a,
                                    upper_parameter=self.f)
                ],
            )

        # Constraint on mismatched parameter
        with self.assertRaises(ValueError):
            wrong_a = self.a.clone()
            wrong_a.update_range(upper=10)
            SearchSpace(
                parameters=self.parameters,
                parameter_constraints=[
                    OrderConstraint(lower_parameter=wrong_a,
                                    upper_parameter=self.b)
                ],
            )

    def testBadSetter(self):
        new_p = RangeParameter(name="b",
                               parameter_type=ParameterType.FLOAT,
                               lower=0.0,
                               upper=1.0)

        # Add duplicate parameter
        with self.assertRaises(ValueError):
            self.ss1.add_parameter(new_p)

        # Update parameter to different type
        with self.assertRaises(ValueError):
            self.ss1.update_parameter(new_p)

        # Update non-existent parameter
        new_p = RangeParameter(name="g",
                               parameter_type=ParameterType.FLOAT,
                               lower=0.0,
                               upper=1.0)
        with self.assertRaises(ValueError):
            self.ss1.update_parameter(new_p)

    def testCheckMembership(self):
        p_dict = {"a": 1.0, "b": 5, "c": "foo", "d": True, "e": 0.2, "f": 5}

        # Valid
        self.assertTrue(self.ss2.check_membership(p_dict))

        # Value out of range
        p_dict["a"] = 20.0
        self.assertFalse(self.ss2.check_membership(p_dict))
        with self.assertRaises(ValueError):
            self.ss2.check_membership(p_dict, raise_error=True)

        # Violate constraints
        p_dict["a"] = 5.3
        self.assertFalse(self.ss2.check_membership(p_dict))
        with self.assertRaises(ValueError):
            self.ss2.check_membership(p_dict, raise_error=True)

        # Incomplete param dict
        p_dict.pop("a")
        self.assertFalse(self.ss2.check_membership(p_dict))
        with self.assertRaises(ValueError):
            self.ss2.check_membership(p_dict, raise_error=True)

        # Unknown parameter
        p_dict["q"] = 40
        self.assertFalse(self.ss2.check_membership(p_dict))
        with self.assertRaises(ValueError):
            self.ss2.check_membership(p_dict, raise_error=True)

    def testCheckTypes(self):
        p_dict = {"a": 1.0, "b": 5, "c": "foo", "d": True, "e": 0.2, "f": 5}

        # Valid
        self.assertTrue(self.ss2.check_membership(p_dict))

        # Invalid type
        p_dict["b"] = 5.2
        self.assertFalse(self.ss2.check_types(p_dict))
        with self.assertRaises(ValueError):
            self.ss2.check_types(p_dict, raise_error=True)
        p_dict["b"] = 5

        # Incomplete param dict
        p_dict.pop("a")
        self.assertFalse(self.ss2.check_types(p_dict))
        with self.assertRaises(ValueError):
            self.ss2.check_types(p_dict, raise_error=True)

        # Unknown parameter
        p_dict["q"] = 40
        self.assertFalse(self.ss2.check_types(p_dict))
        with self.assertRaises(ValueError):
            self.ss2.check_types(p_dict, raise_error=True)

    def testCastArm(self):
        p_dict = {"a": 1.0, "b": 5.0, "c": "foo", "d": True, "e": 0.2, "f": 5}

        # Check "b" parameter goes from float to int
        self.assertTrue(isinstance(p_dict["b"], float))
        new_arm = self.ss2.cast_arm(Arm(p_dict))
        self.assertTrue(isinstance(new_arm.parameters["b"], int))

        # Unknown parameter should be unchanged
        p_dict["q"] = 40
        new_arm = self.ss2.cast_arm(Arm(p_dict))
        self.assertTrue(isinstance(new_arm.parameters["q"], int))

    def testCopy(self):
        a = RangeParameter("a", ParameterType.FLOAT, 1.0, 5.5)
        b = RangeParameter("b", ParameterType.FLOAT, 2.0, 5.5)
        c = ChoiceParameter("c", ParameterType.INT, [2, 3])
        ss = SearchSpace(
            parameters=[a, b, c],
            parameter_constraints=[
                OrderConstraint(lower_parameter=a, upper_parameter=b)
            ],
        )
        ss_copy = ss.clone()
        self.assertEqual(len(ss_copy.parameters), len(ss.parameters))
        self.assertEqual(len(ss_copy.parameter_constraints),
                         len(ss.parameter_constraints))

        ss_copy.add_parameter(FixedParameter("d", ParameterType.STRING, "h"))
        self.assertNotEqual(len(ss_copy.parameters), len(ss.parameters))

    def testOutOfDesignArm(self):
        arm1 = self.ss1.out_of_design_arm()
        arm2 = self.ss2.out_of_design_arm()
        arm1_nones = [p is None for p in arm1.parameters.values()]
        self.assertTrue(all(arm1_nones))
        self.assertTrue(arm1 == arm2)

    def testConstructArm(self):
        # Test constructing an arm of default values
        arm = self.ss1.construct_arm(name="test")
        self.assertEqual(arm.name, "test")
        for p_name in self.ss1.parameters.keys():
            self.assertTrue(p_name in arm.parameters)
            self.assertEqual(arm.parameters[p_name], None)

        # Test constructing an arm with a custom value
        arm = self.ss1.construct_arm({"a": 1.0})
        for p_name in self.ss1.parameters.keys():
            self.assertTrue(p_name in arm.parameters)
            if p_name == "a":
                self.assertEqual(arm.parameters[p_name], 1.0)
            else:
                self.assertEqual(arm.parameters[p_name], None)

        # Test constructing an arm with a bad param name
        with self.assertRaises(ValueError):
            self.ss1.construct_arm({"IDONTEXIST_a": 1.0})

        # Test constructing an arm with a bad param value
        with self.assertRaises(ValueError):
            self.ss1.construct_arm({"a": "notafloat"})
Example #16
    def testModelBridge(self, mock_fit, mock_gen_arms,
                        mock_observations_from_data):
        # Test that on init transforms are stored and applied in the correct order
        transforms = [transform_1, transform_2]
        exp = get_experiment_for_value()
        ss = get_search_space_for_value()
        modelbridge = ModelBridge(
            search_space=ss,
            model=Model(),
            transforms=transforms,
            experiment=exp,
            data=0,
        )
        self.assertFalse(
            modelbridge._experiment_has_immutable_search_space_and_opt_config)
        self.assertEqual(list(modelbridge.transforms.keys()),
                         ["Cast", "transform_1", "transform_2"])
        fit_args = mock_fit.mock_calls[0][2]
        self.assertTrue(
            fit_args["search_space"] == get_search_space_for_value(8.0))
        self.assertTrue(fit_args["observation_features"] == [])
        self.assertTrue(fit_args["observation_data"] == [])
        self.assertTrue(mock_observations_from_data.called)

        # Test prediction on out of design features.
        modelbridge._predict = mock.MagicMock(
            "ax.modelbridge.base.ModelBridge._predict",
            autospec=True,
            side_effect=ValueError("Out of Design"),
        )
        # This point is in design, and thus failures in predict are legitimate.
        with mock.patch.object(ModelBridge,
                               "model_space",
                               return_value=get_search_space_for_range_values):
            with self.assertRaises(ValueError):
                modelbridge.predict([get_observation2().features])

        # This point is out of design, and not in training data.
        with self.assertRaises(ValueError):
            modelbridge.predict([get_observation_status_quo0().features])

        # Now it's in the training data.
        with mock.patch.object(
                ModelBridge,
                "get_training_data",
                return_value=[get_observation_status_quo0()],
        ):
            # Return raw training value.
            self.assertEqual(
                modelbridge.predict([get_observation_status_quo0().features]),
                unwrap_observation_data([get_observation_status_quo0().data]),
            )

        # Test that transforms are applied correctly on predict
        modelbridge._predict = mock.MagicMock(
            "ax.modelbridge.base.ModelBridge._predict",
            autospec=True,
            return_value=[get_observation2trans().data],
        )
        modelbridge.predict([get_observation2().features])
        # Observation features sent to _predict are un-transformed afterwards
        modelbridge._predict.assert_called_with([get_observation2().features])

        # Check that _single_predict is equivalent here.
        modelbridge._single_predict([get_observation2().features])
        # Observation features sent to _predict are un-transformed afterwards
        modelbridge._predict.assert_called_with([get_observation2().features])

        # Test transforms applied on gen
        modelbridge._gen = mock.MagicMock(
            "ax.modelbridge.base.ModelBridge._gen",
            autospec=True,
            return_value=([get_observation1trans().features], [2], None, {}),
        )
        oc = OptimizationConfig(objective=Objective(metric=Metric(
            name="test_metric")))
        modelbridge._set_kwargs_to_save(model_key="TestModel",
                                        model_kwargs={},
                                        bridge_kwargs={})
        gr = modelbridge.gen(
            n=1,
            search_space=get_search_space_for_value(),
            optimization_config=oc,
            pending_observations={"a": [get_observation2().features]},
            fixed_features=ObservationFeatures({"x": 5}),
        )
        self.assertEqual(gr._model_key, "TestModel")
        modelbridge._gen.assert_called_with(
            n=1,
            search_space=SearchSpace(
                [FixedParameter("x", ParameterType.FLOAT, 8.0)]),
            optimization_config=oc,
            pending_observations={"a": [get_observation2trans().features]},
            fixed_features=ObservationFeatures({"x": 36}),
            model_gen_options=None,
        )
        mock_gen_arms.assert_called_with(
            arms_by_signature={},
            observation_features=[get_observation1().features])

        # Gen with no pending observations and no fixed features
        modelbridge.gen(n=1,
                        search_space=get_search_space_for_value(),
                        optimization_config=None)
        modelbridge._gen.assert_called_with(
            n=1,
            search_space=SearchSpace(
                [FixedParameter("x", ParameterType.FLOAT, 8.0)]),
            optimization_config=None,
            pending_observations={},
            fixed_features=ObservationFeatures({}),
            model_gen_options=None,
        )

        # Gen with multi-objective optimization config.
        oc2 = OptimizationConfig(objective=ScalarizedObjective(
            metrics=[Metric(name="test_metric"),
                     Metric(name="test_metric_2")]))
        modelbridge.gen(n=1,
                        search_space=get_search_space_for_value(),
                        optimization_config=oc2)
        modelbridge._gen.assert_called_with(
            n=1,
            search_space=SearchSpace(
                [FixedParameter("x", ParameterType.FLOAT, 8.0)]),
            optimization_config=oc2,
            pending_observations={},
            fixed_features=ObservationFeatures({}),
            model_gen_options=None,
        )

        # Test transforms applied on cross_validate
        modelbridge._cross_validate = mock.MagicMock(
            "ax.modelbridge.base.ModelBridge._cross_validate",
            autospec=True,
            return_value=[get_observation1trans().data],
        )
        cv_training_data = [get_observation2()]
        cv_test_points = [get_observation1().features]
        cv_predictions = modelbridge.cross_validate(
            cv_training_data=cv_training_data, cv_test_points=cv_test_points)
        modelbridge._cross_validate.assert_called_with(
            search_space=SearchSpace(
                [FixedParameter("x", ParameterType.FLOAT, 8.0)]),
            obs_feats=[get_observation2trans().features],
            obs_data=[get_observation2trans().data],
            cv_test_points=[get_observation1().features
                            ],  # untransformed after
        )
        self.assertTrue(cv_predictions == [get_observation1().data])

        # Test stored training data
        obs = modelbridge.get_training_data()
        self.assertTrue(obs == [get_observation1(), get_observation2()])
        self.assertEqual(modelbridge.metric_names, {"a", "b"})
        self.assertIsNone(modelbridge.status_quo)
        self.assertTrue(
            modelbridge.model_space == get_search_space_for_value())
        self.assertEqual(modelbridge.training_in_design, [False, False])

        with self.assertRaises(ValueError):
            modelbridge.training_in_design = [True, True, False]

        # Test feature_importances
        with self.assertRaises(NotImplementedError):
            modelbridge.feature_importances("a")

        # Test transform observation features
        with mock.patch(
                "ax.modelbridge.base.ModelBridge._transform_observation_features",
                autospec=True,
        ) as mock_tr:
            modelbridge.transform_observation_features(
                [get_observation2().features])
        mock_tr.assert_called_with(modelbridge,
                                   [get_observation2trans().features])
Example #17
 def setUp(self):
     self.a = RangeParameter(name="a",
                             parameter_type=ParameterType.FLOAT,
                             lower=0.5,
                             upper=5.5)
     self.b = RangeParameter(name="b",
                             parameter_type=ParameterType.INT,
                             lower=2,
                             upper=10)
     self.c = ChoiceParameter(name="c",
                              parameter_type=ParameterType.STRING,
                              values=["foo", "bar", "baz"])
     self.d = FixedParameter(name="d",
                             parameter_type=ParameterType.BOOL,
                             value=True)
     self.e = ChoiceParameter(name="e",
                              parameter_type=ParameterType.FLOAT,
                              values=[0.0, 0.1, 0.2, 0.5])
     self.f = RangeParameter(
         name="f",
         parameter_type=ParameterType.INT,
         lower=2,
         upper=10,
         log_scale=True,
     )
     self.g = RangeParameter(name="g",
                             parameter_type=ParameterType.FLOAT,
                             lower=0.0,
                             upper=1.0)
     self.parameters = [self.a, self.b, self.c, self.d, self.e, self.f]
     self.ss1 = SearchSpace(parameters=self.parameters)
     self.ss2 = SearchSpace(
         parameters=self.parameters,
         parameter_constraints=[
             OrderConstraint(lower_parameter=self.a, upper_parameter=self.b)
         ],
     )
     self.ss1_repr = (
         "SearchSpace("
         "parameters=["
         "RangeParameter(name='a', parameter_type=FLOAT, range=[0.5, 5.5]), "
         "RangeParameter(name='b', parameter_type=INT, range=[2, 10]), "
         "ChoiceParameter(name='c', parameter_type=STRING, "
         "values=['foo', 'bar', 'baz']), "
         "FixedParameter(name='d', parameter_type=BOOL, value=True), "
         "ChoiceParameter(name='e', parameter_type=FLOAT, "
         "values=[0.0, 0.1, 0.2, 0.5]), "
         "RangeParameter(name='f', parameter_type=INT, range=[2, 10], "
         "log_scale=True)], "
         "parameter_constraints=[])")
     self.ss2_repr = (
         "SearchSpace("
         "parameters=["
         "RangeParameter(name='a', parameter_type=FLOAT, range=[0.5, 5.5]), "
         "RangeParameter(name='b', parameter_type=INT, range=[2, 10]), "
         "ChoiceParameter(name='c', parameter_type=STRING, "
         "values=['foo', 'bar', 'baz']), "
         "FixedParameter(name='d', parameter_type=BOOL, value=True), "
         "ChoiceParameter(name='e', parameter_type=FLOAT, "
         "values=[0.0, 0.1, 0.2, 0.5]), "
         "RangeParameter(name='f', parameter_type=INT, range=[2, 10], "
         "log_scale=True)], "
         "parameter_constraints=[OrderConstraint(a <= b)])")

Example #18
    def testGen(self, mock_init):
        # Test with constraints
        optimization_config = OptimizationConfig(
            objective=Objective(Metric("a"), minimize=True),
            outcome_constraints=[
                OutcomeConstraint(Metric("b"), ComparisonOp.GEQ, 2, False)
            ],
        )
        ma = DiscreteModelBridge()
        model = mock.MagicMock(DiscreteModel, autospec=True, instance=True)
        model.gen.return_value = ([[0.0, 2.0, 3.0], [1.0, 1.0, 3.0]],
                                  [1.0, 2.0])
        ma.model = model
        ma.parameters = ["x", "y", "z"]
        ma.outcomes = ["a", "b"]
        observation_features, weights, best_observation = ma._gen(
            n=3,
            search_space=self.search_space,
            optimization_config=optimization_config,
            pending_observations=self.pending_observations,
            fixed_features=ObservationFeatures({}),
            model_gen_options=self.model_gen_options,
        )
        gen_args = model.gen.mock_calls[0][2]
        self.assertEqual(gen_args["n"], 3)
        self.assertEqual(gen_args["parameter_values"],
                         [[0.0, 1.0], ["foo", "bar"], [True]])
        self.assertTrue(
            np.array_equal(gen_args["objective_weights"], np.array([-1.0,
                                                                    0.0])))
        self.assertTrue(
            np.array_equal(gen_args["outcome_constraints"][0],
                           np.array([[0.0, -1.0]])))
        self.assertTrue(
            np.array_equal(gen_args["outcome_constraints"][1],
                           np.array([[-2]])))
        self.assertEqual(gen_args["pending_observations"][0], [])
        self.assertEqual(gen_args["pending_observations"][1],
                         [[0, "foo", True]])
        self.assertEqual(gen_args["model_gen_options"], {"option": "yes"})
        self.assertEqual(observation_features[0].parameters, {
            "x": 0.0,
            "y": 2.0,
            "z": 3.0
        })
        self.assertEqual(observation_features[1].parameters, {
            "x": 1.0,
            "y": 1.0,
            "z": 3.0
        })
        self.assertEqual(weights, [1.0, 2.0])

        # Test with no constraints, no fixed feature, no pending observations
        search_space = SearchSpace(self.parameters[:2])
        optimization_config.outcome_constraints = []
        ma.parameters = ["x", "y"]
        ma._gen(
            n=3,
            search_space=search_space,
            optimization_config=optimization_config,
            pending_observations={},
            fixed_features=ObservationFeatures({}),
            model_gen_options={},
        )
        gen_args = model.gen.mock_calls[1][2]
        self.assertEqual(gen_args["parameter_values"],
                         [[0.0, 1.0], ["foo", "bar"]])
        self.assertIsNone(gen_args["outcome_constraints"])
        self.assertIsNone(gen_args["pending_observations"])

        # Test validation
        optimization_config = OptimizationConfig(
            objective=Objective(Metric("a"), minimize=False),
            outcome_constraints=[
                OutcomeConstraint(Metric("b"), ComparisonOp.GEQ, 2, True)
            ],
        )
        with self.assertRaises(ValueError):
            ma._gen(
                n=3,
                search_space=search_space,
                optimization_config=optimization_config,
                pending_observations={},
                fixed_features=ObservationFeatures({}),
                model_gen_options={},
            )
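The assertions in this test pin down a simple encoding, made explicit in the sketch below (my reading of the asserted values, not Ax internals): a minimized objective gets weight -1.0 and every other outcome gets 0.0, while the GEQ outcome constraint b >= 2 is flipped to -b <= -2 so that all constraints share the form A @ f(x) <= b.

import numpy as np

outcomes = ["a", "b"]

# Objective "a" is minimized, so its weight is negated; outcome "b" is
# not part of the objective and gets weight 0.
objective_weights = np.array([-1.0 if m == "a" else 0.0 for m in outcomes])

# OutcomeConstraint(Metric("b"), ComparisonOp.GEQ, 2): b >= 2 becomes
# -b <= -2, one row of A per constraint, one column per outcome.
A = np.array([[0.0, -1.0]])
b = np.array([[-2.0]])

assert np.array_equal(objective_weights, np.array([-1.0, 0.0]))
assert np.array_equal(A @ np.array([[0.0], [2.0]]), b)  # b == 2 is on the boundary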
Example #19
    def __init__(
        self,
        search_space: SearchSpace,
        model: Any,
        transforms: Optional[List[Type[Transform]]] = None,
        experiment: Optional[Experiment] = None,
        data: Optional[Data] = None,
        transform_configs: Optional[Dict[str, TConfig]] = None,
        status_quo_name: Optional[str] = None,
        status_quo_features: Optional[ObservationFeatures] = None,
        optimization_config: Optional[OptimizationConfig] = None,
        fit_out_of_design: bool = False,
    ) -> None:
        """
        Applies transforms and fits model.

        Args:
            experiment: Is used to get arm parameters. Is not mutated.
            search_space: Search space for fitting the model. Constraints need
                not be the same ones used in gen.
            data: Ax Data.
            model: Interface will be specified in subclass. If model requires
                initialization, that should be done prior to its use here.
            transforms: List of uninitialized transform classes. Forward
                transforms will be applied in this order, and untransforms in
                the reverse order.
            transform_configs: A dictionary from transform name to the
                transform config dictionary.
            status_quo_name: Name of the status quo arm. Can only be used if
                Data has a single set of ObservationFeatures corresponding to
                that arm.
            status_quo_features: ObservationFeatures to use as status quo.
                Either this or status_quo_name should be specified, not both.
            optimization_config: Optimization config defining how to optimize
                the model.
            fit_out_of_design: If True, all training data is used to fit the
                model, including points outside the search space; otherwise
                only in-design points are used.
        """
        t_fit_start = time.time()
        self._metric_names: Set[str] = set()
        self._training_data: List[Observation] = []
        self._optimization_config: Optional[OptimizationConfig] = optimization_config
        self._training_in_design: List[bool] = []
        self._status_quo: Optional[Observation] = None
        self._arms_by_signature: Optional[Dict[str, Arm]] = None
        self.transforms: MutableMapping[str, Transform] = OrderedDict()
        self._model_key: Optional[str] = None
        self._model_kwargs: Optional[Dict[str, Any]] = None
        self._bridge_kwargs: Optional[Dict[str, Any]] = None

        self._model_space = search_space.clone()
        self._raw_transforms = transforms
        self._transform_configs: Optional[Dict[str, TConfig]] = transform_configs
        self._fit_out_of_design = fit_out_of_design

        if experiment is not None:
            if self._optimization_config is None:
                self._optimization_config = experiment.optimization_config
            self._arms_by_signature = experiment.arms_by_signature

        observations = (
            # pyre-fixme[6]: Expected `Experiment` for 1st param but got `None`.
            observations_from_data(experiment, data)
            if experiment is not None and data is not None
            else []
        )
        obs_feats_raw, obs_data_raw = self._set_training_data(
            observations=observations, search_space=search_space
        )
        # Set model status quo
        # NOTE: training data must be set before setting the status quo.
        self._set_status_quo(
            experiment=experiment,
            status_quo_name=status_quo_name,
            status_quo_features=status_quo_features,
        )
        obs_feats, obs_data, search_space = self._transform_data(
            obs_feats=obs_feats_raw,
            obs_data=obs_data_raw,
            search_space=search_space,
            transforms=transforms,
            transform_configs=transform_configs,
        )

        # Apply terminal transform and fit
        try:
            self._fit(
                model=model,
                search_space=search_space,
                observation_features=obs_feats,
                observation_data=obs_data,
            )
            self.fit_time = time.time() - t_fit_start
            self.fit_time_since_gen = float(self.fit_time)
        except NotImplementedError:
            self.fit_time = 0.0
            self.fit_time_since_gen = 0.0
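The docstring's ordering contract (forward transforms applied in list order, untransforms in the reverse order) is the key invariant here. A toy sketch of that contract, using hypothetical stand-in classes rather than Ax's Transform API:

from collections import OrderedDict

# Two toy transforms standing in for Transform subclasses.
class AddOne:
    def transform(self, x):
        return x + 1

    def untransform(self, x):
        return x - 1

class Double:
    def transform(self, x):
        return x * 2

    def untransform(self, x):
        return x / 2

transforms = OrderedDict([("AddOne", AddOne()), ("Double", Double())])

x = 3.0
for t in transforms.values():  # forward pass: AddOne, then Double
    x = t.transform(x)  # (3 + 1) * 2 == 8
for t in reversed(list(transforms.values())):  # reverse: Double, then AddOne
    x = t.untransform(x)  # 8 / 2 - 1 == 3
assert x == 3.0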
Example #20
    def testIsFactorial(self):
        self.assertFalse(self.batch.is_factorial)

        # Insufficient factors
        small_experiment = Experiment(
            name="small_test",
            search_space=SearchSpace(
                [FixedParameter("a", ParameterType.INT, 4)]),
        )
        small_trial = small_experiment.new_batch_trial().add_arm(Arm({"a": 4}))
        self.assertFalse(small_trial.is_factorial)

        new_batch_trial = self.experiment.new_batch_trial()
        new_batch_trial.add_arms_and_weights(arms=[
            Arm(parameters={
                "w": 0.75,
                "x": 1,
                "y": "foo",
                "z": True
            }),
            Arm(parameters={
                "w": 0.75,
                "x": 2,
                "y": "foo",
                "z": True
            }),
            Arm(parameters={
                "w": 0.77,
                "x": 1,
                "y": "foo",
                "z": True
            }),
        ])
        self.assertFalse(new_batch_trial.is_factorial)

        new_batch_trial = self.experiment.new_batch_trial()
        new_batch_trial.add_arms_and_weights(arms=[
            Arm(parameters={
                "w": 0.77,
                "x": 1,
                "y": "foo",
                "z": True
            }),
            Arm(parameters={
                "w": 0.77,
                "x": 2,
                "y": "foo",
                "z": True
            }),
            Arm(parameters={
                "w": 0.75,
                "x": 1,
                "y": "foo",
                "z": True
            }),
            Arm(parameters={
                "w": 0.75,
                "x": 2,
                "y": "foo",
                "z": True
            }),
        ])
        self.assertTrue(new_batch_trial.is_factorial)
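The batches above suggest what is_factorial checks: a batch is factorial exactly when its arms cover the full cross product of each parameter's distinct values. A minimal sketch of that check on plain parameter dicts (Ax's implementation differs; this is a hypothetical equivalent):

from itertools import product

def arms_are_factorial(arms):
    """arms: list of parameter dicts that all share the same keys."""
    names = sorted(arms[0])
    levels = [sorted({arm[n] for arm in arms}) for n in names]
    seen = {tuple(arm[n] for n in names) for arm in arms}
    return seen == set(product(*levels))

factorial_arms = [
    {"w": 0.77, "x": 1, "y": "foo", "z": True},
    {"w": 0.77, "x": 2, "y": "foo", "z": True},
    {"w": 0.75, "x": 1, "y": "foo", "z": True},
    {"w": 0.75, "x": 2, "y": "foo", "z": True},
]
assert arms_are_factorial(factorial_arms)
# The three-arm batch is missing the (w=0.75, x=2) combination.
assert not arms_are_factorial(factorial_arms[:3])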
Example #21
    def __init__(
        self,
        search_space: SearchSpace,
        model: Any,
        transforms: Optional[List[Type[Transform]]] = None,
        experiment: Optional[Experiment] = None,
        data: Optional[Data] = None,
        transform_configs: Optional[Dict[str, TConfig]] = None,
        status_quo_name: Optional[str] = None,
        status_quo_features: Optional[ObservationFeatures] = None,
        optimization_config: Optional[OptimizationConfig] = None,
    ) -> None:
        """
        Applies transforms and fits model.

        Args:
            experiment: Is used to get arm parameters. Is not mutated.
            search_space: Search space for fitting the model. Constraints need
                not be the same ones used in gen.
            data: Ax Data.
            model: Interface will be specified in subclass. If model requires
                initialization, that should be done prior to its use here.
            transforms: List of uninitialized transform classes. Forward
                transforms will be applied in this order, and untransforms in
                the reverse order.
            transform_configs: A dictionary from transform name to the
                transform config dictionary.
            status_quo_name: Name of the status quo arm. Can only be used if
                Data has a single set of ObservationFeatures corresponding to
                that arm.
            status_quo_features: ObservationFeatures to use as status quo.
                Either this or status_quo_name should be specified, not both.
            optimization_config: Optimization config defining how to optimize
                the model.
        """
        t_fit_start = time.time()
        self._metric_names: Set[str] = set()
        self._training_data: List[Observation] = []
        self._optimization_config: Optional[
            OptimizationConfig] = optimization_config
        self._training_in_design: List[bool] = []
        self._status_quo: Optional[Observation] = None
        self._arms_by_signature: Optional[Dict[str, Arm]] = None
        self.transforms: MutableMapping[str, Transform] = OrderedDict()

        self._model_space = search_space.clone()
        if experiment is not None:
            if self._optimization_config is None:
                self._optimization_config = experiment.optimization_config
            self._arms_by_signature = experiment.arms_by_signature

        # Get observation features and data
        observations = (observations_from_data(experiment, data)
                        if experiment is not None and data is not None else [])
        obs_feats, obs_data = self._set_training_data(observations)

        # Set model status quo
        if any(x is not None
               for x in [experiment, status_quo_name, status_quo_features]):
            self._set_status_quo(
                experiment=experiment,
                status_quo_name=status_quo_name,
                status_quo_features=status_quo_features,
            )

        # Initialize transforms
        if transform_configs is None:
            transform_configs = {}

        search_space = search_space.clone()
        if transforms is not None:
            for t in transforms:
                t_instance = t(
                    search_space=search_space,
                    observation_features=obs_feats,
                    observation_data=obs_data,
                    config=transform_configs.get(t.__name__, None),
                )
                search_space = t_instance.transform_search_space(search_space)
                obs_feats = t_instance.transform_observation_features(
                    obs_feats)
                obs_data = t_instance.transform_observation_data(
                    obs_data, obs_feats)
                self.transforms[t.__name__] = t_instance

        # Apply terminal transform and fit
        try:
            self._fit(
                model=model,
                search_space=search_space,
                observation_features=obs_feats,
                observation_data=obs_data,
            )
            self.fit_time = time.time() - t_fit_start
            self.fit_time_since_gen = float(self.fit_time)
        except NotImplementedError:
            self.fit_time = 0.0
            self.fit_time_since_gen = 0.0
Example #22
    def testGen(self, mock_init, mock_best_point, mock_gen):
        # Test with constraints
        optimization_config = OptimizationConfig(
            objective=Objective(Metric("a"), minimize=True),
            outcome_constraints=[
                OutcomeConstraint(Metric("b"), ComparisonOp.GEQ, 2, False)
            ],
        )
        ma = NumpyModelBridge()
        ma.parameters = ["x", "y", "z"]
        ma.outcomes = ["a", "b"]
        ma.transforms = OrderedDict()
        observation_features, weights, best_obsf, _ = ma._gen(
            n=3,
            search_space=self.search_space,
            optimization_config=optimization_config,
            pending_observations=self.pending_observations,
            fixed_features=ObservationFeatures({"z": 3.0}),
            model_gen_options=self.model_gen_options,
        )
        gen_args = mock_gen.mock_calls[0][2]
        self.assertEqual(gen_args["n"], 3)
        self.assertEqual(gen_args["bounds"], [(0.0, 1.0), (1.0, 2.0),
                                              (0.0, 5.0)])
        self.assertTrue(
            np.array_equal(gen_args["objective_weights"], np.array([-1.0,
                                                                    0.0])))
        self.assertTrue(
            np.array_equal(gen_args["outcome_constraints"][0],
                           np.array([[0.0, -1.0]])))
        self.assertTrue(
            np.array_equal(gen_args["outcome_constraints"][1],
                           np.array([[-2]])))
        self.assertTrue(
            np.array_equal(
                gen_args["linear_constraints"][0],
                np.array([[1.0, -1, 0.0], [-1.0, 0.0, -1.0]]),
            ))
        self.assertTrue(
            np.array_equal(gen_args["linear_constraints"][1],
                           np.array([[0.0], [-3.5]])))
        self.assertEqual(gen_args["fixed_features"], {2: 3.0})
        self.assertTrue(
            np.array_equal(gen_args["pending_observations"][0], np.array([])))
        self.assertTrue(
            np.array_equal(gen_args["pending_observations"][1],
                           np.array([[0.6, 1.6, 3.0]])))
        self.assertEqual(gen_args["model_gen_options"], {"option": "yes"})
        self.assertEqual(observation_features[0].parameters, {
            "x": 1.0,
            "y": 2.0,
            "z": 3.0
        })
        self.assertEqual(observation_features[1].parameters, {
            "x": 3.0,
            "y": 4.0,
            "z": 3.0
        })
        self.assertTrue(np.array_equal(weights, np.array([1.0, 2.0])))

        # Test with multiple objectives.
        oc2 = OptimizationConfig(objective=ScalarizedObjective(
            metrics=[Metric(name="a"), Metric(name="b")], minimize=True))
        observation_features, weights, best_obsf, _ = ma._gen(
            n=3,
            search_space=self.search_space,
            optimization_config=oc2,
            pending_observations=self.pending_observations,
            fixed_features=ObservationFeatures({"z": 3.0}),
            model_gen_options=self.model_gen_options,
        )
        gen_args = mock_gen.mock_calls[1][2]
        self.assertEqual(gen_args["bounds"], [(0.0, 1.0), (1.0, 2.0),
                                              (0.0, 5.0)])
        self.assertIsNone(gen_args["outcome_constraints"])
        self.assertTrue(
            np.array_equal(gen_args["objective_weights"],
                           np.array([-1.0, -1.0])))

        # Test with MultiObjective (unweighted multiple objectives)
        oc3 = MultiObjectiveOptimizationConfig(objective=MultiObjective(
            metrics=[Metric(name="a"),
                     Metric(name="b", lower_is_better=True)],
            minimize=True,
        ))
        search_space = SearchSpace(self.parameters)  # Unconstrained
        observation_features, weights, best_obsf, _ = ma._gen(
            n=3,
            search_space=search_space,
            optimization_config=oc3,
            pending_observations=self.pending_observations,
            fixed_features=ObservationFeatures({"z": 3.0}),
            model_gen_options=self.model_gen_options,
        )
        gen_args = mock_gen.mock_calls[2][2]
        self.assertEqual(gen_args["bounds"], [(0.0, 1.0), (1.0, 2.0),
                                              (0.0, 5.0)])
        self.assertIsNone(gen_args["outcome_constraints"])
        self.assertTrue(
            np.array_equal(gen_args["objective_weights"], np.array([1.0,
                                                                    -1.0])))

        # Test with no constraints, no fixed feature, no pending observations
        search_space = SearchSpace(self.parameters[:2])
        optimization_config.outcome_constraints = []
        ma.parameters = ["x", "y"]
        ma._gen(3, search_space, {}, ObservationFeatures({}), None,
                optimization_config)
        gen_args = mock_gen.mock_calls[3][2]
        self.assertEqual(gen_args["bounds"], [(0.0, 1.0), (1.0, 2.0)])
        self.assertIsNone(gen_args["outcome_constraints"])
        self.assertIsNone(gen_args["linear_constraints"])
        self.assertIsNone(gen_args["fixed_features"])
        self.assertIsNone(gen_args["pending_observations"])

        # Test validation
        optimization_config = OptimizationConfig(
            objective=Objective(Metric("a"), minimize=False),
            outcome_constraints=[
                OutcomeConstraint(Metric("b"), ComparisonOp.GEQ, 2, False)
            ],
        )
        with self.assertRaises(ValueError):
            ma._gen(
                n=3,
                search_space=self.search_space,
                optimization_config=optimization_config,
                pending_observations={},
                fixed_features=ObservationFeatures({}),
            )
        optimization_config.objective.minimize = True
        optimization_config.outcome_constraints[0].relative = True
        with self.assertRaises(ValueError):
            ma._gen(
                n=3,
                search_space=self.search_space,
                optimization_config=optimization_config,
                pending_observations={},
                fixed_features=ObservationFeatures({}),
            )
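The linear_constraints assertions earlier in this test encode the two parameter constraints from setUp as A @ p <= b over parameters ["x", "y", "z"]. A short sketch of that encoding (my reading of the asserted arrays, not Ax internals):

import numpy as np

parameters = ["x", "y", "z"]

# OrderConstraint(x, y):             x <= y        ->  x - y <= 0
# SumConstraint([x, z], False, 3.5): x + z >= 3.5  ->  -x - z <= -3.5
A = np.array([[1.0, -1.0, 0.0],
              [-1.0, 0.0, -1.0]])
b = np.array([[0.0],
              [-3.5]])

# A feasible point: x=0.6 <= y=1.6, and x + z = 3.6 >= 3.5.
p = np.array([[0.6], [1.6], [3.0]])
assert (A @ p <= b).all()

# fixed_features=ObservationFeatures({"z": 3.0}) is keyed by parameter index.
assert {parameters.index("z"): 3.0} == {2: 3.0}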
Example #23
def get_search_space_for_range_value(min: float = 3.0, max: float = 6.0) -> SearchSpace:
    return SearchSpace([RangeParameter("x", ParameterType.FLOAT, min, max)])
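A quick usage sketch for this helper (note that its min/max argument names shadow Python builtins; the attribute access below assumes Ax's SearchSpace.parameters dict and RangeParameter's lower/upper properties):

ss = get_search_space_for_range_value()  # defaults to the range [3.0, 6.0]
x = ss.parameters["x"]
assert (x.lower, x.upper) == (3.0, 6.0)

ss_wide = get_search_space_for_range_value(min=0.0, max=10.0)
assert ss_wide.parameters["x"].upper == 10.0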
Example #24
    def testInit(self):
        self.assertEqual(
            self.t.Ymean,
            {
                ("m1", "a"): 1.0,
                ("m1", "b"): 3.0,
                ("m2", "a"): 5.0,
                ("m2", "b"): 1.5
            },
        )
        self.assertEqual(
            self.t.Ystd,
            {
                ("m1", "a"): 1.0,
                ("m1", "b"): 2.0,
                ("m2", "a"): 3.0,
                ("m2", "b"): 0.5
            },
        )
        with self.assertRaises(ValueError):
            # No parameter specified
            StratifiedStandardizeY(
                search_space=self.search_space,
                observation_features=[self.obsf1, self.obsf2],
                observation_data=[self.obsd1, self.obsd2],
            )
        with self.assertRaises(ValueError):
            # Wrong parameter type
            StratifiedStandardizeY(
                search_space=self.search_space,
                observation_features=[self.obsf1, self.obsf2],
                observation_data=[self.obsd1, self.obsd2],
                config={"parameter_name": "x"},
            )
        # Multiple tasks parameters
        ss3 = SearchSpace(parameters=[
            RangeParameter(name="x",
                           parameter_type=ParameterType.FLOAT,
                           lower=0,
                           upper=10),
            ChoiceParameter(
                name="z",
                parameter_type=ParameterType.STRING,
                values=["a", "b"],
                is_task=True,
            ),
            ChoiceParameter(
                name="z2",
                parameter_type=ParameterType.STRING,
                values=["a", "b"],
                is_task=True,
            ),
        ])
        with self.assertRaises(ValueError):
            StratifiedStandardizeY(
                search_space=ss3,
                observation_features=[self.obsf1, self.obsf2],
                observation_data=[self.obsd1, self.obsd2],
            )

        # Grab from task feature
        ss2 = SearchSpace(parameters=[
            RangeParameter(name="x",
                           parameter_type=ParameterType.FLOAT,
                           lower=0,
                           upper=10),
            ChoiceParameter(
                name="z",
                parameter_type=ParameterType.STRING,
                values=["a", "b"],
                is_task=True,
            ),
        ])
        t2 = StratifiedStandardizeY(
            search_space=ss2,
            observation_features=[self.obsf1, self.obsf2],
            observation_data=[self.obsd1, self.obsd2],
        )
        self.assertEqual(
            t2.Ymean,
            {
                ("m1", "a"): 1.0,
                ("m1", "b"): 3.0,
                ("m2", "a"): 5.0,
                ("m2", "b"): 1.5
            },
        )
        self.assertEqual(
            t2.Ystd,
            {
                ("m1", "a"): 1.0,
                ("m1", "b"): 2.0,
                ("m2", "a"): 3.0,
                ("m2", "b"): 0.5
            },
        )
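The Ymean/Ystd dictionaries checked above are keyed by (metric, task value), which suggests the transform's core statistic: a separate mean and standard deviation per stratum, with each y standardized as (y - mean) / std within its own stratum. A minimal sketch under that assumption (hypothetical data; whether Ax uses the sample or population standard deviation is not shown here):

import numpy as np

def stratified_standardize(ys):
    """ys maps (metric, stratum) -> list of raw y values."""
    means = {k: float(np.mean(v)) for k, v in ys.items()}
    stds = {k: float(np.std(v, ddof=1)) for k, v in ys.items()}
    z = {k: [(y - means[k]) / stds[k] for y in v] for k, v in ys.items()}
    return means, stds, z

# Hypothetical raw data: one metric, two strata of a task parameter.
means, stds, z = stratified_standardize({
    ("m1", "a"): [0.0, 2.0],
    ("m1", "b"): [1.0, 5.0],
})
assert means[("m1", "a")] == 1.0 and means[("m1", "b")] == 3.0
# Each stratum is centered with its own statistics.
assert all(abs(sum(v)) < 1e-9 for v in z.values())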