Example #1
 def testRoundingWithImpossiblyConstrainedIntRanges(self):
     parameters = [
         RangeParameter("x",
                        lower=1,
                        upper=3,
                        parameter_type=ParameterType.INT),
         RangeParameter("y",
                        lower=1,
                        upper=3,
                        parameter_type=ParameterType.INT),
     ]
     constrained_int_search_space = SearchSpace(
         parameters=parameters,
         parameter_constraints=[
             SumConstraint(parameters=parameters,
                           is_upper_bound=True,
                           bound=3)
         ],
     )
     t = IntToFloat(
         search_space=constrained_int_search_space,
         observation_features=None,
         observation_data=None,
     )
     self.assertEqual(t.rounding, "randomized")
     observation_features = [
         ObservationFeatures(parameters={
             "x": 2.6,
             "y": 2.6
         })
     ]
     self.assertFalse(
         constrained_int_search_space.check_membership(
             t.untransform_observation_features(
                 observation_features=observation_features)[0].parameters))
Example #2
 def _transform_callback(self, x: np.ndarray) -> np.ndarray:  # pragma: no cover
     """A function that performs the `round trip` transformations.
     This function is passed to _model_gen.
     """
     # apply reverse terminal transform to turn array to ObservationFeatures
     observation_features = [
         ObservationFeatures(
             parameters={p: float(x[i]) for i, p in enumerate(self.parameters)}
         )
     ]
     # reverse loop through the transforms and do untransform
     # pyre-fixme[6]: Expected `Sequence[_T]` for 1st param but got `ValuesView[Tr...
     for t in reversed(self.transforms.values()):
         observation_features = t.untransform_observation_features(
             observation_features
         )
     # forward loop through the transforms and do transform
     for t in self.transforms.values():
         observation_features = t.transform_observation_features(
             observation_features
         )
     new_x: List[float] = [
         float(observation_features[0].parameters[p]) for p in self.parameters
     ]
     # turn it back into an array
     return np.array(new_x)
Example #3
 def testTransformObservationFeatures(self):
     observation_features = [
         ObservationFeatures(parameters={
             "a": 2,
             "b": "b"
         })
     ]
     obs_ft2 = deepcopy(observation_features)
     obs_ft2 = self.t.transform_observation_features(obs_ft2)
     self.assertEqual(obs_ft2,
                      [ObservationFeatures(parameters={
                          "a": 2,
                          "b": "b"
                      })])
     obs_ft2 = self.t.untransform_observation_features(obs_ft2)
     self.assertEqual(obs_ft2, observation_features)
Example #4
    def testFit(self, mock_init):
        ma = DiscreteModelBridge()
        ma._training_data = self.observations
        model = mock.create_autospec(DiscreteModel, instance=True)
        ma._fit(
            model, self.search_space, self.observation_features, self.observation_data
        )
        self.assertEqual(ma.parameters, ["x", "y", "z"])
        self.assertEqual(sorted(ma.outcomes), ["a", "b"])
        Xs = {
            "a": [[0, "foo", True], [1, "foo", True], [1, "bar", True]],
            "b": [[0, "foo", True], [1, "foo", True]],
        }
        Ys = {"a": [[1.0], [2.0], [3.0]], "b": [[-1.0], [-2.0]]}
        Yvars = {"a": [[1.0], [2.0], [3.0]], "b": [[6.0], [7.0]]}
        parameter_values = [[0.0, 1.0], ["foo", "bar"], [True]]
        model_fit_args = model.fit.mock_calls[0][2]
        for i, x in enumerate(model_fit_args["Xs"]):
            self.assertEqual(x, Xs[ma.outcomes[i]])
        for i, y in enumerate(model_fit_args["Ys"]):
            self.assertEqual(y, Ys[ma.outcomes[i]])
        for i, v in enumerate(model_fit_args["Yvars"]):
            self.assertEqual(v, Yvars[ma.outcomes[i]])
        self.assertEqual(model_fit_args["parameter_values"], parameter_values)

        sq_feat = ObservationFeatures({})
        sq_data = self.observation_data[0]
        with self.assertRaises(ValueError):
            ma._fit(
                model,
                self.search_space,
                self.observation_features + [sq_feat],
                self.observation_data + [sq_data],
            )
Example #5
 def setUp(self):
     self.search_space = SearchSpace(
         parameters=[
             RangeParameter("x",
                            lower=1,
                            upper=3,
                            parameter_type=ParameterType.FLOAT),
             RangeParameter("a",
                            lower=1,
                            upper=2,
                            parameter_type=ParameterType.INT),
             ChoiceParameter(
                 "b",
                 parameter_type=ParameterType.FLOAT,
                 values=[1.0, 10.0, 100.0],
                 is_ordered=True,
             ),
             ChoiceParameter(
                 "c",
                 parameter_type=ParameterType.FLOAT,
                 values=[10.0, 100.0, 1000.0],
                 is_ordered=True,
             ),
             ChoiceParameter("d",
                             parameter_type=ParameterType.STRING,
                             values=["r", "q", "z"]),
         ],
         parameter_constraints=[
             ParameterConstraint(constraint_dict={
                 "x": -0.5,
                 "a": 1
             },
                                 bound=0.5)
         ],
     )
     self.t = ChoiceEncode(
         search_space=self.search_space,
         observation_features=None,
         observation_data=None,
     )
     self.observation_features = [
         ObservationFeatures(parameters={
             "x": 2.2,
             "a": 2,
             "b": 10.0,
             "c": 10.0,
             "d": "r"
         })
     ]
     # expected parameters after transform
     self.expected_transformed_params = {
         "x": 2.2,
         "a": 2,
         # ordered float choice originally; transformed normalized value
         "b": normalize_values([1.0, 10.0, 100.0])[1],
         # ordered float choice originally; transformed normalized value
         "c": normalize_values([10.0, 100.0, 1000.0])[0],
         # string choice originally; transformed to int index.
         "d": 0,
     }
Example #6
 def _transform_callback(self, x: np.ndarray) -> np.ndarray:  # pragma: no cover
     """A function that performs the `round trip` transformations.
     This function is passed to _model_gen.
     """
     # apply reverse terminal transform to turn array to ObservationFeatures
     observation_features = [
         ObservationFeatures(
             parameters={p: float(x[i]) for i, p in enumerate(self.parameters)}
         )
     ]
     # reverse loop through the transforms and do untransform
     for t in reversed(self.transforms.values()):
         observation_features = t.untransform_observation_features(
             observation_features
         )
     # forward loop through the transforms and do transform
     for t in self.transforms.values():
         observation_features = t.transform_observation_features(
             observation_features
         )
     new_x: List[float] = [
         # pyre-fixme[6]: Expected `Union[_SupportsIndex, bytearray, bytes, str,
         #  typing.SupportsFloat]` for 1st param but got `Union[None, bool, float,
         #  int, str]`.
         float(observation_features[0].parameters[p])
         for p in self.parameters
     ]
     # turn it back into an array
     return np.array(new_x)
Example #7
 def test_fixed_features(self):
     ms = ModelSpec(model_enum=Models.GPEI)
     self.assertIsNone(ms.fixed_features)
     new_features = ObservationFeatures(parameters={"a": 1.0})
     ms.fixed_features = new_features
     self.assertEqual(ms.fixed_features, new_features)
     self.assertEqual(ms.model_gen_kwargs["fixed_features"], new_features)
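
In typical usage the same kind of ObservationFeatures is passed as fixed_features when generating candidates, which pins the named parameters in every arm. A minimal sketch with a Sobol bridge, assuming the ax.utils.testing Branin stub and the Models registry entries used elsewhere on this page:

from ax.core.observation import ObservationFeatures
from ax.modelbridge.registry import Models
from ax.utils.testing.core_stubs import get_branin_experiment

# Sketch: pin x1 to its upper bound while generating three Sobol candidates.
experiment = get_branin_experiment()
sobol = Models.SOBOL(search_space=experiment.search_space)
run = sobol.gen(
    n=3,
    fixed_features=ObservationFeatures(parameters={"x1": 10.0}),
)
print([arm.parameters["x1"] for arm in run.arms])  # x1 is pinned to 10.0 in every arm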
Example #8
 def test_properties(self):
     node = GenerationNode(
         model_specs=[
             ModelSpec(
                 model_enum=Models.GPEI,
                 model_kwargs={},
                 model_gen_kwargs={
                     "n": 1,
                     "fixed_features": ObservationFeatures(
                         parameters={}, trial_index=0
                     ),
                 },
             ),
         ],
     )
     dat = self.branin_experiment.lookup_data()
     node.fit(
         experiment=self.branin_experiment,
         data=dat,
     )
     self.assertEqual(node.model_enum, node.model_specs[0].model_enum)
     self.assertEqual(node.model_kwargs, node.model_specs[0].model_kwargs)
     self.assertEqual(node.model_gen_kwargs, node.model_specs[0].model_gen_kwargs)
     self.assertEqual(node.model_cv_kwargs, node.model_specs[0].model_cv_kwargs)
     self.assertEqual(node.fixed_features, node.model_specs[0].fixed_features)
     self.assertEqual(node.cv_results, node.model_specs[0].cv_results)
     self.assertEqual(node.diagnostics, node.model_specs[0].diagnostics)
Example #9
    def _roundtrip_transform(x: np.ndarray) -> np.ndarray:
        """Inner function for performing aforementioned functionality.

        Args:
            x: points in the transformed space (e.g. all transforms have been applied
                to them)

        Returns:
            points in the transformed space, but rounded via the original space.
        """
        # apply reverse terminal transform to turn array to ObservationFeatures
        observation_features = [
            ObservationFeatures(
                parameters={p: float(x[i])
                            for i, p in enumerate(param_names)})
        ]
        # reverse loop through the transforms and do untransform
        for t in reversed(transforms.values()):
            observation_features = t.untransform_observation_features(
                observation_features)
        # forward loop through the transforms and do transform
        for t in transforms.values():
            observation_features = t.transform_observation_features(
                observation_features)
        # parameters are guaranteed to be float compatible here, but pyre doesn't know
        new_x: List[float] = [
            # pyre-fixme[6]: Expected `Union[_SupportsIndex, bytearray, bytes, str,
            #  typing.SupportsFloat]` for 1st param but got `Union[None, bool, float,
            #  int, str]`.
            float(observation_features[0].parameters[p]) for p in param_names
        ]
        # turn it back into an array
        return np.array(new_x)
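
The round trip above (untransform through all transforms, then re-transform) is what lets continuous candidates be rounded via the original search space. A minimal sketch with a single IntToFloat transform, using the same older constructor signature that appears in these examples:

from ax.core.observation import ObservationFeatures
from ax.core.parameter import ParameterType, RangeParameter
from ax.core.search_space import SearchSpace
from ax.modelbridge.transforms.int_to_float import IntToFloat

# Integer parameter whose model-facing representation is a float.
space = SearchSpace(parameters=[
    RangeParameter("x", lower=1, upper=5, parameter_type=ParameterType.INT),
])
t = IntToFloat(search_space=space, observation_features=None, observation_data=None)

obs = [ObservationFeatures(parameters={"x": 2.7})]
# Untransform rounds the float onto the integer grid of the original space...
obs = t.untransform_observation_features(obs)
# ...and transforming again returns the rounded value to the float representation.
obs = t.transform_observation_features(obs)
print(obs[0].parameters["x"])  # 3.0 (2.7 rounded through the integer space)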
Example #10
def parse_observation_features(
    X: np.ndarray,
    param_names: List[str],
    candidate_metadata: Optional[List[TCandidateMetadata]] = None,
) -> List[ObservationFeatures]:
    """Re-format raw model-generated candidates into ObservationFeatures.

    Args:
        param_names: List of param names.
        X: Raw np.ndarray of candidate values.
        candidate_metadata: Model's metadata for candidates it produced.

    Returns:
        List of candidates, represented as ObservationFeatures.
    """
    if candidate_metadata and len(candidate_metadata) != len(X):
        raise ValueError(  # pragma: no cover
            "Observations metadata list provided is not of "
            "the same size as the number of candidates.")
    observation_features = []
    for i, x in enumerate(X):
        observation_features.append(
            ObservationFeatures(
                parameters=dict(zip(param_names, x)),
                metadata=candidate_metadata[i] if candidate_metadata else None,
            ))
    return observation_features
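
A quick usage sketch for the helper above, assuming it is importable from ax.modelbridge.modelbridge_utils as in the Ax version shown here:

import numpy as np
from ax.modelbridge.modelbridge_utils import parse_observation_features

# Two raw candidates over parameters "x" and "y".
X = np.array([[0.1, 2.0], [0.4, 3.0]])
obs_feats = parse_observation_features(X=X, param_names=["x", "y"])
print(obs_feats[0].parameters)  # first candidate: x=0.1, y=2.0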
Example #11
 def test_relativize_transform_observation_data(self):
     obs_data = [
         ObservationData(
             metric_names=["foobar", "foobaz"],
             means=np.array([2, 5]),
             covariance=np.array([[0.1, 0.0], [0.0, 0.2]]),
         ),
         ObservationData(
             metric_names=["foobar", "foobaz"],
             means=np.array([1.0, 10.0]),
             covariance=np.array([[0.3, 0.0], [0.0, 0.4]]),
         ),
     ]
     obs_features = [
         ObservationFeatures(parameters={"x": 1}, trial_index=0),
         ObservationFeatures(parameters={"x": 2}, trial_index=0),
     ]
     modelbridge = Mock(
         status_quo=Mock(
             data=obs_data[0],
             features=obs_features[0],
         )
     )
     results = Relativize(
         search_space=None,
         observation_features=obs_features,
         observation_data=obs_data,
         modelbridge=modelbridge,
     ).transform_observation_data(obs_data, obs_features)
     self.assertEqual(results[0].metric_names, ["foobar", "foobaz"])
     # status quo means must always be zero
     self.assertTrue(
         np.allclose(results[0].means, np.array([0.0, 0.0])), results[0].means
     )
     # status quo covariances must always be zero
     self.assertTrue(
         np.allclose(results[0].covariance, np.array([[0.0, 0.0], [0.0, 0.0]])),
         results[0].covariance,
     )
     self.assertEqual(results[1].metric_names, ["foobar", "foobaz"])
     self.assertTrue(
         np.allclose(results[1].means, np.array([-51.25, 98.4])), results[1].means
     )
     self.assertTrue(
         np.allclose(results[1].covariance, np.array([[812.5, 0.0], [0.0, 480.0]])),
         results[1].covariance,
     )
Example #12
 def testClampObservationFeaturesNearBounds(self):
     cases = [
         (
             ObservationFeatures(
                 parameters={"w": 1.0, "x": 2, "y": "foo", "z": True}
             ),
             ObservationFeatures(
                 parameters={"w": 1.0, "x": 2, "y": "foo", "z": True}
             ),
         ),
         (
             ObservationFeatures(
                 parameters={"w": 0.0, "x": 2, "y": "foo", "z": True}
             ),
             ObservationFeatures(
                 parameters={"w": 0.5, "x": 2, "y": "foo", "z": True}
             ),
         ),
         (
             ObservationFeatures(
                 parameters={"w": 100.0, "x": 2, "y": "foo", "z": True}
             ),
             ObservationFeatures(
                 parameters={"w": 5.5, "x": 2, "y": "foo", "z": True}
             ),
         ),
         (
             ObservationFeatures(
                 parameters={"w": 1.0, "x": 0, "y": "foo", "z": True}
             ),
             ObservationFeatures(
                 parameters={"w": 1.0, "x": 1, "y": "foo", "z": True}
             ),
         ),
         (
             ObservationFeatures(
                 parameters={"w": 1.0, "x": 11, "y": "foo", "z": True}
             ),
             ObservationFeatures(
                 parameters={"w": 1.0, "x": 10, "y": "foo", "z": True}
             ),
         ),
     ]
     search_space = get_experiment().search_space
     for obs_ft, expected_obs_ft in cases:
         actual_obs_ft = clamp_observation_features([obs_ft], search_space)
         self.assertEqual(actual_obs_ft[0], expected_obs_ft)
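
clamp_observation_features pulls out-of-range values back to the parameter bounds. A minimal sketch, assuming the helper is importable from ax.modelbridge.modelbridge_utils and using a single float parameter with the same bounds the test above implies:

from ax.core.observation import ObservationFeatures
from ax.core.parameter import ParameterType, RangeParameter
from ax.core.search_space import SearchSpace
from ax.modelbridge.modelbridge_utils import clamp_observation_features

space = SearchSpace(parameters=[
    RangeParameter("w", lower=0.5, upper=5.5, parameter_type=ParameterType.FLOAT),
])
out_of_bounds = [ObservationFeatures(parameters={"w": 100.0})]
clamped = clamp_observation_features(out_of_bounds, space)
print(clamped[0].parameters["w"])  # 5.5, clamped to the upper bound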
Example #13
    def test_ST_MTGP_NEHVI(self):
        """Tests single type MTGP NEHVI instantiation."""
        multi_obj_exp = get_branin_experiment_with_multi_objective(
            with_batch=True,
            with_status_quo=True,
        )
        metrics = multi_obj_exp.optimization_config.objective.metrics
        multi_objective_thresholds = [
            ObjectiveThreshold(metric=metrics[0],
                               bound=0.0,
                               relative=False,
                               op=ComparisonOp.GEQ),
            ObjectiveThreshold(metric=metrics[1],
                               bound=0.0,
                               relative=False,
                               op=ComparisonOp.GEQ),
        ]
        sobol = Models.SOBOL(search_space=multi_obj_exp.search_space)
        self.assertIsInstance(sobol, RandomModelBridge)
        for _ in range(2):
            sobol_run = sobol.gen(n=1)
            t = multi_obj_exp.new_batch_trial().add_generator_run(sobol_run)
            t.set_status_quo_with_weight(status_quo=t.arms[0], weight=0.5)
            t.run().mark_completed()
        status_quo_features = ObservationFeatures(
            parameters=multi_obj_exp.trials[0].status_quo.parameters,
            trial_index=0,
        )
        mtgp = Models.ST_MTGP_NEHVI(
            experiment=multi_obj_exp,
            data=multi_obj_exp.fetch_data(),
            status_quo_features=status_quo_features,
            objective_thresholds=multi_objective_thresholds,
        )
        self.assertIsInstance(mtgp, TorchModelBridge)
        self.assertIsInstance(mtgp.model, MultiObjectiveBotorchModel)

        # test it can generate
        mtgp_run = mtgp.gen(n=1,
                            fixed_features=ObservationFeatures(parameters={},
                                                               trial_index=1))
        self.assertEqual(len(mtgp_run.arms), 1)

        # test a generated trial can be completed
        t = multi_obj_exp.new_batch_trial().add_generator_run(mtgp_run)
        t.set_status_quo_with_weight(status_quo=t.arms[0], weight=0.5)
        t.run().mark_completed()
Example #14
    def testTransformOptimizationConfig(self):
        m1 = Metric(name="m1")
        m2 = Metric(name="m2")
        m3 = Metric(name="m3")
        objective = Objective(metric=m3, minimize=False)
        cons = [
            OutcomeConstraint(
                metric=m1, op=ComparisonOp.GEQ, bound=2.0, relative=False
            ),
            OutcomeConstraint(
                metric=m2, op=ComparisonOp.LEQ, bound=3.5, relative=False
            ),
        ]
        oc = OptimizationConfig(objective=objective, outcome_constraints=cons)
        fixed_features = ObservationFeatures({"z": "a"})
        oc = self.t.transform_optimization_config(oc, None, fixed_features)
        cons_t = [
            OutcomeConstraint(
                metric=m1, op=ComparisonOp.GEQ, bound=1.0, relative=False
            ),
            OutcomeConstraint(
                metric=m2,
                op=ComparisonOp.LEQ,
                bound=(3.5 - 5.0) / (sqrt(2) * 3),
                relative=False,
            ),
        ]
        self.assertTrue(oc.outcome_constraints == cons_t)
        self.assertTrue(oc.objective == objective)

        # No constraints
        oc2 = OptimizationConfig(objective=objective)
        oc3 = deepcopy(oc2)
        oc3 = self.t.transform_optimization_config(oc3, None, fixed_features)
        self.assertTrue(oc2 == oc3)

        # Check fail with relative
        con = OutcomeConstraint(
            metric=m1, op=ComparisonOp.GEQ, bound=2.0, relative=True
        )
        oc = OptimizationConfig(objective=objective, outcome_constraints=[con])
        with self.assertRaises(ValueError):
            oc = self.t.transform_optimization_config(oc, None, fixed_features)
        # Fail without strat param fixed
        fixed_features = ObservationFeatures({"x": 2.0})
        with self.assertRaises(ValueError):
            oc = self.t.transform_optimization_config(oc, None, fixed_features)
Example #15
    def testSetStatusQuo(self, mock_fit, mock_observations_from_data):
        modelbridge = ModelBridge(search_space_for_value(),
                                  0, [],
                                  get_experiment(),
                                  0,
                                  status_quo_name="1_1")
        self.assertEqual(modelbridge.status_quo, observation1())

        # Alternatively, we can specify by features
        modelbridge = ModelBridge(
            search_space_for_value(),
            0,
            [],
            get_experiment(),
            0,
            status_quo_features=observation1().features,
        )
        self.assertEqual(modelbridge.status_quo, observation1())

        # Alternatively, we can specify on experiment
        # Put a dummy arm with SQ name 1_1 on the dummy experiment.
        exp = get_experiment()
        sq = Arm(name="1_1", parameters={"x": 3.0})
        exp._status_quo = sq
        # Check that we set SQ to arm 1_1
        modelbridge = ModelBridge(search_space_for_value(), 0, [], exp, 0)
        self.assertEqual(modelbridge.status_quo, observation1())

        # Errors if features and name both specified
        with self.assertRaises(ValueError):
            modelbridge = ModelBridge(
                search_space_for_value(),
                0,
                [],
                exp,
                0,
                status_quo_features=observation1().features,
                status_quo_name="1_1",
            )

        # Left as None if features or name don't exist
        modelbridge = ModelBridge(search_space_for_value(),
                                  0, [],
                                  exp,
                                  0,
                                  status_quo_name="1_0")
        self.assertIsNone(modelbridge.status_quo)
        modelbridge = ModelBridge(
            search_space_for_value(),
            0,
            [],
            get_experiment(),
            0,
            status_quo_features=ObservationFeatures(parameters={
                "x": 3.0,
                "y": 10.0
            }),
        )
        self.assertIsNone(modelbridge.status_quo)
Example #16
    def _gen(
        self,
        n: int,
        search_space: SearchSpace,
        pending_observations: Dict[str, List[ObservationFeatures]],
        fixed_features: ObservationFeatures,
        model_gen_options: Optional[TConfig] = None,
        optimization_config: Optional[OptimizationConfig] = None,
    ) -> Tuple[List[ObservationFeatures], List[float],
               Optional[ObservationFeatures], TGenMetadata, ]:
        """Generate new candidates according to search_space and
        optimization_config.

        The outcome constraints should be transformed to no longer be relative.
        """
        array_model_gen_args = self._get_transformed_model_gen_args(
            search_space=search_space,
            pending_observations=pending_observations,
            fixed_features=fixed_features,
            model_gen_options=model_gen_options,
            optimization_config=optimization_config,
        )

        # Generate the candidates
        search_space_digest = array_model_gen_args.search_space_digest
        # TODO: pass array_model_gen_args to _model_gen
        X, w, gen_metadata, candidate_metadata = self._model_gen(
            n=n,
            bounds=search_space_digest.bounds,
            objective_weights=array_model_gen_args.objective_weights,
            outcome_constraints=array_model_gen_args.outcome_constraints,
            linear_constraints=array_model_gen_args.linear_constraints,
            fixed_features=array_model_gen_args.fixed_features,
            pending_observations=array_model_gen_args.pending_observations,
            model_gen_options=model_gen_options,
            rounding_func=array_model_gen_args.rounding_func,
            target_fidelities=search_space_digest.target_fidelities,
            **array_model_gen_args.extra_model_gen_kwargs,
        )
        # Transform array to observations
        observation_features = parse_observation_features(
            X=X,
            param_names=self.parameters,
            candidate_metadata=candidate_metadata)
        xbest = self._model_best_point(
            bounds=search_space_digest.bounds,
            objective_weights=array_model_gen_args.objective_weights,
            outcome_constraints=array_model_gen_args.outcome_constraints,
            linear_constraints=array_model_gen_args.linear_constraints,
            fixed_features=array_model_gen_args.fixed_features,
            model_gen_options=model_gen_options,
            target_fidelities=search_space_digest.target_fidelities,
        )
        best_obsf = (None if xbest is None else ObservationFeatures(
            parameters={
                p: float(xbest[i])
                for i, p in enumerate(self.parameters)
            }))
        return observation_features, w.tolist(), best_obsf, gen_metadata
Example #17
    def testGenArms(self):
        p1 = {"x": 0, "y": 1}
        p2 = {"x": 4, "y": 8}
        observation_features = [
            ObservationFeatures(parameters=p1),
            ObservationFeatures(parameters=p2),
        ]
        arms = gen_arms(observation_features=observation_features)
        self.assertEqual(arms[0].parameters, p1)

        arm = Arm(name="1_1", parameters=p1)
        arms_by_signature = {arm.signature: arm}
        arms = gen_arms(
            observation_features=observation_features,
            arms_by_signature=arms_by_signature,
        )
        self.assertEqual(arms[0].name, "1_1")
Example #18
 def setUp(self):
     self.search_space = SearchSpace(
         parameters=[
             RangeParameter("a",
                            lower=1.0,
                            upper=5.0,
                            parameter_type=ParameterType.FLOAT),
             RangeParameter(
                 "b",
                 lower=1.0,
                 upper=5.0,
                 digits=2,
                 parameter_type=ParameterType.FLOAT,
             ),
             ChoiceParameter("c",
                             parameter_type=ParameterType.STRING,
                             values=["a", "b", "c"]),
             FixedParameter(name="d",
                            parameter_type=ParameterType.INT,
                            value=2),
         ],
         parameter_constraints=[],
     )
     self.t = Cast(search_space=self.search_space)
     self.hss = get_hierarchical_search_space()
     self.t_hss = Cast(search_space=self.hss)
     self.obs_feats_hss = ObservationFeatures(
         parameters={
             "model": "Linear",
             "learning_rate": 0.01,
             "l2_reg_weight": 0.0001,
             "num_boost_rounds": 12,
         },
         trial_index=9,
         metadata=None,
     )
     self.obs_feats_hss_2 = ObservationFeatures(
         parameters={
             "model": "XGBoost",
             "learning_rate": 0.01,
             "l2_reg_weight": 0.0001,
             "num_boost_rounds": 12,
         },
         trial_index=10,
         metadata=None,
     )
Example #19
    def setUp(self):
        x = RangeParameter("x", ParameterType.FLOAT, lower=0, upper=1)
        y = RangeParameter(
            "y", ParameterType.FLOAT, lower=1, upper=2, is_fidelity=True, target_value=2
        )
        z = RangeParameter("z", ParameterType.FLOAT, lower=0, upper=5)
        self.parameters = [x, y, z]
        parameter_constraints = [
            OrderConstraint(x, y),
            SumConstraint([x, z], False, 3.5),
        ]

        self.search_space = SearchSpace(self.parameters, parameter_constraints)

        self.observation_features = [
            ObservationFeatures(parameters={"x": 0.2, "y": 1.2, "z": 3}),
            ObservationFeatures(parameters={"x": 0.4, "y": 1.4, "z": 3}),
            ObservationFeatures(parameters={"x": 0.6, "y": 1.6, "z": 3}),
        ]
        self.observation_data = [
            ObservationData(
                metric_names=["a", "b"],
                means=np.array([1.0, -1.0]),
                covariance=np.array([[1.0, 4.0], [4.0, 6.0]]),
            ),
            ObservationData(
                metric_names=["a", "b"],
                means=np.array([2.0, -2.0]),
                covariance=np.array([[2.0, 5.0], [5.0, 7.0]]),
            ),
            ObservationData(
                metric_names=["a"], means=np.array([3.0]), covariance=np.array([[3.0]])
            ),
        ]
        self.observations = [
            Observation(
                features=self.observation_features[i],
                data=self.observation_data[i],
                arm_name=str(i),
            )
            for i in range(3)
        ]
        self.pending_observations = {
            "b": [ObservationFeatures(parameters={"x": 0.6, "y": 1.6, "z": 3})]
        }
        self.model_gen_options = {"option": "yes"}
Example #20
def tensor_to_observation_features(
        x_tensor: torch.Tensor) -> List[ObservationFeatures]:
    """Convert torch Tensors to ax ObservationFeatures."""
    x_features = []
    for x in x_tensor:
        x_feature = ObservationFeatures(parameters={})
        x_feature.parameters['x0'] = x.item()
        x_features.append(x_feature)
    return x_features
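
Usage sketch for the helper above: each row (or scalar) of the tensor becomes one ObservationFeatures with a single parameter "x0".

import torch

features = tensor_to_observation_features(torch.tensor([[0.25], [0.75]]))
print([f.parameters for f in features])  # [{'x0': 0.25}, {'x0': 0.75}]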
Example #21
    def test_evaluate_acquisition_function(self, _, mock_torch_model):
        ma = TorchModelBridge(
            experiment=None,
            search_space=None,
            data=None,
            model=None,
            transforms=[],
            torch_dtype=torch.float64,
            torch_device=torch.device("cpu"),
        )
        # These attributes would've been set by `ArrayModelBridge` __init__, but it's
        # mocked.
        ma.model = mock_torch_model()
        t = mock.MagicMock(Transform, autospec=True)
        t.transform_observation_features.return_value = [
            ObservationFeatures(parameters={"x": 3.0, "y": 4.0})
        ]
        ma.transforms = {"ExampleTransform": t}
        ma.parameters = ["x", "y"]
        model_eval_acqf = mock_torch_model.return_value.evaluate_acquisition_function
        model_eval_acqf.return_value = torch.tensor([5.0], dtype=torch.float64)

        acqf_vals = ma.evaluate_acquisition_function(
            observation_features=[ObservationFeatures(parameters={"x": 1.0, "y": 2.0})],
            search_space_digest=SearchSpaceDigest(feature_names=[], bounds=[]),
            objective_weights=np.array([1.0]),
            objective_thresholds=None,
            outcome_constraints=None,
            linear_constraints=None,
            fixed_features=None,
            pending_observations=None,
        )

        self.assertEqual(acqf_vals, [5.0])
        t.transform_observation_features.assert_called_with(
            [ObservationFeatures(parameters={"x": 1.0, "y": 2.0})]
        )
        model_eval_acqf.assert_called_once()
        self.assertTrue(
            torch.equal(  # `call_args` is an (args, kwargs) tuple
                model_eval_acqf.call_args[1]["X"],
                torch.tensor([[3.0, 4.0]], dtype=torch.float64),
            )
        )
Example #22
def get_MTGP(
    experiment: Experiment,
    data: Data,
    search_space: Optional[SearchSpace] = None,
    trial_index: Optional[int] = None,
) -> TorchModelBridge:
    """Instantiates a Multi-task Gaussian Process (MTGP) model that generates
    points with EI.

    If the input experiment is a MultiTypeExperiment then a
    Multi-type Multi-task GP model will be instantiated.
    Otherwise, the model will be a Single-type Multi-task GP.
    """

    if isinstance(experiment, MultiTypeExperiment):
        trial_index_to_type = {
            t.index: t.trial_type for t in experiment.trials.values()
        }
        transforms = MT_MTGP_trans
        transform_configs = {
            "TrialAsTask": {"trial_level_map": {"trial_type": trial_index_to_type}},
            "ConvertMetricNames": tconfig_from_mt_experiment(experiment),
        }
    else:
        # Set transforms for a Single-type MTGP model.
        transforms = ST_MTGP_trans
        transform_configs = None

    # Choose the status quo features for the experiment from the selected trial.
    # If trial_index is None, we will look for a status quo from the last
    # experiment trial to use as a status quo for the experiment.
    if trial_index is None:
        trial_index = len(experiment.trials) - 1
    elif trial_index >= len(experiment.trials):
        raise ValueError("trial_index is bigger than the number of experiment trials")

    # pyre-fixme[16]: `ax.core.base_trial.BaseTrial` has no attribute `status_quo`.
    status_quo = experiment.trials[trial_index].status_quo
    if status_quo is None:
        status_quo_features = None
    else:
        status_quo_features = ObservationFeatures(
            parameters=status_quo.parameters, trial_index=trial_index
        )

    return TorchModelBridge(
        experiment=experiment,
        search_space=search_space or experiment.search_space,
        data=data,
        model=BotorchModel(),
        transforms=transforms,
        transform_configs=transform_configs,
        torch_dtype=torch.double,
        torch_device=DEFAULT_TORCH_DEVICE,
        status_quo_features=status_quo_features,
    )
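
A sketch of how the status_quo_features argument is assembled before building the model bridge; the Branin test stub and its with_batch/with_status_quo flags are assumptions used only for illustration:

from ax.core.observation import ObservationFeatures
from ax.utils.testing.core_stubs import get_branin_experiment

experiment = get_branin_experiment(with_batch=True, with_status_quo=True)
# Mirror get_MTGP: take the status quo of the last trial, if one is set.
trial_index = len(experiment.trials) - 1
status_quo = experiment.trials[trial_index].status_quo
status_quo_features = (
    None
    if status_quo is None
    else ObservationFeatures(parameters=status_quo.parameters, trial_index=trial_index)
)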
Example #23
 def testTransformObservationFeatures(self):
     # Verify running the transform on already-casted features does nothing
     observation_features = [
         ObservationFeatures(parameters={"a": 1.2345, "b": 2.34, "c": "a", "d": 2})
     ]
     obs_ft2 = deepcopy(observation_features)
     obs_ft2 = self.t.transform_observation_features(obs_ft2)
     self.assertEqual(obs_ft2, observation_features)
     obs_ft2 = self.t.untransform_observation_features(obs_ft2)
     self.assertEqual(obs_ft2, observation_features)
Example #24
def observation2trans() -> Observation:
    return Observation(
        features=ObservationFeatures(parameters={"x": 16.0, "y": 2.0}, trial_index=1),
        data=ObservationData(
            means=np.array([9.0, 4.0]),
            covariance=np.array([[2.0, 3.0], [4.0, 5.0]]),
            metric_names=["a", "b"],
        ),
        arm_name="1_1",
    )
Example #25
 def setUp(self):
     super().setUp()
     search_space = get_search_space()
     gr = Models.SOBOL(search_space=search_space).gen(n=1)
     self.model = Mock(
         search_space=search_space,
         status_quo=Mock(
             features=ObservationFeatures(parameters=gr.arms[0].parameters)
         ),
     )
Example #26
 def testTransformObservations(self):
     std_m2_a = sqrt(2) * 3
     obsd1_ta = ObservationData(
         metric_names=["m1", "m2", "m2"],
         means=np.array([0.0, -3.0 / std_m2_a, 3.0 / std_m2_a]),
         covariance=np.array(
             [
                 [1.0, 0.2 / std_m2_a, 0.4 / std_m2_a],
                 [0.2 / std_m2_a, 2.0 / 18, 0.8 / 18],
                 [0.4 / std_m2_a, 0.8 / 18, 3.0 / 18],
             ]
         ),
     )
     std_m1_b, std_m2_b = 2 * sqrt(2), sqrt(1 / 2)
     obsd1_tb = ObservationData(
         metric_names=["m1", "m2", "m2"],
         means=np.array([-2.0 / std_m1_b, 0.5 / std_m2_b, 6.5 / std_m2_b]),
         covariance=np.array(
             [
                 [1.0 / 8, 0.2 / 2, 0.4 / 2],
                 [0.2 / 2, 2.0 * 2, 0.8 * 2],
                 [0.4 / 2, 0.8 * 2, 3.0 * 2],
             ]
         ),
     )
     obsd2 = [deepcopy(self.obsd1)]
     obsd2 = self.t.transform_observation_data(
         obsd2, [ObservationFeatures({"z": "a"})]
     )
     self.assertTrue(osd_allclose(obsd2[0], obsd1_ta))
     obsd2 = self.t.untransform_observation_data(
         obsd2, [ObservationFeatures({"z": "a"})]
     )
     self.assertTrue(osd_allclose(obsd2[0], self.obsd1))
     obsd2 = [deepcopy(self.obsd1)]
     obsd2 = self.t.transform_observation_data(
         obsd2, [ObservationFeatures({"z": "b"})]
     )
     self.assertTrue(osd_allclose(obsd2[0], obsd1_tb))
     obsd2 = self.t.untransform_observation_data(
         obsd2, [ObservationFeatures({"z": "b"})]
     )
     self.assertTrue(osd_allclose(obsd2[0], self.obsd1))
Example #27
    def test_transform_status_quos_always_zero(
        self,
        sq_mean: float,
        sq_sem: float,
        mean: float,
        sem: float,
    ):
        assume(abs(sq_mean) >= 1e-10)
        assume(abs(sq_mean) != sq_sem)

        obs_data = [
            ObservationData(
                metric_names=["foo"],
                means=np.array([sq_mean]),
                covariance=np.array([[sq_sem]]),
            ),
            ObservationData(
                metric_names=["foo"],
                means=np.array([mean]),
                covariance=np.array([[sem]]),
            ),
        ]
        obs_features = [
            ObservationFeatures(parameters={"x": 1}, trial_index=0),
            ObservationFeatures(parameters={"x": 2}, trial_index=0),
        ]
        modelbridge = Mock(
            status_quo=Mock(
                data=obs_data[0],
                features=obs_features[0],
            )
        )
        transform = Relativize(
            search_space=None,
            observation_features=obs_features,
            observation_data=obs_data,
            modelbridge=modelbridge,
        )
        relative_data = transform.transform_observation_data(obs_data, obs_features)
        self.assertEqual(relative_data[0].metric_names, ["foo"])
        self.assertEqual(relative_data[0].means[0], 0)
        self.assertEqual(relative_data[0].covariance[0][0], 0)
Example #28
    def setUp(self):
        self.parameters = [
            ChoiceParameter("x", ParameterType.FLOAT, values=[0, 1]),
            ChoiceParameter("y", ParameterType.STRING, values=["foo", "bar"]),
            FixedParameter("z", ParameterType.BOOL, value=True),
        ]
        parameter_constraints = []

        self.search_space = SearchSpace(self.parameters, parameter_constraints)

        self.observation_features = [
            ObservationFeatures(parameters={"x": 0, "y": "foo", "z": True}),
            ObservationFeatures(parameters={"x": 1, "y": "foo", "z": True}),
            ObservationFeatures(parameters={"x": 1, "y": "bar", "z": True}),
        ]
        self.observation_data = [
            ObservationData(
                metric_names=["a", "b"],
                means=np.array([1.0, -1.0]),
                covariance=np.array([[1.0, 4.0], [4.0, 6.0]]),
            ),
            ObservationData(
                metric_names=["a", "b"],
                means=np.array([2.0, -2.0]),
                covariance=np.array([[2.0, 5.0], [5.0, 7.0]]),
            ),
            ObservationData(
                metric_names=["a"], means=np.array([3.0]), covariance=np.array([[3.0]])
            ),
        ]
        self.observations = [
            Observation(
                features=self.observation_features[i],
                data=self.observation_data[i],
                arm_name=str(i),
            )
            for i in range(3)
        ]
        self.pending_observations = {
            "b": [ObservationFeatures(parameters={"x": 0, "y": "foo", "z": True})]
        }
        self.model_gen_options = {"option": "yes"}
Example #29
    def setUp(self):
        self.search_space = SearchSpace(parameters=[
            RangeParameter(
                "x", lower=1, upper=4, parameter_type=ParameterType.FLOAT)
        ])
        self.training_feats = [
            ObservationFeatures({"x": 1}, trial_index=0),
            ObservationFeatures({"x": 2}, trial_index=0),
            ObservationFeatures({"x": 3}, trial_index=1),
            ObservationFeatures({"x": 4}, trial_index=2),
        ]
        self.t = TrialAsTask(
            search_space=self.search_space,
            observation_features=self.training_feats,
            observation_data=None,
        )
        self.bm = {
            "bp1": {
                0: "v1",
                1: "v2",
                2: "v3"
            },
            "bp2": {
                0: "u1",
                1: "u1",
                2: "u2"
            },
        }

        self.t2 = TrialAsTask(
            search_space=self.search_space,
            observation_features=self.training_feats,
            observation_data=None,
            config={"trial_level_map": self.bm},
        )
        self.t3 = TrialAsTask(
            search_space=self.search_space,
            observation_features=self.training_feats,
            observation_data=None,
            config={"trial_level_map": {}},
        )
Example #30
def get_observation() -> Observation:
    return Observation(
        features=ObservationFeatures(
            parameters={"x": 2.0, "y": 10.0}, trial_index=np.int64(0)
        ),
        data=ObservationData(
            means=np.array([2.0, 4.0]),
            covariance=np.array([[1.0, 2.0], [3.0, 4.0]]),
            metric_names=["a", "b"],
        ),
        arm_name="1_1",
    )
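
A closing sketch of the same construction pattern: an Observation ties features (where and when) to data (what was measured), and the assertEqual checks in the tests above rely on ObservationFeatures comparing equal when their parameters, trial_index and other feature fields match.

import numpy as np
from ax.core.observation import Observation, ObservationData, ObservationFeatures

obs = Observation(
    features=ObservationFeatures(parameters={"x": 2.0, "y": 10.0}, trial_index=0),
    data=ObservationData(
        metric_names=["a"], means=np.array([2.0]), covariance=np.array([[1.0]])
    ),
    arm_name="1_1",
)
same_features = ObservationFeatures(parameters={"x": 2.0, "y": 10.0}, trial_index=0)
print(obs.features == same_features)  # True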