Example 1
 def testObservation(self):
     obs = Observation(
         features=ObservationFeatures(parameters={"x": 20}),
         data=ObservationData(means=np.array([1]),
                              covariance=np.array([[2]]),
                              metric_names=["a"]),
         arm_name="0_0",
     )
     self.assertEqual(obs.features,
                      ObservationFeatures(parameters={"x": 20}))
     self.assertEqual(
         obs.data,
         ObservationData(means=np.array([1]),
                         covariance=np.array([[2]]),
                         metric_names=["a"]),
     )
     self.assertEqual(obs.arm_name, "0_0")
     obs2 = Observation(
         features=ObservationFeatures(parameters={"x": 20}),
         data=ObservationData(means=np.array([1]),
                              covariance=np.array([[2]]),
                              metric_names=["a"]),
         arm_name="0_0",
     )
     self.assertEqual(obs, obs2)
     obs3 = Observation(
         features=ObservationFeatures(parameters={"x": 10}),
         data=ObservationData(means=np.array([1]),
                              covariance=np.array([[2]]),
                              metric_names=["a"]),
         arm_name="0_0",
     )
     self.assertNotEqual(obs, obs3)
     self.assertNotEqual(obs, 1)
Example 2
 def setUp(self):
     self.training_data = [
         Observation(
             features=ObservationFeatures(parameters={"x": 2.0}, trial_index=0),
             data=ObservationData(
                 means=np.array([2.0, 4.0]),
                 covariance=np.array([[1.0, 2.0], [3.0, 4.0]]),
                 metric_names=["a", "b"],
             ),
             arm_name="1_1",
         ),
         Observation(
             features=ObservationFeatures(parameters={"x": 2.0}, trial_index=1),
             data=ObservationData(
                 means=np.array([3.0, 5.0, 6.0]),
                 covariance=np.array(
                     [[1.0, 2.0, 3.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]]
                 ),
                 metric_names=["a", "b", "a"],
             ),
             arm_name="1_1",
         ),
         Observation(
             features=ObservationFeatures(parameters={"x": 3.0}),
             data=ObservationData(
                 means=np.array([7.0, 8.0]),
                 covariance=np.array([[1.0, 2.0], [3.0, 4.0]]),
                 metric_names=["a", "b"],
             ),
             arm_name="1_2",
         ),
         Observation(
             features=ObservationFeatures(parameters={"x": 4.0}, trial_index=2),
             data=ObservationData(
                 means=np.array([9.0, 10.0]),
                 covariance=np.array([[1.0, 2.0], [3.0, 4.0]]),
                 metric_names=["a", "b"],
             ),
             arm_name="1_3",
         ),
     ]
     self.observation_data = [
         ObservationData(
             means=np.array([2.0, 1.0]),
             covariance=np.array([[1.0, 2.0], [3.0, 4.0]]),
             metric_names=["a", "b"],
         )
     ] * 4
     self.diagnostics: List[CVDiagnostics] = [
         {"Fisher exact test p": {"y_a": 0.0, "y_b": 0.4}},
         {"Fisher exact test p": {"y_a": 0.1, "y_b": 0.1}},
         {"Fisher exact test p": {"y_a": 0.5, "y_b": 0.6}},
     ]
Example 3
 def setUp(self):
     self.training_data = [
         Observation(
             features=ObservationFeatures(parameters={"x": 2.0},
                                          trial_index=0),
             data=ObservationData(
                 means=np.array([2.0, 4.0]),
                 covariance=np.array([[1.0, 2.0], [3.0, 4.0]]),
                 metric_names=["a", "b"],
             ),
             arm_name="1_1",
         ),
         Observation(
             features=ObservationFeatures(parameters={"x": 2.0},
                                          trial_index=1),
             data=ObservationData(
                 means=np.array([3.0, 5.0, 6.0]),
                 covariance=np.array([[1.0, 2.0, 3.0], [3.0, 4.0, 5.0],
                                      [6.0, 7.0, 8.0]]),
                 metric_names=["a", "b", "a"],
             ),
             arm_name="1_1",
         ),
         Observation(
             features=ObservationFeatures(parameters={"x": 3.0}),
             data=ObservationData(
                 means=np.array([7.0, 8.0]),
                 covariance=np.array([[1.0, 2.0], [3.0, 4.0]]),
                 metric_names=["a", "b"],
             ),
             arm_name="1_2",
         ),
         Observation(
             features=ObservationFeatures(parameters={"x": 4.0},
                                          trial_index=2),
             data=ObservationData(
                 means=np.array([9.0, 10.0]),
                 covariance=np.array([[1.0, 2.0], [3.0, 4.0]]),
                 metric_names=["a", "b"],
             ),
             arm_name="1_3",
         ),
     ]
     self.observation_data = [
         ObservationData(
             means=np.array([2.0, 1.0]),
             covariance=np.array([[1.0, 2.0], [3.0, 4.0]]),
             metric_names=["a", "b"],
         )
     ] * 4
Example 4
 def testFit(self, mock_init):
     sq_feat = ObservationFeatures({})
     sq_data = self.observation_data[0]
     sq_obs = Observation(features=sq_feat,
                          data=sq_data,
                          arm_name="status_quo")
     ma = DiscreteModelBridge()
     ma._training_data = self.observations + [sq_obs]
     model = mock.create_autospec(DiscreteModel, instance=True)
     ma._fit(
         model,
         self.search_space,
         self.observation_features + [sq_feat],
         self.observation_data + [sq_data],
     )
     self.assertEqual(ma.parameters, ["x", "y", "z"])
     self.assertEqual(sorted(ma.outcomes), ["a", "b"])
     self.assertEqual(ma.training_in_design, [True, True, True, False])
     Xs = {
         "a": [[0, "foo", True], [1, "foo", True], [1, "bar", True]],
         "b": [[0, "foo", True], [1, "foo", True]],
     }
     Ys = {"a": [[1.0], [2.0], [3.0]], "b": [[-1.0], [-2.0]]}
     Yvars = {"a": [[1.0], [2.0], [3.0]], "b": [[6.0], [7.0]]}
     parameter_values = [[0.0, 1.0], ["foo", "bar"], [True]]
     model_fit_args = model.fit.mock_calls[0][2]
     for i, x in enumerate(model_fit_args["Xs"]):
         self.assertEqual(x, Xs[ma.outcomes[i]])
     for i, y in enumerate(model_fit_args["Ys"]):
         self.assertEqual(y, Ys[ma.outcomes[i]])
     for i, v in enumerate(model_fit_args["Yvars"]):
         self.assertEqual(v, Yvars[ma.outcomes[i]])
     self.assertEqual(model_fit_args["parameter_values"], parameter_values)
Example 5
    def setUp(self):
        x = RangeParameter("x", ParameterType.FLOAT, lower=0, upper=1)
        y = RangeParameter("y", ParameterType.FLOAT, lower=1, upper=2)
        z = RangeParameter("z", ParameterType.FLOAT, lower=0, upper=5)
        self.parameters = [x, y, z]
        parameter_constraints = [
            OrderConstraint(x, y),
            SumConstraint([x, z], False, 3.5),
        ]

        self.search_space = SearchSpace(self.parameters, parameter_constraints)

        self.observation_features = [
            ObservationFeatures(parameters={
                "x": 0.2,
                "y": 1.2,
                "z": 3
            }),
            ObservationFeatures(parameters={
                "x": 0.4,
                "y": 1.4,
                "z": 3
            }),
            ObservationFeatures(parameters={
                "x": 0.6,
                "y": 1.6,
                "z": 3
            }),
        ]
        self.observation_data = [
            ObservationData(
                metric_names=["a", "b"],
                means=np.array([1.0, -1.0]),
                covariance=np.array([[1.0, 4.0], [4.0, 6.0]]),
            ),
            ObservationData(
                metric_names=["a", "b"],
                means=np.array([2.0, -2.0]),
                covariance=np.array([[2.0, 5.0], [5.0, 7.0]]),
            ),
            ObservationData(metric_names=["a"],
                            means=np.array([3.0]),
                            covariance=np.array([[3.0]])),
        ]
        self.observations = [
            Observation(
                features=self.observation_features[i],
                data=self.observation_data[i],
                arm_name=str(i),
            ) for i in range(3)
        ]
        self.pending_observations = {
            "b":
            [ObservationFeatures(parameters={
                "x": 0.6,
                "y": 1.6,
                "z": 3
            })]
        }
        self.model_gen_options = {"option": "yes"}
Example 6
 def testSeparateObservations(self):
     obs = Observation(
         features=ObservationFeatures(parameters={"x": 20}),
         data=ObservationData(means=np.array([1]),
                              covariance=np.array([[2]]),
                              metric_names=["a"]),
         arm_name="0_0",
     )
     obs_feats, obs_data = separate_observations(observations=[obs])
     self.assertEqual(obs.features,
                      ObservationFeatures(parameters={"x": 20}))
     self.assertEqual(
         obs.data,
         ObservationData(means=np.array([1]),
                         covariance=np.array([[2]]),
                         metric_names=["a"]),
     )
     obs_feats, obs_data = separate_observations(observations=[obs],
                                                 copy=True)
     self.assertEqual(obs.features,
                      ObservationFeatures(parameters={"x": 20}))
     self.assertEqual(
         obs.data,
         ObservationData(means=np.array([1]),
                         covariance=np.array([[2]]),
                         metric_names=["a"]),
     )
Example 7
    def testFitAndUpdate(self, mock_init):
        sq_feat = ObservationFeatures({})
        sq_data = self.observation_data[2]
        sq_obs = Observation(
            features=ObservationFeatures({}),
            data=self.observation_data[2],
            arm_name="status_quo",
        )
        ma = NumpyModelBridge()
        ma._training_data = self.observations + [sq_obs]
        model = mock.create_autospec(NumpyModel, instance=True)
        # No out of design points allowed in direct calls to fit.
        with self.assertRaises(ValueError):
            ma._fit(
                model,
                self.search_space,
                self.observation_features + [sq_feat],
                self.observation_data + [sq_data],
            )
        ma._fit(model, self.search_space, self.observation_features,
                self.observation_data)
        self.assertEqual(ma.parameters, ["x", "z", "y"])
        self.assertEqual(sorted(ma.outcomes), ["a", "b"])
        Xs = {
            "a": np.array([[0.2, 3.0, 1.2], [0.4, 3.0, 1.4], [0.6, 3.0, 1.6]]),
            "b": np.array([[0.2, 3.0, 1.2], [0.4, 3.0, 1.4]]),
        }
        Ys = {
            "a": np.array([[1.0], [2.0], [3.0]]),
            "b": np.array([[-1.0], [-2.0]])
        }
        Yvars = {
            "a": np.array([[1.0], [2.0], [3.0]]),
            "b": np.array([[6.0], [7.0]])
        }
        # Put the fidelity parameter in the last column.
        bounds = [(0.0, 1.0), (0.0, 5.0), (1.0, 2.0)]
        model_fit_args = model.fit.mock_calls[0][2]
        for i, x in enumerate(model_fit_args["Xs"]):
            self.assertTrue(np.array_equal(x, Xs[ma.outcomes[i]]))
        for i, y in enumerate(model_fit_args["Ys"]):
            self.assertTrue(np.array_equal(y, Ys[ma.outcomes[i]]))
        for i, v in enumerate(model_fit_args["Yvars"]):
            self.assertTrue(np.array_equal(v, Yvars[ma.outcomes[i]]))
        self.assertEqual(model_fit_args["bounds"], bounds)
        self.assertEqual(model_fit_args["feature_names"], ["x", "z", "y"])

        # And update
        ma._update(
            observation_features=self.observation_features,
            observation_data=self.observation_data,
        )
        # Calling _update requires passing ALL data.
        model_update_args = model.update.mock_calls[0][2]
        for i, x in enumerate(model_update_args["Xs"]):
            self.assertTrue(np.array_equal(x, Xs[ma.outcomes[i]]))
        for i, y in enumerate(model_update_args["Ys"]):
            self.assertTrue(np.array_equal(y, Ys[ma.outcomes[i]]))
        for i, v in enumerate(model_update_args["Yvars"]):
            self.assertTrue(np.array_equal(v, Yvars[ma.outcomes[i]]))
Example 8
    def setUp(self):
        self.parameters = [
            ChoiceParameter("x", ParameterType.FLOAT, values=[0, 1]),
            ChoiceParameter("y", ParameterType.STRING, values=["foo", "bar"]),
            FixedParameter("z", ParameterType.BOOL, value=True),
        ]
        parameter_constraints = []

        self.search_space = SearchSpace(self.parameters, parameter_constraints)

        self.observation_features = [
            ObservationFeatures(parameters={
                "x": 0,
                "y": "foo",
                "z": True
            }),
            ObservationFeatures(parameters={
                "x": 1,
                "y": "foo",
                "z": True
            }),
            ObservationFeatures(parameters={
                "x": 1,
                "y": "bar",
                "z": True
            }),
        ]
        self.observation_data = [
            ObservationData(
                metric_names=["a", "b"],
                means=np.array([1.0, -1.0]),
                covariance=np.array([[1.0, 4.0], [4.0, 6.0]]),
            ),
            ObservationData(
                metric_names=["a", "b"],
                means=np.array([2.0, -2.0]),
                covariance=np.array([[2.0, 5.0], [5.0, 7.0]]),
            ),
            ObservationData(metric_names=["a"],
                            means=np.array([3.0]),
                            covariance=np.array([[3.0]])),
        ]
        self.observations = [
            Observation(
                features=self.observation_features[i],
                data=self.observation_data[i],
                arm_name=str(i),
            ) for i in range(3)
        ]
        self.pending_observations = {
            "b":
            [ObservationFeatures(parameters={
                "x": 0,
                "y": "foo",
                "z": True
            })]
        }
        self.model_gen_options = {"option": "yes"}
Example 9
    def testFitAndUpdate(self, mock_init):
        sq_feat = ObservationFeatures({})
        sq_data = self.observation_data[2]
        sq_obs = Observation(
            features=ObservationFeatures({}),
            data=self.observation_data[2],
            arm_name="status_quo",
        )
        ma = NumpyModelBridge()
        ma._training_data = self.observations + [sq_obs]
        model = mock.create_autospec(NumpyModel, instance=True)
        ma._fit(
            model,
            self.search_space,
            self.observation_features + [sq_feat],
            self.observation_data + [sq_data],
        )
        self.assertEqual(ma.parameters, ["x", "y", "z"])
        self.assertEqual(sorted(ma.outcomes), ["a", "b"])
        self.assertEqual(ma.training_in_design, [True, True, True, False])
        Xs = {
            "a": np.array([[0.2, 1.2, 3.0], [0.4, 1.4, 3.0], [0.6, 1.6, 3]]),
            "b": np.array([[0.2, 1.2, 3.0], [0.4, 1.4, 3.0]]),
        }
        Ys = {
            "a": np.array([[1.0], [2.0], [3.0]]),
            "b": np.array([[-1.0], [-2.0]])
        }
        Yvars = {
            "a": np.array([[1.0], [2.0], [3.0]]),
            "b": np.array([[6.0], [7.0]])
        }
        bounds = [(0.0, 1.0), (1.0, 2.0), (0.0, 5.0)]
        model_fit_args = model.fit.mock_calls[0][2]
        for i, x in enumerate(model_fit_args["Xs"]):
            self.assertTrue(np.array_equal(x, Xs[ma.outcomes[i]]))
        for i, y in enumerate(model_fit_args["Ys"]):
            self.assertTrue(np.array_equal(y, Ys[ma.outcomes[i]]))
        for i, v in enumerate(model_fit_args["Yvars"]):
            self.assertTrue(np.array_equal(v, Yvars[ma.outcomes[i]]))
        self.assertEqual(model_fit_args["bounds"], bounds)
        self.assertEqual(model_fit_args["feature_names"], ["x", "y", "z"])

        # And update
        ma.training_in_design.extend([True, True, True, True])
        ma._update(
            observation_features=self.observation_features + [sq_feat],
            observation_data=self.observation_data + [sq_data],
        )
        self.assertEqual(ma.training_in_design, [True, True, True, False] * 2)
        model_update_args = model.update.mock_calls[0][2]
        for i, x in enumerate(model_update_args["Xs"]):
            self.assertTrue(np.array_equal(x, Xs[ma.outcomes[i]]))
        for i, y in enumerate(model_update_args["Ys"]):
            self.assertTrue(np.array_equal(y, Ys[ma.outcomes[i]]))
        for i, v in enumerate(model_update_args["Yvars"]):
            self.assertTrue(np.array_equal(v, Yvars[ma.outcomes[i]]))
Example 10
def observation2trans() -> Observation:
    return Observation(
        features=ObservationFeatures(parameters={"x": 16.0, "y": 2.0}, trial_index=1),
        data=ObservationData(
            means=np.array([9.0, 4.0]),
            covariance=np.array([[2.0, 3.0], [4.0, 5.0]]),
            metric_names=["a", "b"],
        ),
        arm_name="1_1",
    )
Example 11
def get_observation() -> Observation:
    return Observation(
        features=ObservationFeatures(
            parameters={"x": 2.0, "y": 10.0}, trial_index=np.int64(0)
        ),
        data=ObservationData(
            means=np.array([2.0, 4.0]),
            covariance=np.array([[1.0, 2.0], [3.0, 4.0]]),
            metric_names=["a", "b"],
        ),
        arm_name="1_1",
    )
Example 12
def get_observation_status_quo1() -> Observation:
    return Observation(
        features=ObservationFeatures(
            parameters={"w": 0.85, "x": 1, "y": "baz", "z": False},
            trial_index=np.int64(1),
        ),
        data=ObservationData(
            means=np.array([2.0, 4.0]),
            covariance=np.array([[1.0, 2.0], [3.0, 4.0]]),
            metric_names=["a", "b"],
        ),
        arm_name="0_0",
    )
Example 13
def get_observation2trans(first_metric_name: str = "a",
                          second_metric_name="b") -> Observation:
    return Observation(
        features=ObservationFeatures(parameters={
            "x": 16.0,
            "y": 2.0
        },
                                     trial_index=np.int64(1)),
        data=ObservationData(
            means=np.array([9.0, 4.0]),
            covariance=np.array([[2.0, 3.0], [4.0, 5.0]]),
            metric_names=[first_metric_name, second_metric_name],
        ),
        arm_name="1_1",
    )
Example 14
def convert_mt_observations(
        observations: List[Observation],
        experiment: MultiTypeExperiment) -> List[Observation]:
    """Apply ConvertMetricNames transform to observations for a MT experiment."""
    observation_data = [o.data for o in observations]
    observation_features = [o.features for o in observations]
    transform = ConvertMetricNames(
        search_space=None,
        observation_data=observation_data,
        observation_features=observation_features,
        config=tconfig_from_mt_experiment(experiment),
    )
    transformed_observations = transform.transform_observation_data(
        observation_data=observation_data,
        observation_features=observation_features)
    return [
        Observation(
            features=obs.features,
            data=transformed_observations[i],
            arm_name=obs.arm_name,
        ) for i, obs in enumerate(observations)
    ]
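
A hedged usage sketch of the helper above, driven end to end: the stub factory `get_multi_type_experiment`, the exact import paths, and the `add_trials` flag are assumptions about Ax's test utilities (they vary across versions), not part of the snippet itself; `convert_mt_observations` is assumed to be in scope as defined above.

# Hedged usage sketch; import paths and the stub factory are assumptions.
from ax.core.observation import observations_from_data
from ax.utils.testing.core_stubs import get_multi_type_experiment

experiment = get_multi_type_experiment(add_trials=True)
data = experiment.fetch_data()
observations = observations_from_data(experiment=experiment, data=data)

# Metric names in each ObservationData are rewritten to the canonical names for
# the experiment's default trial type; features and arm names pass through.
converted = convert_mt_observations(observations, experiment)
for obs in converted:
    print(obs.arm_name, obs.data.metric_names)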
Example 15
def get_pareto_frontier_and_configs(
    modelbridge: modelbridge_module.array.ArrayModelBridge,
    observation_features: List[ObservationFeatures],
    observation_data: Optional[List[ObservationData]] = None,
    objective_thresholds: Optional[TRefPoint] = None,
    optimization_config: Optional[MultiObjectiveOptimizationConfig] = None,
    arm_names: Optional[List[Optional[str]]] = None,
    use_model_predictions: bool = True,
    transform_outcomes_and_configs: bool = True,
) -> Tuple[List[Observation], Tensor, Tensor, Optional[Tensor]]:
    """Helper that applies transforms and calls ``frontier_evaluator``.

    Returns the ``frontier_evaluator`` configs in addition to the Pareto
    observations.

    Args:
        modelbridge: ``Modelbridge`` used to predict metrics outcomes.
        observation_features: Observation features to consider for the Pareto
            frontier.
        observation_data: Data used to compute the Pareto front, unless
            ``observation_features`` are provided and ``use_model_predictions``
            is ``True``.
        objective_thresholds: Metric values bounding the region of interest in
            the objective outcome space; used to override objective thresholds
            specified in ``optimization_config``, if necessary.
        optimization_config: Multi-objective optimization config.
        arm_names: Arm names for each observation in ``observation_features``.
        use_model_predictions: If ``True``, will use model predictions at
            ``observation_features`` to compute Pareto front. If ``False``,
            will use ``observation_data`` directly to compute Pareto front, ignoring
            ``observation_features``.
        transform_outcomes_and_configs: If ``True``, will transform the optimization
            config, observation features and observation data, before calling
            ``frontier_evaluator``, then will untransform all of the above before
            returning the observations.

    Returns: Four-item tuple of:
          - frontier_observations: Observations of points on the Pareto frontier,
          - f: n x m tensor representation of the Pareto frontier values where n is the
            length of frontier_observations and m is the number of metrics,
          - obj_w: m tensor of objective weights,
          - obj_t: m tensor of objective thresholds corresponding to Y, or None if no
            objective thresholds used.
    """

    array_to_tensor = partial(_array_to_tensor, modelbridge=modelbridge)
    X, Y, Yvar = None, None, None
    if use_model_predictions:
        X = array_to_tensor(
            modelbridge.transform_observation_features(observation_features))
    if observation_data is not None:
        if transform_outcomes_and_configs:
            Y, Yvar = modelbridge.transform_observation_data(observation_data)
        else:
            Y, Yvar = observation_data_to_array(
                outcomes=modelbridge.outcomes,
                observation_data=observation_data)
        Y, Yvar = (array_to_tensor(Y), array_to_tensor(Yvar))
    if arm_names is None:
        arm_names = [None] * len(observation_features)

    # Extract optimization config: make sure that the problem is a MOO
    # problem and clone the optimization config with specified
    # `objective_thresholds` if those are provided. If `optimization_config`
    # is not specified, uses the one stored on `modelbridge`.
    optimization_config = _get_multiobjective_optimization_config(
        modelbridge=modelbridge,
        optimization_config=optimization_config,
        objective_thresholds=objective_thresholds,
    )

    # Transform optimization config.
    fixed_features = ObservationFeatures(parameters={})
    if transform_outcomes_and_configs:
        optimization_config = modelbridge.transform_optimization_config(
            optimization_config=optimization_config,
            fixed_features=fixed_features,
        )
    else:
        # de-relativize outcome constraints and objective thresholds
        obs_feats, obs_data, _ = _get_modelbridge_training_data(
            modelbridge=modelbridge)
        tf = Derelativize(
            search_space=modelbridge.model_space.clone(),
            observation_data=obs_data,
            observation_features=obs_feats,
            config={"use_raw_status_quo": True},
        )
        # pyre-ignore [9]
        optimization_config = tf.transform_optimization_config(
            optimization_config=optimization_config.clone(),
            modelbridge=modelbridge,
            fixed_features=fixed_features,
        )
    # Extract weights, constraints, and objective_thresholds
    objective_weights = extract_objective_weights(
        objective=optimization_config.objective, outcomes=modelbridge.outcomes)
    outcome_constraints = extract_outcome_constraints(
        outcome_constraints=optimization_config.outcome_constraints,
        outcomes=modelbridge.outcomes,
    )
    obj_t = extract_objective_thresholds(
        objective_thresholds=optimization_config.objective_thresholds,
        objective=optimization_config.objective,
        outcomes=modelbridge.outcomes,
    )
    obj_t = array_to_tensor(obj_t)
    # Transform to tensors.
    obj_w, oc_c, _, _, _ = validate_and_apply_final_transform(
        objective_weights=objective_weights,
        outcome_constraints=outcome_constraints,
        linear_constraints=None,
        pending_observations=None,
        final_transform=array_to_tensor,
    )
    frontier_evaluator = get_default_frontier_evaluator()
    # pyre-ignore[28]: Unexpected keyword `modelbridge` to anonymous call
    f, cov, indx = frontier_evaluator(
        model=modelbridge.model,
        X=X,
        Y=Y,
        Yvar=Yvar,
        objective_thresholds=obj_t,
        objective_weights=obj_w,
        outcome_constraints=oc_c,
    )
    f, cov = f.detach().cpu().clone(), cov.detach().cpu().clone()
    indx = indx.tolist()
    frontier_observation_data = array_to_observation_data(
        f=f.numpy(), cov=cov.numpy(), outcomes=not_none(modelbridge.outcomes))

    if use_model_predictions:
        # Untransform observations
        for t in reversed(modelbridge.transforms.values()):  # noqa T484
            frontier_observation_data = t.untransform_observation_data(
                frontier_observation_data, [])
        # reconstruct tensor representation of untransformed predictions
        Y_arr, _ = observation_data_to_array(
            outcomes=modelbridge.outcomes,
            observation_data=frontier_observation_data)
        f = _array_to_tensor(Y_arr)
    # Construct observations
    frontier_observations = []
    for i, obsd in enumerate(frontier_observation_data):
        frontier_observations.append(
            Observation(
                features=observation_features[indx[i]],
                data=obsd,
                arm_name=arm_names[indx[i]],
            ))
    return frontier_observations, f, obj_w.cpu(), obj_t.cpu()
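
For orientation, a minimal sketch of calling the helper on an already fitted multi-objective modelbridge; `moo_modelbridge` is an assumption (a fitted `ArrayModelBridge` with a `MultiObjectiveOptimizationConfig`), not something defined above.

# Hedged usage sketch; `moo_modelbridge` is an assumed fixture.
training_obs = moo_modelbridge.get_training_data()
frontier_obs, f, obj_w, obj_t = get_pareto_frontier_and_configs(
    modelbridge=moo_modelbridge,
    observation_features=[obs.features for obs in training_obs],
    arm_names=[obs.arm_name for obs in training_obs],
    use_model_predictions=True,
)
# f is an n x m tensor of frontier metric values; obj_w / obj_t are the
# objective weights and thresholds that were passed to frontier_evaluator.
for obs in frontier_obs:
    print(obs.arm_name, dict(zip(obs.data.metric_names, obs.data.means)))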
Example 16
    def test_multitask_data(self):
        experiment = get_branin_with_multi_task()
        data = experiment.fetch_data()

        observations = observations_from_data(
            experiment=experiment,
            data=data,
        )
        relative_observations = observations_from_data(
            experiment=experiment,
            data=relativize_data(
                data=data,
                status_quo_name="status_quo",
                as_percent=True,
                include_sq=True,
            ),
        )
        status_quo_row = data.df.loc[
            (data.df["arm_name"] == "status_quo") & (data.df["trial_index"] == 1)
        ]
        modelbridge = Mock(
            status_quo=Observation(
                data=ObservationData(
                    metric_names=status_quo_row["metric_name"].values,
                    means=status_quo_row["mean"].values,
                    covariance=np.array([status_quo_row["sem"].values ** 2]),
                ),
                features=ObservationFeatures(
                    parameters=experiment.status_quo.parameters
                ),
            )
        )
        obs_features = [obs.features for obs in observations]
        obs_data = [obs.data for obs in observations]
        expected_obs_data = [obs.data for obs in relative_observations]

        transform = Relativize(
            search_space=None,
            observation_features=obs_features,
            observation_data=obs_data,
            modelbridge=modelbridge,
        )
        relative_obs_data = transform.transform_observation_data(obs_data, obs_features)
        self.maxDiff = None
        # this assertion just checks that order is the same, which
        # is only important for the purposes of this test
        self.assertEqual(
            [datum.metric_names for datum in relative_obs_data],
            [datum.metric_names for datum in expected_obs_data],
        )
        means = [
            np.array([datum.means for datum in relative_obs_data]),
            np.array([datum.means for datum in expected_obs_data]),
        ]
        # `self.assertAlmostEqual(relative_obs_data, expected_obs_data)`
        # fails 1% of the time, so we check with numpy.
        self.assertTrue(
            all(np.isclose(means[0], means[1])),
            means,
        )
        covariances = [
            np.array([datum.covariance for datum in expected_obs_data]),
            np.array([datum.covariance for datum in relative_obs_data]),
        ]
        self.assertTrue(
            all(np.isclose(covariances[0], covariances[1])),
            covariances,
        )
Example 17
def get_pareto_frontier_and_transformed_configs(
    modelbridge: modelbridge_module.array.ArrayModelBridge,
    observation_features: List[ObservationFeatures],
    observation_data: Optional[List[ObservationData]] = None,
    objective_thresholds: Optional[TRefPoint] = None,
    optimization_config: Optional[MultiObjectiveOptimizationConfig] = None,
    arm_names: Optional[List[Optional[str]]] = None,
    use_model_predictions: bool = True,
) -> Tuple[List[Observation], Tensor, Tensor, Optional[Tensor]]:
    """Helper that applies transforms and calls frontier_evaluator.

    Returns transformed configs in addition to the Pareto observations.

    Args:
        modelbridge: Modelbridge used to predict metrics outcomes.
        observation_features: Observation features to predict, used when
            use_model_predictions is True.
        observation_data: Data for computing the Pareto front, unless
            observation_features are provided and use_model_predictions is True.
        objective_thresholds: Metric values bounding the region of interest in
            the objective outcome space.
        optimization_config: Optimization config.
        arm_names: Arm names for each observation.
        use_model_predictions: If True, will use model predictions at
            observation_features to compute the Pareto front. If False, will use
            observation_data directly to compute the Pareto front, regardless of
            whether observation_features are provided.

    Returns:
        frontier_observations: Observations of points on the Pareto frontier.
        f: n x m tensor representation of the Pareto frontier values where n is the
        length of frontier_observations and m is the number of metrics.
        obj_w: m tensor of objective weights.
        obj_t: m tensor of objective thresholds corresponding to Y, or None if no
        objective thresholds used.
    """

    array_to_tensor = partial(_array_to_tensor, modelbridge=modelbridge)
    X = (modelbridge.transform_observation_features(observation_features)
         if use_model_predictions else None)
    X = array_to_tensor(X) if X is not None else None
    Y, Yvar = (None, None)
    if observation_data is not None:
        Y, Yvar = modelbridge.transform_observation_data(observation_data)
        Y, Yvar = (array_to_tensor(Y), array_to_tensor(Yvar))
    if arm_names is None:
        arm_names = [None] * len(observation_features)

    # Extract the optimization config.
    mooc = optimization_config or checked_cast_optional(
        MultiObjectiveOptimizationConfig, modelbridge._optimization_config)
    if not mooc:
        raise ValueError(
            ("Experiment must have an existing optimization_config "
             "of type `MultiObjectiveOptimizationConfig` "
             "or `optimization_config` must be passed as an argument."))
    if not isinstance(mooc, MultiObjectiveOptimizationConfig):
        mooc = not_none(MultiObjectiveOptimizationConfig.from_opt_conf(mooc))
    if objective_thresholds:
        mooc = mooc.clone_with_args(objective_thresholds=objective_thresholds)

    optimization_config = mooc

    # Transform OptimizationConfig.
    optimization_config = modelbridge.transform_optimization_config(
        optimization_config=optimization_config,
        fixed_features=ObservationFeatures(parameters={}),
    )
    # Extract weights, constraints, and objective_thresholds
    objective_weights = extract_objective_weights(
        objective=optimization_config.objective, outcomes=modelbridge.outcomes)
    outcome_constraints = extract_outcome_constraints(
        outcome_constraints=optimization_config.outcome_constraints,
        outcomes=modelbridge.outcomes,
    )
    obj_t = extract_objective_thresholds(
        objective_thresholds=optimization_config.objective_thresholds,
        objective=optimization_config.objective,
        outcomes=modelbridge.outcomes,
    )
    obj_t = array_to_tensor(obj_t)
    # Transform to tensors.
    obj_w, oc_c, _, _, _ = validate_and_apply_final_transform(
        objective_weights=objective_weights,
        outcome_constraints=outcome_constraints,
        linear_constraints=None,
        pending_observations=None,
        final_transform=array_to_tensor,
    )
    frontier_evaluator = get_default_frontier_evaluator()
    # pyre-ignore[28]: Unexpected keyword `modelbridge` to anonymous call
    f, cov, indx = frontier_evaluator(
        model=modelbridge.model,
        X=X,
        Y=Y,
        Yvar=Yvar,
        objective_thresholds=obj_t,
        objective_weights=obj_w,
        outcome_constraints=oc_c,
    )
    f, cov = f.detach().cpu().clone(), cov.detach().cpu().clone()
    indx = indx.tolist()
    frontier_observation_data = array_to_observation_data(
        f=f.numpy(), cov=cov.numpy(), outcomes=not_none(modelbridge.outcomes))
    # Untransform observations
    for t in reversed(modelbridge.transforms.values()):  # noqa T484
        frontier_observation_data = t.untransform_observation_data(
            frontier_observation_data, [])
    # Construct observations
    frontier_observations = []
    for i, obsd in enumerate(frontier_observation_data):
        frontier_observations.append(
            Observation(
                features=observation_features[indx[i]],
                data=obsd,
                arm_name=arm_names[indx[i]],
            ))
    return frontier_observations, f, obj_w, obj_t
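
A sketch of overriding the stored objective thresholds when calling the helper above; the metric names "a" and "b" mirror the fixtures used earlier, `moo_modelbridge` and `obs_features` are assumed to be set up elsewhere, and the import paths for ObjectiveThreshold and ComparisonOp are assumptions that may differ by Ax version.

# Hedged sketch; fixtures and import paths are assumptions.
from ax.core.metric import Metric
from ax.core.outcome_constraint import ObjectiveThreshold
from ax.core.types import ComparisonOp

objective_thresholds = [
    ObjectiveThreshold(Metric("a"), bound=1.0, relative=False, op=ComparisonOp.GEQ),
    ObjectiveThreshold(Metric("b"), bound=-5.0, relative=False, op=ComparisonOp.GEQ),
]
frontier_obs, f, obj_w, obj_t = get_pareto_frontier_and_transformed_configs(
    modelbridge=moo_modelbridge,        # assumed: fitted multi-objective bridge
    observation_features=obs_features,  # assumed: candidate ObservationFeatures
    objective_thresholds=objective_thresholds,
)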
Example 18
class DerelativizeTransformTest(TestCase):
    def setUp(self):
        m = mock.patch.object(ModelBridge, "__abstractmethods__", frozenset())
        self.addCleanup(m.stop)
        m.start()

    @mock.patch(
        "ax.modelbridge.base.observations_from_data",
        autospec=True,
        return_value=([
            Observation(
                features=ObservationFeatures(parameters={
                    "x": 2.0,
                    "y": 10.0
                }),
                data=ObservationData(
                    means=np.array([1.0, 2.0, 6.0]),
                    covariance=np.array([[1.0, 2.0, 0.0], [3.0, 4.0, 0.0],
                                         [0.0, 0.0, 4.0]]),
                    metric_names=["a", "b", "b"],
                ),
                arm_name="1_1",
            ),
            Observation(
                features=ObservationFeatures(parameters={
                    "x": None,
                    "y": None
                }),
                data=ObservationData(
                    means=np.array([1.0, 2.0, 6.0]),
                    covariance=np.array([[1.0, 2.0, 0.0], [3.0, 4.0, 0.0],
                                         [0.0, 0.0, 4.0]]),
                    metric_names=["a", "b", "b"],
                ),
                arm_name="1_2",
            ),
        ]),
    )
    @mock.patch("ax.modelbridge.base.ModelBridge._fit", autospec=True)
    @mock.patch(
        "ax.modelbridge.base.ModelBridge._predict",
        autospec=True,
        return_value=([
            ObservationData(
                means=np.array([3.0, 5.0]),
                covariance=np.array([[1.0, 0.0], [0.0, 1.0]]),
                metric_names=["a", "b"],
            )
        ]),
    )
    def testDerelativizeTransform(self, mock_predict, mock_fit,
                                  mock_observations_from_data):
        t = Derelativize(search_space=None,
                         observation_features=None,
                         observation_data=None)

        # ModelBridge with in-design status quo
        search_space = SearchSpace(parameters=[
            RangeParameter("x", ParameterType.FLOAT, 0, 20),
            RangeParameter("y", ParameterType.FLOAT, 0, 20),
        ])
        g = ModelBridge(
            search_space=search_space,
            model=None,
            transforms=[],
            experiment=Experiment(search_space, "test"),
            data=Data(),
            status_quo_name="1_1",
        )

        # Test with no relative constraints
        objective = Objective(Metric("c"))
        oc = OptimizationConfig(
            objective=objective,
            outcome_constraints=[
                OutcomeConstraint(Metric("a"),
                                  ComparisonOp.LEQ,
                                  bound=2,
                                  relative=False)
            ],
        )
        oc2 = t.transform_optimization_config(oc, g, None)
        self.assertTrue(oc == oc2)

        # Test with relative constraint, in-design status quo
        oc = OptimizationConfig(
            objective=objective,
            outcome_constraints=[
                OutcomeConstraint(Metric("a"),
                                  ComparisonOp.LEQ,
                                  bound=2,
                                  relative=False),
                OutcomeConstraint(Metric("b"),
                                  ComparisonOp.LEQ,
                                  bound=-10,
                                  relative=True),
            ],
        )
        oc = t.transform_optimization_config(oc, g, None)
        self.assertTrue(oc.outcome_constraints == [
            OutcomeConstraint(
                Metric("a"), ComparisonOp.LEQ, bound=2, relative=False),
            OutcomeConstraint(
                Metric("b"), ComparisonOp.LEQ, bound=4.5, relative=False),
        ])
        obsf = mock_predict.mock_calls[0][1][1][0]
        obsf2 = ObservationFeatures(parameters={"x": 2.0, "y": 10.0})
        self.assertTrue(obsf == obsf2)

        # Test with relative constraint, out-of-design status quo
        mock_predict.side_effect = Exception()
        g = ModelBridge(
            search_space=search_space,
            model=None,
            transforms=[],
            experiment=Experiment(search_space, "test"),
            data=Data(),
            status_quo_name="1_2",
        )
        oc = OptimizationConfig(
            objective=objective,
            outcome_constraints=[
                OutcomeConstraint(Metric("a"),
                                  ComparisonOp.LEQ,
                                  bound=2,
                                  relative=False),
                OutcomeConstraint(Metric("b"),
                                  ComparisonOp.LEQ,
                                  bound=-10,
                                  relative=True),
            ],
        )
        oc = t.transform_optimization_config(oc, g, None)
        self.assertTrue(oc.outcome_constraints == [
            OutcomeConstraint(
                Metric("a"), ComparisonOp.LEQ, bound=2, relative=False),
            OutcomeConstraint(
                Metric("b"), ComparisonOp.LEQ, bound=3.6, relative=False),
        ])
        self.assertEqual(mock_predict.call_count, 2)

        # Raises error if predict fails with in-design status quo
        g = ModelBridge(search_space, None, [], status_quo_name="1_1")
        oc = OptimizationConfig(
            objective=objective,
            outcome_constraints=[
                OutcomeConstraint(Metric("a"),
                                  ComparisonOp.LEQ,
                                  bound=2,
                                  relative=False),
                OutcomeConstraint(Metric("b"),
                                  ComparisonOp.LEQ,
                                  bound=-10,
                                  relative=True),
            ],
        )
        with self.assertRaises(Exception):
            oc = t.transform_optimization_config(oc, g, None)

        # Raises error with relative constraint, no status quo
        exp = Experiment(search_space, "name")
        g = ModelBridge(search_space, None, [], exp)
        with self.assertRaises(ValueError):
            oc = t.transform_optimization_config(oc, g, None)

        # Raises error with relative constraint, no modelbridge
        with self.assertRaises(ValueError):
            oc = t.transform_optimization_config(oc, None, None)

    def testErrors(self):
        t = Derelativize(search_space=None,
                         observation_features=None,
                         observation_data=None)
        oc = OptimizationConfig(
            objective=Objective(Metric("c")),
            outcome_constraints=[
                OutcomeConstraint(Metric("a"),
                                  ComparisonOp.LEQ,
                                  bound=2,
                                  relative=True)
            ],
        )
        search_space = SearchSpace(
            parameters=[RangeParameter("x", ParameterType.FLOAT, 0, 20)])
        g = ModelBridge(search_space, None, [])
        with self.assertRaises(ValueError):
            t.transform_optimization_config(oc, None, None)
        with self.assertRaises(ValueError):
            t.transform_optimization_config(oc, g, None)
Example 19
def pareto_frontier(
    modelbridge: modelbridge_module.array.ArrayModelBridge,
    observation_features: List[ObservationFeatures],
    observation_data: Optional[List[ObservationData]] = None,
    objective_thresholds: Optional[TRefPoint] = None,
    optimization_config: Optional[MultiObjectiveOptimizationConfig] = None,
    arm_names: Optional[List[Optional[str]]] = None,
    use_model_predictions: bool = True,
) -> List[Observation]:
    """Helper that applies transforms and calls frontier_evaluator."""
    array_to_tensor = partial(_array_to_tensor, modelbridge=modelbridge)
    X = (
        modelbridge.transform_observation_features(observation_features)
        if use_model_predictions
        else None
    )
    X = array_to_tensor(X) if X is not None else None
    Y, Yvar = (None, None)
    if observation_data is not None:
        Y, Yvar = modelbridge.transform_observation_data(observation_data)
        Y, Yvar = (array_to_tensor(Y), array_to_tensor(Yvar))
    if arm_names is None:
        arm_names = [None] * len(observation_features)

    # Extract the optimization config.
    mooc = optimization_config or checked_cast_optional(
        MultiObjectiveOptimizationConfig, modelbridge._optimization_config
    )
    if not mooc:
        raise ValueError(
            (
                "Experiment must have an existing optimization_config "
                "of type `MultiObjectiveOptimizationConfig` "
                "or `optimization_config` must be passed as an argument."
            )
        )
    if not isinstance(mooc, MultiObjectiveOptimizationConfig):
        mooc = not_none(MultiObjectiveOptimizationConfig.from_opt_conf(mooc))
    if objective_thresholds:
        mooc = mooc.clone_with_args(objective_thresholds=objective_thresholds)

    optimization_config = mooc

    # Transform OptimizationConfig.
    optimization_config = modelbridge.transform_optimization_config(
        optimization_config=optimization_config,
        fixed_features=ObservationFeatures(parameters={}),
    )
    # Extract weights, constraints, and objective_thresholds
    objective_weights = extract_objective_weights(
        objective=optimization_config.objective, outcomes=modelbridge.outcomes
    )
    outcome_constraints = extract_outcome_constraints(
        outcome_constraints=optimization_config.outcome_constraints,
        outcomes=modelbridge.outcomes,
    )
    objective_thresholds_arr = extract_objective_thresholds(
        objective_thresholds=optimization_config.objective_thresholds,
        outcomes=modelbridge.outcomes,
    )
    # Transform to tensors.
    obj_w, oc_c, _, _ = validate_and_apply_final_transform(
        objective_weights=objective_weights,
        outcome_constraints=outcome_constraints,
        linear_constraints=None,
        pending_observations=None,
        final_transform=array_to_tensor,
    )
    obj_t = array_to_tensor(objective_thresholds_arr)
    frontier_evaluator = get_default_frontier_evaluator()
    # pyre-ignore[28]: Unexpected keyword `modelbridge` to anonymous call
    f, cov, indx = frontier_evaluator(
        model=modelbridge.model,
        X=X,
        Y=Y,
        Yvar=Yvar,
        objective_thresholds=obj_t,
        objective_weights=obj_w,
        outcome_constraints=oc_c,
    )
    f, cov = f.detach().cpu().clone().numpy(), cov.detach().cpu().clone().numpy()
    indx = indx.tolist()
    frontier_observation_data = array_to_observation_data(
        f=f, cov=cov, outcomes=not_none(modelbridge.outcomes)
    )
    # Untransform observations
    for t in reversed(modelbridge.transforms.values()):  # noqa T484
        frontier_observation_data = t.untransform_observation_data(
            frontier_observation_data, []
        )
    # Construct observations
    frontier_observations = []
    for i, obsd in enumerate(frontier_observation_data):
        frontier_observations.append(
            Observation(
                features=observation_features[indx[i]],
                data=obsd,
                arm_name=arm_names[indx[i]],
            )
        )
    return frontier_observations
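
To close, a sketch of computing the frontier from observed rather than predicted outcomes; `moo_modelbridge` and `observations` (the Observation objects of interest, for example the modelbridge's training data) are assumptions about the surrounding setup.

# Hedged sketch; uses observed data directly instead of model predictions.
frontier = pareto_frontier(
    modelbridge=moo_modelbridge,  # assumed: fitted multi-objective modelbridge
    observation_features=[obs.features for obs in observations],
    observation_data=[obs.data for obs in observations],
    arm_names=[obs.arm_name for obs in observations],
    use_model_predictions=False,
)
for obs in frontier:
    print(obs.arm_name, dict(zip(obs.data.metric_names, obs.data.means)))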