Example #1
0
def get_observation1() -> Observation:
    """Build a fixed Observation fixture for arm "1_1" at trial index 0."""
    features = ObservationFeatures(
        parameters={"x": 2.0, "y": 10.0},
        trial_index=np.int64(0),
    )
    data = ObservationData(
        means=np.array([2.0, 4.0]),
        covariance=np.array([[1.0, 2.0], [3.0, 4.0]]),
        metric_names=["a", "b"],
    )
    return Observation(features=features, data=data, arm_name="1_1")
Example #2
0
 def test_cast_observation_features(self):
     """Casting saves the full parameterization in metadata and casts the
     actual parameterization to the hierarchical search space."""
     flat_feats = ObservationFeatures.from_arm(arm=self.hss_1_arm_1_flat)
     cast_feats = self.hss_1.cast_observation_features(
         observation_features=flat_feats)
     expected = ObservationFeatures.from_arm(arm=self.hss_1_arm_1_cast)
     # Parameters should match features built from the cast arm.
     self.assertEqual(cast_feats.parameters, expected.parameters)
     # The full (pre-cast) parameterization is preserved in metadata.
     self.assertEqual(
         cast_feats.metadata.get(Keys.FULL_PARAMETERIZATION),
         flat_feats.parameters,
     )
     # With metadata stripped, the cast features equal features built
     # directly from the cast arm -- only parameters and metadata are
     # manipulated during casting.
     cast_feats.metadata = None
     self.assertEqual(cast_feats, expected)
Example #3
0
def _get_contour_predictions(
    model: ModelBridge,
    x_param_name: str,
    y_param_name: str,
    metric: str,
    generator_runs_dict: TNullableGeneratorRunsDict,
    density: int,
    slice_values: Optional[Dict[str, Any]] = None,
    fixed_features: Optional[ObservationFeatures] = None,
) -> ContourPredictions:
    """Predict ``metric`` over a ``density`` x ``density`` grid spanning two
    parameters of the model's search space.

    slice_values is a dictionary {param_name: value} for the parameters that
    are being sliced on. NOTE: when ``fixed_features`` is provided, its
    parameters replace ``slice_values`` entirely (see below).
    """
    # Look up the two parameters being plotted on the axes.
    x_param = get_range_parameter(model, x_param_name)
    y_param = get_range_parameter(model, y_param_name)

    plot_data, _, _ = get_plot_data(
        model, generator_runs_dict or {}, {metric}, fixed_features=fixed_features
    )

    # 1-D value grids along each axis, plus per-axis scale flags for the plot.
    grid_x = get_grid_for_parameter(x_param, density)
    grid_y = get_grid_for_parameter(y_param, density)
    scales = {"x": x_param.log_scale, "y": y_param.log_scale}

    grid2_x, grid2_y = np.meshgrid(grid_x, grid_y)

    # Flatten to density**2 coordinate pairs for pointwise prediction.
    grid2_x = grid2_x.flatten()
    grid2_y = grid2_y.flatten()

    # Fixed features, when given, override any caller-provided slice_values;
    # otherwise an empty ObservationFeatures serves as the template.
    if fixed_features is not None:
        slice_values = fixed_features.parameters
    else:
        fixed_features = ObservationFeatures(parameters={})

    # Values for all non-plotted parameters.
    fixed_values = get_fixed_values(model, slice_values)

    # Build one ObservationFeatures per grid point. The template is deep-
    # copied so non-parameter fields of fixed_features carry over to each
    # prediction point; its parameters are then fully replaced.
    param_grid_obsf = []
    for i in range(density ** 2):
        predf = deepcopy(fixed_features)
        predf.parameters = fixed_values.copy()
        predf.parameters[x_param_name] = grid2_x[i]
        predf.parameters[y_param_name] = grid2_y[i]
        param_grid_obsf.append(predf)

    mu, cov = model.predict(param_grid_obsf)

    # Mean and standard deviation (sqrt of the metric's own variance entry).
    f_plt = mu[metric]
    sd_plt = np.sqrt(cov[metric][metric])
    return plot_data, f_plt, sd_plt, grid_x, grid_y, scales
Example #4
0
def _get_out_of_sample_arms(
    model: ModelBridge,
    generator_runs_dict: Dict[str, GeneratorRun],
    metric_names: Set[str],
    fixed_features: Optional[ObservationFeatures] = None,
) -> Dict[str, Dict[str, PlotOutOfSampleArm]]:
    """Get out-of-sample predictions from a model given a dict of generator runs.

    Fixed features input can be used to override fields of the candidate arms
    when making model predictions.

    Args:
        model: The model.
        generator_runs_dict: a mapping from generator run name to generator run.
        metric_names: metrics to include in the plot.
        fixed_features: optional features merged into each arm's features
            before predicting.

    Returns:
        A mapping from name to a mapping from arm name to plot.
    """
    predictions: Dict[str, Dict[str, PlotOutOfSampleArm]] = {}
    for run_name, gen_run in generator_runs_dict.items():
        arm_plots: Dict[str, PlotOutOfSampleArm] = {}
        for arm in gen_run.arms:
            # This assumes context is None.
            obs_feats = ObservationFeatures.from_arm(arm)
            if fixed_features is not None:
                obs_feats.update_features(fixed_features)

            # Make a prediction.
            try:
                y_hat, se_hat = _predict_at_point(model, obs_feats, metric_names)
            except Exception:
                if model.model_space.check_membership(obs_feats.parameters):
                    # In-design arm: the prediction should have worked.
                    raise
                # Out-of-design arm: skip this point.
                continue
            name = arm.name_or_short_signature
            arm_plots[name] = PlotOutOfSampleArm(
                name=name,
                parameters=obs_feats.parameters,
                y_hat=y_hat,
                se_hat=se_hat,
                context_stratum=None,
            )
        predictions[run_name] = arm_plots
    return predictions
Example #5
0
 def testTransformObservationFeatures(self):
     """Full round-trip and partial transforms of observation features."""
     original = self.observation_features
     transformed = self.t.transform_observation_features(deepcopy(original))
     self.assertEqual(
         transformed,
         [ObservationFeatures(parameters=self.expected_transformed_params)],
     )
     # Untransform restores the original features.
     self.assertEqual(
         self.t.untransform_observation_features(transformed), original)
     # Transform on partial features touches only the present parameters.
     partial = self.t.transform_observation_features(
         [ObservationFeatures(parameters={"x": 2.2, "b": 10.0})])
     self.assertEqual(
         partial[0],
         ObservationFeatures(
             parameters={"x": 2.2,
                         "b": self.expected_transformed_params["b"]}),
     )
     # Empty features pass through untouched.
     empty = self.t.transform_observation_features([ObservationFeatures({})])
     self.assertEqual(empty[0], ObservationFeatures({}))
Example #6
0
 def testTransformObservationFeatures(self):
     """One-hot encoding round-trips and applies to partial features."""
     original = [self.observation_features]
     encoded = self.t.transform_observation_features(deepcopy(original))
     self.assertEqual(encoded, [self.transformed_features])
     # Untransform restores the original features.
     self.assertEqual(
         self.t.untransform_observation_features(encoded), original)
     # Partial transform: "b" expands into its three one-hot columns.
     partial = self.t.transform_observation_features(
         [ObservationFeatures(parameters={"x": 2.2, "b": "b"})])
     expected = ObservationFeatures(
         parameters={
             "x": 2.2,
             "b" + OH_PARAM_INFIX + "_0": 0,
             "b" + OH_PARAM_INFIX + "_1": 1,
             "b" + OH_PARAM_INFIX + "_2": 0,
         }
     )
     self.assertEqual(partial[0], expected)
     # Empty features pass through untouched.
     empty = self.t.transform_observation_features([ObservationFeatures({})])
     self.assertEqual(empty[0], ObservationFeatures({}))
Example #7
0
 def testTransformObservationFeatures(self):
     """Log transform maps x -> log10(x), leaves other parameters alone,
     and round-trips back to the original features."""
     original = [
         ObservationFeatures(parameters={"x": 2.2, "a": 2, "b": "c"})
     ]
     transformed = self.t.transform_observation_features(deepcopy(original))
     expected = ObservationFeatures(
         parameters={"x": math.log10(2.2), "a": 2, "b": "c"})
     self.assertEqual(transformed, [expected])
     # The transformed value is a plain float.
     self.assertTrue(isinstance(transformed[0].parameters["x"], float))
     # Untransform restores the original features.
     self.assertEqual(
         self.t.untransform_observation_features(transformed), original)
Example #8
0
def observation2() -> Observation:
    """Build a fixed Observation fixture for arm "1_1" at trial index 1."""
    features = ObservationFeatures(
        parameters={"x": 3.0, "y": 2.0},
        trial_index=1,
    )
    data = ObservationData(
        means=np.array([2.0, 1.0]),
        covariance=np.array([[2.0, 3.0], [4.0, 5.0]]),
        metric_names=["a", "b"],
    )
    return Observation(features=features, data=data, arm_name="1_1")
Example #9
0
def get_pending_observation_features_based_on_trial_status(
    experiment: Experiment,
) -> Optional[Dict[str, List[ObservationFeatures]]]:
    """A faster analogue of ``get_pending_observation_features`` that makes
    assumptions about trials in experiment in order to speed up extraction
    of pending points.

    Assumptions:

    * All arms in all trials in ``STAGED,`` ``RUNNING`` and ``ABANDONED``
      statuses are to be considered pending for all outcomes.
    * All arms in all trials in other statuses are to be considered not
      pending for all outcomes.

    This entails:

    * No actual data-fetching for trials to determine whether arms in them
      are pending for specific outcomes.
    * Even if data is present for some outcomes in ``RUNNING`` trials, their
      arms will still be considered pending for those outcomes.

    NOTE: This function should not be used to extract pending features in
    field experiments, where arms in running trials should not be considered
    pending if there is data for those arms.

    Args:
        experiment: Experiment, pending features on which we seek to compute.

    Returns:
        An optional mapping from metric names to a list of observation
        features, pending for that metric (i.e. do not have evaluation data
        for that metric). If there are no pending features for any of the
        metrics, return is None.
    """
    pending: Dict[str, List[ObservationFeatures]] = defaultdict(list)
    pending_statuses = (
        TrialStatus.STAGED,
        TrialStatus.RUNNING,
        TrialStatus.ABANDONED,
    )
    for status in pending_statuses:
        for trial in experiment.trials_by_status[status]:
            for metric_name in experiment.metrics:
                # Fresh ObservationFeatures per metric, so downstream
                # mutation of one metric's list cannot affect another's.
                for arm in trial.arms:
                    pending[metric_name].append(
                        ObservationFeatures.from_arm(
                            arm=arm,
                            trial_index=np.int64(trial.index),
                            metadata=trial._get_candidate_metadata(
                                arm_name=arm.name),
                        )
                    )

    if any(feats for feats in pending.values()):
        return dict(pending)
    return None
Example #10
0
 def setUp(self) -> None:
     """Create experiment/trial fixtures and the corresponding observation
     features, including a hierarchical search space (HSS) setup."""
     # Simple experiment with a single one-arm trial.
     self.experiment = get_experiment()
     self.arm = Arm({"x": 1, "y": "foo", "z": True, "w": 4})
     self.trial = self.experiment.new_trial(GeneratorRun([self.arm]))
     # Second experiment with a batch trial that includes the status quo.
     self.experiment_2 = get_experiment()
     self.batch_trial = self.experiment_2.new_batch_trial(
         GeneratorRun([self.arm]))
     self.batch_trial.set_status_quo_with_weight(
         self.experiment_2.status_quo, 1)
     # Observation features for the simple trial's arm.
     self.obs_feat = ObservationFeatures.from_arm(arm=self.trial.arm,
                                                  trial_index=np.int64(
                                                      self.trial.index))
     # HSS experiment with one Sobol-generated trial.
     self.hss_exp = get_hierarchical_search_space_experiment()
     self.hss_sobol = Models.SOBOL(search_space=self.hss_exp.search_space)
     self.hss_gr = self.hss_sobol.gen(n=1)
     self.hss_trial = self.hss_exp.new_trial(self.hss_gr)
     self.hss_arm = not_none(self.hss_trial.arm)
     self.hss_cand_metadata = self.hss_trial._get_candidate_metadata(
         arm_name=self.hss_arm.name)
     self.hss_full_parameterization = self.hss_cand_metadata.get(
         Keys.FULL_PARAMETERIZATION).copy()
     # Sanity check: the stored full parameterization covers every
     # parameter of the HSS.
     self.assertTrue(
         all(p_name in self.hss_full_parameterization
             for p_name in self.hss_exp.search_space.parameters))
     # Features for the HSS arm, with its candidate metadata attached.
     self.hss_obs_feat = ObservationFeatures.from_arm(
         arm=self.hss_arm,
         trial_index=np.int64(self.hss_trial.index),
         metadata=self.hss_cand_metadata,
     )
     # Features for an arm built from the full (flat) parameterization.
     self.hss_obs_feat_all_params = ObservationFeatures.from_arm(
         arm=Arm(self.hss_full_parameterization),
         trial_index=np.int64(self.hss_trial.index),
         metadata={
             Keys.FULL_PARAMETERIZATION: self.hss_full_parameterization
         },
     )
Example #11
0
    def testClone(self):
        """clone() returns a distinct but equal copy; replace_parameters
        swaps in a new parameterization."""
        base = ObservationFeatures.from_arm(
            Arm({"x": 0, "y": "a"}), trial_index=3)
        duplicate = base.clone()
        self.assertIsNot(base, duplicate)
        self.assertEqual(base, duplicate)

        # Cloning with swapped parameters yields different features...
        swapped = base.clone(replace_parameters={"x": 1, "y": "b"})
        self.assertNotEqual(base, swapped)
        # ...equal to the original once its parameters are updated in place.
        base.parameters = {"x": 1, "y": "b"}
        self.assertEqual(base, swapped)
Example #12
0
 def testSeparateObservations(self):
     """separate_observations leaves the input observation unchanged,
     both with and without copying."""
     obs = Observation(
         features=ObservationFeatures(parameters={"x": 20}),
         data=ObservationData(
             means=np.array([1]),
             covariance=np.array([[2]]),
             metric_names=["a"],
         ),
         arm_name="0_0",
     )
     for kwargs in ({}, {"copy": True}):
         obs_feats, obs_data = separate_observations(
             observations=[obs], **kwargs)
         # The source observation must be untouched either way.
         self.assertEqual(
             obs.features, ObservationFeatures(parameters={"x": 20}))
         self.assertEqual(
             obs.data,
             ObservationData(
                 means=np.array([1]),
                 covariance=np.array([[2]]),
                 metric_names=["a"],
             ),
         )
Example #13
0
 def test_untransform_observation_features(self):
     """Untransforming uncasted values applies proper typing and rounding."""
     raw = [
         ObservationFeatures(
             parameters={"a": 1, "b": 2.3466789, "c": "a", "d": 2.0})
     ]
     result = self.t.untransform_observation_features(raw)
     # "a" becomes a float, "b" is rounded to 2.35, "d" becomes an int.
     expected = ObservationFeatures(
         parameters={"a": 1.0, "b": 2.35, "c": "a", "d": 2})
     self.assertEqual(result, [expected])
Example #14
0
 def test_transform_observation_features(self):
     """Transforming already-casted features is a no-op in both directions."""
     casted = [
         ObservationFeatures(
             parameters={"a": 1.2345, "b": 2.34, "c": "a", "d": 2})
     ]
     round_tripped = self.t.transform_observation_features(deepcopy(casted))
     self.assertEqual(round_tripped, casted)
     round_tripped = self.t.untransform_observation_features(round_tripped)
     self.assertEqual(round_tripped, casted)
Example #15
0
    def testTransformObservationFeatures(self):
        """Int parameters become floats on transform and round back to ints
        on untransform; partial and empty features are handled too."""
        original = [
            ObservationFeatures(
                parameters={"x": 2.2, "a": 2, "b": "b", "d": 4})
        ]
        transformed = self.t.transform_observation_features(
            deepcopy(original))
        # Values are unchanged...
        self.assertEqual(
            transformed,
            [ObservationFeatures(
                parameters={"x": 2.2, "a": 2, "b": "b", "d": 4})],
        )
        # ...but the int parameters now carry float values.
        self.assertTrue(isinstance(transformed[0].parameters["a"], float))
        self.assertTrue(isinstance(transformed[0].parameters["d"], float))
        self.assertEqual(
            self.t.untransform_observation_features(transformed), original)

        # Let the transformed space be a float, verify it becomes an int.
        float_feats = [
            ObservationFeatures(
                parameters={"x": 2.2, "a": 2.2, "b": "b", "d": 3.8})
        ]
        self.assertEqual(
            self.t.untransform_observation_features(float_feats), original)

        # Forward transform on a partial parameterization.
        partial = self.t.transform_observation_features(
            [ObservationFeatures(parameters={"x": 2.2, "d": 4})])
        self.assertEqual(
            partial,
            [ObservationFeatures(parameters={"x": 2.2, "d": 4})])
        self.assertTrue(isinstance(partial[0].parameters["d"], float))
        # Empty features pass through untouched.
        empty = self.t.transform_observation_features(
            [ObservationFeatures({})])
        self.assertEqual(empty[0], ObservationFeatures({}))
    def setUp(self):
        """Build a single-RangeParameter search space and three TrialAsTask
        transforms: default config, explicit trial-level maps, and an empty
        trial_level_map."""
        self.search_space = SearchSpace(
            parameters=[
                RangeParameter(
                    "x", lower=1, upper=4, parameter_type=ParameterType.FLOAT
                )
            ]
        )
        # Four observations spread over three trial indices (0, 0, 1, 2).
        self.training_feats = [
            ObservationFeatures({"x": 1}, trial_index=0),
            ObservationFeatures({"x": 2}, trial_index=0),
            ObservationFeatures({"x": 3}, trial_index=1),
            ObservationFeatures({"x": 4}, trial_index=2),
        ]
        # TrialAsTask with its default (no explicit) config.
        self.t = TrialAsTask(
            search_space=self.search_space,
            observation_features=self.training_feats,
            observation_data=None,
        )
        # Explicit trial -> level maps for two task parameters, bp1 and bp2.
        self.bm = {
            "bp1": {0: "v1", 1: "v2", 2: "v3"},
            "bp2": {0: "u1", 1: "u1", 2: "u2"},
        }

        self.t2 = TrialAsTask(
            search_space=self.search_space,
            observation_features=self.training_feats,
            observation_data=None,
            config={"trial_level_map": self.bm},
        )
        # Degenerate config: an empty trial_level_map.
        self.t3 = TrialAsTask(
            search_space=self.search_space,
            observation_features=self.training_feats,
            observation_data=None,
            config={"trial_level_map": {}},
        )
Example #17
0
    def testTransformObservationFeatures(self):
        """Points without None pass through; any None strips all params."""
        # No None values: features are left untouched.
        complete = [
            ObservationFeatures(parameters={"a": 2.2, "b": "b", "c": "a"})
        ]
        transformed = self.t.transform_observation_features(
            deepcopy(complete))
        self.assertEqual(transformed, complete)

        # A single None value strips the entire parameterization.
        with_none = [
            ObservationFeatures(parameters={"a": 2.2, "b": "b", "c": None})
        ]
        transformed = self.t.transform_observation_features(
            deepcopy(with_none))
        self.assertEqual(transformed, [ObservationFeatures(parameters={})])
Example #18
0
def get_observation2trans(first_metric_name: str = "a",
                          second_metric_name: str = "b") -> Observation:
    """Build a fixed Observation fixture for arm "1_1" at trial index 1
    (name suggests the transformed counterpart of observation 2).

    Args:
        first_metric_name: Name reported for the first metric.
        second_metric_name: Name reported for the second metric.

    Returns:
        An Observation with means [9.0, 4.0] for the two metrics.
    """
    return Observation(
        features=ObservationFeatures(parameters={
            "x": 16.0,
            "y": 2.0
        },
                                     trial_index=np.int64(1)),
        data=ObservationData(
            means=np.array([9.0, 4.0]),
            covariance=np.array([[2.0, 3.0], [4.0, 5.0]]),
            metric_names=[first_metric_name, second_metric_name],
        ),
        arm_name="1_1",
    )
Example #19
0
def ei_new_arm(model, new_configs):
    """Select the config whose predicted mean vector has the smallest norm.

    NOTE(review): the previous docstring said "maximum sample point", but
    the ``argmin`` below selects the minimum-norm prediction -- confirm the
    intended direction with the function's callers.

    Args:
        model: A fitted model exposing ``predict``.
        new_configs: Mapping from arm name to parameterization dict.

    Returns:
        The key in ``new_configs`` whose prediction has the smallest norm.
    """
    # TODO: use better prediction minimum
    obs_feats = [
        ObservationFeatures(parameters=i) for i in new_configs.values()
    ]
    f_mean, _ = model.predict(obs_feats)
    # Stack per-metric predictions: columns index the candidate configs.
    predicted_values = stack(list(f_mean.values()))
    # Candidate whose prediction vector (one column) has the smallest norm.
    min_pred_idx = argmin([
        norm(predicted_values[:, i]) for i in range(predicted_values.shape[1])
    ])
    # Keys of new_configs are in the same order as obs_feats was built.
    new_arm = list(new_configs)[min_pred_idx]
    return new_arm
Example #20
0
 def setUp(self):
     """Set up StratifiedStandardizeY with two observations that contain
     repeated metric names, stratified on choice parameter "z"."""
     # First observation: metric m2 appears twice.
     self.obsd1 = ObservationData(
         metric_names=["m1", "m2", "m2"],
         means=np.array([1.0, 2.0, 8.0]),
         covariance=np.array([[1.0, 0.2, 0.4], [0.2, 2.0, 0.8], [0.4, 0.8, 3.0]]),
     )
     # Second observation: both m1 and m2 appear twice.
     self.obsd2 = ObservationData(
         metric_names=["m1", "m1", "m2", "m2"],
         means=np.array([1.0, 5.0, 2.0, 1.0]),
         covariance=np.array(
             [
                 [1.0, 0.0, 0.0, 0.0],
                 [0.0, 1.0, 0.2, 0.4],
                 [0.0, 0.2, 2.0, 0.8],
                 [0.0, 0.4, 0.8, 3.0],
             ]
         ),
     )
     # One float range parameter plus the stratification parameter "z".
     self.search_space = SearchSpace(
         parameters=[
             RangeParameter(
                 name="x", parameter_type=ParameterType.FLOAT, lower=0, upper=10
             ),
             ChoiceParameter(
                 name="z", parameter_type=ParameterType.STRING, values=["a", "b"]
             ),
         ]
     )
     # One observation in each stratum of "z".
     self.obsf1 = ObservationFeatures({"x": 2, "z": "a"})
     self.obsf2 = ObservationFeatures({"x": 5, "z": "b"})
     # Stratify on parameter "z" (see config).
     self.t = StratifiedStandardizeY(
         search_space=self.search_space,
         observation_features=[self.obsf1, self.obsf2],
         observation_data=[self.obsd1, self.obsd2],
         config={"parameter_name": "z"},
     )
 def setUp(self):
     """Set up ChoiceEncode over a search space mixing range parameters,
     ordered float choice parameters, and a string choice parameter."""
     self.search_space = SearchSpace(
         parameters=[
             RangeParameter(
                 "x", lower=1, upper=3, parameter_type=ParameterType.FLOAT
             ),
             RangeParameter("a", lower=1, upper=2, parameter_type=ParameterType.INT),
             ChoiceParameter(
                 "b",
                 parameter_type=ParameterType.FLOAT,
                 values=[1.0, 10.0, 100.0],
                 is_ordered=True,
             ),
             ChoiceParameter(
                 "c",
                 parameter_type=ParameterType.FLOAT,
                 values=[10.0, 100.0, 1000.0],
                 is_ordered=True,
             ),
             ChoiceParameter(
                 "d", parameter_type=ParameterType.STRING, values=["r", "q", "z"]
             ),
         ],
         parameter_constraints=[
             ParameterConstraint(constraint_dict={"x": -0.5, "a": 1}, bound=0.5)
         ],
     )
     # Transform under test; built from the search space alone.
     self.t = ChoiceEncode(
         search_space=self.search_space,
         observation_features=None,
         observation_data=None,
     )
     # One fully-specified observation used by the transform tests.
     self.observation_features = [
         ObservationFeatures(
             parameters={"x": 2.2, "a": 2, "b": 10.0, "c": 100.0, "d": "r"}
         )
     ]
     # expected parameters after transform
     self.expected_transformed_params = {
         "x": 2.2,
         "a": 2,
         # float choice originally; transformed to int index.
         "b": 1,
         # float choice originally; transformed to int index.
         "c": 1,
         # string choice originally; transformed to int index.
         "d": 0,
     }
def observation_status_quo1() -> Observation:
    """Build a status-quo Observation fixture (arm "0_0", trial index 1)."""
    features = ObservationFeatures(
        parameters={"w": 0.85, "x": 1, "y": "baz", "z": False},
        trial_index=1,
    )
    data = ObservationData(
        means=np.array([2.0, 4.0]),
        covariance=np.array([[1.0, 2.0], [3.0, 4.0]]),
        metric_names=["a", "b"],
    )
    return Observation(features=features, data=data, arm_name="0_0")
Example #23
0
 def test_get_pending_observation_features_batch_trial(self):
     """Once a batch trial is running, all of its arms -- including the
     status quo -- are pending for every metric."""
     self.assertIsNone(get_pending_observation_features(self.experiment_2))
     self.batch_trial.mark_running(no_runner_required=True)
     sq_features = ObservationFeatures.from_arm(
         self.batch_trial.arms_by_name.get("status_quo"),
         trial_index=self.batch_trial.index,
     )
     expected_pending = [self.obs_feat, sq_features]
     self.assertEqual(
         get_pending_observation_features(self.experiment_2),
         {name: expected_pending for name in ("tracking", "m2", "m1")},
     )
Example #24
0
    def _get_transformed_gen_args(
        self,
        search_space: SearchSpace,
        optimization_config: Optional[OptimizationConfig] = None,
        pending_observations: Optional[Dict[str,
                                            List[ObservationFeatures]]] = None,
        fixed_features: Optional[ObservationFeatures] = None,
    ) -> BaseGenArgs:
        """Apply this bridge's transforms, in order, to all generation inputs.

        Args:
            search_space: Search space to transform.
            optimization_config: Config to transform; when None, a clone of
                this bridge's own config (if any) is used instead.
            pending_observations: Per-metric pending observation features.
            fixed_features: Features to hold fixed during generation.

        Returns:
            BaseGenArgs bundling the fully transformed inputs.
        """
        # Normalize optional inputs so every transform sees real objects.
        if pending_observations is None:
            pending_observations = {}
        if fixed_features is None:
            fixed_features = ObservationFeatures({})

        # Always work on a clone so the caller's config is never mutated.
        if optimization_config is None:
            optimization_config = (self._optimization_config.clone()
                                   if self._optimization_config is not None
                                   else None)
        else:
            optimization_config = optimization_config.clone()

        # TODO(T34225037): replace deepcopy with native clone() in Ax
        # Deep-copy so the transforms below cannot mutate caller state.
        pending_observations = deepcopy(pending_observations)
        fixed_features = deepcopy(fixed_features)

        # Transform. Order matters: each transform consumes the previous
        # one's output, and fixed_features is transformed last within each
        # step so transform_optimization_config sees the pre-step features.
        for t in self.transforms.values():
            search_space = t.transform_search_space(search_space)
            if optimization_config is not None:
                optimization_config = t.transform_optimization_config(
                    optimization_config=optimization_config,
                    modelbridge=self,
                    fixed_features=fixed_features,
                )
            for metric, po in pending_observations.items():
                pending_observations[
                    metric] = t.transform_observation_features(po)
            fixed_features = t.transform_observation_features([fixed_features
                                                               ])[0]
        return BaseGenArgs(
            search_space=search_space,
            # pyre-fixme[6]: Expected `OptimizationConfig` for 2nd param but got
            #  `Optional[OptimizationConfig]`.
            optimization_config=optimization_config,
            pending_observations=pending_observations,
            fixed_features=fixed_features,
        )
Example #25
0
 def testUpdate(self, mock_init):
     """_update after _fit keeps parameter and outcome bookkeeping intact."""
     bridge = DiscreteModelBridge()
     bridge._training_data = self.observations
     model = mock.create_autospec(DiscreteModel, instance=True)
     bridge._fit(model, self.search_space, self.observation_features,
                 self.observation_data)
     # One new observation, reporting metric "a" only.
     extra_features = ObservationFeatures(
         parameters={"x": 0, "y": "bar", "z": True})
     extra_data = ObservationData(metric_names=["a"],
                                  means=np.array([3.0]),
                                  covariance=np.array([[3.0]]))
     bridge._update([extra_features], [extra_data])
     self.assertEqual(bridge.parameters, ["x", "y", "z"])
     self.assertEqual(sorted(bridge.outcomes), ["a", "b"])
Example #26
0
def parse_observation_features(
        X: np.ndarray, param_names: List[str]) -> List[ObservationFeatures]:
    """Re-format raw model-generated candidates into ObservationFeatures.

    Args:
        X: Raw np.ndarray of candidate values, one row per candidate.
        param_names: List of param names, in the same order as the
            columns of ``X``.

    Returns:
        List of candidates, represented as ObservationFeatures.
    """
    # One ObservationFeatures per row of X. Indexing (rather than zip)
    # preserves the original behavior of raising if a row is shorter
    # than param_names.
    return [
        ObservationFeatures(
            parameters={p: x[i] for i, p in enumerate(param_names)})
        for x in X
    ]
Example #27
0
 def testGenWithDefaults(self, _, mock_gen):
     """gen(1) with no extra arguments fills in the default gen args."""
     exp = get_experiment()
     exp.optimization_config = get_optimization_config()
     search_space = search_space_for_range_value()
     bridge = ModelBridge(search_space, None, [], exp)
     bridge.gen(1)
     expected_config = OptimizationConfig(
         objective=Objective(metric=Metric("test_metric"), minimize=False),
         outcome_constraints=[],
     )
     mock_gen.assert_called_with(
         bridge,
         n=1,
         search_space=search_space,
         fixed_features=ObservationFeatures(parameters={}),
         model_gen_options=None,
         optimization_config=expected_config,
         pending_observations={},
     )
Example #28
0
def get_observation_status_quo1(first_metric_name: str = "a",
                                second_metric_name: str = "b") -> Observation:
    """Build a status-quo Observation fixture (arm "0_0", trial index 1).

    Args:
        first_metric_name: Name reported for the first metric.
        second_metric_name: Name reported for the second metric.

    Returns:
        An Observation with means [2.0, 4.0] for the two metrics.
    """
    return Observation(
        features=ObservationFeatures(
            parameters={
                "w": 0.85,
                "x": 1,
                "y": "baz",
                "z": False
            },
            trial_index=np.int64(1),
        ),
        data=ObservationData(
            means=np.array([2.0, 4.0]),
            covariance=np.array([[1.0, 2.0], [3.0, 4.0]]),
            metric_names=[first_metric_name, second_metric_name],
        ),
        arm_name="0_0",
    )
 def test_relativize_transform_requires_a_modelbridge_to_have_status_quo_data(self):
     """Relativize must reject transforming data when the modelbridge has
     no status quo data."""
     sobol = Models.SOBOL(search_space=get_search_space())
     self.assertIsNone(sobol.status_quo)
     with self.assertRaisesRegex(ValueError, "status quo data"):
         Relativize(
             search_space=None,
             observation_features=[],
             observation_data=[],
             modelbridge=sobol,
         ).transform_observation_data(
             observation_data=[
                 ObservationData(
                     metric_names=["foo"],
                     means=np.array([2]),
                     covariance=np.array([[0.1]]),
                 )
             ],
             observation_features=[
                 ObservationFeatures(parameters={"x": 1})
             ],
         )
Example #30
0
    def testSetStatusQuoMultipleObs(self, mock_fit, mock_observations_from_data):
        """With repeated arms across trials, the modelbridge's status quo is
        taken from the status quo of the last trial."""
        exp = get_experiment_with_repeated_arms(2)

        trial_index = 1
        status_quo_features = ObservationFeatures(
            parameters=exp.trials[trial_index].status_quo.parameters,
            trial_index=trial_index,
        )
        modelbridge = ModelBridge(
            get_search_space_for_value(),
            0,
            [],
            exp,
            0,
            status_quo_features=status_quo_features,
        )
        # Check that for experiments with many trials the status quo is set
        # to the value of the status quo of the last trial.
        # NOTE(review): this guard silently skips the assertion when there
        # are no trials; idiomatic truthiness replaces `len(...) >= 1`.
        if exp.trials:
            self.assertEqual(
                modelbridge.status_quo, get_observation_status_quo1())