def testSetTrainingDataDupFeatures(self, mock_fit, mock_observations_from_data):
     # Raises an error if there are repeated features in the observations.
     with self.assertRaises(ValueError):
         ModelBridge(
             get_search_space_for_value(),
             0,
             [],
             get_experiment_for_value(),
             0,
             status_quo_name="1_1",
         )
Example #2
def _get_contour_predictions(
    model: ModelBridge,
    x_param_name: str,
    y_param_name: str,
    metric: str,
    generator_runs_dict: TNullableGeneratorRunsDict,
    density: int,
    slice_values: Optional[Dict[str, Any]] = None,
    fixed_features: Optional[ObservationFeatures] = None,
) -> ContourPredictions:
    """
    slice_values is a dictionary {param_name: value} for the parameters that
    are being sliced on.
    """
    x_param = get_range_parameter(model, x_param_name)
    y_param = get_range_parameter(model, y_param_name)

    plot_data, _, _ = get_plot_data(model,
                                    generator_runs_dict or {}, {metric},
                                    fixed_features=fixed_features)

    grid_x = get_grid_for_parameter(x_param, density)
    grid_y = get_grid_for_parameter(y_param, density)
    scales = {"x": x_param.log_scale, "y": y_param.log_scale}

    grid2_x, grid2_y = np.meshgrid(grid_x, grid_y)

    grid2_x = grid2_x.flatten()
    grid2_y = grid2_y.flatten()

    if fixed_features is not None:
        slice_values = fixed_features.parameters
    else:
        fixed_features = ObservationFeatures(parameters={})

    fixed_values = get_fixed_values(model, slice_values)

    param_grid_obsf = []
    for i in range(density**2):
        predf = deepcopy(fixed_features)
        predf.parameters = fixed_values.copy()
        predf.parameters[x_param_name] = grid2_x[i]
        predf.parameters[y_param_name] = grid2_y[i]
        param_grid_obsf.append(predf)

    mu, cov = model.predict(param_grid_obsf)

    f_plt = mu[metric]
    sd_plt = np.sqrt(cov[metric][metric])
    # pyre-fixme[7]: Expected `Tuple[PlotData, np.ndarray, np.ndarray, np.ndarray,
    #  np.ndarray, Dict[str, bool]]` but got `Tuple[PlotData, typing.List[float],
    #  typing.Any, np.ndarray, np.ndarray, Dict[str, bool]]`.
    return plot_data, f_plt, sd_plt, grid_x, grid_y, scales
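
A minimal usage sketch of the function above (the parameter names "x1" and "x2", the metric "accuracy", and the fitted `model` are all assumptions for illustration):

# Sketch only: `model` is assumed to be a fitted ModelBridge; "x1", "x2" and
# "accuracy" are placeholder parameter/metric names.
plot_data, f_plt, sd_plt, grid_x, grid_y, scales = _get_contour_predictions(
    model=model,
    x_param_name="x1",
    y_param_name="x2",
    metric="accuracy",
    generator_runs_dict=None,
    density=50,
)
# f_plt and sd_plt hold density ** 2 predicted means and standard deviations,
# one per point of the (x1, x2) grid; grid_x and grid_y are the 1-d axis grids.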
Example #3
 def testGenWithDefaults(self, _, mock_gen):
     exp = get_experiment_for_value()
     exp.optimization_config = get_optimization_config_no_constraints()
     ss = get_search_space_for_range_value()
     modelbridge = ModelBridge(search_space=ss,
                               model=Model(),
                               transforms=[],
                               experiment=exp)
     modelbridge.gen(1)
     mock_gen.assert_called_with(
         modelbridge,
         n=1,
         search_space=ss,
         fixed_features=ObservationFeatures(parameters={}),
         model_gen_options=None,
         optimization_config=OptimizationConfig(
             objective=Objective(metric=Metric("test_metric"),
                                 minimize=False),
             outcome_constraints=[],
         ),
         pending_observations={},
     )
Example #4
    def testSetStatusQuo(self, mock_fit, mock_observations_from_data):
        modelbridge = ModelBridge(search_space_for_value(),
                                  0, [],
                                  get_experiment(),
                                  0,
                                  status_quo_name="1_1")
        self.assertEqual(modelbridge.status_quo, observation1())

        # Alternatively, we can specify by features
        modelbridge = ModelBridge(
            search_space_for_value(),
            0,
            [],
            get_experiment(),
            0,
            status_quo_features=observation1().features,
        )
        self.assertEqual(modelbridge.status_quo, observation1())

        # Alternatively, we can specify on experiment
        # Put a dummy arm with SQ name 1_1 on the dummy experiment.
        exp = get_experiment()
        sq = Arm(name="1_1", parameters={"x": 3.0})
        exp._status_quo = sq
        # Check that we set SQ to arm 1_1
        modelbridge = ModelBridge(search_space_for_value(), 0, [], exp, 0)
        self.assertEqual(modelbridge.status_quo, observation1())

        # Errors if features and name both specified
        with self.assertRaises(ValueError):
            modelbridge = ModelBridge(
                search_space_for_value(),
                0,
                [],
                exp,
                0,
                status_quo_features=observation1().features,
                status_quo_name="1_1",
            )

        # Left as None if features or name don't exist
        modelbridge = ModelBridge(search_space_for_value(),
                                  0, [],
                                  exp,
                                  0,
                                  status_quo_name="1_0")
        self.assertIsNone(modelbridge.status_quo)
        modelbridge = ModelBridge(
            search_space_for_value(),
            0,
            [],
            get_experiment(),
            0,
            status_quo_features=ObservationFeatures(parameters={
                "x": 3.0,
                "y": 10.0
            }),
        )
        self.assertIsNone(modelbridge.status_quo)
Example #5
def get_fixed_values(
    model: ModelBridge,
    slice_values: Optional[Dict[str, Any]] = None,
    trial_index: Optional[int] = None,
) -> TParameterization:
    """Get fixed values for parameters in a slice plot.

    If there is an in-design status quo, those values will be used. Otherwise,
    the mean of RangeParameters or the mode of ChoiceParameters is used.

    Any value in slice_values will override the above.

    Args:
        model: ModelBridge being used for plotting
        slice_values: Map from parameter name to the value at which it should
            be fixed.
        trial_index: Optional index of a trial; if provided, it is added to
            slice_values under the "TRIAL_PARAM" key.

    Returns: Map from parameter name to fixed value.
    """

    if trial_index is not None:
        if slice_values is None:
            slice_values = {}
        slice_values["TRIAL_PARAM"] = str(trial_index)

    # Check if status_quo is in design
    if model.status_quo is not None and model.model_space.check_membership(
            # pyre-fixme[16]: `Optional` has no attribute `features`.
            model.status_quo.features.parameters):
        setx = model.status_quo.features.parameters
    else:
        observations = model.get_training_data()
        setx = {}
        for p_name, parameter in model.model_space.parameters.items():
            # Exclude out of design status quo (no parameters)
            vals = [
                obs.features.parameters[p_name] for obs in observations
                if (len(obs.features.parameters) > 0
                    and parameter.validate(obs.features.parameters[p_name]))
            ]
            if isinstance(parameter, FixedParameter):
                setx[p_name] = parameter.value
            elif isinstance(parameter, ChoiceParameter):
                setx[p_name] = Counter(vals).most_common(1)[0][0]
            elif isinstance(parameter, RangeParameter):
                setx[p_name] = parameter.cast(np.mean(vals))

    if slice_values is not None:
        # slice_values has type Dict[str, Any]
        setx.update(slice_values)
    return setx
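
The slice and contour helpers in the other examples call this roughly as in the sketch below (`model` is assumed to be a fitted ModelBridge and "lr" an assumed parameter name):

# Sketch only: "lr" is an assumed parameter in the model's search space.
fixed_values = get_fixed_values(model, slice_values={"lr": 0.01})
# If an in-design status quo exists, its parameter values are used; otherwise
# the mean of each RangeParameter / mode of each ChoiceParameter over the
# training data is used, and the explicit {"lr": 0.01} entry overrides that.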
Example #6
 def testErrors(self):
     t = Derelativize(search_space=None,
                      observation_features=None,
                      observation_data=None)
     oc = OptimizationConfig(
         objective=Objective(Metric("c")),
         outcome_constraints=[
             OutcomeConstraint(Metric("a"),
                               ComparisonOp.LEQ,
                               bound=2,
                               relative=True)
         ],
     )
     search_space = SearchSpace(
         parameters=[RangeParameter("x", ParameterType.FLOAT, 0, 20)])
     g = ModelBridge(search_space, None, [])
     with self.assertRaises(ValueError):
         t.transform_optimization_config(oc, None, None)
     with self.assertRaises(ValueError):
         t.transform_optimization_config(oc, g, None)
Example #7
    def testSetStatusQuoMultipleObs(self, mock_fit, mock_observations_from_data):
        exp = get_experiment_with_repeated_arms(2)

        trial_index = 1
        status_quo_features = ObservationFeatures(
            parameters=exp.trials[trial_index].status_quo.parameters,
            trial_index=trial_index,
        )
        modelbridge = ModelBridge(
            get_search_space_for_value(),
            0,
            [],
            exp,
            0,
            status_quo_features=status_quo_features,
        )
        # Check that for experiments with many trials the status quo is set
        # to the value of the status quo of the last trial.
        if len(exp.trials) >= 1:
            self.assertEqual(modelbridge.status_quo, get_observation_status_quo1())
Example #8
def _get_contour_predictions(
    model: ModelBridge,
    x_param_name: str,
    y_param_name: str,
    metric: str,
    generator_runs_dict: TNullableGeneratorRunsDict,
    density: int,
    slice_values: Optional[Dict[str, Any]] = None,
) -> ContourPredictions:
    """
    slice_values is a dictionary {param_name: value} for the parameters that
    are being sliced on.
    """
    x_param = get_range_parameter(model, x_param_name)
    y_param = get_range_parameter(model, y_param_name)

    plot_data, _, _ = get_plot_data(model, generator_runs_dict or {}, {metric})

    grid_x = get_grid_for_parameter(x_param, density)
    grid_y = get_grid_for_parameter(y_param, density)
    scales = {"x": x_param.log_scale, "y": y_param.log_scale}

    grid2_x, grid2_y = np.meshgrid(grid_x, grid_y)

    grid2_x = grid2_x.flatten()
    grid2_y = grid2_y.flatten()

    fixed_values = get_fixed_values(model, slice_values)

    param_grid_obsf = []
    for i in range(density ** 2):
        parameters = fixed_values.copy()
        parameters[x_param_name] = grid2_x[i]
        parameters[y_param_name] = grid2_y[i]
        param_grid_obsf.append(ObservationFeatures(parameters))

    mu, cov = model.predict(param_grid_obsf)

    f_plt = mu[metric]
    sd_plt = np.sqrt(cov[metric][metric])
    return plot_data, f_plt, sd_plt, grid_x, grid_y, scales
Example #9
    def test_ood_gen(self, _):
        # Test fit_out_of_design by returning out-of-design (OOD) candidates.
        exp = get_experiment_for_value()
        ss = SearchSpace([RangeParameter("x", ParameterType.FLOAT, 0.0, 1.0)])
        modelbridge = ModelBridge(
            search_space=ss,
            model=Model(),
            transforms=[],
            experiment=exp,
            data=0,
            fit_out_of_design=True,
        )
        obs = ObservationFeatures(parameters={"x": 3.0})
        modelbridge._gen = mock.MagicMock(
            "ax.modelbridge.base.ModelBridge._gen",
            autospec=True,
            return_value=([obs], [2], None, {}),
        )
        gr = modelbridge.gen(n=1)
        self.assertEqual(gr.arms[0].parameters, obs.parameters)

        # Test clamping arms by setting fit_out_of_design=False
        modelbridge = ModelBridge(
            search_space=ss,
            model=Model(),
            transforms=[],
            experiment=exp,
            data=0,
            fit_out_of_design=False,
        )
        obs = ObservationFeatures(parameters={"x": 3.0})
        modelbridge._gen = mock.MagicMock(
            "ax.modelbridge.base.ModelBridge._gen",
            autospec=True,
            return_value=([obs], [2], None, {}),
        )
        gr = modelbridge.gen(n=1)
        self.assertEqual(gr.arms[0].parameters, {"x": 1.0})
Example #10
 def testNoOutOfDesign(self, mock_fit, mock_observations_from_data):
     exp = get_experiment_for_value()
     modelbridge = ModelBridge(get_search_space_for_value(), 0, [], exp, 0)
     self.assertEqual(modelbridge.out_of_design_data(), None)
Example #11
def _get_in_sample_arms(
    model: ModelBridge,
    metric_names: Set[str],
    fixed_features: Optional[ObservationFeatures] = None,
) -> Tuple[Dict[str, PlotInSampleArm], RawData, Dict[str, TParameterization]]:
    """Get in-sample arms from a model with observed and predicted values
    for specified metrics.

    Returns a PlotInSampleArm object in which repeated observations are merged
    with IVW, and a RawData object in which every observation is listed.

    The fixed_features input can be used to override fields of the in-sample
    arms when making model predictions.

    Args:
        model: An instance of the model bridge.
        metric_names: Restrict predictions to these metrics. If None, uses all
            metrics in the model.
        fixed_features: Features that should be fixed in the arms this function
            will obtain predictions for.

    Returns:
        A tuple containing

        - Map from arm name to PlotInSampleArm.
        - List of the data for each observation like::

            {'metric_name': 'likes', 'arm_name': '0_0', 'mean': 1., 'sem': 0.1}

        - Map from arm name to parameters
    """
    observations = model.get_training_data()
    # Calculate raw data
    raw_data = []
    arm_name_to_parameters = {}
    for obs in observations:
        arm_name_to_parameters[obs.arm_name] = obs.features.parameters
        for j, metric_name in enumerate(obs.data.metric_names):
            if metric_name in metric_names:
                raw_data.append({
                    "metric_name": metric_name,
                    "arm_name": obs.arm_name,
                    "mean": obs.data.means[j],
                    "sem": np.sqrt(obs.data.covariance[j, j]),
                })

    # Check that we have one ObservationFeatures per arm name since we
    # key by arm name and the model is not Multi-task.
    # If "TrialAsTask" is present, one of the arms is also chosen.
    if ("TrialAsTask" not in model.transforms.keys()) and (
            len(arm_name_to_parameters) != len(observations)):
        logger.error(
            "Have observations of arms with different features but same"
            " name. Arbitrary one will be plotted.")

    # Merge multiple measurements within each Observation with IVW to get
    # un-modeled prediction
    t = IVW(None, [], [])
    obs_data = t.transform_observation_data([obs.data for obs in observations],
                                            [])
    # Start filling in plot data
    in_sample_plot: Dict[str, PlotInSampleArm] = {}
    for i, obs in enumerate(observations):
        if obs.arm_name is None:
            raise ValueError("Observation must have arm name for plotting.")

        # Extract raw measurement
        obs_y = {}  # Observed metric means.
        obs_se = {}  # Observed metric standard errors.
        # Use the IVW data, not obs.data
        for j, metric_name in enumerate(obs_data[i].metric_names):
            if metric_name in metric_names:
                obs_y[metric_name] = obs_data[i].means[j]
                obs_se[metric_name] = np.sqrt(obs_data[i].covariance[j, j])
        # Make a prediction.
        if model.training_in_design[i]:
            features = obs.features
            if fixed_features is not None:
                features.update_features(fixed_features)
            pred_y, pred_se = _predict_at_point(model, features, metric_names)
        else:
            # Use raw data for out-of-design points
            pred_y = obs_y
            pred_se = obs_se
        in_sample_plot[not_none(obs.arm_name)] = PlotInSampleArm(
            name=not_none(obs.arm_name),
            y=obs_y,
            se=obs_se,
            parameters=obs.features.parameters,
            y_hat=pred_y,
            se_hat=pred_se,
            context_stratum=None,
        )
    return in_sample_plot, raw_data, arm_name_to_parameters
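
A usage sketch (the metric name is a placeholder and `model` is assumed to be a fitted ModelBridge):

# Sketch only: "accuracy" is an assumed metric name.
in_sample_plot, raw_data, arm_name_to_parameters = _get_in_sample_arms(
    model=model,
    metric_names={"accuracy"},
)
# in_sample_plot maps arm name -> PlotInSampleArm with observed (y, se) and
# predicted (y_hat, se_hat) values; raw_data lists every single observation.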
Example #12
def cross_validate(model: ModelBridge,
                   folds: int = -1,
                   test_selector: Optional[Callable] = None) -> List[CVResult]:
    """Cross validation for model predictions.

    Splits the model's training data into train/test folds and makes
    out-of-sample predictions on the test folds.

    Train/test splits are made based on arm names, so that repeated
    observations of an arm will always be in the train or test set
    together.

    The test set can be limited to a specific set of observations by passing in
    a test_selector callable. This function should take in an Observation
    and return a boolean indicating whether it should be used in the test set.
    For example, we can limit the test set to arms from trial 0 with
    test_selector = lambda obs: obs.features.trial_index == 0
    If not provided, all observations will be available for the test set.

    Args:
        model: Fitted model (ModelBridge) to cross validate.
        folds: Number of folds. Use -1 for leave-one-out, otherwise will be
            k-fold.
        test_selector: Function for selecting observations for the test set.

    Returns:
        A CVResult for each observation in the training data.
    """
    # Get in-design training points
    training_data = [
        obs for i, obs in enumerate(model.get_training_data())
        if model.training_in_design[i]
    ]
    arm_names = {obs.arm_name for obs in training_data}
    n = len(arm_names)
    if folds > n:
        raise ValueError(
            f"Training data only has {n} arms, which is less than folds")
    elif folds < 2 and folds != -1:
        raise ValueError("Folds must be -1 for LOO, or > 1.")
    elif folds == -1:
        folds = n

    arm_names_rnd = np.array(list(arm_names))
    np.random.shuffle(arm_names_rnd)
    result = []
    for train_names, test_names in _gen_train_test_split(
            folds=folds, arm_names=arm_names_rnd):
        # Construct train/test data
        cv_training_data = []
        cv_test_data = []
        cv_test_points = []
        for obs in training_data:
            if obs.arm_name in train_names:
                cv_training_data.append(obs)
            elif obs.arm_name in test_names and (test_selector is None
                                                 or test_selector(obs)):
                cv_test_points.append(obs.features)
                cv_test_data.append(obs)
        if len(cv_test_points) == 0:
            continue
        # Make the prediction
        cv_test_predictions = model.cross_validate(
            cv_training_data=cv_training_data, cv_test_points=cv_test_points)
        # Form CVResult objects
        for i, obs in enumerate(cv_test_data):
            result.append(
                CVResult(observed=obs, predicted=cv_test_predictions[i]))
    return result
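
Typical calls look like the sketch below; `model` is assumed to be a fitted ModelBridge, and the trial-0 selector mirrors the docstring example:

# Leave-one-out cross validation over all in-design training arms.
cv_results = cross_validate(model, folds=-1)

# 5-fold cross validation, restricting the test set to observations of trial 0.
cv_results_trial0 = cross_validate(
    model,
    folds=5,
    test_selector=lambda obs: obs.features.trial_index == 0,
)
# Each CVResult pairs an observed Observation with its out-of-sample prediction.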
Example #13
def _get_slice_predictions(
    model: ModelBridge,
    param_name: str,
    metric_name: str,
    generator_runs_dict: TNullableGeneratorRunsDict = None,
    relative: bool = False,
    density: int = 50,
    slice_values: Optional[Dict[str, Any]] = None,
    fixed_features: Optional[ObservationFeatures] = None,
    trial_index: Optional[int] = None,
) -> SlicePredictions:
    """Computes slice prediction configuration values for a single metric name.

    Args:
        model: ModelBridge that contains model for predictions
        param_name: Name of parameter that will be sliced
        metric_name: Name of metric to plot
        generator_runs_dict: A dictionary {name: generator run} of generator runs
            whose arms will be plotted, if they lie in the slice.
        relative: Predictions relative to status quo
        density: Number of points along slice to evaluate predictions.
        slice_values: A dictionary {name: val} for the fixed values of the
            other parameters. If not provided, then the status quo values will
            be used if there is a status quo, otherwise the mean of numeric
            parameters or the mode of choice parameters. Ignored if
            fixed_features is specified.
        fixed_features: An ObservationFeatures object containing the values of
            features (including non-parameter features like context) to be set
            in the slice.

    Returns: Configuration values for AxPlotConfig.
    """
    if generator_runs_dict is None:
        generator_runs_dict = {}

    parameter = get_range_parameter(model, param_name)
    grid = get_grid_for_parameter(parameter, density)

    plot_data, raw_data, cond_name_to_parameters = get_plot_data(
        model=model,
        generator_runs_dict=generator_runs_dict,
        metric_names={metric_name},
        fixed_features=fixed_features,
    )

    if fixed_features is not None:
        slice_values = fixed_features.parameters
    else:
        fixed_features = ObservationFeatures(parameters={})
    fixed_values = get_fixed_values(model, slice_values, trial_index)

    prediction_features = []
    for x in grid:
        predf = deepcopy(fixed_features)
        predf.parameters = fixed_values.copy()
        predf.parameters[param_name] = x
        prediction_features.append(predf)

    f, cov = model.predict(prediction_features)
    f_plt = f[metric_name]
    sd_plt = np.sqrt(cov[metric_name][metric_name])
    # pyre-fixme[7]: Expected `Tuple[PlotData, List[Dict[str, Union[float, str]]],
    #  List[float], np.ndarray, np.ndarray, str, str, bool, Dict[str, Union[None, bool,
    #  float, int, str]], np.ndarray, bool]` but got `Tuple[PlotData, Dict[str,
    #  Dict[str, Union[None, bool, float, int, str]]], List[float], List[Dict[str,
    #  Union[float, str]]], np.ndarray, str, str, bool, Dict[str, Union[None, bool,
    #  float, int, str]], typing.Any, bool]`.
    return (
        plot_data,
        cond_name_to_parameters,
        f_plt,
        raw_data,
        grid,
        metric_name,
        param_name,
        relative,
        fixed_values,
        sd_plt,
        parameter.log_scale,
    )
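
A usage sketch of the helper above (parameter and metric names are placeholders; `model` is assumed to be a fitted ModelBridge):

# Sketch only: "lr" and "accuracy" are assumed names.
(plot_data, cond_name_to_parameters, f_plt, raw_data, grid, metric_name,
 param_name, relative, fixed_values, sd_plt, is_log) = _get_slice_predictions(
    model=model,
    param_name="lr",
    metric_name="accuracy",
    density=50,
)
# f_plt and sd_plt give the predicted mean and standard deviation at each of
# the 50 grid points along the "lr" slice.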
Example #14
def interact_slice_plotly(
    model: ModelBridge,
    generator_runs_dict: TNullableGeneratorRunsDict = None,
    relative: bool = False,
    density: int = 50,
    slice_values: Optional[Dict[str, Any]] = None,
    fixed_features: Optional[ObservationFeatures] = None,
    trial_index: Optional[int] = None,
) -> go.Figure:
    """Create interactive plot with predictions for a 1-d slice of the parameter
    space.

    Args:
        model: ModelBridge that contains model for predictions
        generator_runs_dict: A dictionary {name: generator run} of generator runs
            whose arms will be plotted, if they lie in the slice.
        relative: Predictions relative to status quo
        density: Number of points along slice to evaluate predictions.
        slice_values: A dictionary {name: val} for the fixed values of the
            other parameters. If not provided, then the status quo values will
            be used if there is a status quo, otherwise the mean of numeric
            parameters or the mode of choice parameters. Ignored if
            fixed_features is specified.
        fixed_features: An ObservationFeatures object containing the values of
            features (including non-parameter features like context) to be set
            in the slice.

    Returns:
        go.Figure: interactive plot of objective vs. parameter
    """
    if generator_runs_dict is None:
        generator_runs_dict = {}

    metric_names = list(model.metric_names)

    # Populate `pbuttons`, which allows the user to select 1D slices of parameter
    # space with the chosen parameter on the x-axis.
    range_parameters = get_range_parameters(model)
    param_names = [parameter.name for parameter in range_parameters]
    pbuttons = []
    init_traces = []
    xaxis_init_format = {}
    first_param_bool = True
    should_replace_slice_values = fixed_features is not None
    for param_name in param_names:
        pbutton_data_args = {"x": [], "y": [], "error_y": []}
        parameter = get_range_parameter(model, param_name)
        grid = get_grid_for_parameter(parameter, density)

        plot_data_dict = {}
        raw_data_dict = {}
        sd_plt_dict: Dict[str, Dict[str, np.ndarray]] = {}

        cond_name_to_parameters_dict = {}
        is_log_dict: Dict[str, bool] = {}

        if should_replace_slice_values:
            slice_values = not_none(fixed_features).parameters
        else:
            fixed_features = ObservationFeatures(parameters={})
        fixed_values = get_fixed_values(model, slice_values, trial_index)
        prediction_features = []
        for x in grid:
            predf = deepcopy(not_none(fixed_features))
            predf.parameters = fixed_values.copy()
            predf.parameters[param_name] = x
            prediction_features.append(predf)

        f, cov = model.predict(prediction_features)

        for metric_name in metric_names:
            pd, cntp, f_plt, rd, _, _, _, _, _, sd_plt, ls = _get_slice_predictions(
                model=model,
                param_name=param_name,
                metric_name=metric_name,
                generator_runs_dict=generator_runs_dict,
                relative=relative,
                density=density,
                slice_values=slice_values,
                fixed_features=fixed_features,
            )

            plot_data_dict[metric_name] = pd
            raw_data_dict[metric_name] = rd
            cond_name_to_parameters_dict[metric_name] = cntp

            sd_plt_dict[metric_name] = np.sqrt(cov[metric_name][metric_name])
            is_log_dict[metric_name] = ls

        config = {
            "arm_data": plot_data_dict,
            "arm_name_to_parameters": cond_name_to_parameters_dict,
            "f": f,
            "fit_data": raw_data_dict,
            "grid": grid,
            "metrics": metric_names,
            "param": param_name,
            "rel": relative,
            "setx": fixed_values,
            "sd": sd_plt_dict,
            "is_log": is_log_dict,
        }
        config = AxPlotConfig(config, plot_type=AxPlotTypes.GENERIC).data

        arm_data = config["arm_data"]
        arm_name_to_parameters = config["arm_name_to_parameters"]
        f = config["f"]
        fit_data = config["fit_data"]
        grid = config["grid"]
        metrics = config["metrics"]
        param = config["param"]
        rel = config["rel"]
        setx = config["setx"]
        sd = config["sd"]
        is_log = config["is_log"]

        # layout
        xrange = axis_range(grid, is_log[metrics[0]])
        xtype = "log" if is_log_dict[metrics[0]] else "linear"

        for i, metric in enumerate(metrics):
            cur_visible = i == 0
            metric = metrics[i]
            traces = slice_config_to_trace(
                arm_data[metric],
                arm_name_to_parameters[metric],
                f[metric],
                fit_data[metric],
                grid,
                metric,
                param,
                rel,
                setx,
                sd[metric],
                is_log[metric],
                cur_visible,
            )
            pbutton_data_args["x"] += [trace["x"] for trace in traces]
            pbutton_data_args["y"] += [trace["y"] for trace in traces]
            pbutton_data_args["error_y"] += [{
                "type": "data",
                "array": trace["error_y"]["array"],
                "visible": True,
                "color": "black",
            } if "error_y" in trace and "array" in trace["error_y"] else []
                                             for trace in traces]
            if first_param_bool:
                init_traces.extend(traces)
        pbutton_args = [
            pbutton_data_args,
            {
                "xaxis.title": param_name,
                "xaxis.range": xrange,
                "xaxis.type": xtype,
            },
        ]

        pbuttons.append({
            "args": pbutton_args,
            "label": param_name,
            "method": "update"
        })
        if first_param_bool:
            xaxis_init_format = {
                "anchor": "y",
                "autorange": False,
                "exponentformat": "e",
                "range": xrange,
                "tickfont": {
                    "size": 11
                },
                "tickmode": "auto",
                "title": param_name,
                "type": xtype,
            }
            first_param_bool = False

    # Populate mbuttons, which allows the user to select which metric to plot
    mbuttons = []
    for i, metric in enumerate(metrics):
        trace_cnt = 3 + len(arm_data[metric]["out_of_sample"].keys())
        visible = [False] * (len(metrics) * trace_cnt)
        for j in range(i * trace_cnt, (i + 1) * trace_cnt):
            visible[j] = True
        mbuttons.append({
            "method": "update",
            "args": [{
                "visible": visible
            }, {
                "yaxis.title": metric
            }],
            "label": metric,
        })

    layout = {
        "title":
        "Predictions for a 1-d slice of the parameter space",
        "annotations": [
            {
                "showarrow": False,
                "text": "Choose metric:",
                "x": 0.225,
                "xanchor": "right",
                "xref": "paper",
                "y": -0.455,
                "yanchor": "bottom",
                "yref": "paper",
            },
            {
                "showarrow": False,
                "text": "Choose parameter:",
                "x": 0.225,
                "xanchor": "right",
                "xref": "paper",
                "y": -0.305,
                "yanchor": "bottom",
                "yref": "paper",
            },
        ],
        "updatemenus": [
            {
                "y": -0.35,
                "x": 0.25,
                "xanchor": "left",
                "yanchor": "top",
                "buttons": mbuttons,
                "direction": "up",
            },
            {
                "y": -0.2,
                "x": 0.25,
                "xanchor": "left",
                "yanchor": "top",
                "buttons": pbuttons,
                "direction": "up",
            },
        ],
        "hovermode":
        "closest",
        "xaxis":
        xaxis_init_format,
        "yaxis": {
            "anchor": "x",
            "autorange": True,
            "tickfont": {
                "size": 11
            },
            "tickmode": "auto",
            "title": metrics[0],
        },
    }

    return go.Figure(data=init_traces, layout=layout)
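
A usage sketch (`model` is assumed to be a fitted ModelBridge with at least one RangeParameter):

# Sketch only: builds the interactive slice figure with dropdowns for choosing
# the metric and the sliced parameter, then displays it.
fig = interact_slice_plotly(model, density=50)
fig.show()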
Example #15
def interact_slice(
    model: ModelBridge,
    param_name: str,
    metric_name: str = "",
    generator_runs_dict: TNullableGeneratorRunsDict = None,
    relative: bool = False,
    density: int = 50,
    slice_values: Optional[Dict[str, Any]] = None,
    fixed_features: Optional[ObservationFeatures] = None,
) -> AxPlotConfig:
    """Create interactive plot with predictions for a 1-d slice of the parameter
    space.

    Args:
        model: ModelBridge that contains model for predictions
        param_name: Name of parameter that will be sliced
        metric_name: Name of metric to plot
        generator_runs_dict: A dictionary {name: generator run} of generator runs
            whose arms will be plotted, if they lie in the slice.
        relative: Predictions relative to status quo
        density: Number of points along slice to evaluate predictions.
        slice_values: A dictionary {name: val} for the fixed values of the
            other parameters. If not provided, then the status quo values will
            be used if there is a status quo, otherwise the mean of numeric
            parameters or the mode of choice parameters. Ignored if
            fixed_features is specified.
        fixed_features: An ObservationFeatures object containing the values of
            features (including non-parameter features like context) to be set
            in the slice.
    """
    if generator_runs_dict is None:
        generator_runs_dict = {}

    metric_names = list(model.metric_names)

    parameter = get_range_parameter(model, param_name)
    grid = get_grid_for_parameter(parameter, density)

    plot_data_dict = {}
    raw_data_dict = {}
    sd_plt_dict: Dict[str, Dict[str, np.ndarray]] = {}

    cond_name_to_parameters_dict = {}
    is_log_dict: Dict[str, bool] = {}

    if fixed_features is not None:
        slice_values = fixed_features.parameters
    else:
        fixed_features = ObservationFeatures(parameters={})
    fixed_values = get_fixed_values(model, slice_values)

    prediction_features = []
    for x in grid:
        predf = deepcopy(fixed_features)
        predf.parameters = fixed_values.copy()
        predf.parameters[param_name] = x
        prediction_features.append(predf)

    f, cov = model.predict(prediction_features)

    for metric_name in metric_names:
        pd, cntp, f_plt, rd, _, _, _, _, _, sd_plt, ls = _get_slice_predictions(
            model=model,
            param_name=param_name,
            metric_name=metric_name,
            generator_runs_dict=generator_runs_dict,
            relative=relative,
            density=density,
            slice_values=slice_values,
            fixed_features=fixed_features,
        )

        plot_data_dict[metric_name] = pd
        raw_data_dict[metric_name] = rd
        cond_name_to_parameters_dict[metric_name] = cntp

        sd_plt_dict[metric_name] = np.sqrt(cov[metric_name][metric_name])
        is_log_dict[metric_name] = ls

    config = {
        "arm_data": plot_data_dict,
        "arm_name_to_parameters": cond_name_to_parameters_dict,
        "f": f,
        "fit_data": raw_data_dict,
        "grid": grid,
        "metrics": metric_names,
        "param": param_name,
        "rel": relative,
        "setx": fixed_values,
        "sd": sd_plt_dict,
        "is_log": is_log_dict,
    }
    return AxPlotConfig(config, plot_type=AxPlotTypes.INTERACT_SLICE)
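
A usage sketch (the parameter name is a placeholder; `model` is assumed to be a fitted ModelBridge):

# Sketch only: "lr" is an assumed parameter in the model's search space.
config = interact_slice(model, param_name="lr", density=50)
# config bundles per-metric arm data, predictions ("f"), standard deviations
# ("sd") and the grid under plot_type=AxPlotTypes.INTERACT_SLICE for rendering.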
Example #16
 def testSetStatusQuoMultipleObs(self, mock_fit, mock_observations_from_data):
     modelbridge = ModelBridge(
         search_space_for_value(), 0, [], get_experiment(), 0, status_quo_name="1_1"
     )
     # SQ not set if multiple feature sets for SQ arm.
     self.assertIsNone(modelbridge.status_quo)
Example #17
def plot_slice(
    model: ModelBridge,
    param_name: str,
    metric_name: str,
    generator_runs_dict: TNullableGeneratorRunsDict = None,
    relative: bool = False,
    density: int = 50,
    slice_values: Optional[Dict[str, Any]] = None,
) -> AxPlotConfig:
    """Plot predictions for a 1-d slice of the parameter space.

    Args:
        model: ModelBridge that contains model for predictions
        param_name: Name of parameter that will be sliced
        metric_name: Name of metric to plot
        generator_runs_dict: A dictionary {name: generator run} of generator runs
            whose arms will be plotted, if they lie in the slice.
        relative: Predictions relative to status quo
        density: Number of points along slice to evaluate predictions.
        slice_values: A dictionary {name: val} for the fixed values of the
            other parameters. If not provided, then the status quo values will
            be used if there is a status quo, otherwise the mean of numeric
            parameters or the mode of choice parameters.
    """
    if generator_runs_dict is None:
        generator_runs_dict = {}

    parameter = get_range_parameter(model, param_name)
    grid = get_grid_for_parameter(parameter, density)

    plot_data, raw_data, cond_name_to_parameters = get_plot_data(
        model=model, generator_runs_dict=generator_runs_dict, metric_names={metric_name}
    )

    fixed_values = get_fixed_values(model, slice_values)

    prediction_features = []
    for x in grid:
        parameters = fixed_values.copy()
        parameters[param_name] = x
        # Here we assume context is None
        prediction_features.append(ObservationFeatures(parameters=parameters))

    f, cov = model.predict(prediction_features)

    f_plt = f[metric_name]
    sd_plt = np.sqrt(cov[metric_name][metric_name])

    config = {
        "arm_data": plot_data,
        "arm_name_to_parameters": cond_name_to_parameters,
        "f": f_plt,
        "fit_data": raw_data,
        "grid": grid,
        "metric": metric_name,
        "param": param_name,
        "rel": relative,
        "setx": fixed_values,
        "sd": sd_plt,
        "is_log": parameter.log_scale,
    }
    return AxPlotConfig(config, plot_type=AxPlotTypes.SLICE)
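
A usage sketch (`model` is assumed to be a fitted ModelBridge; parameter and metric names are placeholders):

# Sketch only: "lr" and "accuracy" are assumed names.
config = plot_slice(
    model=model,
    param_name="lr",
    metric_name="accuracy",
    density=50,
)
# config carries the grid, the predicted means ("f") and standard deviations
# ("sd"), and the in-sample arm data used to draw the 1-d slice.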
Example #18
    def testDerelativizeTransform(self, mock_predict, mock_fit,
                                  mock_observations_from_data):
        t = Derelativize(search_space=None,
                         observation_features=None,
                         observation_data=None)

        # ModelBridge with in-design status quo
        search_space = SearchSpace(parameters=[
            RangeParameter("x", ParameterType.FLOAT, 0, 20),
            RangeParameter("y", ParameterType.FLOAT, 0, 20),
        ])
        g = ModelBridge(
            search_space=search_space,
            model=None,
            transforms=[],
            experiment=Experiment(search_space, "test"),
            data=Data(),
            status_quo_name="1_1",
        )

        # Test with no relative constraints
        objective = Objective(Metric("c"))
        oc = OptimizationConfig(
            objective=objective,
            outcome_constraints=[
                OutcomeConstraint(Metric("a"),
                                  ComparisonOp.LEQ,
                                  bound=2,
                                  relative=False)
            ],
        )
        oc2 = t.transform_optimization_config(oc, g, None)
        self.assertTrue(oc == oc2)

        # Test with relative constraint, in-design status quo
        oc = OptimizationConfig(
            objective=objective,
            outcome_constraints=[
                OutcomeConstraint(Metric("a"),
                                  ComparisonOp.LEQ,
                                  bound=2,
                                  relative=False),
                OutcomeConstraint(Metric("b"),
                                  ComparisonOp.LEQ,
                                  bound=-10,
                                  relative=True),
            ],
        )
        oc = t.transform_optimization_config(oc, g, None)
        self.assertTrue(oc.outcome_constraints == [
            OutcomeConstraint(
                Metric("a"), ComparisonOp.LEQ, bound=2, relative=False),
            OutcomeConstraint(
                Metric("b"), ComparisonOp.LEQ, bound=4.5, relative=False),
        ])
        obsf = mock_predict.mock_calls[0][1][1][0]
        obsf2 = ObservationFeatures(parameters={"x": 2.0, "y": 10.0})
        self.assertTrue(obsf == obsf2)

        # Test with relative constraint, out-of-design status quo
        mock_predict.side_effect = Exception()
        g = ModelBridge(
            search_space=search_space,
            model=None,
            transforms=[],
            experiment=Experiment(search_space, "test"),
            data=Data(),
            status_quo_name="1_2",
        )
        oc = OptimizationConfig(
            objective=objective,
            outcome_constraints=[
                OutcomeConstraint(Metric("a"),
                                  ComparisonOp.LEQ,
                                  bound=2,
                                  relative=False),
                OutcomeConstraint(Metric("b"),
                                  ComparisonOp.LEQ,
                                  bound=-10,
                                  relative=True),
            ],
        )
        oc = t.transform_optimization_config(oc, g, None)
        self.assertTrue(oc.outcome_constraints == [
            OutcomeConstraint(
                Metric("a"), ComparisonOp.LEQ, bound=2, relative=False),
            OutcomeConstraint(
                Metric("b"), ComparisonOp.LEQ, bound=3.6, relative=False),
        ])
        self.assertEqual(mock_predict.call_count, 2)

        # Raises error if predict fails with in-design status quo
        g = ModelBridge(search_space, None, [], status_quo_name="1_1")
        oc = OptimizationConfig(
            objective=objective,
            outcome_constraints=[
                OutcomeConstraint(Metric("a"),
                                  ComparisonOp.LEQ,
                                  bound=2,
                                  relative=False),
                OutcomeConstraint(Metric("b"),
                                  ComparisonOp.LEQ,
                                  bound=-10,
                                  relative=True),
            ],
        )
        with self.assertRaises(Exception):
            oc = t.transform_optimization_config(oc, g, None)

        # Raises error with relative constraint, no status quo
        exp = Experiment(search_space, "name")
        g = ModelBridge(search_space, None, [], exp)
        with self.assertRaises(ValueError):
            oc = t.transform_optimization_config(oc, g, None)

        # Raises error with relative constraint, no modelbridge
        with self.assertRaises(ValueError):
            oc = t.transform_optimization_config(oc, None, None)
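
The absolute bounds asserted in this test follow the usual derelativization arithmetic, absolute = (1 + relative / 100) * status_quo_value; the status-quo values of 5.0 and 4.0 for metric "b" are inferred here from the expected bounds (4.5 and 3.6), not stated explicitly by the test. A small sketch of that arithmetic:

import math

# Sketch of the arithmetic behind the asserted bounds; the status-quo values
# 5.0 and 4.0 are assumptions inferred from the expected results above.
def derelativize_bound(relative_bound: float, status_quo_value: float) -> float:
    return (1 + relative_bound / 100.0) * status_quo_value

assert math.isclose(derelativize_bound(-10, 5.0), 4.5)  # in-design status quo
assert math.isclose(derelativize_bound(-10, 4.0), 3.6)  # out-of-design status quo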
Example #19
def interact_slice(
    model: ModelBridge,
    param_name: str,
    metric_name: str = "",
    generator_runs_dict: TNullableGeneratorRunsDict = None,
    relative: bool = False,
    density: int = 50,
    slice_values: Optional[Dict[str, Any]] = None,
    fixed_features: Optional[ObservationFeatures] = None,
) -> AxPlotConfig:
    """Create interactive plot with predictions for a 1-d slice of the parameter
    space.

    Args:
        model: ModelBridge that contains model for predictions
        param_name: Name of parameter that will be sliced
        metric_name: Name of metric to plot
        generator_runs_dict: A dictionary {name: generator run} of generator runs
            whose arms will be plotted, if they lie in the slice.
        relative: Predictions relative to status quo
        density: Number of points along slice to evaluate predictions.
        slice_values: A dictionary {name: val} for the fixed values of the
            other parameters. If not provided, then the status quo values will
            be used if there is a status quo, otherwise the mean of numeric
            parameters or the mode of choice parameters. Ignored if
            fixed_features is specified.
        fixed_features: An ObservationFeatures object containing the values of
            features (including non-parameter features like context) to be set
            in the slice.
    """
    if generator_runs_dict is None:
        generator_runs_dict = {}

    metric_names = list(model.metric_names)

    parameter = get_range_parameter(model, param_name)
    grid = get_grid_for_parameter(parameter, density)

    plot_data_dict = {}
    raw_data_dict = {}
    sd_plt_dict: Dict[str, Dict[str, np.ndarray]] = {}

    cond_name_to_parameters_dict = {}
    is_log_dict: Dict[str, bool] = {}

    if fixed_features is not None:
        slice_values = fixed_features.parameters
    else:
        fixed_features = ObservationFeatures(parameters={})
    fixed_values = get_fixed_values(model, slice_values)

    prediction_features = []
    for x in grid:
        predf = deepcopy(fixed_features)
        predf.parameters = fixed_values.copy()
        predf.parameters[param_name] = x
        prediction_features.append(predf)

    f, cov = model.predict(prediction_features)

    for metric_name in metric_names:
        pd, cntp, f_plt, rd, _, _, _, _, _, sd_plt, ls = _get_slice_predictions(
            model=model,
            param_name=param_name,
            metric_name=metric_name,
            generator_runs_dict=generator_runs_dict,
            relative=relative,
            density=density,
            slice_values=slice_values,
            fixed_features=fixed_features,
        )

        plot_data_dict[metric_name] = pd
        raw_data_dict[metric_name] = rd
        cond_name_to_parameters_dict[metric_name] = cntp

        sd_plt_dict[metric_name] = np.sqrt(cov[metric_name][metric_name])
        is_log_dict[metric_name] = ls

    config = {
        "arm_data": plot_data_dict,
        "arm_name_to_parameters": cond_name_to_parameters_dict,
        "f": f,
        "fit_data": raw_data_dict,
        "grid": grid,
        "metrics": metric_names,
        "param": param_name,
        "rel": relative,
        "setx": fixed_values,
        "sd": sd_plt_dict,
        "is_log": is_log_dict,
    }
    config = AxPlotConfig(config, plot_type=AxPlotTypes.GENERIC).data

    arm_data = config["arm_data"]
    arm_name_to_parameters = config["arm_name_to_parameters"]
    f = config["f"]
    fit_data = config["fit_data"]
    grid = config["grid"]
    metrics = config["metrics"]
    param = config["param"]
    rel = config["rel"]
    setx = config["setx"]
    sd = config["sd"]
    is_log = config["is_log"]

    traces = []

    for i, metric in enumerate(metrics):
        cur_visible = i == 0
        metric = metrics[i]
        traces.extend(
            slice_config_to_trace(
                arm_data[metric],
                arm_name_to_parameters[metric],
                f[metric],
                fit_data[metric],
                grid,
                metric,
                param,
                rel,
                setx,
                sd[metric],
                is_log[metric],
                cur_visible,
            )
        )

    # layout
    xrange = axis_range(grid, is_log[metrics[0]])
    xtype = "log" if is_log[metrics[0]] else "linear"

    buttons = []
    for i, metric in enumerate(metrics):
        trace_cnt = 3 + len(arm_data[metric]["out_of_sample"].keys()) * 2
        visible = [False] * (len(metrics) * trace_cnt)
        for j in range(i * trace_cnt, (i + 1) * trace_cnt):
            visible[j] = True
        buttons.append(
            {
                "method": "update",
                "args": [{"visible": visible}, {"yaxis.title": metric}],
                "label": metric,
            }
        )

    layout = {
        "title": "Predictions for a 1-d slice of the parameter space",
        "annotations": [
            {
                "showarrow": False,
                "text": "Choose metric:",
                "x": 0.225,
                "xanchor": "center",
                "xref": "paper",
                "y": 1.005,
                "yanchor": "bottom",
                "yref": "paper",
            }
        ],
        "updatemenus": [{"y": 1.1, "x": 0.5, "yanchor": "top", "buttons": buttons}],
        "hovermode": "closest",
        "xaxis": {
            "anchor": "y",
            "autorange": False,
            "exponentformat": "e",
            "range": xrange,
            "tickfont": {"size": 11},
            "tickmode": "auto",
            "title": param,
            "type": xtype,
        },
        "yaxis": {
            "anchor": "x",
            "autorange": True,
            "tickfont": {"size": 11},
            "tickmode": "auto",
            "title": metrics[0],
        },
    }

    fig = go.Figure(data=traces, layout=layout)
    return AxPlotConfig(data=fig, plot_type=AxPlotTypes.GENERIC)
Example #20
def _get_in_sample_arms(
    model: ModelBridge, metric_names: Set[str]
) -> Tuple[Dict[str, PlotInSampleArm], RawData, Dict[str, TParameterization]]:
    """Get in-sample arms from a model with observed and predicted values
    for specified metrics.

    Returns a PlotInSampleArm object in which repeated observations are merged
    with IVW, and a RawData object in which every observation is listed.

    Args:
        model: An instance of the model bridge.
        metric_names: Restrict predictions to these metrics. If None, uses all
            metrics in the model.

    Returns:
        A tuple containing

        - Map from arm name to PlotInSampleArm.
        - List of the data for each observation like::

            {'metric_name': 'likes', 'arm_name': '0_0', 'mean': 1., 'sem': 0.1}

        - Map from arm name to parameters
    """
    observations = model.get_training_data()
    # Calculate raw data
    raw_data = []
    cond_name_to_parameters = {}
    for obs in observations:
        cond_name_to_parameters[obs.arm_name] = obs.features.parameters
        for j, metric_name in enumerate(obs.data.metric_names):
            if metric_name in metric_names:
                raw_data.append({
                    "metric_name": metric_name,
                    "arm_name": obs.arm_name,
                    "mean": obs.data.means[j],
                    "sem": np.sqrt(obs.data.covariance[j, j]),
                })
    # Check that we have one ObservationFeatures per arm name since we
    # key by arm name.
    if len(cond_name_to_parameters) != len(observations):
        logger.error(
            "Have observations of arms with different features but same"
            " name. Arbitrary one will be plotted.")
    # Merge multiple measurements within each Observation with IVW to get
    # un-modeled prediction
    t = IVW(None, [], [])
    obs_data = t.transform_observation_data([obs.data for obs in observations],
                                            [])
    # Start filling in plot data
    in_sample_plot: Dict[str, PlotInSampleArm] = {}
    for i, obs in enumerate(observations):
        if obs.arm_name is None:
            raise ValueError("Observation must have arm name for plotting.")

        # Extract raw measurement
        obs_y = {}
        obs_se = {}
        # Use the IVW data, not obs.data
        for j, metric_name in enumerate(obs_data[i].metric_names):
            if metric_name in metric_names:
                obs_y[metric_name] = obs_data[i].means[j]
                obs_se[metric_name] = np.sqrt(obs_data[i].covariance[j, j])
        # Make a prediction.
        if model.training_in_design[i]:
            pred_y, pred_se = _predict_at_point(model, obs.features,
                                                metric_names)
        else:
            # Use raw data for out-of-design points
            pred_y = obs_y
            pred_se = obs_se
        in_sample_plot[obs.arm_name] = PlotInSampleArm(
            name=obs.arm_name,
            y=obs_y,
            se=obs_se,
            parameters=obs.features.parameters,
            y_hat=pred_y,
            se_hat=pred_se,
            context_stratum=None,
        )
    return in_sample_plot, raw_data, cond_name_to_parameters
Example #21
 def test_update(self, _mock_update, _mock_gen):
     exp = get_experiment_for_value()
     exp.optimization_config = get_optimization_config_no_constraints()
     ss = get_search_space_for_range_values()
     exp.search_space = ss
     modelbridge = ModelBridge(ss, None, [Log], exp)
     exp.new_trial(generator_run=modelbridge.gen(1))
     modelbridge._set_training_data(
         observations_from_data(
             data=Data(
                 pd.DataFrame([{
                     "arm_name": "0_0",
                     "metric_name": "m1",
                     "mean": 3.0,
                     "sem": 1.0,
                 }])),
             experiment=exp,
         ),
         ss,
     )
     exp.new_trial(generator_run=modelbridge.gen(1))
     modelbridge.update(
         data=Data(
             pd.DataFrame([{
                 "arm_name": "1_0",
                 "metric_name": "m1",
                 "mean": 5.0,
                 "sem": 0.0
             }])),
         experiment=exp,
     )
     exp.new_trial(generator_run=modelbridge.gen(1))
     # Trying to update with unrecognised metric should error.
     with self.assertRaisesRegex(ValueError, "Unrecognised metric"):
         modelbridge.update(
             data=Data(
                 pd.DataFrame([{
                     "arm_name": "1_0",
                     "metric_name": "m2",
                     "mean": 5.0,
                     "sem": 0.0,
                 }])),
             experiment=exp,
         )
Example #22
    def testModelBridge(self, mock_fit, mock_gen_arms,
                        mock_observations_from_data):
        # Test that on init transforms are stored and applied in the correct order
        transforms = [transform_1, transform_2]
        exp = get_experiment_for_value()
        ss = get_search_space_for_value()
        modelbridge = ModelBridge(ss, 0, transforms, exp, 0)
        self.assertEqual(list(modelbridge.transforms.keys()),
                         ["transform_1", "transform_2"])
        fit_args = mock_fit.mock_calls[0][2]
        self.assertTrue(
            fit_args["search_space"] == get_search_space_for_value(8.0))
        self.assertTrue(fit_args["observation_features"] == [])
        self.assertTrue(fit_args["observation_data"] == [])
        self.assertTrue(mock_observations_from_data.called)

        # Test prediction on out of design features.
        modelbridge._predict = mock.MagicMock(
            "ax.modelbridge.base.ModelBridge._predict",
            autospec=True,
            side_effect=ValueError("Out of Design"),
        )
        # This point is in design, and thus failures in predict are legitimate.
        with mock.patch.object(ModelBridge,
                               "model_space",
                               return_value=get_search_space_for_range_values):
            with self.assertRaises(ValueError):
                modelbridge.predict([get_observation2().features])

        # This point is out of design, and not in training data.
        with self.assertRaises(ValueError):
            modelbridge.predict([get_observation_status_quo0().features])

        # Now it's in the training data.
        with mock.patch.object(
                ModelBridge,
                "get_training_data",
                return_value=[get_observation_status_quo0()],
        ):
            # Return raw training value.
            self.assertEqual(
                modelbridge.predict([get_observation_status_quo0().features]),
                unwrap_observation_data([get_observation_status_quo0().data]),
            )

        # Test that transforms are applied correctly on predict
        modelbridge._predict = mock.MagicMock(
            "ax.modelbridge.base.ModelBridge._predict",
            autospec=True,
            return_value=[get_observation2trans().data],
        )
        modelbridge.predict([get_observation2().features])
        # Observation features sent to _predict are un-transformed afterwards
        modelbridge._predict.assert_called_with([get_observation2().features])

        # Check that _single_predict is equivalent here.
        modelbridge._single_predict([get_observation2().features])
        # Observation features sent to _predict are un-transformed afterwards
        modelbridge._predict.assert_called_with([get_observation2().features])

        # Test transforms applied on gen
        modelbridge._gen = mock.MagicMock(
            "ax.modelbridge.base.ModelBridge._gen",
            autospec=True,
            return_value=([get_observation1trans().features], [2], None, {}),
        )
        oc = OptimizationConfig(objective=Objective(metric=Metric(
            name="test_metric")))
        modelbridge._set_kwargs_to_save(model_key="TestModel",
                                        model_kwargs={},
                                        bridge_kwargs={})
        gr = modelbridge.gen(
            n=1,
            search_space=get_search_space_for_value(),
            optimization_config=oc,
            pending_observations={"a": [get_observation2().features]},
            fixed_features=ObservationFeatures({"x": 5}),
        )
        self.assertEqual(gr._model_key, "TestModel")
        modelbridge._gen.assert_called_with(
            n=1,
            search_space=SearchSpace(
                [FixedParameter("x", ParameterType.FLOAT, 8.0)]),
            optimization_config=oc,
            pending_observations={"a": [get_observation2trans().features]},
            fixed_features=ObservationFeatures({"x": 36}),
            model_gen_options=None,
        )
        mock_gen_arms.assert_called_with(
            arms_by_signature={},
            observation_features=[get_observation1().features])

        # Gen with no pending observations and no fixed features
        modelbridge.gen(n=1,
                        search_space=get_search_space_for_value(),
                        optimization_config=None)
        modelbridge._gen.assert_called_with(
            n=1,
            search_space=SearchSpace(
                [FixedParameter("x", ParameterType.FLOAT, 8.0)]),
            optimization_config=None,
            pending_observations={},
            fixed_features=ObservationFeatures({}),
            model_gen_options=None,
        )
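        # With pending_observations and fixed_features omitted, gen evidently
        # fills in an empty dict and an empty ObservationFeatures rather than
        # passing None, so the transform stack sees uniform inputs.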

        # Gen with a scalarized multi-metric objective config.
        oc2 = OptimizationConfig(objective=ScalarizedObjective(
            metrics=[Metric(name="test_metric"),
                     Metric(name="test_metric_2")]))
        modelbridge.gen(n=1,
                        search_space=get_search_space_for_value(),
                        optimization_config=oc2)
        modelbridge._gen.assert_called_with(
            n=1,
            search_space=SearchSpace(
                [FixedParameter("x", ParameterType.FLOAT, 8.0)]),
            optimization_config=oc2,
            pending_observations={},
            fixed_features=ObservationFeatures({}),
            model_gen_options=None,
        )
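        # ScalarizedObjective combines several metrics into a single linear
        # objective; with no explicit weights it presumably defaults to equal
        # weights, so the bridge can pass the config through to _gen just like
        # a single-metric objective.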

        # Test transforms applied on cross_validate
        modelbridge._cross_validate = mock.MagicMock(
            "ax.modelbridge.base.ModelBridge._cross_validate",
            autospec=True,
            return_value=[get_observation1trans().data],
        )
        cv_training_data = [get_observation2()]
        cv_test_points = [get_observation1().features]
        cv_predictions = modelbridge.cross_validate(
            cv_training_data=cv_training_data, cv_test_points=cv_test_points)
        modelbridge._cross_validate.assert_called_with(
            obs_feats=[get_observation2trans().features],
            obs_data=[get_observation2trans().data],
            cv_test_points=[get_observation1().features],  # untransformed after
        )
        self.assertTrue(cv_predictions == [get_observation1().data])
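        # cross_validate transforms the training data before handing it to
        # _cross_validate, while the test-point features are (per the comment
        # above) untransformed in place after the call, which is why
        # assert_called_with compares equal to the original features. The
        # predictions coming back (get_observation1trans().data) are likewise
        # untransformed into get_observation1().data.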

        # Test stored training data
        obs = modelbridge.get_training_data()
        self.assertTrue(obs == [get_observation1(), get_observation2()])
        self.assertEqual(modelbridge.metric_names, {"a", "b"})
        self.assertIsNone(modelbridge.status_quo)
        self.assertTrue(
            modelbridge.model_space == get_search_space_for_value())
        self.assertEqual(modelbridge.training_in_design, [False, False])
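        # training_in_design is all False here, presumably because both stored
        # training observations lie outside the fixed-value search space used
        # by this bridge.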

        with self.assertRaises(ValueError):
            modelbridge.training_in_design = [True, True, False]

        # Test feature_importances
        with self.assertRaises(NotImplementedError):
            modelbridge.feature_importances("a")
Example #23
0
    def testModelBridge(self, mock_fit, mock_gen_arms, mock_observations_from_data):
        # Test that on init transforms are stored and applied in the correct order
        transforms = [t1, t2]
        exp = get_experiment()
        modelbridge = ModelBridge(search_space_for_value(), 0, transforms, exp, 0)
        self.assertEqual(list(modelbridge.transforms.keys()), ["t1", "t2"])
        fit_args = mock_fit.mock_calls[0][2]
        self.assertTrue(fit_args["search_space"] == search_space_for_value(8.0))
        self.assertTrue(
            fit_args["observation_features"]
            == [observation1trans().features, observation2trans().features]
        )
        self.assertTrue(
            fit_args["observation_data"]
            == [observation1trans().data, observation2trans().data]
        )
        self.assertTrue(mock_observations_from_data.called)
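        # Transforms appear to be stored in an ordered dict keyed by class name
        # and applied in registration order (t1 then t2) to the search space and
        # training data at fit time; untransforming presumably walks the same
        # chain in reverse.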

        # Test that transforms are applied correctly on predict
        modelbridge._predict = mock.MagicMock(
            "ax.modelbridge.base.ModelBridge._predict",
            autospec=True,
            return_value=[observation2trans().data],
        )

        modelbridge.predict([observation2().features])
        # Observation features sent to _predict are un-transformed afterwards
        modelbridge._predict.assert_called_with([observation2().features])

        # Test transforms applied on gen
        modelbridge._gen = mock.MagicMock(
            "ax.modelbridge.base.ModelBridge._gen",
            autospec=True,
            return_value=([observation1trans().features], [2], None),
        )
        oc = OptimizationConfig(objective=Objective(metric=Metric(name="test_metric")))
        modelbridge._set_kwargs_to_save(
            model_key="TestModel", model_kwargs={}, bridge_kwargs={}
        )
        gr = modelbridge.gen(
            n=1,
            search_space=search_space_for_value(),
            optimization_config=oc,
            pending_observations={"a": [observation2().features]},
            fixed_features=ObservationFeatures({"x": 5}),
        )
        self.assertEqual(gr._model_key, "TestModel")
        modelbridge._gen.assert_called_with(
            n=1,
            search_space=SearchSpace([FixedParameter("x", ParameterType.FLOAT, 8.0)]),
            optimization_config=oc,
            pending_observations={"a": [observation2trans().features]},
            fixed_features=ObservationFeatures({"x": 36}),
            model_gen_options=None,
        )
        mock_gen_arms.assert_called_with(
            arms_by_signature={}, observation_features=[observation1().features]
        )

        # Gen with no pending observations and no fixed features
        modelbridge.gen(
            n=1, search_space=search_space_for_value(), optimization_config=None
        )
        modelbridge._gen.assert_called_with(
            n=1,
            search_space=SearchSpace([FixedParameter("x", ParameterType.FLOAT, 8.0)]),
            optimization_config=None,
            pending_observations={},
            fixed_features=ObservationFeatures({}),
            model_gen_options=None,
        )

        # Gen with a scalarized multi-metric objective config.
        oc2 = OptimizationConfig(
            objective=ScalarizedObjective(
                metrics=[Metric(name="test_metric"), Metric(name="test_metric_2")]
            )
        )
        modelbridge.gen(
            n=1, search_space=search_space_for_value(), optimization_config=oc2
        )
        modelbridge._gen.assert_called_with(
            n=1,
            search_space=SearchSpace([FixedParameter("x", ParameterType.FLOAT, 8.0)]),
            optimization_config=oc2,
            pending_observations={},
            fixed_features=ObservationFeatures({}),
            model_gen_options=None,
        )

        # Test transforms applied on cross_validate
        modelbridge._cross_validate = mock.MagicMock(
            "ax.modelbridge.base.ModelBridge._cross_validate",
            autospec=True,
            return_value=[observation1trans().data],
        )
        cv_training_data = [observation2()]
        cv_test_points = [observation1().features]
        cv_predictions = modelbridge.cross_validate(
            cv_training_data=cv_training_data, cv_test_points=cv_test_points
        )
        modelbridge._cross_validate.assert_called_with(
            obs_feats=[observation2trans().features],
            obs_data=[observation2trans().data],
            cv_test_points=[observation1().features],  # untransformed after
        )
        self.assertTrue(cv_predictions == [observation1().data])

        # Test stored training data
        obs = modelbridge.get_training_data()
        self.assertTrue(obs == [observation1(), observation2()])
        self.assertEqual(modelbridge.metric_names, {"a", "b"})
        self.assertIsNone(modelbridge.status_quo)
        self.assertTrue(modelbridge.model_space == search_space_for_value())
        self.assertEqual(modelbridge.training_in_design, [True, True])

        modelbridge.training_in_design = [True, False]
        with self.assertRaises(ValueError):
            modelbridge.training_in_design = [True, True, False]

        ood_obs = modelbridge.out_of_design_data()
        self.assertTrue(ood_obs == unwrap_observation_data([observation2().data]))
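        # out_of_design_data appears to gather the raw data of the training
        # observations flagged out-of-design (here observation2, per the
        # training_in_design = [True, False] assignment above) and to return it
        # in the same unwrapped (means, covariances) format as predict.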