def _predict_at_point( model: ModelBridge, obsf: ObservationFeatures, metric_names: Set[str]) -> Tuple[Dict[str, float], Dict[str, float]]: """Make a prediction at a point. Returns mean and standard deviation in format expected by plotting. Args: model: ModelBridge obsf: ObservationFeatures for which to predict metric_names: Limit predictions to these metrics. Returns: A tuple containing - Map from metric name to prediction. - Map from metric name to standard error. """ y_hat = {} se_hat = {} f_pred, cov_pred = model.predict([obsf]) for metric_name in f_pred: if metric_name in metric_names: y_hat[metric_name] = f_pred[metric_name][0] se_hat[metric_name] = np.sqrt( cov_pred[metric_name][metric_name][0]) return y_hat, se_hat
def _get_contour_predictions(
    model: ModelBridge,
    x_param_name: str,
    y_param_name: str,
    metric: str,
    generator_runs_dict: TNullableGeneratorRunsDict,
    density: int,
    slice_values: Optional[Dict[str, Any]] = None,
    fixed_features: Optional[ObservationFeatures] = None,
) -> ContourPredictions:
    """Compute model predictions for `metric` over a 2-d parameter grid.

    slice_values is a dictionary {param_name: value} for the parameters that
    are being sliced on.
    """
    x_param = get_range_parameter(model, x_param_name)
    y_param = get_range_parameter(model, y_param_name)

    plot_data, _, _ = get_plot_data(
        model, generator_runs_dict or {}, {metric}, fixed_features=fixed_features
    )

    grid_x = get_grid_for_parameter(x_param, density)
    grid_y = get_grid_for_parameter(y_param, density)
    scales = {"x": x_param.log_scale, "y": y_param.log_scale}

    mesh_x, mesh_y = np.meshgrid(grid_x, grid_y)
    flat_x = mesh_x.flatten()
    flat_y = mesh_y.flatten()

    # `fixed_features` takes precedence over `slice_values` when both are given.
    if fixed_features is not None:
        slice_values = fixed_features.parameters
    else:
        fixed_features = ObservationFeatures(parameters={})

    fixed_values = get_fixed_values(model, slice_values)

    # One ObservationFeatures per grid point: fixed values plus the two
    # swept parameters.
    param_grid_obsf = []
    for x_val, y_val in zip(flat_x, flat_y):
        point = deepcopy(fixed_features)
        point.parameters = fixed_values.copy()
        point.parameters[x_param_name] = x_val
        point.parameters[y_param_name] = y_val
        param_grid_obsf.append(point)

    mu, cov = model.predict(param_grid_obsf)
    f_plt = mu[metric]
    sd_plt = np.sqrt(cov[metric][metric])
    # pyre-fixme[7]: Expected `Tuple[PlotData, np.ndarray, np.ndarray, np.ndarray,
    # np.ndarray, Dict[str, bool]]` but got `Tuple[PlotData, typing.List[float],
    # typing.Any, np.ndarray, np.ndarray, Dict[str, bool]]`.
    return plot_data, f_plt, sd_plt, grid_x, grid_y, scales
def _get_contour_predictions(
    model: ModelBridge,
    x_param_name: str,
    y_param_name: str,
    metric: str,
    generator_runs_dict: TNullableGeneratorRunsDict,
    density: int,
    slice_values: Optional[Dict[str, Any]] = None,
) -> ContourPredictions:
    """Compute model predictions for `metric` over a 2-d parameter grid.

    slice_values is a dictionary {param_name: value} for the parameters that
    are being sliced on.
    """
    x_param = get_range_parameter(model, x_param_name)
    y_param = get_range_parameter(model, y_param_name)

    plot_data, _, _ = get_plot_data(model, generator_runs_dict or {}, {metric})

    grid_x = get_grid_for_parameter(x_param, density)
    grid_y = get_grid_for_parameter(y_param, density)
    scales = {"x": x_param.log_scale, "y": y_param.log_scale}

    mesh_x, mesh_y = np.meshgrid(grid_x, grid_y)
    flat_x = mesh_x.flatten()
    flat_y = mesh_y.flatten()

    fixed_values = get_fixed_values(model, slice_values)

    # One ObservationFeatures per grid point: fixed values overridden by the
    # two swept parameters.
    param_grid_obsf = [
        ObservationFeatures({**fixed_values, x_param_name: xv, y_param_name: yv})
        for xv, yv in zip(flat_x, flat_y)
    ]

    mu, cov = model.predict(param_grid_obsf)
    f_plt = mu[metric]
    sd_plt = np.sqrt(cov[metric][metric])
    return plot_data, f_plt, sd_plt, grid_x, grid_y, scales
def testModelBridge(self, mock_fit, mock_gen_arms, mock_observations_from_data):
    """End-to-end exercise of ModelBridge with the model internals mocked out:
    transform ordering on init, predict on in/out-of-design points, gen,
    cross_validate, and the training-data accessors.
    """
    # Test that on init transforms are stored and applied in the correct order
    transforms = [transform_1, transform_2]
    exp = get_experiment_for_value()
    ss = get_search_space_for_value()
    modelbridge = ModelBridge(ss, 0, transforms, exp, 0)
    self.assertEqual(
        list(modelbridge.transforms.keys()), ["transform_1", "transform_2"]
    )
    # The underlying model's fit() should have received transformed inputs.
    fit_args = mock_fit.mock_calls[0][2]
    self.assertTrue(fit_args["search_space"] == get_search_space_for_value(8.0))
    self.assertTrue(fit_args["observation_features"] == [])
    self.assertTrue(fit_args["observation_data"] == [])
    self.assertTrue(mock_observations_from_data.called)
    # Test prediction on out of design features.
    modelbridge._predict = mock.MagicMock(
        "ax.modelbridge.base.ModelBridge._predict",
        autospec=True,
        side_effect=ValueError("Out of Design"),
    )
    # This point is in design, and thus failures in predict are legitimate.
    # NOTE(review): `return_value` here is the helper function object itself,
    # not its result — presumably intentional for this property patch; confirm.
    with mock.patch.object(
        ModelBridge, "model_space", return_value=get_search_space_for_range_values
    ):
        with self.assertRaises(ValueError):
            modelbridge.predict([get_observation2().features])
    # This point is out of design, and not in training data.
    with self.assertRaises(ValueError):
        modelbridge.predict([get_observation_status_quo0().features])
    # Now it's in the training data.
    with mock.patch.object(
        ModelBridge,
        "get_training_data",
        return_value=[get_observation_status_quo0()],
    ):
        # Return raw training value.
        self.assertEqual(
            modelbridge.predict([get_observation_status_quo0().features]),
            unwrap_observation_data([get_observation_status_quo0().data]),
        )
    # Test that transforms are applied correctly on predict
    modelbridge._predict = mock.MagicMock(
        "ax.modelbridge.base.ModelBridge._predict",
        autospec=True,
        return_value=[get_observation2trans().data],
    )
    modelbridge.predict([get_observation2().features])
    # Observation features sent to _predict are un-transformed afterwards
    modelbridge._predict.assert_called_with([get_observation2().features])
    # Check that _single_predict is equivalent here.
    modelbridge._single_predict([get_observation2().features])
    # Observation features sent to _predict are un-transformed afterwards
    modelbridge._predict.assert_called_with([get_observation2().features])
    # Test transforms applied on gen
    modelbridge._gen = mock.MagicMock(
        "ax.modelbridge.base.ModelBridge._gen",
        autospec=True,
        return_value=([get_observation1trans().features], [2], None, {}),
    )
    oc = OptimizationConfig(objective=Objective(metric=Metric(name="test_metric")))
    modelbridge._set_kwargs_to_save(
        model_key="TestModel", model_kwargs={}, bridge_kwargs={}
    )
    gr = modelbridge.gen(
        n=1,
        search_space=get_search_space_for_value(),
        optimization_config=oc,
        pending_observations={"a": [get_observation2().features]},
        fixed_features=ObservationFeatures({"x": 5}),
    )
    self.assertEqual(gr._model_key, "TestModel")
    # _gen should have received the transformed search space and features.
    modelbridge._gen.assert_called_with(
        n=1,
        search_space=SearchSpace([FixedParameter("x", ParameterType.FLOAT, 8.0)]),
        optimization_config=oc,
        pending_observations={"a": [get_observation2trans().features]},
        fixed_features=ObservationFeatures({"x": 36}),
        model_gen_options=None,
    )
    mock_gen_arms.assert_called_with(
        arms_by_signature={}, observation_features=[get_observation1().features]
    )
    # Gen with no pending observations and no fixed features
    modelbridge.gen(
        n=1, search_space=get_search_space_for_value(), optimization_config=None
    )
    modelbridge._gen.assert_called_with(
        n=1,
        search_space=SearchSpace([FixedParameter("x", ParameterType.FLOAT, 8.0)]),
        optimization_config=None,
        pending_observations={},
        fixed_features=ObservationFeatures({}),
        model_gen_options=None,
    )
    # Gen with multi-objective optimization config.
    oc2 = OptimizationConfig(
        objective=ScalarizedObjective(
            metrics=[Metric(name="test_metric"), Metric(name="test_metric_2")]
        )
    )
    modelbridge.gen(
        n=1, search_space=get_search_space_for_value(), optimization_config=oc2
    )
    modelbridge._gen.assert_called_with(
        n=1,
        search_space=SearchSpace([FixedParameter("x", ParameterType.FLOAT, 8.0)]),
        optimization_config=oc2,
        pending_observations={},
        fixed_features=ObservationFeatures({}),
        model_gen_options=None,
    )
    # Test transforms applied on cross_validate
    modelbridge._cross_validate = mock.MagicMock(
        "ax.modelbridge.base.ModelBridge._cross_validate",
        autospec=True,
        return_value=[get_observation1trans().data],
    )
    cv_training_data = [get_observation2()]
    cv_test_points = [get_observation1().features]
    cv_predictions = modelbridge.cross_validate(
        cv_training_data=cv_training_data, cv_test_points=cv_test_points
    )
    modelbridge._cross_validate.assert_called_with(
        obs_feats=[get_observation2trans().features],
        obs_data=[get_observation2trans().data],
        cv_test_points=[get_observation1().features],  # untransformed after
    )
    self.assertTrue(cv_predictions == [get_observation1().data])
    # Test stored training data
    obs = modelbridge.get_training_data()
    self.assertTrue(obs == [get_observation1(), get_observation2()])
    self.assertEqual(modelbridge.metric_names, {"a", "b"})
    self.assertIsNone(modelbridge.status_quo)
    self.assertTrue(modelbridge.model_space == get_search_space_for_value())
    self.assertEqual(modelbridge.training_in_design, [False, False])
    with self.assertRaises(ValueError):
        modelbridge.training_in_design = [True, True, False]
    # NOTE(review): the block below duplicates the assertion above verbatim —
    # possibly an accidental copy; confirm intent.
    with self.assertRaises(ValueError):
        modelbridge.training_in_design = [True, True, False]
    # Test feature_importances
    with self.assertRaises(NotImplementedError):
        modelbridge.feature_importances("a")
def _get_slice_predictions(
    model: ModelBridge,
    param_name: str,
    metric_name: str,
    generator_runs_dict: TNullableGeneratorRunsDict = None,
    relative: bool = False,
    density: int = 50,
    slice_values: Optional[Dict[str, Any]] = None,
    fixed_features: Optional[ObservationFeatures] = None,
    trial_index: Optional[int] = None,
) -> SlicePredictions:
    """Compute slice prediction configuration values for a single metric name.

    Args:
        model: ModelBridge that contains model for predictions
        param_name: Name of parameter that will be sliced
        metric_name: Name of metric to plot
        generator_runs_dict: A dictionary {name: generator run} of generator runs
            whose arms will be plotted, if they lie in the slice.
        relative: Predictions relative to status quo
        density: Number of points along slice to evaluate predictions.
        slice_values: A dictionary {name: val} for the fixed values of the other
            parameters. If not provided, then the status quo values will be used
            if there is a status quo, otherwise the mean of numeric parameters or
            the mode of choice parameters. Ignored if fixed_features is specified.
        fixed_features: An ObservationFeatures object containing the values of
            features (including non-parameter features like context) to be set in
            the slice.

    Returns:
        Configuration values for AxPlotConfig.
    """
    if generator_runs_dict is None:
        generator_runs_dict = {}

    parameter = get_range_parameter(model, param_name)
    grid = get_grid_for_parameter(parameter, density)

    plot_data, raw_data, cond_name_to_parameters = get_plot_data(
        model=model,
        generator_runs_dict=generator_runs_dict,
        metric_names={metric_name},
        fixed_features=fixed_features,
    )

    # `fixed_features` takes precedence over `slice_values` when both are given.
    if fixed_features is not None:
        slice_values = fixed_features.parameters
    else:
        fixed_features = ObservationFeatures(parameters={})
    fixed_values = get_fixed_values(model, slice_values, trial_index)

    # Build one prediction point per grid value, varying only `param_name`.
    prediction_features = []
    for grid_value in grid:
        point = deepcopy(fixed_features)
        point.parameters = fixed_values.copy()
        point.parameters[param_name] = grid_value
        prediction_features.append(point)

    f, cov = model.predict(prediction_features)
    f_plt = f[metric_name]
    sd_plt = np.sqrt(cov[metric_name][metric_name])
    # pyre-fixme[7]: Expected `Tuple[PlotData, List[Dict[str, Union[float, str]]],
    # List[float], np.ndarray, np.ndarray, str, str, bool, Dict[str, Union[None, bool,
    # float, int, str]], np.ndarray, bool]` but got `Tuple[PlotData, Dict[str,
    # Dict[str, Union[None, bool, float, int, str]]], List[float], List[Dict[str,
    # Union[float, str]]], np.ndarray, str, str, bool, Dict[str, Union[None, bool,
    # float, int, str]], typing.Any, bool]`.
    return (
        plot_data,
        cond_name_to_parameters,
        f_plt,
        raw_data,
        grid,
        metric_name,
        param_name,
        relative,
        fixed_values,
        sd_plt,
        parameter.log_scale,
    )
def interact_slice_plotly(
    model: ModelBridge,
    generator_runs_dict: TNullableGeneratorRunsDict = None,
    relative: bool = False,
    density: int = 50,
    slice_values: Optional[Dict[str, Any]] = None,
    fixed_features: Optional[ObservationFeatures] = None,
    trial_index: Optional[int] = None,
) -> go.Figure:
    """Create interactive plot with predictions for a 1-d slice of the parameter
    space.

    Args:
        model: ModelBridge that contains model for predictions
        generator_runs_dict: A dictionary {name: generator run} of generator runs
            whose arms will be plotted, if they lie in the slice.
        relative: Predictions relative to status quo
        density: Number of points along slice to evaluate predictions.
        slice_values: A dictionary {name: val} for the fixed values of the other
            parameters. If not provided, then the status quo values will be used
            if there is a status quo, otherwise the mean of numeric parameters or
            the mode of choice parameters. Ignored if fixed_features is specified.
        fixed_features: An ObservationFeatures object containing the values of
            features (including non-parameter features like context) to be set in
            the slice.

    Returns:
        go.Figure: interactive plot of objective vs. parameter
    """
    if generator_runs_dict is None:
        generator_runs_dict = {}

    metric_names = list(model.metric_names)

    # Populate `pbuttons`, which allows the user to select 1D slices of parameter
    # space with the chosen parameter on the x-axis.
    range_parameters = get_range_parameters(model)
    param_names = [parameter.name for parameter in range_parameters]
    pbuttons = []
    init_traces = []
    xaxis_init_format = {}
    first_param_bool = True
    # Remember whether the caller passed fixed_features; `fixed_features` itself
    # gets reassigned inside the loop, so the flag is captured up front.
    should_replace_slice_values = fixed_features is not None
    for param_name in param_names:
        pbutton_data_args = {"x": [], "y": [], "error_y": []}
        parameter = get_range_parameter(model, param_name)
        grid = get_grid_for_parameter(parameter, density)

        # Per-metric results for this parameter's slice.
        plot_data_dict = {}
        raw_data_dict = {}
        sd_plt_dict: Dict[str, Dict[str, np.ndarray]] = {}

        cond_name_to_parameters_dict = {}
        is_log_dict: Dict[str, bool] = {}

        if should_replace_slice_values:
            slice_values = not_none(fixed_features).parameters
        else:
            fixed_features = ObservationFeatures(parameters={})
        fixed_values = get_fixed_values(model, slice_values, trial_index)
        prediction_features = []
        for x in grid:
            predf = deepcopy(not_none(fixed_features))
            predf.parameters = fixed_values.copy()
            predf.parameters[param_name] = x
            prediction_features.append(predf)

        # Predict jointly over all metrics at the grid points.
        f, cov = model.predict(prediction_features)

        for metric_name in metric_names:
            pd, cntp, f_plt, rd, _, _, _, _, _, sd_plt, ls = _get_slice_predictions(
                model=model,
                param_name=param_name,
                metric_name=metric_name,
                generator_runs_dict=generator_runs_dict,
                relative=relative,
                density=density,
                slice_values=slice_values,
                fixed_features=fixed_features,
            )
            plot_data_dict[metric_name] = pd
            raw_data_dict[metric_name] = rd
            cond_name_to_parameters_dict[metric_name] = cntp
            # NOTE(review): sd is recomputed from the direct model.predict above
            # rather than taking _get_slice_predictions' sd_plt — presumably
            # equivalent; confirm.
            sd_plt_dict[metric_name] = np.sqrt(cov[metric_name][metric_name])
            is_log_dict[metric_name] = ls

        config = {
            "arm_data": plot_data_dict,
            "arm_name_to_parameters": cond_name_to_parameters_dict,
            "f": f,
            "fit_data": raw_data_dict,
            "grid": grid,
            "metrics": metric_names,
            "param": param_name,
            "rel": relative,
            "setx": fixed_values,
            "sd": sd_plt_dict,
            "is_log": is_log_dict,
        }
        # Round-trip through AxPlotConfig to normalize values for plotting.
        config = AxPlotConfig(config, plot_type=AxPlotTypes.GENERIC).data

        arm_data = config["arm_data"]
        arm_name_to_parameters = config["arm_name_to_parameters"]
        f = config["f"]
        fit_data = config["fit_data"]
        grid = config["grid"]
        metrics = config["metrics"]
        param = config["param"]
        rel = config["rel"]
        setx = config["setx"]
        sd = config["sd"]
        is_log = config["is_log"]

        # layout
        xrange = axis_range(grid, is_log[metrics[0]])
        # NOTE(review): uses is_log_dict here but is_log (the round-tripped
        # copy) for xrange above — both should hold the same flags; confirm.
        xtype = "log" if is_log_dict[metrics[0]] else "linear"

        for i, metric in enumerate(metrics):
            cur_visible = i == 0
            metric = metrics[i]
            traces = slice_config_to_trace(
                arm_data[metric],
                arm_name_to_parameters[metric],
                f[metric],
                fit_data[metric],
                grid,
                metric,
                param,
                rel,
                setx,
                sd[metric],
                is_log[metric],
                cur_visible,
            )
            pbutton_data_args["x"] += [trace["x"] for trace in traces]
            pbutton_data_args["y"] += [trace["y"] for trace in traces]
            pbutton_data_args["error_y"] += [
                {
                    "type": "data",
                    "array": trace["error_y"]["array"],
                    "visible": True,
                    "color": "black",
                }
                if "error_y" in trace and "array" in trace["error_y"]
                else []
                for trace in traces
            ]
            if first_param_bool:
                init_traces.extend(traces)
        pbutton_args = [
            pbutton_data_args,
            {
                "xaxis.title": param_name,
                "xaxis.range": xrange,
                "xaxis.type": xtype,
            },
        ]

        pbuttons.append({"args": pbutton_args, "label": param_name, "method": "update"})
        if first_param_bool:
            # Initialize the x-axis from the first parameter's slice.
            xaxis_init_format = {
                "anchor": "y",
                "autorange": False,
                "exponentformat": "e",
                "range": xrange,
                "tickfont": {"size": 11},
                "tickmode": "auto",
                "title": param_name,
                "type": xtype,
            }
            first_param_bool = False

    # Populate mbuttons, which allows the user to select which metric to plot
    mbuttons = []
    for i, metric in enumerate(metrics):
        trace_cnt = 3 + len(arm_data[metric]["out_of_sample"].keys())
        visible = [False] * (len(metrics) * trace_cnt)
        for j in range(i * trace_cnt, (i + 1) * trace_cnt):
            visible[j] = True
        mbuttons.append(
            {
                "method": "update",
                "args": [{"visible": visible}, {"yaxis.title": metric}],
                "label": metric,
            }
        )

    layout = {
        "title": "Predictions for a 1-d slice of the parameter space",
        "annotations": [
            {
                "showarrow": False,
                "text": "Choose metric:",
                "x": 0.225,
                "xanchor": "right",
                "xref": "paper",
                "y": -0.455,
                "yanchor": "bottom",
                "yref": "paper",
            },
            {
                "showarrow": False,
                "text": "Choose parameter:",
                "x": 0.225,
                "xanchor": "right",
                "xref": "paper",
                "y": -0.305,
                "yanchor": "bottom",
                "yref": "paper",
            },
        ],
        "updatemenus": [
            {
                "y": -0.35,
                "x": 0.25,
                "xanchor": "left",
                "yanchor": "top",
                "buttons": mbuttons,
                "direction": "up",
            },
            {
                "y": -0.2,
                "x": 0.25,
                "xanchor": "left",
                "yanchor": "top",
                "buttons": pbuttons,
                "direction": "up",
            },
        ],
        "hovermode": "closest",
        "xaxis": xaxis_init_format,
        "yaxis": {
            "anchor": "x",
            "autorange": True,
            "tickfont": {"size": 11},
            "tickmode": "auto",
            "title": metrics[0],
        },
    }

    return go.Figure(data=init_traces, layout=layout)
def plot_slice(
    model: ModelBridge,
    param_name: str,
    metric_name: str,
    generator_runs_dict: TNullableGeneratorRunsDict = None,
    relative: bool = False,
    density: int = 50,
    slice_values: Optional[Dict[str, Any]] = None,
) -> AxPlotConfig:
    """Plot predictions for a 1-d slice of the parameter space.

    Args:
        model: ModelBridge that contains model for predictions
        param_name: Name of parameter that will be sliced
        metric_name: Name of metric to plot
        generator_runs_dict: A dictionary {name: generator run} of generator runs
            whose arms will be plotted, if they lie in the slice.
        relative: Predictions relative to status quo
        density: Number of points along slice to evaluate predictions.
        slice_values: A dictionary {name: val} for the fixed values of the other
            parameters. If not provided, then the status quo values will be used
            if there is a status quo, otherwise the mean of numeric parameters or
            the mode of choice parameters.
    """
    if generator_runs_dict is None:
        generator_runs_dict = {}

    parameter = get_range_parameter(model, param_name)
    grid = get_grid_for_parameter(parameter, density)

    plot_data, raw_data, cond_name_to_parameters = get_plot_data(
        model=model,
        generator_runs_dict=generator_runs_dict,
        metric_names={metric_name},
    )

    fixed_values = get_fixed_values(model, slice_values)
    # Here we assume context is None: each point carries only parameter values.
    prediction_features = [
        ObservationFeatures(parameters={**fixed_values, param_name: x}) for x in grid
    ]

    f, cov = model.predict(prediction_features)
    f_plt = f[metric_name]
    sd_plt = np.sqrt(cov[metric_name][metric_name])

    config = {
        "arm_data": plot_data,
        "arm_name_to_parameters": cond_name_to_parameters,
        "f": f_plt,
        "fit_data": raw_data,
        "grid": grid,
        "metric": metric_name,
        "param": param_name,
        "rel": relative,
        "setx": fixed_values,
        "sd": sd_plt,
        "is_log": parameter.log_scale,
    }
    return AxPlotConfig(config, plot_type=AxPlotTypes.SLICE)
def testModelBridge(self, mock_fit, mock_gen_arms, mock_observations_from_data):
    """Exercise ModelBridge with mocked internals: transform ordering on init,
    predict, gen (single- and multi-objective), cross_validate, and the
    training-data / out-of-design accessors.
    """
    # Test that on init transforms are stored and applied in the correct order
    transforms = [t1, t2]
    exp = get_experiment()
    modelbridge = ModelBridge(search_space_for_value(), 0, transforms, exp, 0)
    self.assertEqual(list(modelbridge.transforms.keys()), ["t1", "t2"])
    # The underlying model's fit() should have received transformed inputs.
    fit_args = mock_fit.mock_calls[0][2]
    self.assertTrue(fit_args["search_space"] == search_space_for_value(8.0))
    self.assertTrue(
        fit_args["observation_features"]
        == [observation1trans().features, observation2trans().features]
    )
    self.assertTrue(
        fit_args["observation_data"]
        == [observation1trans().data, observation2trans().data]
    )
    self.assertTrue(mock_observations_from_data.called)
    # Test that transforms are applied correctly on predict
    modelbridge._predict = mock.MagicMock(
        "ax.modelbridge.base.ModelBridge._predict",
        autospec=True,
        return_value=[observation2trans().data],
    )
    modelbridge.predict([observation2().features])
    # Observation features sent to _predict are un-transformed afterwards
    modelbridge._predict.assert_called_with([observation2().features])
    # Test transforms applied on gen
    modelbridge._gen = mock.MagicMock(
        "ax.modelbridge.base.ModelBridge._gen",
        autospec=True,
        return_value=([observation1trans().features], [2], None),
    )
    oc = OptimizationConfig(objective=Objective(metric=Metric(name="test_metric")))
    modelbridge._set_kwargs_to_save(
        model_key="TestModel", model_kwargs={}, bridge_kwargs={}
    )
    gr = modelbridge.gen(
        n=1,
        search_space=search_space_for_value(),
        optimization_config=oc,
        pending_observations={"a": [observation2().features]},
        fixed_features=ObservationFeatures({"x": 5}),
    )
    self.assertEqual(gr._model_key, "TestModel")
    # _gen should have received the transformed search space and features.
    modelbridge._gen.assert_called_with(
        n=1,
        search_space=SearchSpace([FixedParameter("x", ParameterType.FLOAT, 8.0)]),
        optimization_config=oc,
        pending_observations={"a": [observation2trans().features]},
        fixed_features=ObservationFeatures({"x": 36}),
        model_gen_options=None,
    )
    mock_gen_arms.assert_called_with(
        arms_by_signature={}, observation_features=[observation1().features]
    )
    # Gen with no pending observations and no fixed features
    modelbridge.gen(
        n=1, search_space=search_space_for_value(), optimization_config=None
    )
    modelbridge._gen.assert_called_with(
        n=1,
        search_space=SearchSpace([FixedParameter("x", ParameterType.FLOAT, 8.0)]),
        optimization_config=None,
        pending_observations={},
        fixed_features=ObservationFeatures({}),
        model_gen_options=None,
    )
    # Gen with multi-objective optimization config.
    oc2 = OptimizationConfig(
        objective=ScalarizedObjective(
            metrics=[Metric(name="test_metric"), Metric(name="test_metric_2")]
        )
    )
    modelbridge.gen(
        n=1, search_space=search_space_for_value(), optimization_config=oc2
    )
    modelbridge._gen.assert_called_with(
        n=1,
        search_space=SearchSpace([FixedParameter("x", ParameterType.FLOAT, 8.0)]),
        optimization_config=oc2,
        pending_observations={},
        fixed_features=ObservationFeatures({}),
        model_gen_options=None,
    )
    # Test transforms applied on cross_validate
    modelbridge._cross_validate = mock.MagicMock(
        "ax.modelbridge.base.ModelBridge._cross_validate",
        autospec=True,
        return_value=[observation1trans().data],
    )
    cv_training_data = [observation2()]
    cv_test_points = [observation1().features]
    cv_predictions = modelbridge.cross_validate(
        cv_training_data=cv_training_data, cv_test_points=cv_test_points
    )
    modelbridge._cross_validate.assert_called_with(
        obs_feats=[observation2trans().features],
        obs_data=[observation2trans().data],
        cv_test_points=[observation1().features],  # untransformed after
    )
    self.assertTrue(cv_predictions == [observation1().data])
    # Test stored training data
    obs = modelbridge.get_training_data()
    self.assertTrue(obs == [observation1(), observation2()])
    self.assertEqual(modelbridge.metric_names, {"a", "b"})
    self.assertIsNone(modelbridge.status_quo)
    self.assertTrue(modelbridge.model_space == search_space_for_value())
    self.assertEqual(modelbridge.training_in_design, [True, True])
    # Setting a correctly-sized list succeeds; a wrong-sized one raises.
    modelbridge.training_in_design = [True, False]
    with self.assertRaises(ValueError):
        modelbridge.training_in_design = [True, True, False]
    # With the second point marked out-of-design, it is surfaced here.
    ood_obs = modelbridge.out_of_design_data()
    self.assertTrue(ood_obs == unwrap_observation_data([observation2().data]))
def interact_slice(
    model: ModelBridge,
    param_name: str,
    metric_name: str = "",
    generator_runs_dict: TNullableGeneratorRunsDict = None,
    relative: bool = False,
    density: int = 50,
    slice_values: Optional[Dict[str, Any]] = None,
    fixed_features: Optional[ObservationFeatures] = None,
) -> AxPlotConfig:
    """Create interactive plot with predictions for a 1-d slice of the parameter
    space.

    Args:
        model: ModelBridge that contains model for predictions
        param_name: Name of parameter that will be sliced
        metric_name: Name of metric to plot
        generator_runs_dict: A dictionary {name: generator run} of generator runs
            whose arms will be plotted, if they lie in the slice.
        relative: Predictions relative to status quo
        density: Number of points along slice to evaluate predictions.
        slice_values: A dictionary {name: val} for the fixed values of the other
            parameters. If not provided, then the status quo values will be used
            if there is a status quo, otherwise the mean of numeric parameters or
            the mode of choice parameters. Ignored if fixed_features is specified.
        fixed_features: An ObservationFeatures object containing the values of
            features (including non-parameter features like context) to be set in
            the slice.

    Returns:
        AxPlotConfig wrapping an interactive figure with a per-metric selector.
    """
    if generator_runs_dict is None:
        generator_runs_dict = {}

    metric_names = list(model.metric_names)

    parameter = get_range_parameter(model, param_name)
    grid = get_grid_for_parameter(parameter, density)

    # Per-metric results for this parameter's slice.
    plot_data_dict = {}
    raw_data_dict = {}
    sd_plt_dict: Dict[str, Dict[str, np.ndarray]] = {}

    cond_name_to_parameters_dict = {}
    is_log_dict: Dict[str, bool] = {}

    # `fixed_features` takes precedence over `slice_values` when both are given.
    if fixed_features is not None:
        slice_values = fixed_features.parameters
    else:
        fixed_features = ObservationFeatures(parameters={})
    fixed_values = get_fixed_values(model, slice_values)
    prediction_features = []
    for x in grid:
        predf = deepcopy(fixed_features)
        predf.parameters = fixed_values.copy()
        predf.parameters[param_name] = x
        prediction_features.append(predf)
    # Predict jointly over all metrics at the grid points.
    f, cov = model.predict(prediction_features)

    # NOTE(review): `metric_name` the parameter is shadowed by this loop
    # variable; the parameter value itself is never used below.
    for metric_name in metric_names:
        pd, cntp, f_plt, rd, _, _, _, _, _, sd_plt, ls = _get_slice_predictions(
            model=model,
            param_name=param_name,
            metric_name=metric_name,
            generator_runs_dict=generator_runs_dict,
            relative=relative,
            density=density,
            slice_values=slice_values,
            fixed_features=fixed_features,
        )
        plot_data_dict[metric_name] = pd
        raw_data_dict[metric_name] = rd
        cond_name_to_parameters_dict[metric_name] = cntp
        # NOTE(review): sd is recomputed from the direct model.predict above
        # rather than taking _get_slice_predictions' sd_plt — presumably
        # equivalent; confirm.
        sd_plt_dict[metric_name] = np.sqrt(cov[metric_name][metric_name])
        is_log_dict[metric_name] = ls

    config = {
        "arm_data": plot_data_dict,
        "arm_name_to_parameters": cond_name_to_parameters_dict,
        "f": f,
        "fit_data": raw_data_dict,
        "grid": grid,
        "metrics": metric_names,
        "param": param_name,
        "rel": relative,
        "setx": fixed_values,
        "sd": sd_plt_dict,
        "is_log": is_log_dict,
    }
    # Round-trip through AxPlotConfig to normalize values for plotting.
    config = AxPlotConfig(config, plot_type=AxPlotTypes.GENERIC).data

    arm_data = config["arm_data"]
    arm_name_to_parameters = config["arm_name_to_parameters"]
    f = config["f"]
    fit_data = config["fit_data"]
    grid = config["grid"]
    metrics = config["metrics"]
    param = config["param"]
    rel = config["rel"]
    setx = config["setx"]
    sd = config["sd"]
    is_log = config["is_log"]

    traces = []
    for i, metric in enumerate(metrics):
        # Only the first metric's traces start out visible.
        cur_visible = i == 0
        metric = metrics[i]
        traces.extend(
            slice_config_to_trace(
                arm_data[metric],
                arm_name_to_parameters[metric],
                f[metric],
                fit_data[metric],
                grid,
                metric,
                param,
                rel,
                setx,
                sd[metric],
                is_log[metric],
                cur_visible,
            )
        )

    # layout
    xrange = axis_range(grid, is_log[metrics[0]])
    xtype = "log" if is_log[metrics[0]] else "linear"

    # One button per metric; each toggles visibility of that metric's traces.
    buttons = []
    for i, metric in enumerate(metrics):
        trace_cnt = 3 + len(arm_data[metric]["out_of_sample"].keys()) * 2
        visible = [False] * (len(metrics) * trace_cnt)
        for j in range(i * trace_cnt, (i + 1) * trace_cnt):
            visible[j] = True
        buttons.append(
            {
                "method": "update",
                "args": [{"visible": visible}, {"yaxis.title": metric}],
                "label": metric,
            }
        )

    layout = {
        "title": "Predictions for a 1-d slice of the parameter space",
        "annotations": [
            {
                "showarrow": False,
                "text": "Choose metric:",
                "x": 0.225,
                "xanchor": "center",
                "xref": "paper",
                "y": 1.005,
                "yanchor": "bottom",
                "yref": "paper",
            }
        ],
        "updatemenus": [{"y": 1.1, "x": 0.5, "yanchor": "top", "buttons": buttons}],
        "hovermode": "closest",
        "xaxis": {
            "anchor": "y",
            "autorange": False,
            "exponentformat": "e",
            "range": xrange,
            "tickfont": {"size": 11},
            "tickmode": "auto",
            "title": param,
            "type": xtype,
        },
        "yaxis": {
            "anchor": "x",
            "autorange": True,
            "tickfont": {"size": 11},
            "tickmode": "auto",
            "title": metrics[0],
        },
    }

    fig = go.Figure(data=traces, layout=layout)
    return AxPlotConfig(data=fig, plot_type=AxPlotTypes.GENERIC)
def interact_slice(
    model: ModelBridge,
    param_name: str,
    metric_name: str = "",
    generator_runs_dict: TNullableGeneratorRunsDict = None,
    relative: bool = False,
    density: int = 50,
    slice_values: Optional[Dict[str, Any]] = None,
    fixed_features: Optional[ObservationFeatures] = None,
) -> AxPlotConfig:
    """Create interactive plot with predictions for a 1-d slice of the parameter
    space.

    Args:
        model: ModelBridge that contains model for predictions
        param_name: Name of parameter that will be sliced
        metric_name: Name of metric to plot
        generator_runs_dict: A dictionary {name: generator run} of generator runs
            whose arms will be plotted, if they lie in the slice.
        relative: Predictions relative to status quo
        density: Number of points along slice to evaluate predictions.
        slice_values: A dictionary {name: val} for the fixed values of the other
            parameters. If not provided, then the status quo values will be used
            if there is a status quo, otherwise the mean of numeric parameters or
            the mode of choice parameters. Ignored if fixed_features is specified.
        fixed_features: An ObservationFeatures object containing the values of
            features (including non-parameter features like context) to be set in
            the slice.
    """
    if generator_runs_dict is None:
        generator_runs_dict = {}

    metric_names = list(model.metric_names)
    parameter = get_range_parameter(model, param_name)
    grid = get_grid_for_parameter(parameter, density)

    # `fixed_features` takes precedence over `slice_values` when both are given.
    if fixed_features is not None:
        slice_values = fixed_features.parameters
    else:
        fixed_features = ObservationFeatures(parameters={})
    fixed_values = get_fixed_values(model, slice_values)

    # Build one prediction point per grid value, varying only `param_name`,
    # then predict jointly over all metrics.
    prediction_features = []
    for grid_value in grid:
        point = deepcopy(fixed_features)
        point.parameters = fixed_values.copy()
        point.parameters[param_name] = grid_value
        prediction_features.append(point)
    f, cov = model.predict(prediction_features)

    plot_data_dict = {}
    raw_data_dict = {}
    sd_plt_dict: Dict[str, Dict[str, np.ndarray]] = {}
    cond_name_to_parameters_dict = {}
    is_log_dict: Dict[str, bool] = {}
    for metric in metric_names:
        pd, cntp, _, rd, _, _, _, _, _, _, ls = _get_slice_predictions(
            model=model,
            param_name=param_name,
            metric_name=metric,
            generator_runs_dict=generator_runs_dict,
            relative=relative,
            density=density,
            slice_values=slice_values,
            fixed_features=fixed_features,
        )
        plot_data_dict[metric] = pd
        raw_data_dict[metric] = rd
        cond_name_to_parameters_dict[metric] = cntp
        sd_plt_dict[metric] = np.sqrt(cov[metric][metric])
        is_log_dict[metric] = ls

    config = {
        "arm_data": plot_data_dict,
        "arm_name_to_parameters": cond_name_to_parameters_dict,
        "f": f,
        "fit_data": raw_data_dict,
        "grid": grid,
        "metrics": metric_names,
        "param": param_name,
        "rel": relative,
        "setx": fixed_values,
        "sd": sd_plt_dict,
        "is_log": is_log_dict,
    }
    return AxPlotConfig(config, plot_type=AxPlotTypes.INTERACT_SLICE)