Example #1
    def test_exp_to_df(self):
        exp = get_branin_experiment(with_batch=True)
        exp.trials[0].run()
        df = exp_to_df(exp)
        self.assertIsInstance(df, pd.DataFrame)
        df = exp_to_df(exp, run_metadata_fields=["name"])
        self.assertIn("name", df.columns)
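The same flow works outside a test harness. A minimal sketch, assuming exp_to_df and the Branin helper keep their usual Ax import paths (both paths are assumptions, not shown in the example above):

from ax.service.utils.report_utils import exp_to_df
from ax.utils.testing.core_stubs import get_branin_experiment

exp = get_branin_experiment(with_batch=True)
exp.trials[0].run()

# run_metadata_fields pulls named entries from each trial's run metadata
# into their own columns, as asserted in the test above.
df = exp_to_df(exp, run_metadata_fields=["name"])
print(df[["trial_index", "arm_name", "name"]].head())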
Example #2
def prepare_experiment_for_plotting(
    experiment: Experiment,
    ignored_names: Optional[List[str]] = None,
) -> pd.DataFrame:
    """Strip variables not desired in the final plot and truncate names for readability

    Args:
        experiment: Experiment containing trials to plot
        ignored_names: Columns in the experiment data to exclude from the
            final plot. Defaults to ["generation_method", "trial_status",
            "arm_name"].

    Returns:
        pd.DataFrame: data frame ready for ingestion by plotly
    """
    ignored_names = (["generation_method", "trial_status", "arm_name"]
                     if ignored_names is None else ignored_names)

    df = exp_to_df(experiment)

    dropped = df.drop(ignored_names, axis=1)

    renamed = dropped.rename(
        # pyre-fixme[6] Expected `typing.Union[
        # typing.Callable[[Optional[typing.Hashable]], Optional[typing.Hashable]],
        # None, typing.Mapping[Optional[typing.Hashable], typing.Any]]` for 1st
        # parameter `columns` to call `pd.core.frame.DataFrame.rename` but got
        # `typing.Dict[str, str]`.
        columns=_get_shortest_unique_suffix_dict(
            [str(c) for c in dropped.columns]))

    return renamed
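A minimal usage sketch for the function above, assuming the Branin helper used in the other examples and plotly.express for rendering; neither appears in the snippet itself:

import plotly.express as px

from ax.utils.testing.core_stubs import get_branin_experiment

exp = get_branin_experiment(with_batch=True)
exp.trials[0].run()

# NOTE: prepare_experiment_for_plotting shortens column names to their
# shortest unique suffix, so "x1"/"x2"/"branin" below are assumptions that
# may not hold verbatim; inspect plot_df.columns first.
plot_df = prepare_experiment_for_plotting(exp)
fig = px.scatter(plot_df, x="x1", y="x2", color="branin")
fig.show()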
Example #3
    def test_get_best_trial(self):
        exp = get_branin_experiment(with_batch=True, minimize=True)
        # Hack in `noise_sd` value to ensure full reproducibility.
        exp.metrics["branin"].noise_sd = 0.0
        exp.trials[0].run()
        df = exp_to_df(exp)
        best_trial = get_best_trial(exp)
        pd.testing.assert_frame_equal(
            df.sort_values("branin").head(1), best_trial)
Example #4
    def test_get_best_trial(self):
        exp = get_branin_experiment(with_batch=True, minimize=True)

        # exp with no completed trials should return None
        self.assertIsNone(get_best_trial(exp))

        # exp with completed trials should return optimal row
        # Hack in `noise_sd` value to ensure full reproducibility.
        exp.metrics[OBJECTIVE_NAME].noise_sd = 0.0
        exp.trials[0].run()
        df = exp_to_df(exp)
        best_trial = get_best_trial(exp)
        pd.testing.assert_frame_equal(
            df.sort_values(OBJECTIVE_NAME).head(1), best_trial
        )

        # exp with missing rows should return optimal row
        dummy_struct = namedtuple("dummy_struct", "df")
        mock_results = dummy_struct(
            df=pd.DataFrame(
                {
                    "arm_name": ["0_0"],
                    "metric_name": [OBJECTIVE_NAME],
                    "mean": [DUMMY_OBJECTIVE_MEAN],
                    "sem": [0],
                    "trial_index": [0],
                    "n": [123],
                    "frac_nonnull": [1],
                }
            )
        )
        with patch.object(Experiment, "fetch_data", lambda self, metrics: mock_results):
            best_trial = get_best_trial(exp=exp)
        self.assertEqual(best_trial[OBJECTIVE_NAME][0], DUMMY_OBJECTIVE_MEAN)

        # when optimal objective is shared across multiple trials,
        # arbitrarily return a single optimal row
        mock_results = dummy_struct(
            df=pd.DataFrame(
                {
                    "arm_name": ["0_0", "0_1"],
                    "metric_name": [OBJECTIVE_NAME] * 2,
                    "mean": [DUMMY_OBJECTIVE_MEAN] * 2,
                    "sem": [0] * 2,
                    "trial_index": [0, 1],
                    "n": [123] * 2,
                    "frac_nonnull": [1] * 2,
                }
            )
        )
        with patch.object(Experiment, "fetch_data", lambda self, metrics: mock_results):
            best_trial = get_best_trial(exp=exp)
        self.assertEqual(len(best_trial.index), 1)
        self.assertEqual(best_trial[OBJECTIVE_NAME][0], DUMMY_OBJECTIVE_MEAN)
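A non-test sketch of the same get_best_trial contract, assuming it is importable alongside exp_to_df from ax.service.utils.report_utils (an assumption based on these tests exercising both together):

from ax.service.utils.report_utils import exp_to_df, get_best_trial
from ax.utils.testing.core_stubs import get_branin_experiment

exp = get_branin_experiment(with_batch=True, minimize=True)

# Returns None before any trial has produced data (see the test above).
assert get_best_trial(exp) is None

exp.trials[0].run()
best = get_best_trial(exp)  # one-row frame matching the optimal arm
print(best)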
Example #5
    def test_exp_to_df(self):
        # MultiTypeExperiment should fail
        exp = get_multi_type_experiment()
        with self.assertRaisesRegex(ValueError, "MultiTypeExperiment"):
            exp_to_df(exp=exp)

        # exp with no trials should return empty results
        exp = get_branin_experiment()
        df = exp_to_df(exp=exp)
        self.assertEqual(len(df), 0)

        # set up experiment
        exp = get_branin_experiment(with_batch=True)

        # check that pre-run experiment returns all columns except objective
        df = exp_to_df(exp)
        self.assertEqual(
            set(EXPECTED_COLUMNS) - set(df.columns), {OBJECTIVE_NAME})
        self.assertEqual(len(df.index), len(exp.arms_by_name))

        exp.trials[0].run()

        # assert result is df with expected columns and length
        df = exp_to_df(exp=exp)
        self.assertIsInstance(df, pd.DataFrame)
        self.assertListEqual(sorted(df.columns), sorted(EXPECTED_COLUMNS))
        self.assertEqual(len(df.index), len(exp.arms_by_name))

        # test with run_metadata_fields and trial_properties_fields not empty
        # add source to properties
        for _, trial in exp.trials.items():
            trial._properties["source"] = DUMMY_SOURCE
        df = exp_to_df(exp,
                       run_metadata_fields=["name"],
                       trial_properties_fields=["source"])
        self.assertIn("name", df.columns)
        self.assertIn("trial_properties_source", df.columns)

        # test column values or types
        self.assertTrue(all(x == 0 for x in df.trial_index))
        self.assertTrue(all(x == "RUNNING" for x in df.trial_status))
        self.assertTrue(all(x == "Sobol" for x in df.generation_method))
        self.assertTrue(
            all(x == DUMMY_SOURCE for x in df.trial_properties_source))
        self.assertTrue(all(x == "branin_test_experiment_0" for x in df.name))
        for float_column in FLOAT_COLUMNS:
            self.assertTrue(all(
                isinstance(x, float) for x in df[float_column]))

        # works correctly for failed trials (will need to mock)
        dummy_struct = namedtuple("dummy_struct", "df")
        mock_results = dummy_struct(df=pd.DataFrame({
            "arm_name": ["0_0"],
            "metric_name": [OBJECTIVE_NAME],
            "mean": [DUMMY_OBJECTIVE_MEAN],
            "sem": [0],
            "trial_index": [0],
            "n": [123],
            "frac_nonnull": [1],
        }))
        with patch.object(Experiment, "fetch_data",
                          lambda self, metrics: mock_results):
            df = exp_to_df(exp=exp)

        # all but one row should have a metric value of NaN
        self.assertEqual(pd.isna(df[OBJECTIVE_NAME]).sum(), len(df.index) - 1)

        # an experiment with more results than arms raises an error
        with patch.object(
                Experiment, "fetch_data",
                lambda self, metrics: mock_results), self.assertRaisesRegex(
                    ValueError, "inconsistent experimental state"):
            exp_to_df(exp=get_branin_experiment())

        # custom added trial has a generation_method of Manual
        custom_arm = Arm(name="custom", parameters={"x1": 0, "x2": 0})
        exp.new_trial().add_arm(custom_arm)
        df = exp_to_df(exp)
        self.assertEqual(df[df.arm_name == "custom"].iloc[0].generation_method,
                         "Manual")
Example #6
    def get_trials_data_frame(self) -> pd.DataFrame:
        return exp_to_df(exp=self.experiment)
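This reads as a thin convenience wrapper around exp_to_df. A sketch of the kind of host class it could sit on; the TrialTracker name is hypothetical and the import path is assumed:

import pandas as pd

from ax.core.experiment import Experiment
from ax.service.utils.report_utils import exp_to_df


class TrialTracker:
    def __init__(self, experiment: Experiment) -> None:
        self.experiment = experiment

    def get_trials_data_frame(self) -> pd.DataFrame:
        # Flatten trials/arms/metrics into one row-per-arm data frame.
        return exp_to_df(exp=self.experiment)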
Example #7
    def test_infer_objective_thresholds(self, _, cuda=False):
        # lightweight test
        exp = get_branin_experiment_with_multi_objective(
            has_optimization_config=True,
            with_batch=True,
            with_status_quo=True,
        )
        for trial in exp.trials.values():
            trial.mark_running(no_runner_required=True).mark_completed()
        exp.attach_data(
            get_branin_data_multi_objective(trial_indices=exp.trials.keys())
        )
        data = exp.fetch_data()
        modelbridge = TorchModelBridge(
            search_space=exp.search_space,
            model=MultiObjectiveBotorchModel(),
            optimization_config=exp.optimization_config,
            transforms=Cont_X_trans + Y_trans,
            torch_device=torch.device("cuda" if cuda else "cpu"),
            experiment=exp,
            data=data,
        )
        fixed_features = ObservationFeatures(parameters={"x1": 0.0})
        search_space = exp.search_space.clone()
        param_constraints = [
            ParameterConstraint(constraint_dict={"x1": 1.0}, bound=10.0)
        ]
        search_space.add_parameter_constraints(param_constraints)
        oc = exp.optimization_config.clone()
        oc.objective._objectives[0].minimize = True
        expected_base_gen_args = modelbridge._get_transformed_gen_args(
            search_space=search_space.clone(),
            optimization_config=oc,
            fixed_features=fixed_features,
        )
        with ExitStack() as es:
            mock_model_infer_obj_t = es.enter_context(
                patch(
                    "ax.modelbridge.torch.infer_objective_thresholds",
                    wraps=infer_objective_thresholds,
                )
            )
            mock_get_transformed_gen_args = es.enter_context(
                patch.object(
                    modelbridge,
                    "_get_transformed_gen_args",
                    wraps=modelbridge._get_transformed_gen_args,
                )
            )
            mock_get_transformed_model_gen_args = es.enter_context(
                patch.object(
                    modelbridge,
                    "_get_transformed_model_gen_args",
                    wraps=modelbridge._get_transformed_model_gen_args,
                )
            )
            mock_untransform_objective_thresholds = es.enter_context(
                patch.object(
                    modelbridge,
                    "_untransform_objective_thresholds",
                    wraps=modelbridge._untransform_objective_thresholds,
                )
            )
            obj_thresholds = modelbridge.infer_objective_thresholds(
                search_space=search_space,
                optimization_config=oc,
                fixed_features=fixed_features,
            )
            expected_obj_weights = torch.tensor([-1.0, 1.0])
            ckwargs = mock_model_infer_obj_t.call_args[1]
            self.assertTrue(
                torch.equal(ckwargs["objective_weights"], expected_obj_weights)
            )
            # check that transforms have been applied (at least UnitX)
            self.assertEqual(ckwargs["bounds"], [(0.0, 1.0), (0.0, 1.0)])
            lc = ckwargs["linear_constraints"]
            self.assertTrue(torch.equal(lc[0], torch.tensor([[15.0, 0.0]])))
            self.assertTrue(torch.equal(lc[1], torch.tensor([[15.0]])))
            self.assertEqual(ckwargs["fixed_features"], {0: 1.0 / 3.0})
            mock_get_transformed_gen_args.assert_called_once()
            mock_get_transformed_model_gen_args.assert_called_once_with(
                search_space=expected_base_gen_args.search_space,
                fixed_features=expected_base_gen_args.fixed_features,
                pending_observations=expected_base_gen_args.pending_observations,
                optimization_config=expected_base_gen_args.optimization_config,
            )
            mock_untransform_objective_thresholds.assert_called_once()
            ckwargs = mock_untransform_objective_thresholds.call_args[1]

            self.assertTrue(
                torch.equal(ckwargs["objective_weights"], expected_obj_weights)
            )
            self.assertEqual(ckwargs["bounds"], [(0.0, 1.0), (0.0, 1.0)])
            self.assertEqual(ckwargs["fixed_features"], {0: 1.0 / 3.0})
        self.assertEqual(obj_thresholds[0].metric.name, "branin_a")
        self.assertEqual(obj_thresholds[1].metric.name, "branin_b")
        self.assertEqual(obj_thresholds[0].op, ComparisonOp.LEQ)
        self.assertEqual(obj_thresholds[1].op, ComparisonOp.GEQ)
        self.assertFalse(obj_thresholds[0].relative)
        self.assertFalse(obj_thresholds[1].relative)
        df = exp_to_df(exp)
        Y = np.stack([df.branin_a.values, df.branin_b.values]).T
        Y = torch.from_numpy(Y)
        Y[:, 0] *= -1
        pareto_Y = Y[is_non_dominated(Y)]
        nadir = pareto_Y.min(dim=0).values
        self.assertTrue(
            np.all(
                np.array([-obj_thresholds[0].bound, obj_thresholds[1].bound])
                < nadir.numpy()
            )
        )
        # test using MTGP
        sobol_generator = get_sobol(
            search_space=exp.search_space,
            seed=TEST_SOBOL_SEED,
            # Set the initial position equal to the number of Sobol arms
            # generated so far, so that new Sobol arms complement the
            # previous ones in a space-filling fashion.
            init_position=len(exp.arms_by_name) - 1,
        )
        sobol_run = sobol_generator.gen(n=2)
        trial = exp.new_batch_trial(optimize_for_power=True)
        trial.add_generator_run(sobol_run)
        trial.mark_running(no_runner_required=True).mark_completed()
        data = exp.fetch_data()
        torch.manual_seed(0)  # make model fitting deterministic
        modelbridge = TorchModelBridge(
            search_space=exp.search_space,
            model=MultiObjectiveBotorchModel(),
            optimization_config=exp.optimization_config,
            transforms=ST_MTGP_trans,
            experiment=exp,
            data=data,
        )
        fixed_features = ObservationFeatures(parameters={}, trial_index=1)
        expected_base_gen_args = modelbridge._get_transformed_gen_args(
            search_space=search_space.clone(),
            optimization_config=exp.optimization_config,
            fixed_features=fixed_features,
        )
        with ExitStack() as es:
            mock_model_infer_obj_t = es.enter_context(
                patch(
                    "ax.modelbridge.torch.infer_objective_thresholds",
                    wraps=infer_objective_thresholds,
                )
            )
            mock_untransform_objective_thresholds = es.enter_context(
                patch.object(
                    modelbridge,
                    "_untransform_objective_thresholds",
                    wraps=modelbridge._untransform_objective_thresholds,
                )
            )
            obj_thresholds = modelbridge.infer_objective_thresholds(
                search_space=search_space,
                optimization_config=exp.optimization_config,
                fixed_features=fixed_features,
            )
            ckwargs = mock_model_infer_obj_t.call_args[1]
            self.assertEqual(ckwargs["fixed_features"], {2: 1.0})
            mock_untransform_objective_thresholds.assert_called_once()
            ckwargs = mock_untransform_objective_thresholds.call_args[1]
            self.assertEqual(ckwargs["fixed_features"], {2: 1.0})
        self.assertEqual(obj_thresholds[0].metric.name, "branin_a")
        self.assertEqual(obj_thresholds[1].metric.name, "branin_b")
        self.assertEqual(obj_thresholds[0].op, ComparisonOp.GEQ)
        self.assertEqual(obj_thresholds[1].op, ComparisonOp.GEQ)
        self.assertFalse(obj_thresholds[0].relative)
        self.assertFalse(obj_thresholds[1].relative)
        df = exp_to_df(exp)
        trial_mask = df.trial_index == 1
        Y = np.stack([df.branin_a.values[trial_mask], df.branin_b.values[trial_mask]]).T
        Y = torch.from_numpy(Y)
        pareto_Y = Y[is_non_dominated(Y)]
        nadir = pareto_Y.min(dim=0).values
        self.assertTrue(
            np.all(
                np.array([obj_thresholds[0].bound, obj_thresholds[1].bound])
                < nadir.numpy()
            )
        )

    def test_infer_objective_thresholds(self, _, cuda=False):
        # lightweight test
        exp = get_branin_experiment_with_multi_objective(
            has_optimization_config=True,
            with_batch=True,
            with_status_quo=True,
        )
        for trial in exp.trials.values():
            trial.mark_running(no_runner_required=True).mark_completed()
        exp.attach_data(
            get_branin_data_multi_objective(trial_indices=exp.trials.keys()))
        data = exp.fetch_data()
        modelbridge = MultiObjectiveTorchModelBridge(
            search_space=exp.search_space,
            model=MultiObjectiveBotorchModel(),
            optimization_config=exp.optimization_config,
            transforms=Cont_X_trans + Y_trans,
            torch_device=torch.device("cuda" if cuda else "cpu"),
            experiment=exp,
            data=data,
        )
        fixed_features = ObservationFeatures(parameters={"x1": 0.0})
        search_space = exp.search_space.clone()
        param_constraints = [
            ParameterConstraint(constraint_dict={"x1": 1.0}, bound=10.0)
        ]
        outcome_constraints = [
            OutcomeConstraint(
                metric=exp.metrics["branin_a"],
                op=ComparisonOp.GEQ,
                bound=-40.0,
                relative=False,
            )
        ]
        search_space.add_parameter_constraints(param_constraints)
        exp.optimization_config.outcome_constraints = outcome_constraints
        oc = exp.optimization_config.clone()
        oc.objective._objectives[0].minimize = True
        expected_base_gen_args = modelbridge._get_transformed_gen_args(
            search_space=search_space.clone(),
            optimization_config=oc,
            fixed_features=fixed_features,
        )
        with ExitStack() as es:
            mock_model_infer_obj_t = es.enter_context(
                patch(
                    "ax.modelbridge.multi_objective_torch.infer_objective_thresholds",
                    wraps=infer_objective_thresholds,
                ))
            mock_get_transformed_gen_args = es.enter_context(
                patch.object(
                    modelbridge,
                    "_get_transformed_gen_args",
                    wraps=modelbridge._get_transformed_gen_args,
                ))
            mock_get_transformed_model_gen_args = es.enter_context(
                patch.object(
                    modelbridge,
                    "_get_transformed_model_gen_args",
                    wraps=modelbridge._get_transformed_model_gen_args,
                ))
            mock_untransform_objective_thresholds = es.enter_context(
                patch.object(
                    modelbridge,
                    "untransform_objective_thresholds",
                    wraps=modelbridge.untransform_objective_thresholds,
                ))
            obj_thresholds = modelbridge.infer_objective_thresholds(
                search_space=search_space,
                optimization_config=oc,
                fixed_features=fixed_features,
            )
            expected_obj_weights = torch.tensor([-1.0, 1.0])
            ckwargs = mock_model_infer_obj_t.call_args[1]
            self.assertTrue(
                torch.equal(ckwargs["objective_weights"],
                            expected_obj_weights))
            # check that transforms have been applied (at least UnitX)
            self.assertEqual(ckwargs["bounds"], [(0.0, 1.0), (0.0, 1.0)])
            oc = ckwargs["outcome_constraints"]
            self.assertTrue(torch.equal(oc[0], torch.tensor([[-1.0, 0.0]])))
            self.assertTrue(torch.equal(oc[1], torch.tensor([[45.0]])))
            lc = ckwargs["linear_constraints"]
            self.assertTrue(torch.equal(lc[0], torch.tensor([[15.0, 0.0]])))
            self.assertTrue(torch.equal(lc[1], torch.tensor([[15.0]])))
            self.assertEqual(ckwargs["fixed_features"], {0: 1.0 / 3.0})
            mock_get_transformed_gen_args.assert_called_once()
            mock_get_transformed_model_gen_args.assert_called_once_with(
                search_space=expected_base_gen_args.search_space,
                fixed_features=expected_base_gen_args.fixed_features,
                pending_observations=expected_base_gen_args.pending_observations,
                optimization_config=expected_base_gen_args.optimization_config,
            )
            mock_untransform_objective_thresholds.assert_called_once()
            ckwargs = mock_untransform_objective_thresholds.call_args[1]

            self.assertTrue(
                torch.equal(ckwargs["objective_weights"],
                            expected_obj_weights))
            self.assertEqual(ckwargs["bounds"], [(0.0, 1.0), (0.0, 1.0)])
            self.assertEqual(ckwargs["fixed_features"], {0: 1.0 / 3.0})
        self.assertEqual(obj_thresholds[0].metric.name, "branin_a")
        self.assertEqual(obj_thresholds[1].metric.name, "branin_b")
        self.assertEqual(obj_thresholds[0].op, ComparisonOp.LEQ)
        self.assertEqual(obj_thresholds[1].op, ComparisonOp.GEQ)
        self.assertFalse(obj_thresholds[0].relative)
        self.assertFalse(obj_thresholds[1].relative)
        df = exp_to_df(exp)
        Y = np.stack([df.branin_a.values, df.branin_b.values]).T
        Y = torch.from_numpy(Y)
        Y[:, 0] *= -1
        pareto_Y = Y[is_non_dominated(Y)]
        nadir = pareto_Y.min(dim=0).values
        self.assertTrue(
            np.all(
                np.array([-obj_thresholds[0].bound, obj_thresholds[1].bound]) <
                nadir.numpy()))
        # test using MTGP
        sobol_generator = get_sobol(search_space=exp.search_space)
        sobol_run = sobol_generator.gen(n=5)
        trial = exp.new_batch_trial(optimize_for_power=True)
        trial.add_generator_run(sobol_run)
        trial.mark_running(no_runner_required=True).mark_completed()
        data = exp.fetch_data()
        modelbridge = MultiObjectiveTorchModelBridge(
            search_space=exp.search_space,
            model=MultiObjectiveBotorchModel(),
            optimization_config=exp.optimization_config,
            transforms=ST_MTGP_trans,
            experiment=exp,
            data=data,
        )
        fixed_features = ObservationFeatures(parameters={}, trial_index=1)
        expected_base_gen_args = modelbridge._get_transformed_gen_args(
            search_space=search_space.clone(),
            optimization_config=exp.optimization_config,
            fixed_features=fixed_features,
        )
        with self.assertRaises(ValueError):
            # Check that a ValueError is raised when MTGP is being used
            # and trial_index is not specified as a fixed feature.
            # Note: this error is raised by StratifiedStandardizeY.
            modelbridge.infer_objective_thresholds(
                search_space=search_space,
                optimization_config=exp.optimization_config,
            )
        with ExitStack() as es:
            mock_model_infer_obj_t = es.enter_context(
                patch(
                    "ax.modelbridge.multi_objective_torch.infer_objective_thresholds",
                    wraps=infer_objective_thresholds,
                ))
            mock_untransform_objective_thresholds = es.enter_context(
                patch.object(
                    modelbridge,
                    "untransform_objective_thresholds",
                    wraps=modelbridge.untransform_objective_thresholds,
                ))
            obj_thresholds = modelbridge.infer_objective_thresholds(
                search_space=search_space,
                optimization_config=exp.optimization_config,
                fixed_features=fixed_features,
            )
            ckwargs = mock_model_infer_obj_t.call_args[1]
            self.assertEqual(ckwargs["fixed_features"], {2: 1.0})
            mock_untransform_objective_thresholds.assert_called_once()
            ckwargs = mock_untransform_objective_thresholds.call_args[1]
            self.assertEqual(ckwargs["fixed_features"], {2: 1.0})
        self.assertEqual(obj_thresholds[0].metric.name, "branin_a")
        self.assertEqual(obj_thresholds[1].metric.name, "branin_b")
        self.assertEqual(obj_thresholds[0].op, ComparisonOp.GEQ)
        self.assertEqual(obj_thresholds[1].op, ComparisonOp.GEQ)
        self.assertFalse(obj_thresholds[0].relative)
        self.assertFalse(obj_thresholds[1].relative)
        df = exp_to_df(exp)
        trial_mask = df.trial_index == 1
        Y = np.stack(
            [df.branin_a.values[trial_mask], df.branin_b.values[trial_mask]]).T
        Y = torch.from_numpy(Y)
        pareto_Y = Y[is_non_dominated(Y)]
        nadir = pareto_Y.min(dim=0).values
        self.assertTrue(
            np.all(
                np.array([obj_thresholds[0].bound, obj_thresholds[1].bound]) <
                nadir.numpy()))