Example #1
0
    def testWarmStartFromOldExperiment(self):
        """Warm-starting a new experiment from an old one copies all
        non-failed trials (optionally with their run metadata) and attaches
        the old data, so that fetched results reproduce exactly.
        """
        # create old_experiment
        len_old_trials = 5
        i_failed_trial = 3
        old_experiment = get_branin_experiment()
        for i_old_trial in range(len_old_trials):
            sobol_run = get_sobol(search_space=old_experiment.search_space).gen(n=1)
            trial = old_experiment.new_trial(generator_run=sobol_run)
            trial.mark_running(no_runner_required=True)
            if i_old_trial == i_failed_trial:
                trial.mark_failed()
            else:
                trial.mark_completed()
        # make metric noiseless for exact reproducibility
        old_experiment.optimization_config.objective.metric.noise_sd = 0
        old_experiment.fetch_data()

        # should fail if new_experiment has trials
        new_experiment = get_branin_experiment(with_trial=True)
        with self.assertRaisesRegex(ValueError, "Experiment.*has.*trials"):
            new_experiment.warm_start_from_old_experiment(old_experiment=old_experiment)

        # should fail if search spaces are different
        with self.assertRaisesRegex(ValueError, "mismatch in search space parameters"):
            self.experiment.warm_start_from_old_experiment(
                old_experiment=old_experiment
            )

        # check that all non-failed trials are copied to new_experiment
        new_experiment = get_branin_experiment()
        # make metric noiseless for exact reproducibility
        new_experiment.optimization_config.objective.metric.noise_sd = 0
        for _, trial in old_experiment.trials.items():
            trial._run_metadata = DUMMY_RUN_METADATA
        new_experiment.warm_start_from_old_experiment(
            old_experiment=old_experiment, copy_run_metadata=True
        )
        self.assertEqual(len(new_experiment.trials), len(old_experiment.trials) - 1)
        i_old_trial = 0
        for _, trial in new_experiment.trials.items():
            # skip failed trial
            # (the bool adds 1 to jump over the failed old-trial index)
            i_old_trial += i_old_trial == i_failed_trial
            self.assertEqual(
                trial.arm.parameters, old_experiment.trials[i_old_trial].arm.parameters
            )
            self.assertRegex(
                trial._properties["source"], "Warm start.*Experiment.*trial"
            )
            self.assertDictEqual(trial.run_metadata, DUMMY_RUN_METADATA)
            i_old_trial += 1

        # Check that the data was attached for correct trials
        old_df = old_experiment.fetch_data().df
        new_df = new_experiment.fetch_data().df

        # One row per copied (non-failed) trial.
        self.assertEqual(len(new_df), len_old_trials - 1)
        pd.testing.assert_frame_equal(
            old_df.drop(["arm_name", "trial_index"], axis=1),
            new_df.drop(["arm_name", "trial_index"], axis=1),
        )
Example #2
0
    def test_MTGP(self):
        """Tests MTGP instantiation."""
        # Test Multi-type MTGP
        exp = get_multi_type_experiment(add_trials=True)
        mtgp = get_MTGP(experiment=exp, data=exp.fetch_data())
        self.assertIsInstance(mtgp, TorchModelBridge)

        # Test Single-type MTGP
        exp = get_branin_experiment()
        # Check that factory generates a valid sobol modelbridge.
        sobol = get_sobol(search_space=exp.search_space)
        self.assertIsInstance(sobol, RandomModelBridge)
        for _ in range(5):
            sobol_run = sobol.gen(n=1)
            t = exp.new_batch_trial().add_generator_run(sobol_run)
            t.set_status_quo_with_weight(status_quo=t.arms[0], weight=0.5)
            t.run().mark_completed()
        mtgp = get_MTGP(experiment=exp, data=exp.fetch_data(), trial_index=0)
        self.assertIsInstance(mtgp, TorchModelBridge)
        # mtgp_run = mtgp.gen(
        #     n=1
        # )  # TODO[T110948251]: This is broken at the ChoiceEncode level

        # An out-of-range trial index is rejected.
        with self.assertRaises(ValueError):
            get_MTGP(experiment=exp, data=exp.fetch_data(), trial_index=9)

        # New experiment whose trial never had a status quo set.
        exp = get_branin_experiment()
        sobol = get_sobol(search_space=exp.search_space)
        self.assertIsInstance(sobol, RandomModelBridge)
        sobol_run = sobol.gen(n=1)
        t = exp.new_batch_trial().add_generator_run(sobol_run)
        t.run().mark_completed()

        # presumably fails because the trial lacks a status quo — verify
        with self.assertRaises(ValueError):
            get_MTGP(experiment=exp, data=exp.fetch_data(), trial_index=0)
Example #3
0
 def test_min_observed(self):
     """The strategy may not advance past the Sobol step until
     `min_arms_observed` arms have data, so the sixth `gen` call raises.
     """
     # We should fail to transition the next model if there is not
     # enough data observed.
     # Fixed: the original passed a freshly-created experiment as a
     # positional argument to `get_branin_experiment`, which was a
     # meaningless (and misleading) call.
     exp = get_branin_experiment()
     gs = GenerationStrategy(steps=[
         GenerationStep(model=Models.SOBOL, num_arms=5,
                        min_arms_observed=5),
         GenerationStep(model=Models.GPEI, num_arms=1),
     ])
     self.assertFalse(gs.uses_non_registered_models)
     for _ in range(5):
         gs.gen(exp)
     with self.assertRaises(ValueError):
         gs.gen(exp)
Example #4
0
 def test_sobol_GPEI_strategy_batches(self, mock_GPEI_gen, mock_GPEI_update,
                                      mock_GPEI_init):
     """Sobol+GPEI strategy (GPEI mocked via the injected mocks) generates
     batch trials until its 5 + 8 arm quota is exhausted, then raises.
     """
     exp = get_branin_experiment()
     sobol_GPEI_generation_strategy = GenerationStrategy(
         name="Sobol+GPEI",
         steps=[
             GenerationStep(model=Models.SOBOL, num_arms=5),
             GenerationStep(model=Models.GPEI, num_arms=8),
         ],
     )
     self.assertEqual(sobol_GPEI_generation_strategy.name, "Sobol+GPEI")
     self.assertEqual(sobol_GPEI_generation_strategy.generator_changes, [5])
     exp.new_batch_trial(
         generator_run=sobol_GPEI_generation_strategy.gen(exp, n=2)).run()
     for i in range(1, 8):
         if i == 7:
             # Check completeness error message.
             with self.assertRaisesRegex(ValueError, "Generation strategy"):
                 g = sobol_GPEI_generation_strategy.gen(
                     exp, exp._fetch_trial_data(trial_index=i - 1), n=2)
         else:
             g = sobol_GPEI_generation_strategy.gen(
                 exp, exp._fetch_trial_data(trial_index=i - 1), n=2)
         # NOTE(review): when i == 7 the `gen` call raises, so `g` from the
         # previous iteration is re-used here — confirm this is intended.
         exp.new_batch_trial(generator_run=g).run()
     with self.assertRaises(ValueError):
         sobol_GPEI_generation_strategy.gen(exp, exp.fetch_data())
     self.assertIsInstance(sobol_GPEI_generation_strategy.model,
                           TorchModelBridge)
Example #5
0
 def test_store_experiment(self):
     """Generating from a strategy should attach the experiment to it."""
     experiment = get_branin_experiment()
     strategy = GenerationStrategy(
         steps=[GenerationStep(model=Models.SOBOL, num_arms=5)])
     # No experiment is associated until the first `gen` call.
     self.assertIsNone(strategy._experiment)
     strategy.gen(experiment)
     self.assertIsNotNone(strategy._experiment)
Example #6
0
 def setUp(self):
     """Build the experiments and generation strategies shared by the
     scheduler tests below.
     """
     self.branin_experiment = get_branin_experiment()
     self.branin_experiment._properties[
         Keys.IMMUTABLE_SEARCH_SPACE_AND_OPT_CONF] = True
     # Experiment whose metric is a bare `Metric` (no fetch implementation).
     self.branin_experiment_no_impl_metrics = Experiment(
         search_space=get_branin_search_space(),
         optimization_config=OptimizationConfig(objective=Objective(
             metric=Metric(name="branin"))),
     )
     self.sobol_GPEI_GS = choose_generation_strategy(
         search_space=get_branin_search_space())
     self.two_sobol_steps_GS = GenerationStrategy(  # Contrived GS to ensure
         steps=[  # that `DataRequiredError` is properly handled in scheduler.
             GenerationStep(  # This error is raised when not enough trials
                 model=Models.
                 SOBOL,  # have been observed to proceed to next
                 num_trials=5,  # generation step.
                 min_trials_observed=3,
                 max_parallelism=2,
             ),
             GenerationStep(model=Models.SOBOL,
                            num_trials=-1,
                            max_parallelism=3),
         ])
     # GS to force the scheduler to poll completed trials after each ran trial.
     self.sobol_GS_no_parallelism = GenerationStrategy(steps=[
         GenerationStep(
             model=Models.SOBOL, num_trials=-1, max_parallelism=1)
     ])
Example #7
0
 def test_enum_uniform(self):
     """Tests uniform random instantiation through the Models enum."""
     experiment = get_branin_experiment()
     bridge = Models.UNIFORM(experiment.search_space)
     # The enum factory should hand back a random-model bridge.
     self.assertIsInstance(bridge, RandomModelBridge)
     generator_run = bridge.gen(n=5)
     # Exactly the requested number of arms is produced.
     self.assertEqual(len(generator_run.arms), 5)
Example #8
0
 def test_transform_callback_int(self, *_):
     """`_transform_callback` rounds candidate values back onto the integer
     grid when the search space goes through the `IntToFloat` transform,
     for both torch- and numpy-backed bridges.
     """
     exp = get_branin_experiment(with_batch=True)
     data = get_branin_data(trial_indices=exp.trials)
     parameters = [
         RangeParameter(name="x1",
                        parameter_type=ParameterType.INT,
                        lower=1,
                        upper=10),
         RangeParameter(name="x2",
                        parameter_type=ParameterType.INT,
                        lower=5,
                        upper=15),
     ]
     gpei = TorchModelBridge(
         experiment=exp,
         data=data,
         search_space=SearchSpace(parameters=parameters),
         model=BotorchModel(),
         transforms=[IntToFloat],
         torch_dtype=torch.double,
         fit_out_of_design=True,
     )
     # 5.4 rounds to 5, 7.6 rounds to 8.
     transformed = gpei._transform_callback([5.4, 7.6])
     self.assertTrue(np.allclose(transformed, [5, 8]))
     np_mb = ArrayModelBridge(
         experiment=exp,
         data=exp.fetch_data(),
         search_space=SearchSpace(parameters=parameters),
         model=NumpyModel(),
         transforms=[IntToFloat],
     )
     transformed = np_mb._transform_callback(np.array([5.4, 7.6]))
     self.assertTrue(np.allclose(transformed, [5, 8]))
Example #9
0
    def testUpdateGenerationStrategyIncrementally(self):
        """Generator runs saved via `update_generation_strategy` are all
        present, and the current step index matches, after reloading the
        strategy by experiment name.
        """
        experiment = get_branin_experiment()
        generation_strategy = choose_generation_strategy(
            experiment.search_space)
        save_experiment(experiment=experiment)
        save_generation_strategy(generation_strategy=generation_strategy)

        # add generator runs, save, reload
        generator_runs = []
        for i in range(7):
            # The first generation happens before any data exists.
            data = get_branin_data() if i > 0 else None
            gr = generation_strategy.gen(experiment, data=data)
            generator_runs.append(gr)
            trial = experiment.new_trial(generator_run=gr).mark_running(
                no_runner_required=True)
            trial.mark_completed()

        save_experiment(experiment=experiment)
        update_generation_strategy(generation_strategy=generation_strategy,
                                   generator_runs=generator_runs)
        loaded_generation_strategy = load_generation_strategy_by_experiment_name(
            experiment_name=experiment.name)

        # Fixed: the original passed `1` as `assertEqual`'s `msg` argument,
        # so the `== 1` comparison was never actually performed. Split into
        # two assertions so both equalities are checked.
        self.assertEqual(generation_strategy._curr.index,
                         loaded_generation_strategy._curr.index)
        self.assertEqual(loaded_generation_strategy._curr.index, 1)
        self.assertEqual(len(loaded_generation_strategy._generator_runs), 7)
Example #10
0
    def test_best_raw_objective_point_unsatisfiable_relative(self):
        """With a relative outcome constraint and no status quo, best-point
        lookup only warns; once a status quo exists, the unsatisfiable
        constraint makes it raise.
        """
        exp = get_branin_experiment()

        # Optimization config with unsatisfiable constraint
        opt_conf = exp.optimization_config.clone()
        opt_conf.outcome_constraints.append(
            OutcomeConstraint(
                metric=get_branin_metric(),
                op=ComparisonOp.GEQ,
                bound=9999,
                relative=True,
            )
        )

        trial = exp.new_trial(
            generator_run=GeneratorRun(arms=[Arm(parameters={"x1": 5.0, "x2": 5.0})])
        ).run()
        trial.mark_completed()

        # Relative constraints cannot be evaluated without a status quo.
        with self.assertLogs(logger="ax.service.utils.best_point", level="WARN") as lg:
            get_best_raw_objective_point(exp, opt_conf)
            self.assertTrue(
                any("No status quo provided" in warning for warning in lg.output),
                msg=lg.output,
            )

        exp.status_quo = Arm(parameters={"x1": 0, "x2": 0}, name="status_quo")
        sq_trial = exp.new_trial(
            generator_run=GeneratorRun(arms=[exp.status_quo])
        ).run()
        sq_trial.mark_completed()

        with self.assertRaisesRegex(ValueError, "No points satisfied"):
            get_best_raw_objective_point(exp, opt_conf)
Example #11
0
    def testContours(self):
        """Each contour-plot constructor builds successfully, and scatter
        hoverovers include every parameter and metric name.
        """
        exp = get_branin_experiment(with_str_choice_param=True,
                                    with_batch=True)
        exp.trials[0].run()
        model = Models.BOTORCH(
            # Model bridge kwargs
            experiment=exp,
            data=exp.fetch_data(),
        )
        # Assert that each type of plot can be constructed successfully
        plot = plot_contour_plotly(model, model.parameters[0],
                                   model.parameters[1],
                                   list(model.metric_names)[0])
        self.assertIsInstance(plot, go.Figure)
        plot = interact_contour_plotly(model, list(model.metric_names)[0])
        self.assertIsInstance(plot, go.Figure)
        plot = interact_contour(model, list(model.metric_names)[0])
        self.assertIsInstance(plot, AxPlotConfig)
        # Fixed: the original had a duplicated `plot = plot = ...` assignment.
        plot = plot_contour(model, model.parameters[0],
                            model.parameters[1],
                            list(model.metric_names)[0])
        self.assertIsInstance(plot, AxPlotConfig)

        # Make sure all parameters and metrics are displayed in tooltips
        tooltips = list(exp.parameters.keys()) + list(exp.metrics.keys())
        for d in plot.data["data"]:
            # Only check scatter plots hoverovers
            if d["type"] != "scatter":
                continue
            for text in d["text"]:
                for tt in tooltips:
                    self.assertTrue(tt in text)
Example #12
0
 def test_is_moo_problem(self):
     """`is_moo_problem` tracks the multi-objective-ness of the config."""
     single_obj_exp = get_branin_experiment()
     self.assertFalse(single_obj_exp.is_moo_problem)
     moo_exp = get_branin_experiment_with_multi_objective()
     self.assertTrue(moo_exp.is_moo_problem)
     # With no optimization config at all, the experiment is not MOO.
     moo_exp._optimization_config = None
     self.assertFalse(moo_exp.is_moo_problem)
Example #13
0
 def test_exp_to_df(self):
     """`exp_to_df` returns a DataFrame and honors `run_metadata_fields`."""
     experiment = get_branin_experiment(with_batch=True)
     experiment.trials[0].run()
     frame = exp_to_df(experiment)
     self.assertIsInstance(frame, pd.DataFrame)
     # Requested run-metadata fields should appear as columns.
     frame = exp_to_df(experiment, run_metadata_fields=["name"])
     self.assertIn("name", frame.columns)
Example #14
0
    def test_get_model_from_generator_run(self):
        """Tests that it is possible to restore a model from a generator run it
        produced, if `Models` registry was used.
        """
        exp = get_branin_experiment()
        initial_sobol = Models.SOBOL(experiment=exp, seed=239)
        gr = initial_sobol.gen(n=1)
        # Restore the model as it was before generation.
        sobol = get_model_from_generator_run(generator_run=gr,
                                             experiment=exp,
                                             data=exp.fetch_data())
        self.assertEqual(sobol.model.init_position, 0)
        self.assertEqual(sobol.model.seed, 239)
        # Restore the model as it was after generation (to resume generation).
        sobol_after_gen = get_model_from_generator_run(generator_run=gr,
                                                       experiment=exp,
                                                       data=exp.fetch_data(),
                                                       after_gen=True)
        self.assertEqual(sobol_after_gen.model.init_position, 1)
        self.assertEqual(sobol_after_gen.model.seed, 239)
        # Both models continue the Sobol sequence with identical arms.
        self.assertEqual(
            initial_sobol.gen(n=1).arms,
            sobol_after_gen.gen(n=1).arms)
        exp.new_trial(generator_run=gr)
        # Check restoration of GPEI, to ensure proper restoration of callable kwargs
        gpei = Models.GPEI(experiment=exp, data=get_branin_data())
        # Punch GPEI model + bridge kwargs into the Sobol generator run, to avoid
        # a slow call to `gpei.gen`.
        gr._model_key = "GPEI"
        gr._model_kwargs = gpei._model_kwargs
        gr._bridge_kwargs = gpei._bridge_kwargs
        gpei_restored = get_model_from_generator_run(gr,
                                                     experiment=exp,
                                                     data=get_branin_data())
        # Compare bridge attributes field-by-field, skipping those that are
        # legitimately different between instantiations.
        for key in gpei.__dict__:
            self.assertIn(key, gpei_restored.__dict__)
            original, restored = gpei.__dict__[key], gpei_restored.__dict__[
                key]
            # Fit times are set in instantiation so not same and model compared below
            if key in ["fit_time", "fit_time_since_gen", "model"]:
                continue  # Fit times are set in instantiation so won't be same
            if isinstance(original, OrderedDict) and isinstance(
                    restored, OrderedDict):
                # Compare only key sets for ordered dicts.
                original, restored = list(original.keys()), list(
                    restored.keys())
            if isinstance(original, Model) and isinstance(restored, Model):
                continue  # Model equality is tough to compare.
            self.assertEqual(original, restored)

        for key in gpei.model.__dict__:
            self.assertIn(key, gpei_restored.model.__dict__)
            original, restored = (
                gpei.model.__dict__[key],
                gpei_restored.model.__dict__[key],
            )
            # Botorch model equality is tough to compare and training data
            # is unnecessary to compare, because data passed to model was the same
            if key in ["model", "warm_start_refitting", "Xs", "Ys"]:
                continue
            self.assertEqual(original, restored)
Example #15
0
    def setUp(self):
        """Fit GPEI and GPKG model specs on a completed 20-arm Sobol batch
        and build a `GenerationNode` that selects the better of the two.
        """
        self.branin_experiment = get_branin_experiment()
        sobol = Models.SOBOL(search_space=self.branin_experiment.search_space)
        sobol_run = sobol.gen(n=20)
        self.branin_experiment.new_batch_trial().add_generator_run(
            sobol_run
        ).run().mark_completed()
        data = self.branin_experiment.fetch_data()

        ms_gpei = ModelSpec(model_enum=Models.GPEI)
        ms_gpei.fit(experiment=self.branin_experiment, data=data)

        ms_gpkg = ModelSpec(model_enum=Models.GPKG)
        ms_gpkg.fit(experiment=self.branin_experiment, data=data)

        self.fitted_model_specs = [ms_gpei, ms_gpkg]

        self.model_selection_node = GenerationNode(
            model_specs=self.fitted_model_specs,
            best_model_selector=SingleDiagnosticBestModelSelector(
                diagnostic="Fisher exact test p",
                # Fixed: the original passed `MetricAggregation.MEAN` to
                # `criterion` and `DiagnosticCriterion.MIN` to
                # `metric_aggregation` — the enum values were swapped
                # relative to the keyword argument types.
                criterion=DiagnosticCriterion.MIN,
                metric_aggregation=MetricAggregation.MEAN,
            ),
        )
 def test_use_update(self, mock_fetch_trials_data, mock_update):
     """A generation step with `use_update=True` fetches only data for
     newly-completed trials and passes only that data to `update`.
     """
     exp = get_branin_experiment()
     sobol_gs_with_update = GenerationStrategy(
         steps=[GenerationStep(model=Models.SOBOL, num_trials=-1, use_update=True)]
     )
     # Try without passing data (generation strategy fetches data from experiment).
     trial = exp.new_trial(generator_run=sobol_gs_with_update.gen(experiment=exp))
     # No completed trials yet, so nothing to update with.
     mock_update.assert_not_called()
     trial._status = TrialStatus.COMPLETED
     for i in range(3):
         trial = exp.new_trial(
             generator_run=sobol_gs_with_update.gen(experiment=exp)
         )
         # Only the most recently completed trial's data is fetched.
         self.assertEqual(
             mock_fetch_trials_data.call_args[1].get("trial_indices"), {i}
         )
         trial._status = TrialStatus.COMPLETED
     # Try with passing data.
     sobol_gs_with_update.gen(
         experiment=exp, data=get_branin_data(trial_indices=range(4))
     )
     # Only the data for the last completed trial should be considered new and passed
     # to `update`.
     self.assertEqual(
         set(mock_update.call_args[1].get("new_data").df["trial_index"].values), {3}
     )
Example #17
0
    def test_GPKG(self):
        """Tests GPKG instantiation."""
        exp = get_branin_experiment(with_batch=True)
        # presumably fails because the batch has not run yet (no data) — verify
        with self.assertRaises(ValueError):
            get_GPKG(experiment=exp, data=exp.fetch_data())
        exp.trials[0].run()
        gpkg = get_GPKG(experiment=exp, data=exp.fetch_data())
        self.assertIsInstance(gpkg, TorchModelBridge)

        # test transform_configs with winsorization
        configs = {
            "Winsorize": {
                "winsorization_lower": 0.1,
                "winsorization_upper": 0.1
            }
        }
        gpkg_win = get_GPKG(experiment=exp,
                            data=exp.fetch_data(),
                            transform_configs=configs)
        self.assertIsInstance(gpkg_win, TorchModelBridge)
        # The configs are stored on the bridge verbatim.
        self.assertEqual(gpkg_win._transform_configs, configs)

        # test multi-fidelity optimization
        exp.parameters["x2"] = RangeParameter(
            name="x2",
            parameter_type=exp.parameters["x2"].parameter_type,
            lower=-5.0,
            upper=10.0,
            is_fidelity=True,
            target_value=10.0,
        )
        gpkg_mf = get_GPKG(experiment=exp, data=exp.fetch_data())
        self.assertIsInstance(gpkg_mf, TorchModelBridge)
Example #18
0
 def testOODStatusQuo(self):
     """Computing a Pareto frontier on a batch-trial experiment requires
     either an explicit trial index or explicit data.
     """
     # An OOD status quo arm without a trial index will raise an error
     experiment = get_branin_experiment()
     experiment.add_tracking_metric(
         BraninMetric(name="m2", param_names=["x1", "x2"]))
     metrics = list(experiment.metrics.values())
     sobol = Models.SOBOL(experiment.search_space)
     a = sobol.gen(5)
     experiment.new_batch_trial(generator_run=a).run()
     # Experiments with batch trials must specify a trial index
     with self.assertRaises(UnsupportedError):
         compute_pareto_frontier(
             experiment,
             metrics[0],
             metrics[1],
             absolute_metrics=[m.name for m in metrics],
         )
     # Passing an explicit trial index succeeds.
     compute_pareto_frontier(
         experiment,
         metrics[0],
         metrics[1],
         trial_index=0,
         absolute_metrics=[m.name for m in metrics],
     )
     # Passing explicit data also succeeds.
     compute_pareto_frontier(
         experiment,
         metrics[0],
         metrics[1],
         data=experiment.fetch_data(),
         absolute_metrics=[m.name for m in metrics],
     )
Example #19
0
    def test_MOO_PAREGO(self):
        """ParEGO factory rejects single-objective experiments and MOO
        experiments before any trial has completed, then generates from a
        valid setup.
        """
        single_obj_exp = get_branin_experiment(with_batch=True)
        with self.assertRaises(ValueError):
            get_MOO_PAREGO(experiment=single_obj_exp, data=single_obj_exp.fetch_data())

        multi_obj_exp = get_branin_experiment_with_multi_objective(with_batch=True)
        # presumably fails because no trial has completed yet — verify
        with self.assertRaises(ValueError):
            get_MOO_PAREGO(experiment=multi_obj_exp, data=multi_obj_exp.fetch_data())

        multi_obj_exp.trials[0].run().mark_completed()
        moo_parego = get_MOO_PAREGO(
            experiment=multi_obj_exp, data=multi_obj_exp.fetch_data()
        )
        self.assertIsInstance(moo_parego, TorchModelBridge)
        # ParEGO uses Chebyshev scalarization of the objectives.
        self.assertEqual(
            {
                "acquisition_function_kwargs": {
                    "chebyshev_scalarization": True,
                    "sequential": True,
                }
            },
            moo_parego._default_model_gen_options,
        )
        moo_parego_run = moo_parego.gen(n=2)
        self.assertEqual(len(moo_parego_run.arms), 2)
Example #20
0
 def test_restore_from_generator_run(self):
     """Restoring a strategy's model from its last generator run recreates
     an equivalent (but distinct) model bridge with the same training data.
     """
     gs = GenerationStrategy(
         steps=[GenerationStep(model=Models.SOBOL, num_trials=5)])
     # No generator runs on GS, so can't restore from one.
     with self.assertRaises(ValueError):
         gs._restore_model_from_generator_run()
     exp = get_branin_experiment(with_batch=True)
     gs.gen(experiment=exp)
     model = gs.model
     # Create a copy of the generation strategy and check that when
     # we restore from last generator run, the model will be set
     # correctly and that `_seen_trial_indices_by_status` is filled.
     new_gs = GenerationStrategy(
         steps=[GenerationStep(model=Models.SOBOL, num_trials=5)])
     new_gs._experiment = exp
     new_gs._generator_runs = gs._generator_runs
     self.assertIsNone(new_gs._seen_trial_indices_by_status)
     new_gs._restore_model_from_generator_run()
     # NOTE(review): this asserts on `gs`, not `new_gs` — confirm which
     # strategy's seen-trial bookkeeping was meant to be checked here.
     self.assertEqual(gs._seen_trial_indices_by_status,
                      exp.trial_indices_by_status)
     # Model should be reset, but it should be the same model with same data.
     self.assertIsNot(model, new_gs.model)
     self.assertEqual(model.__class__,
                      new_gs.model.__class__)  # Model bridge.
     self.assertEqual(model.model.__class__,
                      new_gs.model.model.__class__)  # Model.
     self.assertEqual(model._training_data, new_gs.model._training_data)
Example #21
0
 def test_transform_callback_int_log(self, *_):
     """`_transform_callback` round-trips through `IntToFloat` + `Log`:
     values are untransformed, rounded to integers, then re-logged.
     """
     exp = get_branin_experiment(with_batch=True)
     parameters = [
         RangeParameter(
             name="x1",
             parameter_type=ParameterType.INT,
             lower=1,
             upper=100,
             log_scale=True,
         ),
         RangeParameter(
             name="x2",
             parameter_type=ParameterType.INT,
             lower=1,
             upper=100,
             log_scale=True,
         ),
     ]
     gpei = TorchModelBridge(
         experiment=exp,
         data=exp.fetch_data(),
         search_space=SearchSpace(parameters=parameters),
         model=BotorchModel(),
         transforms=[IntToFloat, Log],
         torch_dtype=torch.double,
         fit_out_of_design=True,
     )
     # presumably 10**0.5 ≈ 3.16 rounds to 3 (log10(3) ≈ 0.47712) and
     # 10**1.5 ≈ 31.6 rounds to 32 (log10(32) ≈ 1.50515) — verify
     transformed = gpei._transform_callback([0.5, 1.5])
     self.assertTrue(np.allclose(transformed, [0.47712, 1.50515]))
Example #22
0
    def test_factorial_thompson_strategy(self, _):
        """Factorial+Thompson strategy uses the Factorial model for the
        first batch and Thompson sampling thereafter (bridge is mocked).
        """
        exp = get_branin_experiment()
        factorial_thompson_generation_strategy = GenerationStrategy(steps=[
            GenerationStep(
                model=Models.FACTORIAL,
                num_trials=1,
                model_kwargs=self.step_model_kwargs,
            ),
            GenerationStep(
                model=Models.THOMPSON,
                num_trials=-1,
                model_kwargs=self.step_model_kwargs,
            ),
        ])
        self.assertEqual(factorial_thompson_generation_strategy.name,
                         "Factorial+Thompson")
        self.assertEqual(
            factorial_thompson_generation_strategy.model_transitions, [1])
        mock_model_bridge = self.mock_discrete_model_bridge.return_value

        # Initial factorial batch.
        exp.new_batch_trial(
            factorial_thompson_generation_strategy.gen(experiment=exp))
        args, kwargs = mock_model_bridge._set_kwargs_to_save.call_args
        self.assertEqual(kwargs.get("model_key"), "Factorial")

        # Subsequent Thompson sampling batch.
        exp.new_batch_trial(
            factorial_thompson_generation_strategy.gen(experiment=exp))
        args, kwargs = mock_model_bridge._set_kwargs_to_save.call_args
        self.assertEqual(kwargs.get("model_key"), "Thompson")
Example #23
0
 def test_transform_callback_log(self, *_):
     """`_transform_callback` leaves float log-scale parameters unchanged
     (no rounding applies, so the round trip is the identity).
     """
     parameters = [
         RangeParameter(
             name="x1",
             parameter_type=ParameterType.FLOAT,
             lower=1,
             upper=3,
             log_scale=True,
         ),
         RangeParameter(
             name="x2",
             parameter_type=ParameterType.FLOAT,
             lower=1,
             upper=3,
             log_scale=True,
         ),
     ]
     search_space = SearchSpace(parameters=parameters)
     exp = get_branin_experiment(with_batch=True, search_space=search_space)
     gpei = TorchModelBridge(
         experiment=exp,
         data=exp.fetch_data(),
         search_space=search_space,
         model=BotorchModel(),
         transforms=[Log],
         torch_dtype=torch.double,
         fit_out_of_design=True,
     )
     # Values pass through unchanged.
     transformed = gpei._transform_callback([1.2, 2.5])
     self.assertTrue(np.allclose(transformed, [1.2, 2.5]))
Example #24
0
 def test_current_generator_run_limit_unlimited_second_step(self):
     """With an unlimited second step, trial counts per round are capped
     only by that step's `max_parallelism`.
     """
     NUM_INIT_TRIALS = 5
     SECOND_STEP_PARALLELISM = 3
     NUM_ROUNDS = 4
     exp = get_branin_experiment()
     sobol_gs_with_parallelism_limits = GenerationStrategy(steps=[
         GenerationStep(
             model=Models.SOBOL,
             num_trials=NUM_INIT_TRIALS,
             min_trials_observed=3,
         ),
         GenerationStep(
             model=Models.SOBOL,
             num_trials=-1,
             max_parallelism=SECOND_STEP_PARALLELISM,
         ),
     ])
     sobol_gs_with_parallelism_limits._experiment = exp
     could_gen = self._run_GS_for_N_rounds(
         gs=sobol_gs_with_parallelism_limits,
         exp=exp,
         num_rounds=NUM_ROUNDS)
     # We expect trials from first generation step + trials from remaining rounds in
     # batches limited by parallelism setting in the second step.
     self.assertEqual(
         len(exp.trials),
         NUM_INIT_TRIALS + (NUM_ROUNDS - 1) * SECOND_STEP_PARALLELISM,
     )
     self.assertTrue(all(t.status.is_completed
                         for t in exp.trials.values()))
     # Per-round counts: all init trials first, then the parallelism cap.
     self.assertEqual(could_gen, [NUM_INIT_TRIALS] +
                      [SECOND_STEP_PARALLELISM] * (NUM_ROUNDS - 1))
Example #25
0
 def setUp(self) -> None:
     """Create a Branin experiment with one completed 20-arm Sobol batch
     and fetch its data for use in the tests.
     """
     self.experiment = get_branin_experiment()
     sobol_model = Models.SOBOL(search_space=self.experiment.search_space)
     generator_run = sobol_model.gen(n=20)
     batch = self.experiment.new_batch_trial().add_generator_run(generator_run)
     batch.run().mark_completed()
     self.data = self.experiment.fetch_data()
Example #26
0
    def test_MOO_EHVI(self):
        """EHVI factory rejects single-objective experiments and MOO
        experiments before any trial runs, then generates from a valid
        setup with a per-metric reference point.
        """
        single_obj_exp = get_branin_experiment(with_batch=True)
        with self.assertRaises(ValueError):
            get_MOO_EHVI(
                experiment=single_obj_exp,
                data=single_obj_exp.fetch_data(),
                ref_point=[0, 0],
            )
        multi_obj_exp = get_branin_experiment_with_multi_objective(
            with_batch=True)
        metrics = multi_obj_exp.optimization_config.objective.metrics
        # presumably fails because the trial has not run (no data) — verify
        with self.assertRaises(ValueError):
            get_MOO_EHVI(
                experiment=multi_obj_exp,
                data=multi_obj_exp.fetch_data(),
                ref_point={
                    metrics[0].name: 0.0,
                    metrics[1].name: 0.0
                },
            )

        multi_obj_exp.trials[0].run()
        moo_ehvi = get_MOO_EHVI(
            experiment=multi_obj_exp,
            data=multi_obj_exp.fetch_data(),
            ref_point={
                metrics[0].name: 0.0,
                metrics[1].name: 0.0
            },
        )
        self.assertIsInstance(moo_ehvi, MultiObjectiveTorchModelBridge)
        moo_ehvi_run = moo_ehvi.gen(n=1)
        self.assertEqual(len(moo_ehvi_run.arms), 1)
Example #27
0
    def test_MOO_RS(self):
        """Random-scalarization factory rejects single-objective experiments
        and MOO experiments before any trial runs, then generates from a
        valid setup.
        """
        single_obj_exp = get_branin_experiment(with_batch=True)
        with self.assertRaises(ValueError):
            get_MOO_RS(experiment=single_obj_exp,
                       data=single_obj_exp.fetch_data())

        multi_obj_exp = get_branin_experiment_with_multi_objective(
            with_batch=True)
        # presumably fails because the trial has not run (no data) — verify
        with self.assertRaises(ValueError):
            get_MOO_RS(experiment=multi_obj_exp,
                       data=multi_obj_exp.fetch_data())

        multi_obj_exp.trials[0].run()
        moo_rs = get_MOO_RS(experiment=multi_obj_exp,
                            data=multi_obj_exp.fetch_data())
        self.assertIsInstance(moo_rs, TorchModelBridge)
        # Random scalarization of objectives is the distinguishing option.
        self.assertEqual(
            {
                "acquisition_function_kwargs": {
                    "random_scalarization": True,
                    "sequential": True,
                }
            },
            moo_rs._default_model_gen_options,
        )
        moo_rs_run = moo_rs.gen(n=5)
        self.assertEqual(len(moo_rs_run.arms), 5)
 def test_sobol_GPEI_strategy_batches(self):
     """Sobol+GPEI strategy (GPEI bridge mocked) produces batch trials
     until its 1 + 6 trial quota is reached, then raises
     `GenerationStrategyCompleted`.
     """
     mock_GPEI_gen = self.mock_torch_model_bridge.return_value.gen
     mock_GPEI_gen.return_value = GeneratorRun(
         arms=[
             Arm(parameters={"x1": 1, "x2": 2}),
             Arm(parameters={"x1": 3, "x2": 4}),
         ]
     )
     exp = get_branin_experiment()
     sobol_GPEI_generation_strategy = GenerationStrategy(
         name="Sobol+GPEI",
         steps=[
             GenerationStep(model=Models.SOBOL, num_trials=1),
             GenerationStep(model=Models.GPEI, num_trials=6),
         ],
     )
     self.assertEqual(sobol_GPEI_generation_strategy.name, "Sobol+GPEI")
     self.assertEqual(sobol_GPEI_generation_strategy.model_transitions, [1])
     gr = sobol_GPEI_generation_strategy.gen(exp, n=2)
     exp.new_batch_trial(generator_run=gr).run()
     for i in range(1, 8):
         if i == 7:
             # Check completeness error message.
             with self.assertRaises(GenerationStrategyCompleted):
                 g = sobol_GPEI_generation_strategy.gen(exp, n=2)
         else:
             g = sobol_GPEI_generation_strategy.gen(exp, n=2)
         # NOTE(review): when i == 7 `gen` raises, so `g` from the previous
         # iteration is re-added here — confirm this is intended.
         exp.new_batch_trial(generator_run=g).run()
     self.assertIsInstance(sobol_GPEI_generation_strategy.model, TorchModelBridge)
Example #29
0
    def testDecodeGenerationStrategy(self):
        """A generation strategy survives a JSON encode/decode round trip at
        every stage of its life cycle: fresh, after generating, and after
        being updated with data.
        """
        def roundtrip(gs):
            # Encode to JSON and decode back into a new strategy object.
            return generation_strategy_from_json(object_to_json(gs))

        generation_strategy = get_generation_strategy()
        experiment = get_branin_experiment()
        decoded = roundtrip(generation_strategy)
        self.assertEqual(generation_strategy, decoded)
        self.assertGreater(len(decoded._steps), 0)
        self.assertIsInstance(decoded._steps[0].model, Models)
        # Nothing has been generated yet, so no model is initialized.
        self.assertIsNone(decoded.model)

        # Round trip again after the strategy has produced a generator run;
        # this time the model must be restored on decode.
        generation_strategy = decoded
        gr = generation_strategy.gen(experiment)
        decoded = roundtrip(generation_strategy)
        self.assertEqual(generation_strategy, decoded)
        self.assertIsInstance(decoded._steps[0].model, Models)
        self.assertIsInstance(decoded.model, ModelBridge)

        # Round trip once more after attaching the generated run as a trial
        # and feeding data to the strategy via `gen`.
        generation_strategy = decoded
        experiment.new_trial(gr)
        generation_strategy.gen(experiment, data=get_branin_data())
        decoded = roundtrip(generation_strategy)
        self.assertEqual(generation_strategy, decoded)
        self.assertIsInstance(decoded._steps[0].model, Models)
        self.assertIsInstance(decoded.model, ModelBridge)
Example #30
0
 def test_factorial_thompson_strategy(self, mock_update, mock_gen,
                                      mock_discrete):
     """Factorial+Thompson strategy dispatches to the expected underlying
     model on each step: `FullFactorialGenerator` first, then a Thompson
     sampler once data is available.

     Updated for API consistency with the rest of this file: `num_trials`
     instead of the deprecated `num_arms`, `model_transitions` instead of
     `generator_changes`, and `gen(..., data=...)` instead of `new_data=`
     (see the Sobol+GPEI test, which already uses these names).
     """
     exp = get_branin_experiment()
     # `num_trials=-1` means the final step has no trial limit.
     factorial_thompson_generation_strategy = GenerationStrategy(steps=[
         GenerationStep(model=Models.FACTORIAL, num_trials=1),
         GenerationStep(model=Models.THOMPSON, num_trials=-1),
     ])
     # Name is auto-derived from the step models.
     self.assertEqual(factorial_thompson_generation_strategy.name,
                      "Factorial+Thompson")
     self.assertEqual(
         factorial_thompson_generation_strategy.model_transitions, [1])
     for i in range(2):
         data = get_data() if i > 0 else None
         factorial_thompson_generation_strategy.gen(experiment=exp,
                                                    data=data)
         exp.new_batch_trial().add_arm(Arm(parameters={"x1": i, "x2": i}))
         if i < 1:
             # First step: the factorial generator should back the bridge.
             mock_discrete.assert_called()
             args, kwargs = mock_discrete.call_args
             self.assertIsInstance(kwargs.get("model"),
                                   FullFactorialGenerator)
             exp.new_batch_trial()
         else:
             # Second step: a (possibly empirical-Bayes) Thompson sampler.
             mock_discrete.assert_called()
             args, kwargs = mock_discrete.call_args
             self.assertIsInstance(
                 kwargs.get("model"),
                 (ThompsonSampler, EmpiricalBayesThompsonSampler),
             )