Example #1
    def test_run_preattached_trials_only(self):
        # Assert that pre-attached trials run when max_trials equals the
        # number of pre-attached trials.
        scheduler = Scheduler(
            experiment=self.branin_experiment,  # Has runner and metrics.
            generation_strategy=self.two_sobol_steps_GS,
            options=SchedulerOptions(
                init_seconds_between_polls=0.1,  # Short between polls so test is fast.
            ),
        )
        trial = scheduler.experiment.new_trial()
        parameter_dict = {"x1": 5, "x2": 5}
        trial.add_arm(Arm(parameters=parameter_dict))
        with self.assertRaisesRegex(
            UserInputError,
            "number of pre-attached candidate trials .* is greater than",
        ):
            scheduler.run_n_trials(max_trials=0)
        scheduler.run_n_trials(max_trials=1)
        self.assertEqual(len(scheduler.experiment.trials), 1)
        self.assertDictEqual(
            scheduler.experiment.trials[0].arm.parameters, parameter_dict
        )
        self.assertTrue(  # Make sure all trials got to complete.
            all(t.completed_successfully for t in scheduler.experiment.trials.values())
        )
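These examples reference fixtures such as `self.branin_experiment` (a Branin experiment with a runner and metrics) and `self.two_sobol_steps_GS`, which are built in the test case's `setUp`. Below is a minimal sketch of what that setup could look like, assuming Ax's testing stubs; the class name, the `SyntheticRunner`, and the Sobol step sizes are illustrative assumptions, not taken from the original test module.

from unittest import TestCase

from ax.modelbridge.generation_strategy import GenerationStep, GenerationStrategy
from ax.modelbridge.registry import Models
from ax.runners.synthetic import SyntheticRunner
from ax.utils.testing.core_stubs import get_branin_experiment


class TestAxScheduler(TestCase):  # Hypothetical test-case name.
    def setUp(self) -> None:
        # Branin experiment with metrics (via its optimization config) and a
        # runner attached, so the Scheduler can deploy trials.
        self.branin_experiment = get_branin_experiment()
        self.branin_experiment.runner = SyntheticRunner()  # Assumed runner.
        # Generation strategy with two Sobol steps; step sizes are assumed.
        self.two_sobol_steps_GS = GenerationStrategy(
            steps=[
                GenerationStep(model=Models.SOBOL, num_trials=5),
                GenerationStep(model=Models.SOBOL, num_trials=-1),
            ]
        )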
Example #2
    def test_get_best_trial(self):
        scheduler = Scheduler(
            experiment=self.branin_experiment,  # Has runner and metrics.
            generation_strategy=self.two_sobol_steps_GS,
            options=SchedulerOptions(
                init_seconds_between_polls=0.1,  # Short between polls so test is fast.
            ),
        )

        self.assertIsNone(scheduler.get_best_parameters())

        scheduler.run_n_trials(max_trials=1)

        trial, params, _arm = scheduler.get_best_trial()
        just_params, _just_arm = scheduler.get_best_parameters()
        just_params_unmodeled, _just_arm_unmodeled = scheduler.get_best_parameters(
            use_model_predictions=False)
        with self.assertRaisesRegex(NotImplementedError,
                                    "Please use `get_best_parameters`"):
            scheduler.get_pareto_optimal_parameters()

        self.assertEqual(trial, 0)
        self.assertIn("x1", params)
        self.assertIn("x2", params)

        self.assertEqual(params, just_params)
        self.assertEqual(params, just_params_unmodeled)
Example #3
    def test_optimization_complete(self, _):
        # `_gen_multiple` is mocked (the extra `_` argument) to raise an
        # `OptimizationComplete` error during candidate generation.
        scheduler = Scheduler(
            experiment=self.branin_experiment,  # Has runner and metrics.
            generation_strategy=self.two_sobol_steps_GS,
            options=SchedulerOptions(
                max_pending_trials=100,
                init_seconds_between_polls=0.1,  # Short between polls so test is fast.
            ),
        )
        scheduler.run_n_trials(max_trials=1)
        # No trials should run if `_gen_multiple` raises `OptimizationComplete`.
        self.assertEqual(len(scheduler.experiment.trials), 0)
Example #4
    def test_stop_trial(self):
        # `Scheduler.stop_trial_runs` should delegate stopping to the runner's
        # `stop` method.
        scheduler = Scheduler(
            experiment=self.branin_experiment,  # Has runner and metrics.
            generation_strategy=self.two_sobol_steps_GS,
            options=SchedulerOptions(
                init_seconds_between_polls=0.1,  # Short between polls so test is fast.
            ),
        )
        with patch.object(
            scheduler.experiment.runner, "stop", return_value=None
        ) as mock_runner_stop:
            scheduler.run_n_trials(max_trials=1)
            scheduler.stop_trial_runs(trials=[scheduler.experiment.trials[0]])
            mock_runner_stop.assert_called_once()
Example #5
    def test_base_report_results(self):
        # With `NoReportResultsRunner`, `run_n_trials` returns the base
        # `OptimizationResult`.
        self.branin_experiment.runner = NoReportResultsRunner()
        scheduler = Scheduler(
            experiment=self.branin_experiment,  # Has runner and metrics.
            generation_strategy=self.two_sobol_steps_GS,
            options=SchedulerOptions(init_seconds_between_polls=0),
        )
        self.assertEqual(scheduler.run_n_trials(max_trials=3), OptimizationResult())
Example #6
    def test_run_n_trials(self):
        # With runners & metrics, `Scheduler.run_n_trials` should run.
        scheduler = Scheduler(
            experiment=self.branin_experiment,  # Has runner and metrics.
            generation_strategy=self.two_sobol_steps_GS,
            options=SchedulerOptions(
                init_seconds_between_polls=0.1,  # Short between polls so test is fast.
            ),
        )
        scheduler.run_n_trials(max_trials=1)
        self.assertEqual(len(scheduler.experiment.trials), 1)
        scheduler.run_n_trials(max_trials=10)
        self.assertTrue(  # Make sure all trials got to complete.
            all(t.completed_successfully for t in scheduler.experiment.trials.values())
        )
        # Check that all the data fetched during optimization was attached to
        # the experiment.
        dat = scheduler.experiment.fetch_data().df
        self.assertEqual(set(dat["trial_index"].values), set(range(11)))
Example #7
    def test_get_best_trial_moo(self):
        experiment = get_branin_experiment_with_multi_objective()
        experiment.runner = self.runner

        scheduler = Scheduler(
            experiment=experiment,
            generation_strategy=self.sobol_GPEI_GS,
            options=SchedulerOptions(init_seconds_between_polls=0.1),
        )

        scheduler.run_n_trials(max_trials=1)

        with self.assertRaisesRegex(
                NotImplementedError,
                "Please use `get_pareto_optimal_parameters`"):
            scheduler.get_best_trial()

        with self.assertRaisesRegex(
                NotImplementedError,
                "Please use `get_pareto_optimal_parameters`"):
            scheduler.get_best_parameters()

        self.assertIsNotNone(scheduler.get_pareto_optimal_parameters())
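Example #7 additionally uses `self.runner` and `self.sobol_GPEI_GS`. Continuing the hypothetical `setUp` sketched after Example #1, those fixtures could be created as follows; the Sobol-then-GPEI step layout and the step sizes are assumptions suggested only by the fixture's name.

        # Runner reused for the multi-objective experiment (assumed).
        self.runner = SyntheticRunner()
        # Sobol initialization followed by a GPEI step, as the name suggests;
        # the exact number of Sobol trials is an assumption.
        self.sobol_GPEI_GS = GenerationStrategy(
            steps=[
                GenerationStep(model=Models.SOBOL, num_trials=5),
                GenerationStep(model=Models.GPEI, num_trials=-1),
            ]
        )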