Example #1
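A test helper that runs a GenerationStrategy against an Experiment for a fixed number of rounds. Each round it asks the strategy how many trials it may generate, generates and runs that many, then attaches Branin evaluation data via attach_data and marks the trials completed; it returns the per-round generation counts. get_pending and get_branin_data appear to be Ax test utilities.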
    def _run_GS_for_N_rounds(self, gs: GenerationStrategy, exp: Experiment,
                             num_rounds: int) -> List[int]:
        """Run the generation strategy for `num_rounds` rounds and return how
        many trials it could generate in each round."""
        could_gen = []
        for _ in range(num_rounds):
            # Ask the strategy how many trials it may generate right now and
            # whether the optimization is already complete.
            (
                num_trials_to_gen,
                opt_complete,
            ) = gs.current_generator_run_limit()
            self.assertFalse(opt_complete)
            could_gen.append(num_trials_to_gen)
            trials = []

            for _ in range(num_trials_to_gen):
                gr = gs.gen(
                    experiment=exp,
                    pending_observations=get_pending(experiment=exp),
                )
                trials.append(
                    exp.new_trial(gr).mark_running(no_runner_required=True))

            # Attach Branin data for each trial, then mark it completed.
            for trial in trials:
                exp.attach_data(get_branin_data(trial_indices=[trial.index]))
                trial.mark_completed()

        return could_gen
Example #2
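A test that an Experiment created with DataType.MAP_DATA starts with no trials, and that fetch_data returns an empty DataFrame once a tracking metric is added but before any data is attached. The excerpt ends right after map data is attached, without a final assertion.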
    def testEmptyMetrics(self):
        empty_experiment = Experiment(
            name="test_experiment",
            search_space=get_search_space(),
            default_data_type=DataType.MAP_DATA,
        )
        self.assertEqual(empty_experiment.num_trials, 0)
        empty_experiment.add_tracking_metric(Metric(name="ax_test_metric"))
        self.assertTrue(empty_experiment.fetch_data().df.empty)
        empty_experiment.attach_data(get_map_data())
Example #3
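A variant of the empty-metrics test: fetch_data raises ValueError while the experiment has no metrics, both at the experiment level and at the batch-trial level. After a tracking metric is added and data is attached, fetch_data returns a non-empty DataFrame.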
    def testEmptyMetrics(self):
        empty_experiment = Experiment(
            name="test_experiment", search_space=get_search_space()
        )
        self.assertEqual(empty_experiment.num_trials, 0)
        with self.assertRaises(ValueError):
            empty_experiment.fetch_data()
        batch = empty_experiment.new_batch_trial()
        self.assertEqual(empty_experiment.num_trials, 1)
        with self.assertRaises(ValueError):
            batch.fetch_data()
        empty_experiment.add_tracking_metric(Metric(name="some_metric"))
        empty_experiment.attach_data(get_data())
        self.assertFalse(empty_experiment.fetch_data().df.empty)
Example #4
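A benchmark-replication loop built on the Ax Developer API. For each trial it generates a candidate (or batch) from the GenerationStrategy, runs it with a SyntheticRunner, evaluates it via benchmark_trial, and attaches any non-empty Data to the experiment. Exceptions are collected, and the replication aborts once more than failed_trials_tolerated of them have accumulated.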
def _benchmark_replication_Dev_API(
    problem: BenchmarkProblem,
    method: GenerationStrategy,
    num_trials: int,
    experiment_name: str,
    batch_size: int = 1,
    raise_all_exceptions: bool = False,
    benchmark_trial: FunctionType = benchmark_trial,
    verbose_logging: bool = True,
    # Number of trials that need to fail for a replication to be considered failed.
    failed_trials_tolerated: int = 5,
    async_benchmark_options: Optional[AsyncBenchmarkOptions] = None,
) -> Tuple[Experiment, List[Exception]]:
    """Run a benchmark replication via the Developer API because the problem was
    set up with Ax classes (likely to allow for additional complexity like
    adding constraints or non-range parameters).
    """
    if async_benchmark_options is not None:
        raise NonRetryableBenchmarkingError(
            "`async_benchmark_options` not supported when using the Dev API."
        )

    exceptions = []
    experiment = Experiment(
        name=experiment_name,
        search_space=problem.search_space,
        optimization_config=problem.optimization_config,
        runner=SyntheticRunner(),
    )
    for trial_index in range(num_trials):
        try:
            gr = method.gen(experiment=experiment, n=batch_size)
            if batch_size == 1:
                trial = experiment.new_trial(generator_run=gr)
            else:
                assert batch_size > 1
                trial = experiment.new_batch_trial(generator_run=gr)
            trial.run()
            # TODO[T94059549]: Rm 3 lines below when attaching data in fetch is fixed.
            data = benchmark_trial(experiment=experiment, trial_index=trial_index)
            if not data.df.empty:
                experiment.attach_data(data=data)
        except Exception as err:  # TODO[T53975770]: test
            if raise_all_exceptions:
                raise
            exceptions.append(err)
        if len(exceptions) > failed_trials_tolerated:
            raise RuntimeError(  # TODO[T53975770]: test
                f"More than {failed_trials_tolerated} failed for {experiment_name}."
            )
    return experiment, exceptions
Example #5
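A test-fixture factory that assembles a Branin Experiment with optional extras: a status quo arm, a Sobol-generated batch or single trial, and, when requested, a completed trial whose evaluation data is supplied through attach_data.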
def get_branin_experiment(
    has_optimization_config: bool = True,
    with_batch: bool = False,
    with_trial: bool = False,
    with_status_quo: bool = False,
    with_fidelity_parameter: bool = False,
    with_choice_parameter: bool = False,
    with_str_choice_param: bool = False,
    search_space: Optional[SearchSpace] = None,
    minimize: bool = False,
    named: bool = True,
    with_completed_trial: bool = False,
) -> Experiment:
    search_space = search_space or get_branin_search_space(
        with_fidelity_parameter=with_fidelity_parameter,
        with_choice_parameter=with_choice_parameter,
        with_str_choice_param=with_str_choice_param,
    )
    exp = Experiment(
        name="branin_test_experiment" if named else None,
        search_space=search_space,
        optimization_config=(
            get_branin_optimization_config(minimize=minimize)
            if has_optimization_config
            else None
        ),
        runner=SyntheticRunner(),
        is_test=True,
    )

    if with_status_quo:
        exp.status_quo = Arm(parameters={"x1": 0.0, "x2": 0.0})

    if with_batch:
        sobol_generator = get_sobol(search_space=exp.search_space)
        sobol_run = sobol_generator.gen(n=15)
        exp.new_batch_trial(optimize_for_power=with_status_quo).add_generator_run(
            sobol_run
        )

    if with_trial or with_completed_trial:
        sobol_generator = get_sobol(search_space=exp.search_space)
        sobol_run = sobol_generator.gen(n=1)
        trial = exp.new_trial(generator_run=sobol_run)

        if with_completed_trial:
            trial.mark_running(no_runner_required=True)
            # Add data for the one completed trial.
            exp.attach_data(get_branin_data(trials=[trial]))
            trial.mark_completed()

    return exp
Example #6
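Another empty-metrics test, this time exercising the trial life cycle: while the batch trial is running, fetch_data returns an empty DataFrame; after data is attached and the batch is marked completed, the fetched DataFrame is non-empty.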
    def testEmptyMetrics(self):
        empty_experiment = Experiment(
            name="test_experiment", search_space=get_search_space()
        )
        self.assertEqual(empty_experiment.num_trials, 0)
        with self.assertRaises(ValueError):
            empty_experiment.fetch_data()
        batch = empty_experiment.new_batch_trial()
        batch.mark_running(no_runner_required=True)
        self.assertEqual(empty_experiment.num_trials, 1)
        with self.assertRaises(ValueError):
            batch.fetch_data()
        empty_experiment.add_tracking_metric(Metric(name="ax_test_metric"))
        self.assertTrue(empty_experiment.fetch_data().df.empty)
        empty_experiment.attach_data(get_data())
        batch.mark_completed()
        self.assertFalse(empty_experiment.fetch_data().df.empty)
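
The examples above share one flow: open a trial, attach its data, then complete it so fetch_data picks the data up. Below is a minimal standalone sketch of that flow, assuming the Ax test utilities used throughout (get_branin_experiment, get_branin_data, get_sobol) are importable as in recent Ax versions; exact module paths and signatures may vary.

from ax.modelbridge.factory import get_sobol
from ax.utils.testing.core_stubs import get_branin_data, get_branin_experiment

# Build a Branin test experiment (includes a SyntheticRunner).
exp = get_branin_experiment()

# Generate one Sobol arm and open a trial for it.
sobol = get_sobol(search_space=exp.search_space)
trial = exp.new_trial(generator_run=sobol.gen(n=1))
trial.mark_running(no_runner_required=True)

# Attach evaluation data for this trial, then close it out so that
# fetch_data() includes the attached results.
exp.attach_data(get_branin_data(trial_indices=[trial.index]))
trial.mark_completed()

print(exp.fetch_data().df)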