Example #1
    def testEncodeDecodeSimpleBenchmarkProblem(self):
        branin_problem = get_branin_simple_benchmark_problem()
        sum_problem = get_sum_simple_benchmark_problem()
        new_branin_problem = object_from_json(
            object_to_json(
                branin_problem,
                encoder_registry=DEPRECATED_ENCODER_REGISTRY,
                class_encoder_registry=DEPRECATED_CLASS_ENCODER_REGISTRY,
            ),
            decoder_registry=DEPRECATED_DECODER_REGISTRY,
            class_decoder_registry=DEPRECATED_CLASS_DECODER_REGISTRY,
        )
        new_sum_problem = object_from_json(
            object_to_json(
                sum_problem,
                encoder_registry=DEPRECATED_ENCODER_REGISTRY,
                class_encoder_registry=DEPRECATED_CLASS_ENCODER_REGISTRY,
            ),
            decoder_registry=DEPRECATED_DECODER_REGISTRY,
            class_decoder_registry=DEPRECATED_CLASS_DECODER_REGISTRY,
        )
        # `assertEqual` treats a third positional argument as the failure
        # message, so each comparison against the reference value gets its
        # own assertion.
        self.assertEqual(branin_problem.f(1, 2), new_branin_problem.f(1, 2))
        self.assertEqual(new_branin_problem.f(1, 2), branin(1, 2))
        self.assertEqual(sum_problem.f([1, 2]), new_sum_problem.f([1, 2]))
        self.assertEqual(new_sum_problem.f([1, 2]), 3)
        # Test using `from_botorch`.
        ackley_problem = SimpleBenchmarkProblem(
            f=from_botorch(Ackley()), noise_sd=0.0, minimize=True
        )
        new_ackley_problem = object_from_json(
            object_to_json(
                ackley_problem,
                encoder_registry=DEPRECATED_ENCODER_REGISTRY,
                class_encoder_registry=DEPRECATED_CLASS_ENCODER_REGISTRY,
            ),
            decoder_registry=DEPRECATED_DECODER_REGISTRY,
            class_decoder_registry=DEPRECATED_CLASS_DECODER_REGISTRY,
        )
        self.assertEqual(ackley_problem.f(1, 2), new_ackley_problem.f(1, 2))
        self.assertEqual(new_ackley_problem.f(1, 2), ackley(1, 2))
Example #2
def _extract_optimization_trace_from_synthetic_function(
    experiment: Experiment, problem: SimpleBenchmarkProblem
) -> np.ndarray:
    if any(isinstance(trial, BatchTrial) for trial in experiment.trials.values()):
        raise NotImplementedError("Batched trials are not yet supported.")
    true_values = []
    for trial in experiment.trials.values():
        parameters = not_none(checked_cast(Trial, trial).arm).parameters
        # Expecting numerical parameters only.
        value = problem.f(*[float(x) for x in parameters.values()])  # pyre-ignore[6]
        true_values.append(value)
    return best_feasible_objective(
        optimization_config=experiment.optimization_config,
        values={problem.name: true_values},
    )
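
For intuition, the trace returned above is a best-so-far curve over the raw objective values. A minimal sketch of that reduction, assuming a single minimized objective and no feasibility constraints (plain NumPy, not the Ax helper):

import numpy as np

true_values = [10.0, 4.0, 7.0, 3.0, 5.0]
# Best-so-far for a minimization problem is a cumulative minimum.
trace = np.minimum.accumulate(np.asarray(true_values))
print(trace)  # -> [10.  4.  4.  3.  3.]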
Example #3
def simple_benchmark_problem_from_json(
        object_json: Dict[str, Any]) -> SimpleBenchmarkProblem:
    """Load a benchmark problem from JSON."""
    uses_synthetic_function = object_json.pop("uses_synthetic_function")
    if uses_synthetic_function:
        f = getattr(synthetic_functions, object_json.pop("function_name"))()
    else:
        f = pickle.loads(object_json.pop("f").encode())
    domain = object_from_json(object_json.pop("domain"))
    assert isinstance(domain, list) and all(
        isinstance(x, tuple) for x in domain)
    return SimpleBenchmarkProblem(
        f=f,
        name=object_json.pop("name"),
        domain=cast(List[Tuple[float, float]], domain),
        minimize=object_json.pop("minimize"),
    )
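
A hypothetical payload for the synthetic-function branch above; the field names mirror the `pop` calls, but the exact wire encoding of "domain" depends on Ax's JSON registry, so it is shown schematically:

object_json = {
    "uses_synthetic_function": True,
    # Class name resolved on ax.utils.measurement.synthetic_functions,
    # then instantiated via getattr(...)().
    "function_name": "Branin",
    # Schematic; a real payload stores the registry-encoded form.
    "domain": [(0.0, 1.0), (0.0, 1.0)],
    "name": "Branin",
    "minimize": True,
}
problem = simple_benchmark_problem_from_json(object_json)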
Example #4
    def test_raise_all_exceptions(self):
        """Checks that an exception nested in the benchmarking stack is raised
        when `raise_all_exceptions` is True.
        """
        def broken_benchmark_replication(*args, **kwargs) -> Experiment:
            raise ValueError("Oh, exception!")

        with self.assertRaisesRegex(ValueError, "Oh, exception!"):
            full_benchmark_run(
                problems=[SimpleBenchmarkProblem(branin, noise_sd=0.4)],
                methods=[
                    GenerationStrategy(steps=[
                        GenerationStep(model=Models.SOBOL, num_arms=-1)
                    ])
                ],
                num_replications=3,
                num_trials=5,
                raise_all_exceptions=True,
                benchmark_replication=broken_benchmark_replication,
            )
Example #5
def simple_benchmark_problem_from_json(
        object_json: Dict[str, Any]) -> SimpleBenchmarkProblem:
    """Load a benchmark problem from JSON."""
    uses_synthetic_function = object_json.pop("uses_synthetic_function")
    if uses_synthetic_function:
        function_name = object_json.pop("function_name")
        if function_name.startswith(synthetic_functions.FromBotorch.__name__):
            raise NotImplementedError  # TODO[Lena], pragma: no cover
        else:
            f = getattr(synthetic_functions, function_name)()
    else:
        f = pickle.loads(object_json.pop("f").encode())
    domain = object_from_json(object_json.pop("domain"))
    assert isinstance(domain, list) and all(
        isinstance(x, (tuple, list)) for x in domain)
    return SimpleBenchmarkProblem(
        f=f,
        name=object_json.pop("name"),
        domain=cast(List[Tuple[float, float]], domain),
        minimize=object_json.pop("minimize"),
    )
Example #6
from ax.benchmark.benchmark_problem import BenchmarkProblem, SimpleBenchmarkProblem
from ax.utils.measurement.synthetic_functions import from_botorch
from ax.utils.testing.core_stubs import (
    get_augmented_branin_optimization_config,
    get_augmented_hartmann_optimization_config,
    get_branin_search_space,
    get_hartmann_search_space,
)
from botorch.test_functions.synthetic import Ackley, Branin


# Initialize the single-fidelity problems
ackley = SimpleBenchmarkProblem(f=from_botorch(Ackley()), noise_sd=0.0, minimize=True)
branin = SimpleBenchmarkProblem(f=from_botorch(Branin()), noise_sd=0.0, minimize=True)
single_fidelity_problem_group = [ackley, branin]

# Initialize the multi-fidelity problems
augmented_branin = BenchmarkProblem(
    search_space=get_branin_search_space(with_fidelity_parameter=True),
    optimization_config=get_augmented_branin_optimization_config(),
)
augmented_hartmann = BenchmarkProblem(
    search_space=get_hartmann_search_space(with_fidelity_parameter=True),
    optimization_config=get_augmented_hartmann_optimization_config(),
)
multi_fidelity_problem_group = [augmented_branin, augmented_hartmann]

# Gather all of the problems
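
The snippet is cut off after this comment; a plausible completion, assuming the two groups are simply concatenated (the variable name is hypothetical):

all_problem_groups = single_fidelity_problem_group + multi_fidelity_problem_group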
Example #7
def _benchmark_replication_Service_API(
    problem: SimpleBenchmarkProblem,
    method: GenerationStrategy,
    num_trials: int,
    experiment_name: str,
    batch_size: int = 1,
    raise_all_exceptions: bool = False,
    benchmark_trial: FunctionType = benchmark_trial,
    verbose_logging: bool = True,
    # Number of trials that need to fail for a replication to be considered failed.
    failed_trials_tolerated: int = 5,
    async_benchmark_options: Optional[AsyncBenchmarkOptions] = None,
) -> Tuple[Experiment, List[Exception]]:
    """Run a benchmark replication via the Service API because the problem was
    set up in a simplified way, without the use of Ax classes like `OptimizationConfig`
    or `SearchSpace`.
    """
    if async_benchmark_options is not None:
        raise NonRetryableBenchmarkingError(
            "`async_benchmark_options` not supported when using the Service API."
        )

    exceptions = []
    if batch_size == 1:
        ax_client = AxClient(generation_strategy=method,
                             verbose_logging=verbose_logging)
    else:  # pragma: no cover, TODO[T53975770]
        assert batch_size > 1, "Batch size greater than 1 is expected here."
        raise NotImplementedError(
            "Batched benchmarking on `SimpleBenchmarkProblem`-s not yet implemented."
        )
    ax_client.create_experiment(
        name=experiment_name,
        parameters=problem.domain_as_ax_client_parameters(),
        minimize=problem.minimize,
        objective_name=problem.name,
    )
    parameter_names = list(ax_client.experiment.search_space.parameters.keys())
    assert num_trials > 0
    for _ in range(num_trials):
        parameterization, idx = ax_client.get_next_trial()
        param_values = np.array(
            [parameterization.get(x) for x in parameter_names])
        try:
            mean, sem = benchmark_trial(parameterization=param_values,
                                        evaluation_function=problem.f)
            # If problem indicates a noise level and is using a synthetic callable,
            # add normal noise to the measurement of the mean.
            if problem.uses_synthetic_function and problem.noise_sd != 0.0:
                noise = np.random.randn() * problem.noise_sd
                sem = (sem or 0.0) + problem.noise_sd
                logger.info(
                    f"Adding noise of {noise} to the measurement mean ({mean})."
                    f"Problem noise SD setting: {problem.noise_sd}.")
                mean = mean + noise
            ax_client.complete_trial(trial_index=idx, raw_data=(mean, sem))
        except Exception as err:  # TODO[T53975770]: test
            if raise_all_exceptions:
                raise
            exceptions.append(err)
        if len(exceptions) > failed_trials_tolerated:
            raise RuntimeError(  # TODO[T53975770]: test
                f"More than {failed_trials_tolerated} failed for {experiment_name}."
            )
    return ax_client.experiment, exceptions
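
A minimal invocation sketch, reusing the Sobol-only generation strategy from Example #4; the experiment name is arbitrary:

experiment, exceptions = _benchmark_replication_Service_API(
    problem=SimpleBenchmarkProblem(branin, noise_sd=0.0),
    method=GenerationStrategy(
        steps=[GenerationStep(model=Models.SOBOL, num_arms=-1)]
    ),
    num_trials=5,
    experiment_name="branin_sobol_replication",
)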
Example #8
def get_mult_simple_benchmark_problem() -> SimpleBenchmarkProblem:
    return SimpleBenchmarkProblem(
        f=cast(FunctionType, sample_multiplication_fxn),
        name="Sum",
        domain=[(0.0, 1.0), (0.0, 1.0), (0.0, 1.0)],
    )
Example #9
def get_sum_simple_benchmark_problem() -> SimpleBenchmarkProblem:
    return SimpleBenchmarkProblem(f=sum,
                                  name="Sum",
                                  domain=[(0.0, 1.0), (0.0, 1.0)])
Example #10
def get_branin_simple_benchmark_problem() -> SimpleBenchmarkProblem:
    return SimpleBenchmarkProblem(f=branin)
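
This stub passes only `f`; a minimal usage sketch, assuming `SimpleBenchmarkProblem` fills in the name and domain from the registered `branin` synthetic function:

problem = get_branin_simple_benchmark_problem()
print(problem.name, problem.f(1.0, 2.0))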