Example 1
    def test_raise_all_exceptions(self):
        """Checks that an exception nested in the benchmarking stack is raised
        when `raise_all_exceptions` is True.
        """
        def broken_benchmark_replication(*args, **kwargs) -> Experiment:
            raise ValueError("Oh, exception!")

        with self.assertRaisesRegex(ValueError, "Oh, exception!"):
            full_benchmark_run(
                problem_groups={
                    self.CATEGORY_NAME: [
                        SimpleBenchmarkProblem(branin, noise_sd=0.4)
                    ]
                },
                method_groups={
                    self.CATEGORY_NAME: [
                        GenerationStrategy(steps=[
                            GenerationStep(model=Models.SOBOL, num_trials=-1)
                        ])
                    ]
                },
                num_replications=3,
                num_trials=5,
                raise_all_exceptions=True,
                benchmark_replication=broken_benchmark_replication,
            )
Example 2
    def test_basic(self):
        """Run through the benchmarking loop."""
        results = full_benchmark_run(
            problem_groups={
                self.CATEGORY_NAME: [
                    SimpleBenchmarkProblem(branin, noise_sd=0.4),
                    BenchmarkProblem(
                        name="Branin",
                        search_space=get_branin_search_space(),
                        optimization_config=get_branin_optimization_config(),
                    ),
                    BenchmarkProblem(
                        search_space=get_branin_search_space(),
                        optimization_config=get_optimization_config(),
                    ),
                ]
            },
            method_groups={
                self.CATEGORY_NAME: [
                    GenerationStrategy(steps=[
                        GenerationStep(model=Models.SOBOL, num_trials=-1)
                    ])
                ]
            },
            num_replications=3,
            num_trials=5,
            # Just to have it be more telling if something is broken
            raise_all_exceptions=True,
            batch_size=[[1], [3], [1]],
        )
        self.assertEqual(len(results["Branin"]["Sobol"]), 3)
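
Examples 1 and 2 are taken from Ax's benchmarking unit tests and omit their imports and test-case scaffolding. A minimal preamble that would make them self-contained could look like the sketch below; the module paths follow the pre-1.0 Ax layout these tests were written against and should be treated as assumptions, and the test class name and CATEGORY_NAME value are hypothetical.

from ax.benchmark.benchmark import full_benchmark_run
from ax.benchmark.benchmark_problem import BenchmarkProblem, SimpleBenchmarkProblem
from ax.core.experiment import Experiment
from ax.modelbridge.generation_strategy import GenerationStep, GenerationStrategy
from ax.modelbridge.registry import Models
from ax.utils.common.testutils import TestCase
from ax.utils.measurement.synthetic_functions import branin
from ax.utils.testing.core_stubs import (
    get_branin_optimization_config,
    get_branin_search_space,
    get_optimization_config,
)


class TestFullBenchmarkRun(TestCase):
    # Any string works as the group key; the tests only use it to index
    # problem_groups and method_groups.
    CATEGORY_NAME = "default"

    # ... test methods from Examples 1 and 2 go here ...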
Example 3
def run_branin_and_gramacy_100_benchmarks(rep):
    """Run one replication of the 100-dimensional Branin and Gramacy benchmarks."""
    strategy0 = GenerationStrategy(
        name="Sobol",
        steps=[
            GenerationStep(model=Models.SOBOL,
                           num_arms=-1,
                           model_kwargs={'seed': rep + 1})
        ],
    )
    strategy1 = ALEBOStrategy(D=100, d=4, init_size=10)
    strategy2 = REMBOStrategy(D=100, d=2, init_per_proj=2)
    strategy3 = HeSBOStrategy(D=100, d=4, init_per_proj=10, name="HeSBO, d=2d")

    all_benchmarks = full_benchmark_run(
        num_replications=1,
        num_trials=50,
        batch_size=1,
        methods=[strategy0, strategy1, strategy2, strategy3],
        problems=[branin_100, gramacy_100],
    )

    with open(
            f'results/branin_gramacy_100_alebo_rembo_hesbo_sobol_rep_{rep}.json',
            "w") as fout:
        json.dump(object_to_json(all_benchmarks), fout)
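
Examples 3 through 7 are benchmark scripts from the ALEBO experiments and share a preamble that is not shown. A sketch of what those imports could look like is below, assuming one of the older Ax releases that ships ALEBOStrategy, REMBOStrategy, and HeSBOStrategy; the benchmark problems (branin_100, gramacy_100, hartmann6_100, hartmann6_1000, hartmann6_random_subspace_1000, branin_by_D) and the ablation strategies are defined in the accompanying ALEBO benchmark code, so their imports are shown only as placeholders.

import json

from ax.benchmark.benchmark import full_benchmark_run
from ax.modelbridge.generation_strategy import GenerationStep, GenerationStrategy
from ax.modelbridge.registry import Models
from ax.modelbridge.strategies.alebo import ALEBOStrategy
from ax.modelbridge.strategies.rembo import HeSBOStrategy, REMBOStrategy
from ax.storage.json_store.encoder import object_to_json

# Problem definitions and ablation strategies come from the ALEBO benchmark
# code itself; the module names below are placeholders for wherever they live
# in your checkout.
# from benchmark_problems import branin_100, gramacy_100, branin_by_D, ...
# from ablation_strategies import (
#     ALEBOStrategy_kernel_ablation,
#     ALEBOStrategy_projection_ablation,
# )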
Example 4
def run_hartmann6_benchmarks(D, rep, random_subspace=False):
    """Run one replication of the Hartmann6 benchmark embedded in D dimensions."""
    # Pick the problem variant; only D=100 and D=1000 are handled here.
    if D == 100:
        problem = hartmann6_100
    elif D == 1000 and not random_subspace:
        problem = hartmann6_1000
    elif D == 1000 and random_subspace:
        problem = hartmann6_random_subspace_1000

    strategy0 = GenerationStrategy(
        name="Sobol",
        steps=[
            GenerationStep(model=Models.SOBOL,
                           num_arms=-1,
                           model_kwargs={'seed': rep + 1})
        ],
    )
    strategy1 = ALEBOStrategy(D=D, d=12, init_size=10)
    strategy2 = REMBOStrategy(D=D, d=6, init_per_proj=2)
    strategy3 = HeSBOStrategy(D=D, d=6, init_per_proj=10, name="HeSBO, d=d")
    strategy4 = HeSBOStrategy(D=D, d=12, init_per_proj=10, name="HeSBO, d=2d")

    all_benchmarks = full_benchmark_run(
        num_replications=1,  # Running them 1 at a time for distributed
        num_trials=200,
        batch_size=1,
        methods=[strategy0, strategy1, strategy2, strategy3, strategy4],
        problems=[problem],
    )

    rs_str = 'random_subspace_' if random_subspace else ''
    with open(
            f'results/hartmann6_{rs_str}{D}_alebo_rembo_hesbo_sobol_rep_{rep}.json',
            "w") as fout:
        json.dump(object_to_json(all_benchmarks), fout)
Example 5
def run_sensitivity_d_e_benchmarks(rep):
    """Sensitivity of ALEBO to the embedding dimension d_e, on the 100-D Branin problem."""
    strategies = [
        ALEBOStrategy(D=100, d=d_e, init_size=10, name=f'ALEBO, d={d_e}')
        for d_e in [2, 3, 5, 6, 7, 8]
    ]

    all_benchmarks = full_benchmark_run(
        num_replications=1,
        num_trials=50,
        batch_size=1,
        methods=strategies,
        problems=[branin_100],
    )

    with open(f'results/sensitivity_d_e_rep_{rep}.json', "w") as fout:
        json.dump(object_to_json(all_benchmarks), fout)
Example 6
def run_sensitivity_D_benchmarks(rep):
    """Sensitivity of ALEBO to the ambient dimension D, on the Branin problem."""
    results_dict = {}
    for D, problem in branin_by_D.items():
        strategy1 = ALEBOStrategy(D=D, d=4, init_size=10)

        all_benchmarks = full_benchmark_run(
            num_replications=1,
            num_trials=50,
            batch_size=1,
            methods=[strategy1],
            problems=[problem],
        )

        results_dict[D] = object_to_json(all_benchmarks)

    with open(f'results/sensitivity_D_rep_{rep}.json', "w") as fout:
        json.dump(results_dict, fout)
Example 7
def run_ablation_benchmarks(rep):
    """Compare base ALEBO with its projection and kernel ablations on the 100-D Branin problem."""
    strategies = [
        ALEBOStrategy_projection_ablation(D=100,
                                          d=4,
                                          init_size=10,
                                          name='ALEBO, projection ablation'),
        ALEBOStrategy_kernel_ablation(D=100,
                                      d=4,
                                      init_size=10,
                                      name='ALEBO, kernel ablation'),
        ALEBOStrategy(D=100, d=4, init_size=10, name='ALEBO, base'),
    ]

    all_benchmarks = full_benchmark_run(
        num_replications=1,
        num_trials=50,
        batch_size=1,
        methods=strategies,
        problems=[branin_100],
    )

    with open(f'results/ablation_rep_{rep}.json', "w") as fout:
        json.dump(object_to_json(all_benchmarks), fout)
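
Each of these scripts runs a single replication (num_replications=1) and writes one JSON file per rep, so replications are typically launched from a small driver loop or submitted as separate jobs. A hypothetical driver, with the replication count chosen arbitrarily for illustration:

import os

if __name__ == "__main__":
    os.makedirs("results", exist_ok=True)  # every script writes into results/
    for rep in range(10):  # replication count is an arbitrary choice here
        run_branin_and_gramacy_100_benchmarks(rep)
        run_hartmann6_benchmarks(D=100, rep=rep)
        run_hartmann6_benchmarks(D=1000, rep=rep)
        run_hartmann6_benchmarks(D=1000, rep=rep, random_subspace=True)
        run_sensitivity_d_e_benchmarks(rep)
        run_sensitivity_D_benchmarks(rep)
        run_ablation_benchmarks(rep)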