def test_creation_algorithms(self, benchmark):
    """Test study creation with all supported algorithm input formats"""
    algorithms = [
        {"algorithm": {"gridsearch": {"n_values": 1}}, "deterministic": True},
        {"algorithm": "tpe"},
        {"random": {"seed": 1}},
        "asha",
    ]

    study = Study(benchmark, algorithms, AverageResult(2), RosenBrock(25, dim=3))

    assert study.algorithms[0].name == "gridsearch"
    assert study.algorithms[0].experiment_algorithm == {
        "gridsearch": {"n_values": 1}
    }
    assert study.algorithms[0].is_deterministic

    assert study.algorithms[1].name == "tpe"
    assert study.algorithms[1].experiment_algorithm == "tpe"
    assert not study.algorithms[1].is_deterministic

    assert study.algorithms[2].name == "random"
    assert study.algorithms[2].experiment_algorithm == {"random": {"seed": 1}}
    assert not study.algorithms[2].is_deterministic

    assert study.algorithms[3].name == "asha"
    assert study.algorithms[3].experiment_algorithm == "asha"
    assert not study.algorithms[3].is_deterministic
def test_create_with_invalid_targets(self, benchmark_config_py):
    """Test creation with invalid Task and Assessment"""
    with OrionState():
        with pytest.raises(AttributeError) as exc:
            config = copy.deepcopy(benchmark_config_py)
            config["targets"] = [{"assess": [AverageResult(2)], "task": [DummyTask]}]
            get_or_create_benchmark(**config)

        assert "type object '{}' has no attribute ".format("DummyTask") in str(
            exc.value
        )

        with pytest.raises(AttributeError) as exc:
            config = copy.deepcopy(benchmark_config_py)
            config["targets"] = [
                {"assess": [DummyAssess], "task": [RosenBrock(25, dim=3)]}
            ]
            get_or_create_benchmark(**config)

        assert "type object '{}' has no attribute ".format("DummyAssess") in str(
            exc.value
        )
def test_create_with_different_configure(self, benchmark_config_py, caplog):
    """Test creation with the same name but a different configuration"""
    with OrionState():
        config = copy.deepcopy(benchmark_config_py)
        bm1 = get_or_create_benchmark(**config)

        config = copy.deepcopy(benchmark_config_py)
        config["targets"][0]["assess"] = [AverageResult(2)]

        with caplog.at_level(
            logging.WARNING, logger="orion.benchmark.benchmark_client"
        ):
            bm2 = get_or_create_benchmark(**config)

        assert bm2.configuration == bm1.configuration
        assert (
            "Benchmark with same name is found but has different configuration, "
            "which will be used for this creation." in caplog.text
        )

        caplog.clear()
        config = copy.deepcopy(benchmark_config_py)
        config["targets"][0]["task"] = [RosenBrock(26, dim=3), CarromTable(20)]

        with caplog.at_level(
            logging.WARNING, logger="orion.benchmark.benchmark_client"
        ):
            bm3 = get_or_create_benchmark(**config)

        assert bm3.configuration == bm1.configuration
        assert (
            "Benchmark with same name is found but has different configuration, "
            "which will be used for this creation." in caplog.text
        )
def test_call(self):
    """Test to get task function"""
    rb = RosenBrock(2)

    assert callable(rb)

    objectives = rb([1, 2])
    assert type(objectives[0]) == dict
def test_creation(self):
    """Test creation"""
    task = RosenBrock(max_trials=2, dim=3)
    assert task.max_trials == 2
    assert task.configuration == {"RosenBrock": {"dim": 3, "max_trials": 2}}
def benchmark_config_py(benchmark_algorithms):
    config = dict(
        name="bm00001",
        algorithms=benchmark_algorithms,
        targets=[
            {
                "assess": [AverageResult(2), AverageRank(2)],
                "task": [RosenBrock(25, dim=3), CarromTable(20)],
            }
        ],
    )
    return config
def benchmark(benchmark_algorithms):
    """Return a benchmark instance"""
    return Benchmark(
        name="benchmark007",
        algorithms=benchmark_algorithms,
        targets=[
            {
                "assess": [AverageResult(2), AverageRank(2)],
                "task": [RosenBrock(25, dim=3), CarromTable(20)],
            }
        ],
    )
def test_simple():
    """Test an end-to-end execution of a benchmark"""
    task_num = 2
    trial_num = 20
    assessments = [AverageResult(task_num), AverageRank(task_num)]
    tasks = [
        RosenBrock(trial_num, dim=3),
        EggHolder(trial_num, dim=4),
        CarromTable(trial_num),
        Branin(trial_num),
        BirdLike(trial_num),
    ]

    benchmark = get_or_create_benchmark(
        name="bm001",
        algorithms=algorithms,
        targets=[{"assess": assessments, "task": tasks}],
    )
    benchmark.process()

    # One study per (assessment, task) pair.
    assert len(benchmark.studies) == len(assessments) * len(tasks)

    status = benchmark.status()
    experiments = benchmark.experiments()
    # Every algorithm is repeated `task_num` times within each study.
    assert (
        len(experiments) == len(algorithms) * task_num * len(assessments) * len(tasks)
    )
    assert len(status) == len(algorithms) * len(assessments) * len(tasks)

    figures = benchmark.analysis()
    assert len(figures) == len(benchmark.studies)
    assert type(figures[0]) is plotly.graph_objects.Figure

    benchmark = get_or_create_benchmark(name="bm001")
    figures = benchmark.analysis()
    assert len(figures) == len(benchmark.studies)
    assert type(figures[0]) is plotly.graph_objects.Figure
def study(benchmark, benchmark_algorithms):
    """Return a study instance"""
    with benchmark.executor:
        yield Study(
            benchmark, benchmark_algorithms, AverageResult(2), RosenBrock(25, dim=3)
        )
def test_search_space(self):
    """Test to get task search space"""
    rb = RosenBrock(2)
    assert rb.get_search_space() == {"x": "uniform(-5, 10, shape=2)"}
def study(benchmark, benchmark_algorithms):
    """Return a study instance"""
    return Study(
        benchmark, benchmark_algorithms, AverageResult(2), RosenBrock(25, dim=3)
    )