Example 1
    def test_creation(self):
        """Test creation"""
        ar1 = AverageRank()
        assert ar1.task_num == 1
        assert ar1.configuration == {"AverageRank": {"task_num": 1}}

        ar2 = AverageRank(task_num=5)
        assert ar2.task_num == 5
        assert ar2.configuration == {"AverageRank": {"task_num": 5}}
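These tests exercise `AverageRank` from the Oríon benchmark suite. A minimal standalone sketch of the same constructor usage, assuming the import path used in Oríon's documentation (`orion.benchmark.assessment`), which the excerpt above does not show:

# Assumption: AverageRank is importable from Oríon's benchmark assessment
# module; the import is not part of the original excerpt.
from orion.benchmark.assessment import AverageRank

ar = AverageRank(task_num=3)
assert ar.task_num == 3
assert ar.configuration == {"AverageRank": {"task_num": 3}}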
Example 2
    def test_analysis(self, experiment_config, trial_config):
        """Test assessment plot"""
        ar1 = AverageRank()

        with create_experiment(experiment_config, trial_config, ["completed"]) as (
            _,
            experiment,
            _,
        ):
            plot = ar1.analysis("task_name", [(0, experiment)])

        assert type(plot) is plotly.graph_objects.Figure
Example 3
    def test_figure_layout(self, study_experiments_config):
        """Test assessment plot format"""
        ar1 = AverageRank()

        with create_study_experiments(**study_experiments_config) as experiments:
            plot = ar1.analysis("task_name", experiments)

            assert_rankings_plot(
                plot,
                [
                    list(algorithm["algorithm"].keys())[0]
                    for algorithm in study_experiments_config["algorithms"]
                ],
                balanced=study_experiments_config["max_trial"],
                with_avg=True,
            )
Example 4
def benchmark_config_py(benchmark_algorithms):
    """Return a benchmark configuration dictionary"""
    config = dict(
        name="bm00001",
        algorithms=benchmark_algorithms,
        targets=[{
            "assess": [AverageResult(2), AverageRank(2)],
            "task": [RosenBrock(25, dim=3),
                     CarromTable(20)],
        }],
    )
    return config
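The dictionary built above matches the keyword arguments that `get_or_create_benchmark` receives in the later examples, so it can be unpacked straight into that call. A hedged sketch only; the algorithm names passed here are illustrative and not taken from the original:

# Illustrative usage: the algorithm list is an assumption, and the
# get_or_create_benchmark keywords mirror the calls in the later examples.
config = benchmark_config_py(benchmark_algorithms=["random", "tpe"])
benchmark = get_or_create_benchmark(**config)
benchmark.process()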
Example 5
def benchmark(benchmark_algorithms):
    """Return a benchmark instance"""
    return Benchmark(
        name="benchmark007",
        algorithms=benchmark_algorithms,
        targets=[{
            "assess": [AverageResult(2), AverageRank(2)],
            "task": [RosenBrock(25, dim=3),
                     CarromTable(20)],
        }],
    )
Example 6
def test_simple():
    """Test a end 2 end exucution of benchmark"""
    task_num = 2
    trial_num = 20
    assessments = [AverageResult(task_num), AverageRank(task_num)]
    tasks = [
        RosenBrock(trial_num, dim=3),
        EggHolder(trial_num, dim=4),
        CarromTable(trial_num),
        Branin(trial_num),
        BirdLike(trial_num),
    ]
    benchmark = get_or_create_benchmark(
        name="bm001",
        algorithms=algorithms,
        targets=[{
            "assess": assessments,
            "task": tasks
        }],
    )
    benchmark.process()

    assert len(benchmark.studies) == len(assessments) * len(tasks)

    status = benchmark.status()

    experiments = benchmark.experiments()

    assert (
        len(experiments)
        == len(algorithms) * task_num * len(assessments) * len(tasks)
    )

    assert len(status) == len(algorithms) * len(assessments) * len(tasks)

    figures = benchmark.analysis()

    assert len(figures) == len(benchmark.studies)
    assert type(figures[0]) is plotly.graph_objects.Figure

    benchmark = get_or_create_benchmark(name="bm001")
    figures = benchmark.analysis()

    assert len(figures) == len(benchmark.studies)
    assert type(figures[0]) is plotly.graph_objects.Figure
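Both `test_simple` variants (here and in Example 7) refer to a module-level `algorithms` list that the excerpts do not include. A minimal sketch of such a definition; the specific algorithm names, and the plain-string form, are assumptions rather than lines from the original:

# Assumed module-level definition (not shown in the excerpts). Oríon's
# benchmark client also accepts richer per-algorithm configuration dicts.
algorithms = ["random", "tpe"]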
Example 7
def test_simple():
    """Test a end 2 end exucution of benchmark"""
    task_num = 2
    max_trials = 10
    assessments = [AverageResult(task_num), AverageRank(task_num)]
    tasks = [
        Branin(max_trials),
        BirdLike(max_trials),
    ]

    benchmark = get_or_create_benchmark(
        name="bm001",
        algorithms=algorithms,
        targets=[{
            "assess": assessments,
            "task": tasks
        }],
    )

    benchmark.process()

    assert len(benchmark.studies) == len(assessments) * len(tasks)

    status = benchmark.status()

    experiments = benchmark.experiments()

    assert (
        len(experiments)
        == len(algorithms) * task_num * len(assessments) * len(tasks)
    )

    assert len(status) == len(algorithms) * len(assessments) * len(tasks)

    figures = benchmark.analysis()

    assert_benchmark_figures(figures, 4, assessments, tasks)

    benchmark = get_or_create_benchmark(name="bm001")
    figures = benchmark.analysis()

    assert_benchmark_figures(figures, 4, assessments, tasks)
    benchmark.close()
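`assert_benchmark_figures` is a test helper that the excerpt does not define. Based on the inline checks spelled out in Example 6, here is a hedged sketch of what such a helper could verify; the signature is inferred from the call sites above and the body is illustrative only, not the project's actual implementation:

import plotly.graph_objects

def assert_benchmark_figures(figures, num, assessments, tasks):
    """Illustrative stand-in mirroring the inline checks from Example 6."""
    # One figure per (assessment, task) study is expected at the call sites.
    assert num == len(assessments) * len(tasks)
    assert len(figures) == num
    for figure in figures:
        assert type(figure) is plotly.graph_objects.Figure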