Example #1
    def test_search_space(self):
        """Test to get task search space"""
        branin = Branin(2)

        assert branin.get_search_space() == {
            "x": "uniform(0, 1, shape=2, precision=10)"
        }
Example #2
    def test_call(self):
        """Test to get task function"""
        branin = Branin(2)

        assert callable(branin)

        objectives = branin([1, 2])
        assert isinstance(objectives[0], dict)
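
The test above only checks the container type. The call returns a list of result dicts; a minimal sketch of reading one, assuming the usual name/type/value key layout (that layout is an assumption, not something the test asserts):

from orion.benchmark.task import Branin

branin = Branin(max_trials=2)

# Assumed layout: [{"name": "branin", "type": "objective", "value": <float>}]
for result in branin([1, 2]):
    print(result["name"], result["type"], result["value"])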
Example #3
    def test_call(self):
        """Test to get task function"""
        task = Branin(2)

        assert callable(task)

        objectives = task([1, 2])
        assert isinstance(objectives[0], dict)
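
For reference, the objective these tests exercise is the standard Branin function, whose global minimum is about 0.397887. A self-contained sketch of that definition; the rescaling from the task's uniform(0, 1) search space to the conventional domain x1 in [-5, 10], x2 in [0, 15] is an assumption about the task's internals:

import math


def branin_reference(x1, x2):
    """Standard Branin function (global minimum ~0.397887)."""
    b = 5.1 / (4.0 * math.pi**2)
    c = 5.0 / math.pi
    t = 1.0 / (8.0 * math.pi)
    return (x2 - b * x1**2 + c * x1 - 6.0) ** 2 + 10.0 * (1.0 - t) * math.cos(x1) + 10.0


# Presumed rescaling of a point drawn from the uniform(0, 1) search space:
u1, u2 = 0.5, 0.5
print(branin_reference(u1 * 15.0 - 5.0, u2 * 15.0))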
Example #4
def test_simple():
    """Test a end 2 end exucution of benchmark"""
    task_num = 2
    trial_num = 20
    assessments = [AverageResult(task_num), AverageRank(task_num)]
    tasks = [
        RosenBrock(trial_num, dim=3),
        EggHolder(trial_num, dim=4),
        CarromTable(trial_num),
        Branin(trial_num),
        BirdLike(trial_num),
    ]
    benchmark = get_or_create_benchmark(
        name="bm001",
        algorithms=algorithms,
        targets=[{
            "assess": assessments,
            "task": tasks
        }],
    )
    benchmark.process()

    assert len(benchmark.studies) == len(assessments) * len(tasks)

    status = benchmark.status()

    experiments = benchmark.experiments()

    assert (
        len(experiments)
        == len(algorithms) * task_num * len(assessments) * len(tasks)
    )

    assert len(status) == len(algorithms) * len(assessments) * len(tasks)

    figures = benchmark.analysis()

    assert len(figures) == len(benchmark.studies)
    assert isinstance(figures[0], plotly.graph_objects.Figure)

    benchmark = get_or_create_benchmark(name="bm001")
    figures = benchmark.analysis()

    assert len(figures) == len(benchmark.studies)
    assert isinstance(figures[0], plotly.graph_objects.Figure)
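
Both end-to-end tests reference an algorithms list defined elsewhere in the test module. A plausible sketch of such a fixture, assuming the benchmark accepts algorithm names or name-to-configuration dicts; the entries below are illustrative, not the test's actual values:

# Illustrative only: the real test module defines its own list.
algorithms = [
    "random",              # name-only entry, default configuration
    {"tpe": {"seed": 1}},  # name mapped to an explicit configuration
]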
Example #5
def test_simple():
    """Test a end 2 end exucution of benchmark"""
    task_num = 2
    max_trials = 10
    assessments = [AverageResult(task_num), AverageRank(task_num)]
    tasks = [
        Branin(max_trials),
        BirdLike(max_trials),
    ]

    benchmark = get_or_create_benchmark(
        name="bm001",
        algorithms=algorithms,
        targets=[{
            "assess": assessments,
            "task": tasks
        }],
    )

    benchmark.process()

    assert len(benchmark.studies) == len(assessments) * len(tasks)

    status = benchmark.status()

    experiments = benchmark.experiments()

    assert (
        len(experiments)
        == len(algorithms) * task_num * len(assessments) * len(tasks)
    )

    assert len(status) == len(algorithms) * len(assessments) * len(tasks)

    figures = benchmark.analysis()

    assert_benchmark_figures(figures, 4, assessments, tasks)

    benchmark = get_or_create_benchmark(name="bm001")
    figures = benchmark.analysis()

    assert_benchmark_figures(figures, 4, assessments, tasks)
    benchmark.close()
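
assert_benchmark_figures is a helper defined elsewhere in the test module. A hypothetical sketch of what such a helper could verify, given that analysis() produces Plotly figures; the signature and the one-figure-per-(assessment, task) expectation are assumptions:

import plotly.graph_objects


def assert_benchmark_figures(figures, expected_count, assessments, tasks):
    """Hypothetical helper: check figure count and types."""
    assert expected_count == len(assessments) * len(tasks)
    assert len(figures) == expected_count
    for figure in figures:
        assert isinstance(figure, plotly.graph_objects.Figure)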
Example #6
    def test_creation(self):
        """Test creation"""
        branin = Branin(2)
        assert branin.max_trials == 2
        assert branin.configuration == {"Branin": {"max_trials": 2}}
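
The configuration dict nests the constructor arguments under the class name, so the assertions in this test are enough to round-trip a task through its own constructor. A minimal sketch:

from orion.benchmark.task import Branin

branin = Branin(2)

# configuration nests the kwargs under the class name, so it can
# rebuild an equivalent task instance.
rebuilt = Branin(**branin.configuration["Branin"])
assert rebuilt.max_trials == branin.max_trials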