Example 1
    def test_status(
        self,
        study,
        study_experiments_config,
        task_number,
        max_trial,
    ):
        """Test to get status of a study"""
        with create_study_experiments(**study_experiments_config) as experiments:

            study.experiments_info = experiments

            assert study.status() == [
                {
                    "algorithm": "random",
                    "assessment": "AverageResult",
                    "task": "RosenBrock",
                    "experiments": task_number,
                    "completed": task_number,
                    "trials": task_number * max_trial,
                },
                {
                    "algorithm": "tpe",
                    "assessment": "AverageResult",
                    "task": "RosenBrock",
                    "experiments": task_number,
                    "completed": task_number,
                    "trials": task_number * max_trial,
                },
            ]
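
Example 1, like the examples that follow, relies on names defined elsewhere in the test module: create_study_experiments, Experiment, AverageResult, ParallelAssessment, plotly, and the assert_* plot helpers. A minimal sketch of the imports the snippets appear to assume follows; the module paths are assumptions inferred from the names used and are not confirmed by the snippets themselves.

    # Imports the test methods appear to rely on. The module paths below are
    # assumptions inferred from the names used in the snippets.
    import plotly

    from orion.benchmark.assessment import AverageResult, ParallelAssessment
    from orion.core.worker.experiment import Experiment
    from orion.testing import create_study_experiments
    from orion.testing.plotting import (
        assert_durations_plot,
        assert_regrets_plot,
        asset_parallel_assessment_plot,
    )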
Example 2
    def test_status(
        self,
        benchmark,
        study,
        study_experiments_config,
        task_number,
        max_trial,
    ):
        """Test to get the status of a benchmark"""
        with create_study_experiments(**study_experiments_config) as experiments:

            study.experiments_info = experiments

            benchmark.studies = [study]

            assert benchmark.status() == [
                {
                    "Algorithms": "random",
                    "Assessments": "AverageResult",
                    "Tasks": "RosenBrock",
                    "Total Experiments": task_number,
                    "Completed Experiments": task_number,
                    "Submitted Trials": task_number * max_trial,
                },
                {
                    "Algorithms": "tpe",
                    "Assessments": "AverageResult",
                    "Tasks": "RosenBrock",
                    "Total Experiments": task_number,
                    "Completed Experiments": task_number,
                    "Submitted Trials": task_number * max_trial,
                },
            ]
Example 3
    def test_analysis(self, benchmark, study, study_experiments_config):
        """Test to analysis benchmark result"""
        with create_study_experiments(**study_experiments_config) as experiments:

            study.experiments_info = experiments

            benchmark.studies = [study]

            figures = benchmark.analysis()

            assert len(figures) == 1
            assert type(figures[0]) is plotly.graph_objects.Figure
Example 4
    def test_experiments(self, study, study_experiments_config, task_number):
        """Test to get experiments of a study"""
        algo_num = len(study_experiments_config["algorithms"])
        with create_study_experiments(**study_experiments_config) as experiments:

            study.experiments_info = experiments

            experiments = study.experiments()

            assert (
                len(experiments) == study_experiments_config["task_number"] * algo_num
            )
            assert isinstance(experiments[0], Experiment)
Example 5
    def test_analysis(
        self,
        study,
        study_experiments_config,
    ):
        """Test to get the ploty figure of a study"""
        with create_study_experiments(**study_experiments_config) as experiments:

            study.experiments_info = experiments

            plot = study.analysis()

            assert type(plot) is plotly.graph_objects.Figure
Example 6
    def test_figure_layout(self, study_experiments_config):
        """Test assessment plot format"""
        ar1 = AverageResult()

        with create_study_experiments(**study_experiments_config) as experiments:
            plot = ar1.analysis("task_name", experiments)

            assert_regrets_plot(
                plot,
                [
                    list(algorithm["algorithm"].keys())[0]
                    for algorithm in study_experiments_config["algorithms"]
                ],
                balanced=study_experiments_config["max_trial"],
                with_avg=True,
            )
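
The examples index into the study_experiments_config fixture by key: Example 4 reads "algorithms" and "task_number", Example 6 reads "algorithms" and "max_trial", and Example 9 additionally assigns "task_number" and "n_workers" before use. From those accesses the fixture appears to be a plain dict shaped roughly as below; the concrete values are illustrative assumptions only.

    # Illustrative shape of the study_experiments_config fixture, inferred from
    # the keys the examples access. All concrete values here are assumptions.
    study_experiments_config = {
        "algorithms": [
            {"algorithm": {"random": {}}},  # algorithm names are read via
            {"algorithm": {"tpe": {}}},     # list(algorithm["algorithm"].keys())[0]
        ],
        "task_number": 2,        # experiments per algorithm (compare Example 4)
        "max_trial": 3,          # passed as balanced= to assert_regrets_plot
        "n_workers": [1, 2, 4],  # assigned explicitly in Example 9
    }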
Example 7
    def test_analysis(
        self,
        study,
        study_experiments_config,
    ):
        """Test to get the ploty figure of a study"""
        with create_study_experiments(**study_experiments_config) as experiments:

            study.experiments_info = experiments

            figure = study.analysis()

            assert (
                type(figure[study.assess_name][study.task_name]["regrets"])
                is plotly.graph_objects.Figure
            )
Example 8
    def test_experiments(
        self,
        benchmark,
        study,
        study_experiments_config,
        max_trial,
    ):
        """Test to get experiments list of a benchmark"""
        with create_study_experiments(**study_experiments_config) as experiments:

            study.experiments_info = experiments

            benchmark.studies = [study]

            assert benchmark.experiments() == [
                {
                    "Algorithm": "random",
                    "Experiment Name": "experiment-name-0",
                    "Number Trial": max_trial,
                    "Best Evaluation": 0,
                },
                {
                    "Algorithm": "tpe",
                    "Experiment Name": "experiment-name-1",
                    "Number Trial": max_trial,
                    "Best Evaluation": 0,
                },
                {
                    "Algorithm": "random",
                    "Experiment Name": "experiment-name-2",
                    "Number Trial": max_trial,
                    "Best Evaluation": 0,
                },
                {
                    "Algorithm": "tpe",
                    "Experiment Name": "experiment-name-3",
                    "Number Trial": max_trial,
                    "Best Evaluation": 0,
                },
            ]
Example 9
    def test_analysis(self, study_experiments_config):
        """Test assessment plot format"""
        task_num = 2
        n_workers = [1, 2, 4]
        pa1 = ParallelAssessment(task_num=task_num, n_workers=n_workers)

        study_experiments_config["task_number"] = task_num
        study_experiments_config["n_workers"] = n_workers
        with create_study_experiments(**study_experiments_config) as experiments:
            figure = pa1.analysis("task_name", experiments)

            names = []
            algorithms = []
            for algorithm in study_experiments_config["algorithms"]:
                algo = list(algorithm["algorithm"].keys())[0]
                algorithms.append(algo)

                for worker in n_workers:
                    names.append(algo + "_workers_" + str(worker))

            assert len(figure["ParallelAssessment"]["task_name"]) == 3
            assert_regrets_plot(
                figure["ParallelAssessment"]["task_name"]["regrets"],
                names,
                balanced=study_experiments_config["max_trial"],
                with_avg=True,
            )

            asset_parallel_assessment_plot(
                figure["ParallelAssessment"]["task_name"]["parallel_assessment"],
                algorithms,
                3,
            )

            assert_durations_plot(
                figure["ParallelAssessment"]["task_name"]["durations"], names)