Example 1
    def test_dict_of_experiments(self, monkeypatch):
        """Check that regrets accepts a mapping of display names to experiments."""
        mock_experiment_with_random_to_pandas(monkeypatch)
        with create_experiment(config, trial_config, ["completed"]) as (
            _,
            _,
            experiment,
        ):
            # The same experiment is registered under two custom names.
            named_experiments = {"exp-1": experiment, "exp-2": experiment}
            plot = regrets(named_experiments)

        assert_regrets_plot(plot, ["exp-1", "exp-2"])
Example 2
    def test_graph_layout(self, monkeypatch):
        """Check the layout of the regrets plot for a single experiment."""
        mock_experiment_with_random_to_pandas(monkeypatch)
        with create_experiment(config, trial_config, ["completed"]) as (
            _,
            _,
            experiment,
        ):
            plot = regrets([experiment])

        # Curves are labelled "<name>-v<version>" by default.
        expected_label = f"{experiment.name}-v{experiment.version}"
        assert_regrets_plot(plot, [expected_label])
Example 3
    def test_unbalanced_experiments(self, monkeypatch):
        """Check averaged regrets when experiment groups are unbalanced."""
        mock_experiment_with_random_to_pandas(monkeypatch, unbalanced=True)
        with create_experiment(config, trial_config, ["completed"]) as (
            _,
            _,
            experiment,
        ):
            # Ten copies of the same experiment per group; insertion order
            # of the dict keys is preserved ("exp-1" first, then "exp-2").
            groups = {name: [experiment] * 10 for name in ("exp-1", "exp-2")}
            plot = regrets(groups)

        assert_regrets_plot(plot, ["exp-1", "exp-2"], with_avg=True, balanced=0)
Example 4
    def test_figure_layout(self, study_experiments_config):
        """Check the format of the AverageResult assessment plot."""
        assessment = AverageResult()

        with create_study_experiments(**study_experiments_config) as experiments:
            plot = assessment.analysis("task_name", experiments)

            # One curve per algorithm; each algorithm config is a
            # single-key dict whose key is the algorithm's name.
            algorithm_names = [
                next(iter(algorithm["algorithm"]))
                for algorithm in study_experiments_config["algorithms"]
            ]
            assert_regrets_plot(
                plot,
                algorithm_names,
                balanced=study_experiments_config["max_trial"],
                with_avg=True,
            )
Example 5
    def test_list_of_experiments(self, monkeypatch):
        """Check regrets for a list holding a parent experiment and its branch."""
        mock_experiment_with_random_to_pandas(monkeypatch)
        with create_experiment(config, trial_config, ["completed"]) as (
            _,
            _,
            experiment,
        ):
            child = orion.client.create_experiment(
                experiment.name, branching={"branch_to": "child"}
            )
            plot = regrets([experiment, child])

        # Experiments appear sorted alphabetically by name in the plot,
        # so "child" precedes the parent here.
        expected_labels = [f"{exp.name}-v{exp.version}" for exp in (child, experiment)]
        assert_regrets_plot(plot, expected_labels)
Example 6
    def test_ignore_uncompleted_statuses(self, monkeypatch):
        """Check that trials in non-completed statuses are left out of the plot."""
        # Mix of statuses; only the four "completed" trials should be plotted.
        trial_statuses = [
            "completed",
            "new",
            "reserved",
            "completed",
            "broken",
            "completed",
            "interrupted",
            "completed",
        ]
        mock_experiment_with_random_to_pandas(monkeypatch, status=trial_statuses)
        with create_experiment(config, trial_config) as (_, _, experiment):
            plot = regrets([experiment])

        assert_regrets_plot(
            plot, [f"{experiment.name}-v{experiment.version}"], balanced=4
        )
Example 7
    def test_list_of_experiments_name_conflict(self, monkeypatch):
        """Check regrets when two experiment versions share the same name."""
        mock_experiment_with_random_to_pandas(monkeypatch)
        with create_experiment(config, trial_config, ["completed"]) as (
            _,
            _,
            experiment,
        ):
            # Branch back onto the same name, producing version + 1.
            branching_config = {"branch_to": experiment.name, "enable": True}
            child = orion.client.create_experiment(
                experiment.name, branching=branching_config
            )
            assert child.name == experiment.name
            assert child.version == experiment.version + 1
            plot = regrets([experiment, child])

        # Experiments appear sorted alphabetically by name in the plot;
        # identical names keep version order (parent before child).
        expected_labels = [f"{exp.name}-v{exp.version}" for exp in (experiment, child)]
        assert_regrets_plot(plot, expected_labels)
Example 8
    def test_analysis(self, study_experiments_config):
        """Check the three figures produced by the ParallelAssessment analysis."""
        task_num = 2
        n_workers = [1, 2, 4]
        assessment = ParallelAssessment(task_num=task_num, n_workers=n_workers)

        study_experiments_config["task_number"] = task_num
        study_experiments_config["n_workers"] = n_workers
        with create_study_experiments(**study_experiments_config) as experiments:
            figure = assessment.analysis("task_name", experiments)

            # Each algorithm config is a single-key dict keyed by its name.
            algorithms = [
                next(iter(algorithm["algorithm"]))
                for algorithm in study_experiments_config["algorithms"]
            ]
            # One curve per (algorithm, worker-count) combination, in the
            # same order the original nested loops produced.
            names = [
                algo + "_workers_" + str(worker)
                for algo in algorithms
                for worker in n_workers
            ]

            task_figures = figure["ParallelAssessment"]["task_name"]
            # Exactly three sub-figures: regrets, parallel assessment, durations.
            assert len(task_figures) == 3
            assert_regrets_plot(
                task_figures["regrets"],
                names,
                balanced=study_experiments_config["max_trial"],
                with_avg=True,
            )
            asset_parallel_assessment_plot(
                task_figures["parallel_assessment"],
                algorithms,
                3,
            )
            assert_durations_plot(task_figures["durations"], names)