Example #1
    def test_returns_plotly_object(self, monkeypatch):
        """Tests that the plotly backend returns a plotly object"""
        mock_experiment_with_random_to_pandas(monkeypatch)
        with create_experiment(config, trial_config, ["completed"]) as (
                _,
                _,
                experiment,
        ):
            plot = regrets([experiment])

        assert type(plot) is plotly.graph_objects.Figure
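
Outside a test harness, the same call works directly on stored experiments. A minimal usage sketch, assuming an experiment with completed trials already exists in storage; the experiment name is a placeholder:

    # Minimal usage sketch; "my-exp" is a hypothetical experiment name.
    import orion.client
    from orion.plotting.base import regrets

    experiment = orion.client.get_experiment("my-exp")
    fig = regrets([experiment])     # a plotly.graph_objects.Figure
    fig.write_html("regrets.html")  # standard plotly export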
Example #2
    def test_graph_layout(self, monkeypatch):
        """Tests the layout of the plot"""
        mock_experiment_with_random_to_pandas(monkeypatch)
        with create_experiment(config, trial_config, ["completed"]) as (
                _,
                _,
                experiment,
        ):
            plot = regrets([experiment])

        assert_regrets_plot(plot, [f"{experiment.name}-v{experiment.version}"])
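
The expected label follows the `name-vVERSION` convention. `assert_regrets_plot` is a helper from Orion's test suite; a minimal sketch of the kind of check it can perform, assuming one trace per experiment, named after its label:

    import plotly.graph_objects

    def check_trace_names(fig, labels):
        # Verify the figure type, then check that every expected
        # experiment label appears among the trace names.
        assert isinstance(fig, plotly.graph_objects.Figure)
        trace_names = {trace.name for trace in fig.data}
        assert set(labels) <= trace_names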
Example #3
    def test_dict_of_experiments(self, monkeypatch):
        """Tests the regrets with renamed experiments"""
        mock_experiment_with_random_to_pandas(monkeypatch)
        with create_experiment(config, trial_config, ["completed"]) as (
                _,
                _,
                experiment,
        ):
            plot = regrets({"exp-1": experiment, "exp-2": experiment})

        assert_regrets_plot(plot, ["exp-1", "exp-2"])
Example #4
    def test_unbalanced_experiments(self, monkeypatch):
        """Tests the regrets with avg of unbalanced experiments"""
        mock_experiment_with_random_to_pandas(monkeypatch, unbalanced=True)
        with create_experiment(config, trial_config, ["completed"]) as (
            _,
            _,
            experiment,
        ):
            plot = regrets({"exp-1": [experiment] * 10, "exp-2": [experiment] * 10})

        assert_regrets_plot(plot, ["exp-1", "exp-2"], with_avg=True, balanced=0)
Example #5
    def test_list_of_experiments(self, monkeypatch):
        """Tests the regrets with list of experiments"""
        mock_experiment_with_random_to_pandas(monkeypatch)
        with create_experiment(config, trial_config, ["completed"]) as (
                _,
                _,
                experiment,
        ):
            child = orion.client.create_experiment(
                experiment.name, branching={"branch_to": "child"}
            )

            plot = regrets([experiment, child])

        # Exps are sorted alphabetically by names.
        assert_regrets_plot(
            plot, [f"{exp.name}-v{exp.version}" for exp in [child, experiment]]
        )
Example #6
    def analysis(self, task, experiments):
        """
        Generate a `plotly.graph_objects.Figure` to display average performance
        for each search algorithm.

        task: str
            Name of the task
        experiments: list
            A list of (task_index, experiment), where task_index is the index of task to run for
            this assessment, and experiment is an instance of `orion.core.worker.experiment`.
        """
        algorithm_groups = defaultdict(list)

        for _, exp in experiments:
            algorithm_name = list(exp.configuration["algorithms"].keys())[0]
            algorithm_groups[algorithm_name].append(exp)

        return regrets(algorithm_groups)
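
`defaultdict(list)` accumulates one group of experiments per algorithm, and since `regrets` receives a dict, the algorithm names become the curve labels, exactly as the explicit keys did in Example #3. Note that taking the first key of `exp.configuration["algorithms"]` assumes each experiment is configured with a single algorithm.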
Example #7
    def test_ignore_uncompleted_statuses(self, monkeypatch):
        """Tests that uncompleted statuses are ignored"""
        mock_experiment_with_random_to_pandas(
            monkeypatch,
            status=[
                "completed",
                "new",
                "reserved",
                "completed",
                "broken",
                "completed",
                "interrupted",
                "completed",
            ],
        )
        with create_experiment(config, trial_config) as (_, _, experiment):
            plot = regrets([experiment])

        assert_regrets_plot(plot, [f"{experiment.name}-v{experiment.version}"],
                            balanced=4)
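
The `balanced=4` argument matches the four `completed` entries in the mocked status list: trials in the `new`, `reserved`, `broken`, and `interrupted` states contribute no points to the regret curves.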
Example #8
    def test_list_of_experiments_name_conflict(self, monkeypatch):
        """Tests the regrets with list of experiments with the same name"""
        mock_experiment_with_random_to_pandas(monkeypatch)
        with create_experiment(config, trial_config, ["completed"]) as (
            _,
            _,
            experiment,
        ):
            child = orion.client.create_experiment(
                experiment.name,
                branching={"branch_to": experiment.name, "enable": True},
            )
            assert child.name == experiment.name
            assert child.version == experiment.version + 1
            plot = regrets([experiment, child])

        # Exps are sorted alphabetically by names.
        assert_regrets_plot(
            plot, [f"{exp.name}-v{exp.version}" for exp in [experiment, child]]
        )
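
Because parent and child share a name, the `-vVERSION` suffix is what keeps the two labels distinct; the assertions above guarantee the child's version is exactly one above its parent's.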
Example #9
    def analysis(self, task, experiments):
        """
        Generate a `plotly.graph_objects.Figure` to display average performance
        for each search algorithm.

        task: str
            Name of the task
        experiments: list
            A list of (task_index, experiment), where task_index is the index of task to run for
            this assessment, and experiment is an instance of `orion.core.worker.experiment`.
        """

        algorithm_groups = defaultdict(list)
        algorithm_worker_groups = defaultdict(list)
        for task_index, exp in experiments:
            algorithm_name = list(exp.configuration["algorithms"].keys())[0]
            algorithm_groups[algorithm_name].append(exp)

            n_worker = self.workers[task_index]
            algo_key = algorithm_name + "_workers_" + str(n_worker)
            algorithm_worker_groups[algo_key].append(exp)

        assessment = self.__class__.__name__

        figure = defaultdict(dict)
        figure[assessment][task] = dict()

        figure[assessment][task][parallel_assessment.__name__] = parallel_assessment(
            algorithm_groups
        )
        figure[assessment][task][durations.__name__] = durations(
            algorithm_worker_groups
        )
        figure[assessment][task][regrets.__name__] = regrets(algorithm_worker_groups)

        return figure
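
The method returns a nested mapping keyed by assessment class name, then task, then plot function name. For illustration, with a hypothetical assessment class `ParallelAssessment` run on a task named "rosenbrock" (both placeholder names), the result would be shaped like:

    # Hypothetical shape of the returned mapping (all names are placeholders):
    # {
    #     "ParallelAssessment": {
    #         "rosenbrock": {
    #             "parallel_assessment": <Figure>,
    #             "durations": <Figure>,
    #             "regrets": <Figure>,
    #         }
    #     }
    # }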
Example #10
    def test_requires_argument(self):
        """Tests that the experiment data are required."""
        with pytest.raises(ValueError):
            regrets(None)
Example #11
    def test_unsupported_order_key(self):
        """Tests that unsupported order keys are rejected"""
        with create_experiment(config, trial_config) as (_, _, experiment):
            with pytest.raises(ValueError):
                regrets([experiment], order_by="unsupported")
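
Both failure modes surface as `ValueError`: passing no experiment data (Example #10) and passing an unsupported `order_by` key (shown here). A defensive wrapper is straightforward; `safe_regrets` below is a hypothetical helper, not part of Orion:

    from orion.plotting.base import regrets

    def safe_regrets(experiments, **kwargs):
        # regrets() raises ValueError for missing data and for an
        # unsupported order_by key; degrade gracefully instead of raising.
        try:
            return regrets(experiments, **kwargs)
        except ValueError as err:
            print(f"Cannot build regrets plot: {err}")
            return None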