def test_dict_of_list_of_experiments(self, monkeypatch):
    """Tests the durations with avg of experiments"""
    mock_experiment_with_random_to_pandas(monkeypatch)
    with create_experiment(config, trial_config, ["completed"]) as (
        _,
        _,
        experiment,
    ):
        plot = durations({"exp-1": [experiment] * 10, "exp-2": [experiment] * 10})

    assert_durations_plot(plot, ["exp-1", "exp-2"])
def test_dict_of_experiments(self, monkeypatch):
    """Tests the durations with renamed experiments"""
    mock_experiment_with_random_to_pandas(monkeypatch)
    with create_experiment(config, trial_config, ["completed"]) as (
        _,
        _,
        experiment,
    ):
        plot = durations({"exp-1": experiment, "exp-2": experiment})

    assert_durations_plot(plot, ["exp-1", "exp-2"])
def test_graph_layout(self, monkeypatch):
    """Tests the layout of the plot"""
    mock_experiment_with_random_to_pandas(monkeypatch)
    with create_experiment(config, trial_config, ["completed"]) as (
        _,
        _,
        experiment,
    ):
        plot = durations([experiment])

    assert_durations_plot(plot, [f"{experiment.name}-v{experiment.version}"])
def test_returns_plotly_object(self, monkeypatch):
    """Tests that the plotly backend returns a plotly object"""
    mock_experiment_with_random_to_pandas(monkeypatch)
    with create_experiment(config, trial_config, ["completed"]) as (
        _,
        _,
        experiment,
    ):
        plot = durations([experiment])

    assert type(plot) is plotly.graph_objects.Figure
def test_list_of_experiments(self, monkeypatch):
    """Tests the durations with list of experiments"""
    mock_experiment_with_random_to_pandas(monkeypatch)
    with create_experiment(config, trial_config, ["completed"]) as (
        _,
        _,
        experiment,
    ):
        child = orion.client.create_experiment(
            experiment.name, branching={"branch_to": "child", "enable": True}
        )

        plot = durations([experiment, child])

    # Exps are sorted alphabetically by names.
    assert_durations_plot(
        plot, [f"{exp.name}-v{exp.version}" for exp in [child, experiment]]
    )
def test_ignore_uncompleted_statuses(self, monkeypatch):
    """Tests that uncompleted statuses are ignored"""
    mock_experiment_with_random_to_pandas(
        monkeypatch,
        status=[
            "completed",
            "new",
            "reserved",
            "completed",
            "broken",
            "completed",
            "interrupted",
            "completed",
        ],
    )
    with create_experiment(config, trial_config) as (_, _, experiment):
        plot = durations([experiment])

    assert_durations_plot(plot, [f"{experiment.name}-v{experiment.version}"])
def analysis(self, task, experiments):
    """
    Generate a `plotly.graph_objects.Figure` to display average performance
    for each search algorithm.

    task: str
        Name of the task.
    experiments: list
        A list of ``(task_index, experiment)`` pairs, where ``task_index`` is the
        index of the task to run for this assessment and ``experiment`` is an
        instance of `orion.core.worker.experiment`.
    """
    algorithm_groups = defaultdict(list)
    algorithm_worker_groups = defaultdict(list)
    for task_index, exp in experiments:
        # Group experiments by algorithm, and separately by algorithm
        # plus the worker count used for that task index.
        algorithm_name = list(exp.configuration["algorithms"].keys())[0]
        algorithm_groups[algorithm_name].append(exp)

        n_worker = self.workers[task_index]
        algo_key = algorithm_name + "_workers_" + str(n_worker)
        algorithm_worker_groups[algo_key].append(exp)

    assessment = self.__class__.__name__

    figure = defaultdict(dict)
    figure[assessment][task] = dict()
    figure[assessment][task][parallel_assessment.__name__] = parallel_assessment(
        algorithm_groups
    )
    figure[assessment][task][durations.__name__] = durations(algorithm_worker_groups)
    figure[assessment][task][regrets.__name__] = regrets(algorithm_worker_groups)
    return figure
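# A minimal sketch of how ``analysis`` might be called, assuming this method
# lives on an assessment whose ``workers`` attribute holds one worker count per
# task index. The class name ``ParallelAssessment``, its constructor arguments,
# and the experiment variables below are illustrative assumptions, not taken
# from this file:
#
#     assessment = ParallelAssessment(workers=[1, 4])  # hypothetical setup
#     figure = assessment.analysis(
#         "RosenBrock",
#         [(0, experiment_1_worker), (1, experiment_4_workers)],
#     )
#     # figure[<assessment name>]["RosenBrock"]["durations"] would then hold a
#     # plotly figure whose traces are keyed like "<algorithm>_workers_<n>".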
def test_requires_argument(self):
    """Tests that the experiment data are required."""
    with pytest.raises(ValueError):
        durations(None)