def test_list_of_experiments(self, monkeypatch):
    """Tests the parallel_assessment with a list of experiments"""
    mock_experiment_with_random_to_pandas(monkeypatch)
    with create_experiment(config, trial_config, ["completed"]) as (
        _,
        _,
        experiment,
    ):
        # Branch a child experiment so the list holds two distinct experiments.
        child = orion.client.create_experiment(
            experiment.name, branching={"branch_to": "child", "enable": True}
        )

        plot = parallel_assessment({"random": [experiment, child]})

    asset_parallel_assessment_plot(plot, ["random"], 1)

def test_dict_of_experiments(self, monkeypatch):
    """Tests the parallel_assessment with renamed experiments"""
    mock_experiment_with_random_to_pandas(monkeypatch)
    with create_experiment(config, trial_config, ["completed"]) as (
        _,
        _,
        experiment,
    ):
        plot = parallel_assessment({"exp-1": experiment, "exp-2": experiment})

    asset_parallel_assessment_plot(plot, ["exp-1", "exp-2"], 1)

def test_graph_layout(self, monkeypatch):
    """Tests the layout of the plot"""
    mock_experiment_with_random_to_pandas(monkeypatch)
    with create_experiment(config, trial_config, ["completed"]) as (
        _,
        _,
        experiment,
    ):
        plot = parallel_assessment({"random": [experiment] * 2})

    asset_parallel_assessment_plot(plot, ["random"], 1)

def test_returns_plotly_object(self, monkeypatch):
    """Tests that the plotly backend returns a plotly object"""
    mock_experiment_with_random_to_pandas(monkeypatch)
    with create_experiment(config, trial_config, ["completed"]) as (
        _,
        _,
        experiment,
    ):
        plot = parallel_assessment({"random": [experiment]})

    assert type(plot) is plotly.graph_objects.Figure

def test_dict_of_list_of_experiments(self, monkeypatch):
    """Tests the parallel_assessment with avg of experiments"""
    mock_experiment_with_random_to_pandas(monkeypatch)
    with create_experiment(config, trial_config, ["completed"]) as (
        _,
        _,
        experiment,
    ):
        plot = parallel_assessment(
            {"exp-1": [experiment] * 10, "exp-2": [experiment] * 10}
        )

    asset_parallel_assessment_plot(plot, ["exp-1", "exp-2"], 1)

def test_list_of_experiments_name_conflict(self, monkeypatch):
    """Tests the parallel_assessment with a list of experiments sharing the same name"""
    mock_experiment_with_random_to_pandas(monkeypatch)
    with create_experiment(config, trial_config, ["completed"]) as (
        _,
        _,
        experiment,
    ):
        # Branching onto the same name bumps the version instead of renaming.
        child = orion.client.create_experiment(
            experiment.name,
            branching={"branch_to": experiment.name, "enable": True},
        )

        assert child.name == experiment.name
        assert child.version == experiment.version + 1
        plot = parallel_assessment({"random": [experiment, child]})

    asset_parallel_assessment_plot(plot, ["random"], 1)

def test_ignore_uncompleted_statuses(self, monkeypatch):
    """Tests that uncompleted statuses are ignored"""
    mock_experiment_with_random_to_pandas(
        monkeypatch,
        status=[
            "completed",
            "new",
            "reserved",
            "completed",
            "broken",
            "completed",
            "interrupted",
            "completed",
        ],
    )
    with create_experiment(config, trial_config) as (_, _, experiment):
        plot = parallel_assessment({"random": [experiment]})

    asset_parallel_assessment_plot(plot, ["random"], 1)

def analysis(self, task, experiments):
    """
    Generate a `plotly.graph_objects.Figure` to display average performance
    for each search algorithm.

    task: str
        Name of the task.
    experiments: list
        A list of (task_index, experiment), where task_index is the index of
        the task to run for this assessment, and experiment is an instance of
        `orion.core.worker.experiment`.
    """
    algorithm_groups = defaultdict(list)
    algorithm_worker_groups = defaultdict(list)
    for task_index, exp in experiments:
        # Group experiments by algorithm name, and separately by
        # (algorithm, worker count) for the duration and regret plots.
        algorithm_name = list(exp.configuration["algorithms"].keys())[0]
        algorithm_groups[algorithm_name].append(exp)

        n_worker = self.workers[task_index]
        algo_key = algorithm_name + "_workers_" + str(n_worker)
        algorithm_worker_groups[algo_key].append(exp)

    assessment = self.__class__.__name__

    figure = defaultdict(dict)
    figure[assessment][task] = dict()
    figure[assessment][task][parallel_assessment.__name__] = parallel_assessment(
        algorithm_groups
    )
    figure[assessment][task][durations.__name__] = durations(
        algorithm_worker_groups
    )
    figure[assessment][task][regrets.__name__] = regrets(algorithm_worker_groups)
    return figure

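# Illustrative sketch of the structure `analysis` returns (the class and task
# names below are assumptions for illustration, not taken from the benchmark):
# for an assessment class named "ParallelAssessment" and a task "rosenbrock",
# the mapping nests as
#
#   figure["ParallelAssessment"]["rosenbrock"]["parallel_assessment"]
#   figure["ParallelAssessment"]["rosenbrock"]["durations"]
#   figure["ParallelAssessment"]["rosenbrock"]["regrets"]
#
# with each leaf expected to be a `plotly.graph_objects.Figure`.
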
def test_requires_argument(self):
    """Tests that the experiment data are required."""
    with pytest.raises(ValueError):
        parallel_assessment(None)
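
# `asset_parallel_assessment_plot` is a helper defined elsewhere in this test
# module. A minimal sketch of the checks it is assumed to perform is below;
# the name `_sketch_assert_plot` and the exact assertions are assumptions for
# illustration, not the module's actual implementation.
def _sketch_assert_plot(plot, names, n_traces_per_name):
    # Assumed shape: a plotly Figure carrying one group of traces per name.
    assert isinstance(plot, plotly.graph_objects.Figure)
    assert len(plot.data) == len(names) * n_traces_per_name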