def test_dict_of_experiments(self, monkeypatch):
    """Verify rankings when experiments are supplied as a dict with custom names."""
    mock_experiment_with_random_to_pandas(monkeypatch)
    with create_experiment(config, trial_config, ["completed"]) as (
        _,
        _,
        experiment,
    ):
        # Both keys map to the same experiment; only the display names differ.
        figure = rankings({"exp-1": experiment, "exp-2": experiment})
        assert_rankings_plot(figure, ["exp-1", "exp-2"])
def test_unbalanced_experiments(self, monkeypatch):
    """Tests the rankings with avg of unbalanced experiments"""
    # Fixed docstring: this test exercises ``rankings``, not regrets.
    # ``unbalanced=True`` makes the mocked experiments report differing
    # numbers of trials, hence ``balanced=0`` in the assertion below.
    mock_experiment_with_random_to_pandas(monkeypatch, unbalanced=True)
    with create_experiment(config, trial_config, ["completed"]) as (
        _,
        _,
        experiment,
    ):
        plot = rankings({"exp-1": [experiment] * 10, "exp-2": [experiment] * 10})
        assert_rankings_plot(plot, ["exp-1", "exp-2"], with_avg=True, balanced=0)
def test_dict_of_list_of_experiments(self, monkeypatch):
    """Verify rankings averaged over experiments grouped into per-name lists."""
    mock_experiment_with_random_to_pandas(monkeypatch)
    with create_experiment(config, trial_config, ["completed"]) as (
        _,
        _,
        experiment,
    ):
        # Each name gets its own list of 10 repetitions of the experiment.
        groups = {name: [experiment] * 10 for name in ("exp-1", "exp-2")}
        figure = rankings(groups)
        assert_rankings_plot(figure, ["exp-1", "exp-2"], with_avg=True)
def test_graph_layout(self, monkeypatch):
    """Verify the layout of a rankings plot for a single experiment."""
    mock_experiment_with_random_to_pandas(monkeypatch)
    with create_experiment(config, trial_config, ["completed"]) as (
        _,
        _,
        experiment,
    ):
        figure = rankings([experiment])
        # Default label format is "<name>-v<version>".
        expected_label = f"{experiment.name}-v{experiment.version}"
        assert_rankings_plot(figure, [expected_label])
def test_list_of_dict_of_experiments(self, monkeypatch):
    """Verify rankings averaged across a list of competition dicts."""
    mock_experiment_with_random_to_pandas(monkeypatch)
    with create_experiment(config, trial_config, ["completed"]) as (
        _,
        _,
        experiment,
    ):
        # Ten identical "competitions", each pitting the same two names.
        competitions = [
            {"exp-1": experiment, "exp-2": experiment} for _ in range(10)
        ]
        figure = rankings(competitions)
        assert_rankings_plot(figure, ["exp-1", "exp-2"], with_avg=True)
def test_figure_layout(self, study_experiments_config):
    """Verify the plot format produced by the AverageRank assessment."""
    assessment = AverageRank()
    with create_study_experiments(**study_experiments_config) as experiments:
        figure = assessment.analysis("task_name", experiments)
        # One label per configured algorithm; each config dict has a
        # single algorithm-name key.
        algorithm_names = [
            next(iter(algo["algorithm"]))
            for algo in study_experiments_config["algorithms"]
        ]
        assert_rankings_plot(
            figure,
            algorithm_names,
            balanced=study_experiments_config["max_trial"],
            with_avg=True,
        )
def test_list_of_experiments(self, monkeypatch):
    """Verify rankings when experiments are supplied as a plain list."""
    mock_experiment_with_random_to_pandas(monkeypatch)
    with create_experiment(config, trial_config, ["completed"]) as (
        _,
        _,
        experiment,
    ):
        child = orion.client.create_experiment(
            experiment.name, branching={"branch_to": "child"}
        )
        figure = rankings([experiment, child])
        # Experiments are sorted alphabetically by name in the plot,
        # so the branched child ("child") comes first.
        expected_labels = [
            f"{exp.name}-v{exp.version}" for exp in (child, experiment)
        ]
        assert_rankings_plot(figure, expected_labels)
def test_list_of_experiments_name_conflict(self, monkeypatch):
    """Verify rankings for a list of experiments sharing the same name."""
    mock_experiment_with_random_to_pandas(monkeypatch)
    with create_experiment(config, trial_config, ["completed"]) as (
        _,
        _,
        experiment,
    ):
        child = orion.client.create_experiment(
            experiment.name,
            branching={"branch_to": experiment.name, "enable": True},
        )
        # Branching onto the same name keeps the name and bumps the version.
        assert child.name == experiment.name
        assert child.version == experiment.version + 1
        figure = rankings([experiment, child])
        # Experiments are sorted alphabetically by name in the plot;
        # identical names keep parent-then-child version order.
        expected_labels = [
            f"{exp.name}-v{exp.version}" for exp in (experiment, child)
        ]
        assert_rankings_plot(figure, expected_labels)
def test_ignore_uncompleted_statuses(self, monkeypatch):
    """Verify that trials with non-completed statuses are excluded."""
    statuses = [
        "completed",
        "new",
        "reserved",
        "completed",
        "broken",
        "completed",
        "interrupted",
        "completed",
    ]
    mock_experiment_with_random_to_pandas(monkeypatch, status=statuses)
    with create_experiment(config, trial_config) as (_, _, experiment):
        figure = rankings([experiment])
        # Only the four "completed" trials should remain in the plot.
        assert_rankings_plot(
            figure, [f"{experiment.name}-v{experiment.version}"], balanced=4
        )