def test_broken_trial(self):
    """A trial whose context block raises is marked broken on exit."""
    with create_experiment(config, base_trial) as (_, _, client):
        with pytest.raises(RuntimeError):
            with client.suggest() as trial:
                assert trial.status == "reserved"
                raise RuntimeError("Dummy failure!")

        # Pacemaker must be stopped and the trial flagged as broken.
        assert client._pacemakers == {}
        assert client.get_trial(trial).status == "broken"
def test_atexit_with_multiple_clients_unregister(self, monkeypatch):
    """Each client registers its own atexit hook and can unregister it."""
    config1 = copy.deepcopy(config)
    config2 = copy.deepcopy(config)
    config2["name"] = "cloned"

    with create_experiment(exp_config=config1, trial_config=base_trial) as (
        _,
        _,
        client1,
    ):

        def please_dont_call_me(client):
            raise RuntimeError("Please don't call me!!!")

        monkeypatch.setattr(
            "orion.client.experiment.set_broken_trials", please_dont_call_me
        )

        with create_experiment(exp_config=config2, trial_config=base_trial) as (
            _,
            _,
            client2,
        ):
            trial1 = client1.suggest()
            trial2 = client2.suggest()

            # The registered function in atexit is called as expected
            with pytest.raises(RuntimeError) as exc:
                atexit._run_exitfuncs()
            assert "Please don't call me!!!" == str(exc.value)

            # Unregister the function
            client2.release(trial2)
            client2.close()

            # It should not be called
            atexit._run_exitfuncs()

            assert client1._pacemakers == {}
            assert client2._pacemakers == {}
            assert client1.get_trial(trial1).status == "broken"
            assert client2.get_trial(trial2).status == "interrupted"
def test_insert_params_bad_results(self):
    """Results with a wrong format cannot be saved (trial is registered anyhow)."""
    with create_experiment(config, base_trial) as (_, _, client):
        with pytest.raises(ValueError) as exc:
            client.insert(
                dict(x=100), [dict(name="objective", type="bad bad bad", value=0)]
            )
        assert "Given type, bad bad bad, not one of: " in str(exc.value)
        # The failed insertion must not leave a running pacemaker behind.
        assert client._pacemakers == {}
def test_reserve(self):
    """Reserving a registered trial updates its status and starts a pacemaker."""
    with create_experiment(config, base_trial) as (cfg, experiment, client):
        trial = experiment.get_trial(uid=cfg.trials[1]["_id"])
        assert trial.status != "reserved"

        client.reserve(trial)

        # Both the local object and the stored copy reflect the reservation.
        assert trial.status == "reserved"
        assert experiment.get_trial(trial).status == "reserved"
        assert client._pacemakers[trial.id].is_alive()
        client._pacemakers.pop(trial.id).stop()
def test_insert_params_fails_not_reserved(self, monkeypatch):
    """Failed insertion of a duplicated trial must not reserve the original trial."""
    mock_space_iterate(monkeypatch)
    with create_experiment(config, base_trial) as (_, _, client):
        with pytest.raises(DuplicateKeyError):
            client.insert(dict(x=1), reserve=True)

        assert client._pacemakers == {}
def test_insert_params_wo_results(self):
    """Test insertion without results without reservation.

    The inserted trial must be registered in storage as ``interrupted``
    and must not get a pacemaker.
    """
    with create_experiment(config, base_trial) as (cfg, experiment, client):
        trial = client.insert(dict(x=100))
        assert trial.status == "interrupted"
        assert trial.params["x"] == 100
        # Fix: use a distinct loop variable (the original generator shadowed
        # ``trial``) and a set comprehension instead of set(genexp).
        assert trial.id in {t.id for t in experiment.fetch_trials()}
        compare_without_heartbeat(trial, client.get_trial(uid=trial.id))
        assert client._pacemakers == {}
def test_insert_bad_params(self):
    """Parameters outside of the search space cannot be registered in storage."""
    with create_experiment(config, base_trial) as (_, _, client):
        with pytest.raises(ValueError) as exc:
            client.insert(dict(x="bad bad bad"))
        assert "Parameters values {'x': 'bad bad bad'} are outside of space" in str(
            exc.value
        )
        assert client._pacemakers == {}
def test_interrupted_trial(self):
    """A KeyboardInterrupt inside the trial context must not mark it broken."""
    with create_experiment(config, base_trial) as (_, _, client):
        with pytest.raises(KeyboardInterrupt):
            with client.suggest() as trial:
                assert trial.status == "reserved"
                raise KeyboardInterrupt

        # Interruption releases the trial as interrupted, not broken.
        assert client._pacemakers == {}
        assert client.get_trial(trial).status == "interrupted"
def test_experiment_worker_as_parameter(self):
    """``Experiment`` itself is accepted as the plotting parameter."""
    with create_experiment(config, trial_config, ["completed"]) as (
        _,
        experiment,
        _,
    ):
        figure = parallel_coordinates(experiment)

    assert_parallel_coordinates_plot(figure, order=["x", "loss"])
def test_call_nonexistent_kind():
    """Requesting a plot kind that does not exist raises a ValueError."""
    with create_experiment(config, trial_config, ["completed"]) as (_, _, experiment):
        accessor = PlotAccessor(experiment)
        with pytest.raises(ValueError) as exception:
            accessor(kind="nonexistent")
        assert "Plot of kind 'nonexistent' is not one of" in str(exception.value)
def test_graph_layout(self):
    """The parallel-coordinates plot has the expected layout."""
    with create_experiment(config, trial_config, ["completed"]) as (
        _,
        _,
        experiment,
    ):
        figure = parallel_coordinates(experiment)

    assert_parallel_coordinates_plot(figure, order=["x", "loss"])
def test_returns_plotly_object(self):
    """The plotly backend of ``lpi`` returns a plotly Figure."""
    with create_experiment(config, trial_config, ["completed"]) as (
        _,
        _,
        experiment,
    ):
        figure = lpi(experiment, model_kwargs=dict(random_state=1))

    assert type(figure) is plotly.graph_objects.Figure
def test_returns_plotly_object(self):
    """The plotly backend of ``parallel_coordinates`` returns a plotly Figure."""
    with create_experiment(config, trial_config, ["completed"]) as (
        _,
        _,
        experiment,
    ):
        figure = parallel_coordinates(experiment)

    assert type(figure) is plotly.graph_objects.Figure
def test_categorical_multidim(self):
    """Multidimensional categorical dimensions are flattened into one axis each."""
    categorical_config = copy.deepcopy(config)
    categorical_config["space"]["z"] = 'choices(["a", "b", "c"], shape=3)'

    with create_experiment(categorical_config, trial_config) as (_, _, experiment):
        figure = parallel_coordinates(experiment)

    assert_parallel_coordinates_plot(
        figure, order=["x", "z[0]", "z[1]", "z[2]", "loss"]
    )
def test_graph_layout(self):
    """The regret plot has the expected layout."""
    with create_experiment(config, trial_config, ["completed"]) as (
        _,
        _,
        experiment,
    ):
        figure = regret(experiment)

    assert_regret_plot(figure)
def test_release_invalid_status(self):
    """Releasing with an unknown status is rejected with a ValueError."""
    with create_experiment(config, base_trial) as (cfg, experiment, client):
        trial = experiment.get_trial(uid=cfg.trials[1]["_id"])
        client.reserve(trial)

        with pytest.raises(ValueError) as exc:
            client.release(trial, "mouf mouf")
        assert exc.match("Given status `mouf mouf` not one of")
def test_multidim(self):
    """Dimensions with shape > 1 are flattened into one axis per element."""
    multidim_config = copy.deepcopy(config)
    multidim_config["space"]["y"] = "uniform(0, 200, shape=4)"

    with create_experiment(multidim_config, trial_config) as (_, _, experiment):
        figure = parallel_coordinates(experiment)

    assert_parallel_coordinates_plot(
        figure, order=["x", "y[0]", "y[1]", "y[2]", "y[3]", "loss"]
    )
def test_experiment_worker_as_parameter(self):
    """``Experiment`` itself is accepted as the ``regret`` parameter."""
    with create_experiment(config, trial_config, ["completed"]) as (
        _,
        experiment,
        _,
    ):
        figure = regret(experiment)

    assert_regret_plot(figure)
def test_multidim(self, monkeypatch):
    """Dimensions with shape > 1 are flattened in partial-dependency plots."""
    mock_train_regressor(monkeypatch)
    # Shadow the module-level config with a 2-d ``y`` dimension.
    config = mock_space(y="uniform(0, 3, shape=2)")
    mock_experiment(monkeypatch, y=[[3, 3], [2, 3], [1, 2], [0, 3]])

    with create_experiment(config, trial_config) as (_, _, experiment):
        figure = partial_dependencies(
            experiment, n_grid_points=5, model_kwargs=dict(random_state=1)
        )

    assert_partial_dependencies_plot(figure, dims=["x", "y[0]", "y[1]"])
def test_order_columns(self):
    """Columns are sorted according to the ``order`` argument."""
    multidim_config = copy.deepcopy(config)
    for dim_name in "yzutv":
        multidim_config["space"][dim_name] = "uniform(0, 200)"

    with create_experiment(multidim_config, trial_config) as (_, _, experiment):
        figure = parallel_coordinates(experiment, order="vzyx")

    assert_parallel_coordinates_plot(figure, order=["v", "z", "y", "x", "loss"])
def test_graph_layout(self, monkeypatch):
    """Tests the layout of the plot.

    Two copies of the same experiment are grouped under the "random" label.
    """
    mock_experiment_with_random_to_pandas(monkeypatch)
    with create_experiment(config, trial_config, ["completed"]) as (
        _,
        _,
        experiment,
    ):
        plot = parallel_assessment({"random": [experiment] * 2})
        # Fix: f"random" was an f-string without placeholders (ruff F541).
        asset_parallel_assessment_plot(plot, ["random"], 1)
def test_returns_plotly_object(self, monkeypatch):
    """The plotly backend of ``regrets`` returns a plotly Figure."""
    mock_experiment_with_random_to_pandas(monkeypatch)
    with create_experiment(config, trial_config, ["completed"]) as (
        _,
        _,
        experiment,
    ):
        figure = regrets([experiment])

    assert type(figure) is plotly.graph_objects.Figure
def test_unbalanced_experiments(self, monkeypatch):
    """Rankings with averages support unbalanced experiments."""
    mock_experiment_with_random_to_pandas(monkeypatch, unbalanced=True)
    with create_experiment(config, trial_config, ["completed"]) as (
        _,
        _,
        experiment,
    ):
        figure = rankings({"exp-1": [experiment] * 10, "exp-2": [experiment] * 10})

    assert_rankings_plot(figure, ["exp-1", "exp-2"], with_avg=True, balanced=0)
def test_dict_of_list_of_experiments(self, monkeypatch):
    """Rankings with averages support experiments grouped into lists."""
    mock_experiment_with_random_to_pandas(monkeypatch)
    with create_experiment(config, trial_config, ["completed"]) as (
        _,
        _,
        experiment,
    ):
        figure = rankings({"exp-1": [experiment] * 10, "exp-2": [experiment] * 10})

    assert_rankings_plot(figure, ["exp-1", "exp-2"], with_avg=True)
def test_dict_of_list_of_experiments(self, monkeypatch):
    """Durations plots support experiments grouped into lists."""
    mock_experiment_with_random_to_pandas(monkeypatch)
    with create_experiment(config, trial_config, ["completed"]) as (
        _,
        _,
        experiment,
    ):
        figure = durations({"exp-1": [experiment] * 10, "exp-2": [experiment] * 10})

    assert_durations_plot(figure, ["exp-1", "exp-2"])
def test_fidelity(self, monkeypatch):
    """Fidelity dimensions are supported and rendered on a log axis."""
    mock_train_regressor(monkeypatch)
    # Shadow the module-level config with a fidelity ``y`` dimension.
    config = mock_space(y="fidelity(1, 200, base=3)")
    mock_experiment(monkeypatch, y=[1, 3**2, 1, 3**4])

    with create_experiment(config, trial_config) as (_, _, experiment):
        figure = partial_dependencies(
            experiment, n_grid_points=5, model_kwargs=dict(random_state=1)
        )

    assert_partial_dependencies_plot(figure, dims=["x", "y"], log_dims=["y"])
def test_dict_of_experiments(self, monkeypatch):
    """Rankings support a dict mapping display names to experiments."""
    mock_experiment_with_random_to_pandas(monkeypatch)
    with create_experiment(config, trial_config, ["completed"]) as (
        _,
        _,
        experiment,
    ):
        figure = rankings({"exp-1": experiment, "exp-2": experiment})

    assert_rankings_plot(figure, ["exp-1", "exp-2"])
def test_no_trials(self, client):
    """The API returns an empty figure when the experiment has no trials."""
    with create_experiment(config, trial_config, []) as (
        _,
        _,
        experiment,
    ):
        response = client.simulate_get("/plots/regret/experiment-name")

    assert response.status == "200 OK"
    assert list(response.json.keys()) == ["data", "layout"]
def test_graph_layout(self, monkeypatch):
    """The regrets plot has the expected layout."""
    mock_experiment_with_random_to_pandas(monkeypatch)
    with create_experiment(config, trial_config, ["completed"]) as (
        _,
        _,
        experiment,
    ):
        figure = regrets([experiment])
        assert_regrets_plot(figure, [f"{experiment.name}-v{experiment.version}"])
def test_suggest(self, monkeypatch):
    """Verify that suggest reserves available trials."""
    mock_space_iterate(monkeypatch)
    with create_experiment(config, base_trial) as (cfg, experiment, client):
        suggested = client.suggest()
        assert suggested.status == "reserved"
        assert suggested.params["x"] == 1
        assert len(experiment.fetch_trials()) == 5
        assert client._pacemakers[suggested.id].is_alive()
        client._pacemakers.pop(suggested.id).stop()