def test_plot_slice_log_scale() -> None:
    """Slice plots should render data for both linear- and log-distributed params."""
    study = create_study()
    study.add_trial(
        create_trial(
            value=0.0,
            params={"x_linear": 1.0, "y_log": 1e-3},
            distributions={
                "x_linear": UniformDistribution(0.0, 3.0),
                "y_log": LogUniformDistribution(1e-5, 1.0),
            },
        )
    )

    # Plot one parameter at a time.
    # TODO(ytknzw): Add more specific assertion with the test case.
    for param_name in ("y_log", "x_linear"):
        figure = plot_slice(study, params=[param_name])
        assert figure.has_data()

    # Plot all parameters at once: one subplot per parameter.
    # TODO(ytknzw): Add more specific assertion with the test case.
    figure = plot_slice(study)
    assert len(figure) == 2
    for subplot in figure:
        assert subplot.has_data()
def test_plot_slice_log_scale() -> None:
    """Slice plots label each subplot's x-axis with the parameter's name."""
    study = create_study()
    study.add_trial(
        create_trial(
            value=0.0,
            params={"x_linear": 1.0, "y_log": 1e-3},
            distributions={
                "x_linear": UniformDistribution(0.0, 3.0),
                "y_log": LogUniformDistribution(1e-5, 1.0),
            },
        )
    )

    # Plot one parameter at a time: no line artists, correct x label.
    for param_name in ("y_log", "x_linear"):
        axes = plot_slice(study, params=[param_name])
        assert len(axes.get_lines()) == 0
        assert axes.xaxis.label.get_text() == param_name

    # Plot all parameters at once; subplots appear in this order.
    subplots = plot_slice(study)
    assert len(subplots) == 2
    for subplot, expected_label in zip(subplots, ("x_linear", "y_log")):
        assert len(subplot.get_lines()) == 0
        assert subplot.xaxis.label.get_text() == expected_label
def _log_plots(run, study: optuna.Study,
               visualization_backend='plotly',
               log_plot_contour=True,
               log_plot_edf=True,
               log_plot_parallel_coordinate=True,
               log_plot_param_importances=True,
               log_plot_pareto_front=True,
               log_plot_slice=True,
               log_plot_intermediate_values=True,
               log_plot_optimization_history=True,
               ):
    """Log Optuna visualizations of ``study`` to ``run`` as HTML artifacts.

    Each enabled plot is stored under the ``visualizations/`` namespace of the
    Neptune run. Plots that need parameters or intermediate values are skipped
    when the study has none.

    Args:
        run: Neptune run (dict-like) receiving ``visualizations/...`` entries.
        study: Optuna study to visualize.
        visualization_backend: ``'plotly'`` (default) or ``'matplotlib'``.
        log_plot_*: Per-plot enable flags, all ``True`` by default.

    Raises:
        NotImplementedError: If ``visualization_backend`` is not supported.
    """
    if visualization_backend == 'matplotlib':
        import optuna.visualization.matplotlib as vis
    elif visualization_backend == 'plotly':
        import optuna.visualization as vis
    else:
        raise NotImplementedError(f'{visualization_backend} visualisation backend is not implemented')

    # BUG FIX: ``is_available`` is a function. The original code tested the
    # function object itself (``if vis.is_available:``), which is always truthy;
    # it must be called to actually check whether the backend can be used.
    if vis.is_available():
        # All parameter names over all trials; a plot needing parameters is
        # skipped when every trial is parameterless.
        params = list(p_name for t in study.trials for p_name in t.params.keys())

        if log_plot_contour and any(params):
            run['visualizations/plot_contour'] = neptune.types.File.as_html(vis.plot_contour(study))
        if log_plot_edf:
            run['visualizations/plot_edf'] = neptune.types.File.as_html(vis.plot_edf(study))
        if log_plot_parallel_coordinate:
            run['visualizations/plot_parallel_coordinate'] = \
                neptune.types.File.as_html(vis.plot_parallel_coordinate(study))
        # Importances need at least two finished (complete or pruned) trials.
        if log_plot_param_importances and len(
                study.get_trials(states=(optuna.trial.TrialState.COMPLETE,
                                         optuna.trial.TrialState.PRUNED,))) > 1:
            try:
                run['visualizations/plot_param_importances'] = \
                    neptune.types.File.as_html(vis.plot_param_importances(study))
            except (RuntimeError, ValueError, ZeroDivisionError):
                # Unable to compute importances
                pass
        # Pareto front only makes sense for multi-objective studies and is only
        # implemented for the plotly backend.
        if log_plot_pareto_front and study._is_multi_objective() and visualization_backend == 'plotly':
            run['visualizations/plot_pareto_front'] = neptune.types.File.as_html(vis.plot_pareto_front(study))
        if log_plot_slice and any(params):
            run['visualizations/plot_slice'] = neptune.types.File.as_html(vis.plot_slice(study))
        if log_plot_intermediate_values and any(trial.intermediate_values for trial in study.trials):
            # Intermediate values plot if available only if the above condition is met
            run['visualizations/plot_intermediate_values'] = \
                neptune.types.File.as_html(vis.plot_intermediate_values(study))
        if log_plot_optimization_history:
            run['visualizations/plot_optimization_history'] = \
                neptune.types.File.as_html(vis.plot_optimization_history(study))
def test_plot_slice() -> None:
    """Smoke-test ``plot_slice`` with no trials, trials, param selection, and failures."""
    # Without any trial the figure carries no data.
    study = prepare_study_with_trials(no_trials=True)
    figure = plot_slice(study)
    assert not figure.has_data()

    study = prepare_study_with_trials(with_c_d=False)

    # With trials: one subplot per parameter, each with data.
    # TODO(ytknzw): Add more specific assertion with the test case.
    figure = plot_slice(study)
    assert len(figure) == 2
    for subplot in figure:
        assert subplot.has_data()

    # Restricting to a single parameter still produces data.
    # TODO(ytknzw): Add more specific assertion with the test case.
    figure = plot_slice(study, params=["param_a"])
    assert figure.has_data()

    # Unknown parameter names are rejected.
    with pytest.raises(ValueError):
        plot_slice(study, params=["optuna"])

    # Failed trials are ignored entirely.
    def always_fail(_: Trial) -> float:
        raise ValueError

    study = create_study()
    study.optimize(always_fail, n_trials=1, catch=(ValueError,))
    figure = plot_slice(study)
    assert not figure.has_data()
def test_plot_slice_log_scale() -> None:
    """x-axis scale must be 'log' exactly for log-distributed parameters."""
    study = create_study()
    study.add_trial(
        create_trial(
            value=0.0,
            params={"x_linear": 1.0, "y_log": 1e-3},
            distributions={
                "x_linear": FloatDistribution(0.0, 3.0),
                "y_log": FloatDistribution(1e-5, 1.0, log=True),
            },
        )
    )

    # One parameter at a time: scale follows the distribution.
    for param_name, expected_scale in (("y_log", "log"), ("x_linear", "linear")):
        axes = plot_slice(study, params=[param_name])
        assert len(axes.findobj(PathCollection)) == 1
        assert axes.xaxis.label.get_text() == param_name
        assert axes.xaxis.get_scale() == expected_scale
        plt.savefig(BytesIO())

    # All parameters at once; subplots appear in this order.
    subplots = plot_slice(study)
    assert len(subplots) == 2
    for subplot, (param_name, expected_scale) in zip(
        subplots, (("x_linear", "linear"), ("y_log", "log"))
    ):
        assert len(subplot.findobj(PathCollection)) == 1
        assert subplot.xaxis.label.get_text() == param_name
        assert subplot.xaxis.get_scale() == expected_scale
    plt.savefig(BytesIO())
def test_plot_slice() -> None:
    """Exercise ``plot_slice`` axis labels across default and customized targets."""
    # Without any trial there are no line artists.
    study = prepare_study_with_trials(no_trials=True)
    axes = plot_slice(study)
    assert len(axes.get_lines()) == 0

    study = prepare_study_with_trials(with_c_d=False)

    # One subplot per parameter; default y-axis label.
    subplots = plot_slice(study)
    assert len(subplots) == 2
    for subplot in subplots:
        assert len(subplot.get_lines()) == 0
    assert subplots[0].yaxis.label.get_text() == "Objective Value"

    # Single selected parameter.
    axes = plot_slice(study, params=["param_a"])
    assert len(axes.get_lines()) == 0
    assert axes.yaxis.label.get_text() == "Objective Value"

    # Customized target value; warns because the study is single-objective.
    with pytest.warns(UserWarning):
        axes = plot_slice(study, params=["param_a"], target=lambda t: t.params["param_b"])
    assert len(axes.get_lines()) == 0
    assert axes.yaxis.label.get_text() == "Objective Value"

    # Customized target name replaces the y-axis label.
    subplots = plot_slice(study, target_name="Target Name")
    assert len(subplots) == 2
    for subplot in subplots:
        assert len(subplot.get_lines()) == 0
    assert subplots[0].yaxis.label.get_text() == "Target Name"

    # Unknown parameter names are rejected.
    with pytest.raises(ValueError):
        plot_slice(study, params=["optuna"])

    # Failed trials are ignored entirely.
    def always_fail(_: Trial) -> float:
        raise ValueError

    study = create_study()
    study.optimize(always_fail, n_trials=1, catch=(ValueError,))
    axes = plot_slice(study)
    assert len(axes.get_lines()) == 0
def test_target_is_none_and_study_is_multi_obj() -> None:
    """A multi-objective study without an explicit ``target`` must be rejected."""
    multi_objective_study = create_study(directions=["minimize", "minimize"])
    with pytest.raises(ValueError):
        plot_slice(multi_objective_study)
def test_plot_slice() -> None:
    """Check scatter contents (via PathCollection offsets) and axis labels."""
    # Without any trial there are no scatter points.
    study = prepare_study_with_trials(no_trials=True)
    axes = plot_slice(study)
    assert len(axes.findobj(PathCollection)) == 0
    plt.savefig(BytesIO())

    study = prepare_study_with_trials(with_c_d=False)

    # One subplot per parameter, each holding one scatter collection.
    subplots = plot_slice(study)
    assert len(subplots) == 2
    assert len(subplots[0].findobj(PathCollection)) == 1
    assert len(subplots[1].findobj(PathCollection)) == 1
    assert subplots[0].yaxis.label.get_text() == "Objective Value"
    plt.savefig(BytesIO())

    # Scatter plot data is available as PathCollection.
    points_a = subplots[0].findobj(PathCollection)[0].get_offsets().data
    points_b = subplots[1].findobj(PathCollection)[0].get_offsets().data
    assert np.allclose(points_a, [[1.0, 0.0], [2.5, 1.0]])
    assert np.allclose(points_b, [[2.0, 0.0], [0.0, 2.0], [1.0, 1.0]])

    # Single selected parameter.
    axes = plot_slice(study, params=["param_a"])
    assert len(axes.findobj(PathCollection)) == 1
    assert axes.yaxis.label.get_text() == "Objective Value"
    points_a = axes.findobj(PathCollection)[0].get_offsets().data
    assert np.allclose(points_a, [[1.0, 0.0], [2.5, 1.0]])
    plt.savefig(BytesIO())

    # Customized target value; warns because the study is single-objective.
    with pytest.warns(UserWarning):
        axes = plot_slice(study, params=["param_a"], target=lambda t: t.params["param_b"])
    assert len(axes.findobj(PathCollection)) == 1
    assert axes.yaxis.label.get_text() == "Objective Value"
    points_a = axes.findobj(PathCollection)[0].get_offsets().data
    assert np.allclose(points_a, [[1.0, 2.0], [2.5, 1.0]])
    plt.savefig(BytesIO())

    # Customized target name replaces the y-axis label.
    subplots = plot_slice(study, target_name="Target Name")
    assert len(subplots) == 2
    assert len(subplots[0].findobj(PathCollection)) == 1
    assert len(subplots[1].findobj(PathCollection)) == 1
    assert subplots[0].yaxis.label.get_text() == "Target Name"
    plt.savefig(BytesIO())

    # Unknown parameter names are rejected.
    with pytest.raises(ValueError):
        plot_slice(study, params=["optuna"])

    # Failed trials are ignored entirely.
    def always_fail(_: Trial) -> float:
        raise ValueError

    study = create_study()
    study.optimize(always_fail, n_trials=1, catch=(ValueError,))
    axes = plot_slice(study)
    assert len(axes.get_lines()) == 0
    assert len(axes.findobj(PathCollection)) == 0
    plt.savefig(BytesIO())
def test_plot_slice() -> None:
    """Build a study by hand and verify scatter contents and axis labels."""
    # Without any trial there are no scatter points.
    study = create_study(direction="minimize")
    axes = plot_slice(study)
    assert len(axes.findobj(PathCollection)) == 0
    plt.savefig(BytesIO())

    # Three trials: two set both params; one sets only param_b.
    study = create_study(direction="minimize")
    study.add_trial(
        create_trial(
            value=0.0,
            params={"param_a": 1.0, "param_b": 2.0},
            distributions={
                "param_a": FloatDistribution(0.0, 3.0),
                "param_b": FloatDistribution(0.0, 3.0),
            },
        )
    )
    study.add_trial(
        create_trial(
            value=2.0,
            params={"param_b": 0.0},
            distributions={"param_b": FloatDistribution(0.0, 3.0)},
        )
    )
    study.add_trial(
        create_trial(
            value=1.0,
            params={"param_a": 2.5, "param_b": 1.0},
            distributions={
                "param_a": FloatDistribution(0.0, 3.0),
                "param_b": FloatDistribution(0.0, 3.0),
            },
        )
    )

    # One subplot per parameter, each holding one scatter collection.
    subplots = plot_slice(study)
    assert len(subplots) == 2
    assert len(subplots[0].findobj(PathCollection)) == 1
    assert len(subplots[1].findobj(PathCollection)) == 1
    assert subplots[0].yaxis.label.get_text() == "Objective Value"
    plt.savefig(BytesIO())

    # Scatter plot data is available as PathCollection.
    points_a = subplots[0].findobj(PathCollection)[0].get_offsets().data
    points_b = subplots[1].findobj(PathCollection)[0].get_offsets().data
    assert np.allclose(points_a, [[1.0, 0.0], [2.5, 1.0]])
    assert np.allclose(points_b, [[2.0, 0.0], [0.0, 2.0], [1.0, 1.0]])

    # Single selected parameter.
    axes = plot_slice(study, params=["param_a"])
    assert len(axes.findobj(PathCollection)) == 1
    assert axes.yaxis.label.get_text() == "Objective Value"
    points_a = axes.findobj(PathCollection)[0].get_offsets().data
    assert np.allclose(points_a, [[1.0, 0.0], [2.5, 1.0]])
    plt.savefig(BytesIO())

    # Customized target value; warns because the study is single-objective.
    with pytest.warns(UserWarning):
        axes = plot_slice(study, params=["param_a"], target=lambda t: t.params["param_b"])
    assert len(axes.findobj(PathCollection)) == 1
    assert axes.yaxis.label.get_text() == "Objective Value"
    points_a = axes.findobj(PathCollection)[0].get_offsets().data
    assert np.allclose(points_a, [[1.0, 2.0], [2.5, 1.0]])
    plt.savefig(BytesIO())

    # Customized target name replaces the y-axis label.
    subplots = plot_slice(study, target_name="Target Name")
    assert len(subplots) == 2
    assert len(subplots[0].findobj(PathCollection)) == 1
    assert len(subplots[1].findobj(PathCollection)) == 1
    assert subplots[0].yaxis.label.get_text() == "Target Name"
    plt.savefig(BytesIO())

    # Unknown parameter names are rejected.
    with pytest.raises(ValueError):
        plot_slice(study, params=["optuna"])

    # Failed trials are ignored entirely (``fail_objective`` is module-level).
    study = create_study()
    study.optimize(fail_objective, n_trials=1, catch=(ValueError,))
    axes = plot_slice(study)
    assert len(axes.get_lines()) == 0
    assert len(axes.findobj(PathCollection)) == 0
    plt.savefig(BytesIO())