def test_plot_contour_log_scale_and_str_category() -> None:
    # If the search space has three parameters, plot_contour generates nine plots.
    study = create_study()
    study.add_trial(
        create_trial(
            value=0.0,
            params={"param_a": 1e-6, "param_b": "100", "param_c": "one"},
            distributions={
                "param_a": FloatDistribution(1e-7, 1e-2, log=True),
                "param_b": CategoricalDistribution(["100", "101"]),
                "param_c": CategoricalDistribution(["one", "two"]),
            },
        )
    )
    study.add_trial(
        create_trial(
            value=1.0,
            params={"param_a": 1e-5, "param_b": "101", "param_c": "two"},
            distributions={
                "param_a": FloatDistribution(1e-7, 1e-2, log=True),
                "param_b": CategoricalDistribution(["100", "101"]),
                "param_c": CategoricalDistribution(["one", "two"]),
            },
        )
    )

    figure = plot_contour(study)
    subplots = [plot for plot in figure.flatten() if plot.has_data()]
    expected = {"param_a": [1e-6, 1e-5], "param_b": [0.0, 1.0], "param_c": [0.0, 1.0]}
    ranges = itertools.permutations(expected.keys(), 2)

    for plot, (yrange, xrange) in zip(subplots, ranges):
        # Take 5% axis padding into account.
        np.testing.assert_allclose(plot.get_xlim(), expected[xrange], atol=5e-2)
        np.testing.assert_allclose(plot.get_ylim(), expected[yrange], atol=5e-2)
    plt.savefig(BytesIO())

def test_dominates_invalid() -> None:
    directions = [StudyDirection.MINIMIZE, StudyDirection.MAXIMIZE]

    # The numbers of objectives for `t1` and `t2` don't match.
    t1 = create_trial(values=[1])  # One objective.
    t2 = create_trial(values=[1, 2])  # Two objectives.
    with pytest.raises(ValueError):
        _dominates(t1, t2, directions)

    # The numbers of objectives and directions don't match.
    t1 = create_trial(values=[1])  # One objective.
    t2 = create_trial(values=[1])  # One objective.
    with pytest.raises(ValueError):
        _dominates(t1, t2, directions)

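# For contrast with the invalid inputs above, a minimal sketch of `_dominates`
# on well-formed inputs. The expected semantics here are inferred from
# `test_dominates_2d` later in this file, not from library documentation.
def example_dominates_valid() -> None:
    directions = [StudyDirection.MINIMIZE, StudyDirection.MAXIMIZE]

    # [0, 1] is better in both objectives than [1, 0], so it dominates.
    better = create_trial(values=[0, 1])
    worse = create_trial(values=[1, 0])
    assert _dominates(better, worse, directions)
    assert not _dominates(worse, better, directions)

    # A trial never dominates an identical trial; at least one objective
    # must be strictly better.
    assert not _dominates(better, better, directions)
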
def test_color_map(direction: str) -> None:
    study = create_study(direction=direction)
    for i in range(3):
        study.add_trial(
            create_trial(
                value=float(i),
                params={"param_a": float(i), "param_b": float(i)},
                distributions={
                    "param_a": FloatDistribution(0.0, 3.0),
                    "param_b": FloatDistribution(0.0, 3.0),
                },
            )
        )

    # `target` is `None`.
    line = plotly_plot_parallel_coordinate(study).data[0]["line"]
    assert COLOR_SCALE == [v[1] for v in line["colorscale"]]
    if direction == "minimize":
        assert line["reversescale"]
    else:
        assert not line["reversescale"]

    # When `target` is not `None`, `reversescale` is always `True`.
    line = plotly_plot_parallel_coordinate(study, target=lambda t: t.number).data[0]["line"]
    assert COLOR_SCALE == [v[1] for v in line["colorscale"]]
    assert line["reversescale"]

    # Multi-objective optimization.
    study = create_study(directions=[direction, direction])
    for i in range(3):
        study.add_trial(
            create_trial(
                values=[float(i), float(i)],
                params={"param_a": float(i), "param_b": float(i)},
                distributions={
                    "param_a": FloatDistribution(0.0, 3.0),
                    "param_b": FloatDistribution(0.0, 3.0),
                },
            )
        )
    line = plotly_plot_parallel_coordinate(study, target=lambda t: t.number).data[0]["line"]
    assert COLOR_SCALE == [v[1] for v in line["colorscale"]]
    assert line["reversescale"]

def test_plot_parallel_coordinate_unique_hyper_param() -> None:
    # Test the case where only one unique value is suggested during optimization.
    study_categorical_params = create_study()
    distributions: Dict[str, BaseDistribution] = {
        "category_a": CategoricalDistribution(("preferred", "opt")),
        "param_b": FloatDistribution(1, 1000, log=True),
    }
    study_categorical_params.add_trial(
        create_trial(
            value=0.0,
            params={"category_a": "preferred", "param_b": 30},
            distributions=distributions,
        )
    )

    # Both hyperparameters contain unique values.
    figure = plot_parallel_coordinate(study_categorical_params)
    assert len(figure.data[0]["dimensions"]) == 3
    assert figure.data[0]["dimensions"][0]["label"] == "Objective Value"
    assert figure.data[0]["dimensions"][0]["range"] == (0.0, 0.0)
    assert figure.data[0]["dimensions"][0]["values"] == (0.0,)
    assert figure.data[0]["dimensions"][1]["label"] == "category_a"
    assert figure.data[0]["dimensions"][1]["range"] == (0, 0)
    assert figure.data[0]["dimensions"][1]["values"] == (0.0,)
    assert figure.data[0]["dimensions"][1]["ticktext"] == ("preferred",)
    assert figure.data[0]["dimensions"][1]["tickvals"] == (0,)
    assert figure.data[0]["dimensions"][2]["label"] == "param_b"
    assert figure.data[0]["dimensions"][2]["range"] == (math.log10(30), math.log10(30))
    assert figure.data[0]["dimensions"][2]["values"] == (math.log10(30),)
    assert figure.data[0]["dimensions"][2]["ticktext"] == ("30",)
    assert figure.data[0]["dimensions"][2]["tickvals"] == (math.log10(30),)

    study_categorical_params.add_trial(
        create_trial(
            value=2.0,
            params={"category_a": "preferred", "param_b": 20},
            distributions=distributions,
        )
    )

    # "category_a" still contains only one unique suggested value after the second trial.
    figure = plot_parallel_coordinate(study_categorical_params)
    assert len(figure.data[0]["dimensions"]) == 3
    assert figure.data[0]["dimensions"][1]["label"] == "category_a"
    assert figure.data[0]["dimensions"][1]["range"] == (0, 0)
    assert figure.data[0]["dimensions"][1]["values"] == (0.0, 0.0)
    assert figure.data[0]["dimensions"][1]["ticktext"] == ("preferred",)
    assert figure.data[0]["dimensions"][1]["tickvals"] == (0,)

def test_generate_contour_plot_for_few_observations(params: List[str]) -> None:
    study = create_study(direction="minimize")
    study.add_trial(
        create_trial(
            values=[0.0],
            params={"param_a": 1.0, "param_b": 2.0},
            distributions={
                "param_a": FloatDistribution(0.0, 3.0),
                "param_b": FloatDistribution(0.0, 3.0),
            },
        )
    )
    study.add_trial(
        create_trial(
            values=[2.0],
            params={"param_b": 0.0},
            distributions={"param_b": FloatDistribution(0.0, 3.0)},
        )
    )

    info = _get_contour_info(study, params=params)
    assert info == _ContourInfo(
        sorted_params=sorted(params),
        sub_plot_infos=[
            [
                _SubContourInfo(
                    xaxis=_AxisInfo(
                        name=sorted(params)[0],
                        range=(1.0, 1.0),
                        is_log=False,
                        is_cat=False,
                        indices=[1.0],
                        values=[1.0, None],
                    ),
                    yaxis=_AxisInfo(
                        name=sorted(params)[1],
                        range=(-0.1, 2.1),
                        is_log=False,
                        is_cat=False,
                        indices=[-0.1, 0.0, 2.0, 2.1],
                        values=[2.0, 0.0],
                    ),
                    z_values={},
                )
            ]
        ],
        reverse_scale=True,
        target_name="Objective Value",
    )

def test_plot_parallel_coordinate_log_params() -> None:
    # Test with log params.
    study_log_params = create_study()
    study_log_params.add_trial(
        create_trial(
            value=0.0,
            params={"param_a": 1e-6, "param_b": 10},
            distributions={
                "param_a": LogUniformDistribution(1e-7, 1e-2),
                "param_b": LogUniformDistribution(1, 1000),
            },
        )
    )
    study_log_params.add_trial(
        create_trial(
            value=1.0,
            params={"param_a": 2e-5, "param_b": 200},
            distributions={
                "param_a": LogUniformDistribution(1e-7, 1e-2),
                "param_b": LogUniformDistribution(1, 1000),
            },
        )
    )
    study_log_params.add_trial(
        create_trial(
            value=0.1,
            params={"param_a": 1e-4, "param_b": 30},
            distributions={
                "param_a": LogUniformDistribution(1e-7, 1e-2),
                "param_b": LogUniformDistribution(1, 1000),
            },
        )
    )

    figure = plot_parallel_coordinate(study_log_params)
    assert len(figure.data[0]["dimensions"]) == 3
    assert figure.data[0]["dimensions"][0]["label"] == "Objective Value"
    assert figure.data[0]["dimensions"][0]["range"] == (0.0, 1.0)
    assert figure.data[0]["dimensions"][0]["values"] == (0.0, 1.0, 0.1)
    assert figure.data[0]["dimensions"][1]["label"] == "param_a"
    assert figure.data[0]["dimensions"][1]["range"] == (-6.0, -4.0)
    assert figure.data[0]["dimensions"][1]["values"] == (-6, math.log10(2e-5), -4)
    assert figure.data[0]["dimensions"][1]["ticktext"] == ("1e-06", "1e-05", "0.0001")
    assert figure.data[0]["dimensions"][1]["tickvals"] == (-6, -5, -4.0)
    assert figure.data[0]["dimensions"][2]["label"] == "param_b"
    assert figure.data[0]["dimensions"][2]["range"] == (1.0, math.log10(200))
    assert figure.data[0]["dimensions"][2]["values"] == (1.0, math.log10(200), math.log10(30))
    assert figure.data[0]["dimensions"][2]["ticktext"] == ("10", "100", "200")
    assert figure.data[0]["dimensions"][2]["tickvals"] == (1.0, 2.0, math.log10(200))

def test_inconsistent_number_of_trial_values() -> None:
    studies: List[Study] = []
    n_studies = 5

    for i in range(n_studies):
        study = prepare_study_with_trials()
        if i % 2 == 0:
            study.add_trial(create_trial(value=1.0))
        studies.append(study)

    edf_info = _get_edf_info(studies)

    x_values = edf_info.x_values
    min_objective = 0.0
    max_objective = 2.0
    assert np.min(x_values) == min_objective
    assert np.max(x_values) == max_objective
    assert len(x_values) == NUM_SAMPLES_X_AXIS

    lines = edf_info.lines
    assert len(lines) == n_studies
    for line, study in zip(lines, studies):
        assert line.study_name == study.study_name
        _validate_edf_values(line.y_values)

def enqueue_trial(self, params: Dict[str, Any]) -> None:
    """Enqueue a trial with given parameter values.

    You can fix the next sampling parameters which will be evaluated in your
    objective function.

    Example:

        .. testcode::

            import optuna


            def objective(trial):
                x = trial.suggest_uniform("x", 0, 10)
                return x ** 2


            study = optuna.create_study()
            study.enqueue_trial({"x": 5})
            study.enqueue_trial({"x": 0})
            study.optimize(objective, n_trials=2)

            assert study.trials[0].params == {"x": 5}
            assert study.trials[1].params == {"x": 0}

    Args:
        params:
            Parameter values to pass to your objective function.
    """

    self.add_trial(
        create_trial(state=TrialState.WAITING, system_attrs={"fixed_params": params})
    )

def test_plot_slice_log_scale() -> None:
    study = create_study()
    study.add_trial(
        create_trial(
            value=0.0,
            params={"x_linear": 1.0, "y_log": 1e-3},
            distributions={
                "x_linear": UniformDistribution(0.0, 3.0),
                "y_log": LogUniformDistribution(1e-5, 1.0),
            },
        )
    )

    # Plot a parameter.
    figure = plot_slice(study, params=["y_log"])
    assert len(figure.get_lines()) == 0
    assert figure.xaxis.label.get_text() == "y_log"
    figure = plot_slice(study, params=["x_linear"])
    assert len(figure.get_lines()) == 0
    assert figure.xaxis.label.get_text() == "x_linear"

    # Plot multiple parameters.
    figure = plot_slice(study)
    assert len(figure) == 2
    assert len(figure[0].get_lines()) == 0
    assert len(figure[1].get_lines()) == 0
    assert figure[0].xaxis.label.get_text() == "x_linear"
    assert figure[1].xaxis.label.get_text() == "y_log"

def _create_trial(
    trial_type: type,
    params: Optional[Dict[str, Any]] = None,
    distributions: Optional[Dict[str, BaseDistribution]] = None,
) -> BaseTrial:
    if params is None:
        params = {"x": 10}
    assert params is not None
    if distributions is None:
        distributions = {"x": FloatDistribution(5, 12)}
    assert distributions is not None

    if trial_type == FixedTrial:
        return FixedTrial(params)
    elif trial_type == FrozenTrial:
        trial = create_trial(value=0.2, params=params, distributions=distributions)
        trial.number = 0
        return trial
    elif trial_type == Trial:
        study = create_study()
        study.enqueue_trial(params)
        return study.ask()
    else:
        assert False

def test_multi_objective_fanova_importance_evaluator_with_infinite(
    target_idx: int, inf_value: float
) -> None:
    # The test ensures that trials with infinite values are ignored when
    # calculating importance scores.
    n_trial = 10
    seed = 13

    # Importance scores are calculated without a trial with an inf value.
    study = create_study(directions=["minimize", "minimize"], sampler=RandomSampler(seed=seed))
    study.optimize(multi_objective_function, n_trials=n_trial)

    evaluator = FanovaImportanceEvaluator(seed=seed)
    param_importance_without_inf = evaluator.evaluate(study, target=lambda t: t.values[target_idx])

    # A trial with an inf value is added into the study manually.
    study.add_trial(
        create_trial(
            values=[inf_value, inf_value],
            params={"x1": 1.0, "x2": 1.0, "x3": 3.0},
            distributions={
                "x1": FloatDistribution(low=0.1, high=3),
                "x2": FloatDistribution(low=0.1, high=3, log=True),
                "x3": FloatDistribution(low=2, high=4, log=True),
            },
        )
    )

    # Importance scores are calculated with a trial with an inf value.
    param_importance_with_inf = evaluator.evaluate(study, target=lambda t: t.values[target_idx])

    # The obtained importance scores should be the same with and without the inf
    # trial, because the last trial, whose objective value is inf, is ignored.
    assert param_importance_with_inf == param_importance_without_inf

def test_plot_slice_log_scale() -> None:
    study = create_study()
    study.add_trial(
        create_trial(
            value=0.0,
            params={"x_linear": 1.0, "y_log": 1e-3},
            distributions={
                "x_linear": UniformDistribution(0.0, 3.0),
                "y_log": LogUniformDistribution(1e-5, 1.0),
            },
        )
    )

    # Plot a parameter.
    # TODO(ytknzw): Add more specific assertion with the test case.
    figure = plot_slice(study, params=["y_log"])
    assert figure.has_data()
    figure = plot_slice(study, params=["x_linear"])
    assert figure.has_data()

    # Plot multiple parameters.
    # TODO(ytknzw): Add more specific assertion with the test case.
    figure = plot_slice(study)
    assert len(figure) == 2
    assert figure[0].has_data()
    assert figure[1].has_data()

def test_shap_importance_evaluator_with_infinite(inf_value: float) -> None:
    # The test ensures that trials with infinite values are ignored when
    # calculating importance scores.
    n_trial = 10
    seed = 13

    # Importance scores are calculated without a trial with an inf value.
    study = create_study(sampler=RandomSampler(seed=seed))
    study.optimize(objective, n_trials=n_trial)

    evaluator = ShapleyImportanceEvaluator(seed=seed)
    param_importance_without_inf = evaluator.evaluate(study)

    # A trial with an inf value is added into the study manually.
    study.add_trial(
        create_trial(
            value=inf_value,
            params={"x1": 1.0, "x2": 1.0, "x3": 3.0, "x4": 0.1},
            distributions={
                "x1": FloatDistribution(low=0.1, high=3),
                "x2": FloatDistribution(low=0.1, high=3, log=True),
                "x3": IntDistribution(low=2, high=4, log=True),
                "x4": CategoricalDistribution([0.1, 1, 10]),
            },
        )
    )

    # Importance scores are calculated with a trial with an inf value.
    param_importance_with_inf = evaluator.evaluate(study)

    # The obtained importance scores should be the same with and without the inf
    # trial, because the last trial, whose objective value is inf, is ignored.
    assert param_importance_with_inf == param_importance_without_inf

def test_plot_slice_log_scale() -> None:
    study = create_study()
    study.add_trial(
        create_trial(
            value=0.0,
            params={"x_linear": 1.0, "y_log": 1e-3},
            distributions={
                "x_linear": UniformDistribution(0.0, 3.0),
                "y_log": LogUniformDistribution(1e-5, 1.0),
            },
        )
    )

    # Plot a parameter.
    figure = plot_slice(study, params=["y_log"])
    assert figure.layout["xaxis_type"] == "log"
    figure = plot_slice(study, params=["x_linear"])
    assert figure.layout["xaxis_type"] is None

    # Plot multiple parameters.
    figure = plot_slice(study)
    assert figure.layout["xaxis_type"] is None
    assert figure.layout["xaxis2_type"] == "log"

def test_create_trial(state: TrialState) -> None:
    value = 0.2
    params = {"x": 10}
    distributions = {"x": UniformDistribution(5, 12)}
    user_attrs = {"foo": "bar"}
    system_attrs = {"baz": "qux"}
    intermediate_values = {0: 0.0, 1: 0.1, 2: 0.1}

    trial = create_trial(
        state=state,
        value=value,
        params=params,
        distributions=distributions,
        user_attrs=user_attrs,
        system_attrs=system_attrs,
        intermediate_values=intermediate_values,
    )

    assert isinstance(trial, FrozenTrial)
    assert trial.state == (state if state is not None else TrialState.COMPLETE)
    assert trial.value == value
    assert trial.params == params
    assert trial.distributions == distributions
    assert trial.user_attrs == user_attrs
    assert trial.system_attrs == system_attrs
    assert trial.intermediate_values == intermediate_values
    assert trial.datetime_start is not None
    assert (trial.datetime_complete is not None) == (state is None or state.is_finished())

def test_called_single_methods_when_multi() -> None:
    state = TrialState.COMPLETE
    values = (0.2, 0.3)
    params = {"x": 10}
    distributions = {"x": UniformDistribution(5, 12)}
    user_attrs = {"foo": "bar"}
    system_attrs = {"baz": "qux"}
    intermediate_values = {0: 0.0, 1: 0.1, 2: 0.1}

    trial = create_trial(
        state=state,
        values=values,
        params=params,
        distributions=distributions,
        user_attrs=user_attrs,
        system_attrs=system_attrs,
        intermediate_values=intermediate_values,
    )

    with pytest.raises(RuntimeError):
        trial.value

    with pytest.raises(RuntimeError):
        trial.value = 0.1

    with pytest.raises(RuntimeError):
        trial.value = [0.1]

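# By contrast, the multi-objective accessor works on the same kind of trial; a
# brief companion sketch to the RuntimeError checks above (not part of the
# original test).
def example_values_accessor_when_multi() -> None:
    trial = create_trial(
        values=(0.2, 0.3),
        params={"x": 10},
        distributions={"x": UniformDistribution(5, 12)},
    )
    # `values` is the multi-objective counterpart of the single-objective `value`.
    assert trial.values == [0.2, 0.3]
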
def test_plot_parallel_coordinate_log_params() -> None:
    # Test with log params.
    study_log_params = create_study()
    distributions: Dict[str, BaseDistribution] = {
        "param_a": FloatDistribution(1e-7, 1e-2, log=True),
        "param_b": FloatDistribution(1, 1000, log=True),
    }
    study_log_params.add_trial(
        create_trial(
            value=0.0,
            params={"param_a": 1e-6, "param_b": 10},
            distributions=distributions,
        )
    )
    study_log_params.add_trial(
        create_trial(
            value=1.0,
            params={"param_a": 2e-5, "param_b": 200},
            distributions=distributions,
        )
    )
    study_log_params.add_trial(
        create_trial(
            value=0.1,
            params={"param_a": 1e-4, "param_b": 30},
            distributions=distributions,
        )
    )

    figure = plot_parallel_coordinate(study_log_params)
    axes = figure.get_figure().axes
    assert len(axes) == 3 + 1
    assert axes[0].get_ylim() == (0.0, 1.0)
    assert axes[1].get_ylabel() == "Objective Value"
    assert axes[1].get_ylim() == (0.0, 1.0)
    objectives = _fetch_objectives_from_figure(figure)
    assert objectives == [0.0, 1.0, 0.1]
    assert axes[2].get_ylim() == (1e-6, 1e-4)
    np.testing.assert_almost_equal(axes[3].get_ylim(), (10.0, 200))
    expected_labels = ["Objective Value", "param_a", "param_b"]
    _test_xtick_labels(axes, expected_labels)
    plt.savefig(BytesIO())

def test_dominates_2d() -> None:
    directions = [StudyDirection.MINIMIZE, StudyDirection.MAXIMIZE]

    # Check all pairs of trials consisting of these values, i.e.,
    # [-inf, -inf], [-inf, -1], [-inf, 1], [-inf, inf], [-1, -inf], ...
    # These values should be specified in ascending order.
    vals = [-float("inf"), -1, 1, float("inf")]

    # The following table illustrates an example of dominance relations.
    # "d" cells in the table dominate the "t" cell in the (MINIMIZE, MAXIMIZE) setting.
    #
    #                         value1
    #          ╔═════╤═════╤═════╤═════╤═════╗
    #          ║     │ -∞  │ -1  │  1  │  ∞  ║
    #          ╟─────┼─────┼─────┼─────┼─────╢
    #          ║ -∞  │     │     │  d  │  d  ║
    #          ╟─────┼─────┼─────┼─────┼─────╢
    #          ║ -1  │     │     │  d  │  d  ║
    #   value0 ╟─────┼─────┼─────┼─────┼─────╢
    #          ║  1  │     │     │  t  │  d  ║
    #          ╟─────┼─────┼─────┼─────┼─────╢
    #          ║  ∞  │     │     │     │     ║
    #          ╚═════╧═════╧═════╧═════╧═════╝
    #
    # In the following code, we check that for each position of the "t" cell, the
    # relation above holds.

    # Generate the set of all possible indices.
    all_indices = set((i, j) for i in range(len(vals)) for j in range(len(vals)))
    for (t_i, t_j) in all_indices:
        # Generate the set of all indices that dominate the current index.
        dominating_indices = set(
            (d_i, d_j) for d_i in range(t_i + 1) for d_j in range(t_j, len(vals))
        )
        dominating_indices -= {(t_i, t_j)}

        for (d_i, d_j) in dominating_indices:
            trial1 = create_trial(values=[vals[t_i], vals[t_j]])
            trial2 = create_trial(values=[vals[d_i], vals[d_j]])
            assert _dominates(trial2, trial1, directions)

        for (d_i, d_j) in all_indices - dominating_indices:
            trial1 = create_trial(values=[vals[t_i], vals[t_j]])
            trial2 = create_trial(values=[vals[d_i], vals[d_j]])
            assert not _dominates(trial2, trial1, directions)

def test_plot_parallel_coordinate_categorical_numeric_params() -> None:
    # Test with categorical params that can be interpreted as numeric params.
    study_categorical_params = create_study()
    distributions: Dict[str, BaseDistribution] = {
        "category_a": CategoricalDistribution((1, 2)),
        "category_b": CategoricalDistribution((10, 20, 30)),
    }
    study_categorical_params.add_trial(
        create_trial(
            value=0.0,
            params={"category_a": 2, "category_b": 20},
            distributions=distributions,
        )
    )
    study_categorical_params.add_trial(
        create_trial(
            value=1.0,
            params={"category_a": 1, "category_b": 30},
            distributions=distributions,
        )
    )
    study_categorical_params.add_trial(
        create_trial(
            value=2.0,
            params={"category_a": 2, "category_b": 10},
            distributions=distributions,
        )
    )

    # Trials are sorted by category_a and category_b, i.e., trial#1, trial#2, and trial#0.
    figure = plot_parallel_coordinate(study_categorical_params)
    assert len(figure.data[0]["dimensions"]) == 3
    assert figure.data[0]["dimensions"][0]["label"] == "Objective Value"
    assert figure.data[0]["dimensions"][0]["range"] == (0.0, 2.0)
    assert figure.data[0]["dimensions"][0]["values"] == (1.0, 2.0, 0.0)
    assert figure.data[0]["dimensions"][1]["label"] == "category_a"
    assert figure.data[0]["dimensions"][1]["range"] == (0, 1)
    assert figure.data[0]["dimensions"][1]["values"] == (0, 1, 1)
    assert figure.data[0]["dimensions"][1]["ticktext"] == (1, 2)
    assert figure.data[0]["dimensions"][1]["tickvals"] == (0, 1)
    assert figure.data[0]["dimensions"][2]["label"] == "category_b"
    assert figure.data[0]["dimensions"][2]["range"] == (0, 2)
    assert figure.data[0]["dimensions"][2]["values"] == (2, 0, 1)
    assert figure.data[0]["dimensions"][2]["ticktext"] == (10, 20, 30)
    assert figure.data[0]["dimensions"][2]["tickvals"] == (0, 1, 2)

def test_plot_parallel_coordinate_categorical_numeric_params() -> None:
    # Test with categorical params that can be interpreted as numeric params.
    study_categorical_params = create_study()
    study_categorical_params.add_trial(
        create_trial(
            value=0.0,
            params={"category_a": 2, "category_b": 20},
            distributions={
                "category_a": CategoricalDistribution((1, 2)),
                "category_b": CategoricalDistribution((10, 20, 30)),
            },
        )
    )
    study_categorical_params.add_trial(
        create_trial(
            value=1.0,
            params={"category_a": 1, "category_b": 30},
            distributions={
                "category_a": CategoricalDistribution((1, 2)),
                "category_b": CategoricalDistribution((10, 20, 30)),
            },
        )
    )
    study_categorical_params.add_trial(
        create_trial(
            value=2.0,
            params={"category_a": 2, "category_b": 10},
            distributions={
                "category_a": CategoricalDistribution((1, 2)),
                "category_b": CategoricalDistribution((10, 20, 30)),
            },
        )
    )

    figure = plot_parallel_coordinate(study_categorical_params)
    assert len(figure.get_lines()) == 0
    plt.savefig(BytesIO())

def test_get_slice_plot_info_for_few_observations(params: List[str]) -> None:
    study = create_study(direction="minimize")
    study.add_trial(
        create_trial(
            values=[0.0],
            params={"param_a": 1.0, "param_b": 2.0},
            distributions={
                "param_a": FloatDistribution(0.0, 3.0),
                "param_b": FloatDistribution(0.0, 3.0),
            },
        )
    )
    study.add_trial(
        create_trial(
            values=[2.0],
            params={"param_b": 0.0},
            distributions={"param_b": FloatDistribution(0.0, 3.0)},
        )
    )

    info = _get_slice_plot_info(study, params, None, "Objective Value")

    assert info == _SlicePlotInfo(
        target_name="Objective Value",
        subplots=[
            _SliceSubplotInfo(
                param_name="param_a",
                x=[1.0],
                y=[0.0],
                trial_numbers=[0],
                is_log=False,
                is_numerical=True,
            ),
            _SliceSubplotInfo(
                param_name="param_b",
                x=[2.0, 0.0],
                y=[0.0, 2.0],
                trial_numbers=[0, 1],
                is_log=False,
                is_numerical=True,
            ),
        ],
    )

def test_plot_parallel_coordinate_log_params() -> None:
    # Test with log params.
    study_log_params = create_study()
    study_log_params.add_trial(
        create_trial(
            value=0.0,
            params={"param_a": 1e-6, "param_b": 10},
            distributions={
                "param_a": FloatDistribution(1e-7, 1e-2, log=True),
                "param_b": FloatDistribution(1, 1000, log=True),
            },
        )
    )
    study_log_params.add_trial(
        create_trial(
            value=1.0,
            params={"param_a": 2e-5, "param_b": 200},
            distributions={
                "param_a": FloatDistribution(1e-7, 1e-2, log=True),
                "param_b": FloatDistribution(1, 1000, log=True),
            },
        )
    )
    study_log_params.add_trial(
        create_trial(
            value=0.1,
            params={"param_a": 1e-4, "param_b": 30},
            distributions={
                "param_a": FloatDistribution(1e-7, 1e-2, log=True),
                "param_b": FloatDistribution(1, 1000, log=True),
            },
        )
    )

    figure = plot_parallel_coordinate(study_log_params)
    assert len(figure.get_lines()) == 0
    plt.savefig(BytesIO())

def test_plot_parallel_coordinate_categorical_params() -> None:
    # Test with categorical params that cannot be interpreted as numeric values.
    study_categorical_params = create_study()
    distributions: Dict[str, BaseDistribution] = {
        "category_a": CategoricalDistribution(("preferred", "opt")),
        "category_b": CategoricalDistribution(("net", "una")),
    }
    study_categorical_params.add_trial(
        create_trial(
            value=0.0,
            params={"category_a": "preferred", "category_b": "net"},
            distributions=distributions,
        )
    )
    study_categorical_params.add_trial(
        create_trial(
            value=2.0,
            params={"category_a": "opt", "category_b": "una"},
            distributions=distributions,
        )
    )

    figure = plot_parallel_coordinate(study_categorical_params)
    axes = figure.get_figure().axes
    assert len(axes) == 3 + 1
    assert axes[0].get_ylim() == (0.0, 2.0)
    assert axes[1].get_ylabel() == "Objective Value"
    assert axes[1].get_ylim() == (0.0, 2.0)
    assert axes[2].get_ylim() == (0, 1)
    assert [line.get_text() for line in axes[2].get_yticklabels()] == ["preferred", "opt"]
    assert axes[3].get_ylim() == (0, 1)
    assert [line.get_text() for line in axes[3].get_yticklabels()] == ["net", "una"]
    objectives = _fetch_objectives_from_figure(figure)
    assert objectives == [0.0, 2.0]
    expected_labels = ["Objective Value", "category_a", "category_b"]
    _test_xtick_labels(axes, expected_labels)
    plt.savefig(BytesIO())

def test_filter_inf_trials(value: float, expected: int) -> None:
    study = create_study()
    study.add_trial(
        create_trial(
            value=0.0,
            params={"x": 1.0},
            distributions={"x": FloatDistribution(0.0, 1.0)},
        )
    )
    study.add_trial(
        create_trial(
            value=value,
            params={"x": 0.0},
            distributions={"x": FloatDistribution(0.0, 1.0)},
        )
    )

    trials = _filter_nonfinite(study.get_trials(states=(TrialState.COMPLETE,)))
    assert len(trials) == expected
    assert all(t.number == num for t, num in zip(trials, range(expected)))

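# A minimal sketch of the behavior `_filter_nonfinite` is expected to have in
# the test above: drop completed trials whose objective values are not finite
# (inf, -inf, or NaN). This is an illustrative re-implementation for reading
# the test, not the library's actual code, which may additionally warn about
# dropped trials or support `target` functions.
def _filter_nonfinite_sketch(trials: List[FrozenTrial]) -> List[FrozenTrial]:
    # Keep a trial only if every objective value is finite.
    return [t for t in trials if all(math.isfinite(v) for v in t.values)]
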
def test_plot_parallel_coordinate_log_params() -> None:
    # Test with log params.
    study_log_params = create_study()
    study_log_params.add_trial(
        create_trial(
            value=0.0,
            params={"param_a": 1e-6, "param_b": 10},
            distributions={
                "param_a": LogUniformDistribution(1e-7, 1e-2),
                "param_b": LogUniformDistribution(1, 1000),
            },
        )
    )
    study_log_params.add_trial(
        create_trial(
            value=1.0,
            params={"param_a": 2e-5, "param_b": 200},
            distributions={
                "param_a": LogUniformDistribution(1e-7, 1e-2),
                "param_b": LogUniformDistribution(1, 1000),
            },
        )
    )
    study_log_params.add_trial(
        create_trial(
            value=0.1,
            params={"param_a": 1e-4, "param_b": 30},
            distributions={
                "param_a": LogUniformDistribution(1e-7, 1e-2),
                "param_b": LogUniformDistribution(1, 1000),
            },
        )
    )

    figure = plot_parallel_coordinate(study_log_params)
    assert figure.has_data()

def test_multi_objective_trial_with_infinite_value_ignored(
    target_idx: int, inf_value: float, evaluator: BaseImportanceEvaluator, n_trial: int
) -> None:
    def _multi_objective_function(trial: Trial) -> Tuple[float, float]:
        x1 = trial.suggest_float("x1", 0.1, 3)
        x2 = trial.suggest_float("x2", 0.1, 3, log=True)
        x3 = trial.suggest_float("x3", 2, 4, log=True)
        return x1, x2 * x3

    seed = 13
    target_name = "Objective Value"

    study = create_study(directions=["minimize", "minimize"], sampler=RandomSampler(seed=seed))
    study.optimize(_multi_objective_function, n_trials=n_trial)

    # Create param importances info without inf value.
    info_without_inf = _get_importances_info(
        study,
        evaluator=evaluator,
        params=None,
        target=lambda t: t.values[target_idx],
        target_name=target_name,
    )

    # A trial with an inf value is added into the study manually.
    study.add_trial(
        create_trial(
            values=[inf_value, inf_value],
            params={"x1": 1.0, "x2": 1.0, "x3": 3.0},
            distributions={
                "x1": FloatDistribution(low=0.1, high=3),
                "x2": FloatDistribution(low=0.1, high=3, log=True),
                "x3": FloatDistribution(low=2, high=4, log=True),
            },
        )
    )

    # Create param importances info with inf value.
    info_with_inf = _get_importances_info(
        study,
        evaluator=evaluator,
        params=None,
        target=lambda t: t.values[target_idx],
        target_name=target_name,
    )

    # The obtained info instances should be the same with and without the inf
    # trial, because the last trial, whose objective value is inf, is ignored.
    assert info_with_inf == info_without_inf

def _create_study_mixture_category_types() -> Study:
    study = create_study()
    distributions: Dict[str, BaseDistribution] = {
        "param_a": CategoricalDistribution([None, "100"]),
        "param_b": CategoricalDistribution([101, 102.0]),
    }
    study.add_trial(
        create_trial(
            value=0.0,
            params={"param_a": None, "param_b": 101},
            distributions=distributions,
        )
    )
    study.add_trial(
        create_trial(
            value=0.5,
            params={"param_a": "100", "param_b": 102.0},
            distributions=distributions,
        )
    )
    return study

def _create_study_with_log_scale_and_str_category_2d() -> Study:
    study = create_study()
    distributions = {
        "param_a": FloatDistribution(1e-7, 1e-2, log=True),
        "param_b": CategoricalDistribution(["100", "101"]),
    }
    study.add_trial(
        create_trial(
            value=0.0,
            params={"param_a": 1e-6, "param_b": "101"},
            distributions=distributions,
        )
    )
    study.add_trial(
        create_trial(
            value=1.0,
            params={"param_a": 1e-5, "param_b": "100"},
            distributions=distributions,
        )
    )
    return study

def _create_trial(mo_trial: "multi_objective.trial.FrozenMultiObjectiveTrial") -> FrozenTrial:
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", ExperimentalWarning)
        trial = create_trial(
            state=mo_trial.state,
            values=mo_trial.values,
            params=mo_trial.params,
            distributions=mo_trial.distributions,
            user_attrs=mo_trial.user_attrs,
            system_attrs=mo_trial.system_attrs,
        )
    return trial

def test_plot_parallel_coordinate_categorical_params() -> None:
    # Test with categorical params that cannot be interpreted as numeric values.
    study_categorical_params = create_study()
    study_categorical_params.add_trial(
        create_trial(
            value=0.0,
            params={"category_a": "preferred", "category_b": "net"},
            distributions={
                "category_a": CategoricalDistribution(("preferred", "opt")),
                "category_b": CategoricalDistribution(("net", "una")),
            },
        )
    )
    study_categorical_params.add_trial(
        create_trial(
            value=2.0,
            params={"category_a": "opt", "category_b": "una"},
            distributions={
                "category_a": CategoricalDistribution(("preferred", "opt")),
                "category_b": CategoricalDistribution(("net", "una")),
            },
        )
    )

    figure = plot_parallel_coordinate(study_categorical_params)
    assert len(figure.data[0]["dimensions"]) == 3
    assert figure.data[0]["dimensions"][0]["label"] == "Objective Value"
    assert figure.data[0]["dimensions"][0]["range"] == (0.0, 2.0)
    assert figure.data[0]["dimensions"][0]["values"] == (0.0, 2.0)
    assert figure.data[0]["dimensions"][1]["label"] == "category_a"
    assert figure.data[0]["dimensions"][1]["range"] == (0, 1)
    assert figure.data[0]["dimensions"][1]["values"] == (0, 1)
    assert figure.data[0]["dimensions"][1]["ticktext"] == ("preferred", "opt")
    assert figure.data[0]["dimensions"][2]["label"] == "category_b"
    assert figure.data[0]["dimensions"][2]["range"] == (0, 1)
    assert figure.data[0]["dimensions"][2]["values"] == (0, 1)
    assert figure.data[0]["dimensions"][2]["ticktext"] == ("net", "una")