Example 1
    def __init__(
        self,
        consider_prior=True,  # type: bool
        prior_weight=1.0,  # type: float
        consider_magic_clip=True,  # type: bool
        consider_endpoints=False,  # type: bool
        n_startup_trials=10,  # type: int
        n_ei_candidates=24,  # type: int
        gamma=default_gamma,  # type: Callable[[int], int]
        weights=default_weights,  # type: Callable[[int], np.ndarray]
        seed=None,  # type: Optional[int]
    ):
        # type: (...) -> None

        self._parzen_estimator_parameters = _ParzenEstimatorParameters(
            consider_prior, prior_weight, consider_magic_clip, consider_endpoints, weights
        )
        self._prior_weight = prior_weight
        self._n_startup_trials = n_startup_trials
        self._n_ei_candidates = n_ei_candidates
        self._gamma = gamma
        self._weights = weights

        self._rng = np.random.RandomState(seed)
        self._random_sampler = RandomSampler(seed=seed)
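
A minimal usage sketch for the constructor above (hedged: the signature matches TPESampler's __init__, whose internal RandomSampler handles the first n_startup_trials trials):

import optuna
from optuna.samplers import TPESampler

sampler = TPESampler(n_startup_trials=10, seed=42)
study = optuna.create_study(sampler=sampler)
# The first 10 trials are sampled randomly; later trials use the TPE model.
study.optimize(lambda trial: trial.suggest_float("x", -10, 10) ** 2, n_trials=20)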
Example 2
    def testConvertOptuna(self):
        from ray.tune.suggest.optuna import OptunaSearch, param
        from optuna.samplers import RandomSampler

        config = {
            "a": tune.sample.Categorical([2, 3, 4]).uniform(),
            "b": {
                "x": tune.sample.Integer(0, 5).quantized(2),
                "y": 4,
                "z": tune.sample.Float(1e-4, 1e-2).loguniform()
            }
        }
        converted_config = OptunaSearch.convert_search_space(config)
        optuna_config = [
            param.suggest_categorical("a", [2, 3, 4]),
            param.suggest_int("b/x", 0, 5, 2),
            param.suggest_loguniform("b/z", 1e-4, 1e-2)
        ]

        sampler1 = RandomSampler(seed=1234)
        searcher1 = OptunaSearch(space=converted_config,
                                 sampler=sampler1,
                                 metric="a",
                                 mode="max")

        sampler2 = RandomSampler(seed=1234)
        searcher2 = OptunaSearch(space=optuna_config,
                                 sampler=sampler2,
                                 metric="a",
                                 mode="max")

        config1 = searcher1.suggest("0")
        config2 = searcher2.suggest("0")

        self.assertEqual(config1, config2)
        self.assertIn(config1["a"], [2, 3, 4])
        self.assertIn(config1["b"]["x"], list(range(5)))
        self.assertLess(1e-4, config1["b"]["z"])
        self.assertLess(config1["b"]["z"], 1e-2)

        searcher = OptunaSearch(metric="a", mode="max")
        analysis = tune.run(_mock_objective,
                            config=config,
                            search_alg=searcher,
                            num_samples=1)
        trial = analysis.trials[0]
        assert trial.config["a"] in [2, 3, 4]

        mixed_config = {
            "a": tune.uniform(5, 6),
            "b": tune.uniform(8, 9)  # Cannot mix List and Dict
        }
        searcher = OptunaSearch(space=mixed_config, metric="a", mode="max")
        config = searcher.suggest("0")
        self.assertTrue(5 <= config["a"] <= 6)
        self.assertTrue(8 <= config["b"] <= 9)
Example 3
    def testOptunaReportTooOften(self):
        from ray.tune.search.optuna import OptunaSearch
        from optuna.samplers import RandomSampler

        searcher = OptunaSearch(
            sampler=RandomSampler(seed=1234),
            space=OptunaSearch.convert_search_space(self.config),
            metric="metric",
            mode="max",
        )
        searcher.suggest("trial_1")
        searcher.on_trial_result("trial_1", {
            "training_iteration": 1,
            "metric": 1
        })
        searcher.on_trial_complete("trial_1", {
            "training_iteration": 2,
            "metric": 1
        })

        # Report after complete should not fail
        searcher.on_trial_result("trial_1", {
            "training_iteration": 3,
            "metric": 1
        })

        searcher.on_trial_complete("trial_1", {
            "training_iteration": 4,
            "metric": 1
        })
Example 4
def test_multi_objective_fanova_importance_evaluator_with_infinite(
    target_idx: int, inf_value: float
) -> None:
    # This test ensures that trials with infinite values are ignored when calculating importance scores.
    n_trial = 10
    seed = 13

    # Importance scores are calculated without a trial with an inf value.
    study = create_study(directions=["minimize", "minimize"], sampler=RandomSampler(seed=seed))
    study.optimize(multi_objective_function, n_trials=n_trial)

    evaluator = FanovaImportanceEvaluator(seed=seed)
    param_importance_without_inf = evaluator.evaluate(study, target=lambda t: t.values[target_idx])

    # A trial with an inf value is added into the study manually.
    study.add_trial(
        create_trial(
            values=[inf_value, inf_value],
            params={"x1": 1.0, "x2": 1.0, "x3": 3.0},
            distributions={
                "x1": FloatDistribution(low=0.1, high=3),
                "x2": FloatDistribution(low=0.1, high=3, log=True),
                "x3": FloatDistribution(low=2, high=4, log=True),
            },
        )
    )
    # Importance scores are calculated with a trial with an inf value.
    param_importance_with_inf = evaluator.evaluate(study, target=lambda t: t.values[target_idx])

    # The importance scores should be the same with and without the inf trial,
    # because the trial whose objective value is inf is ignored.
    assert param_importance_with_inf == param_importance_without_inf
Example 5
def test_shap_importance_evaluator_with_infinite(inf_value: float) -> None:
    # This test ensures that trials with infinite values are ignored when calculating importance scores.
    n_trial = 10
    seed = 13

    # Importance scores are calculated without a trial with an inf value.
    study = create_study(sampler=RandomSampler(seed=seed))
    study.optimize(objective, n_trials=n_trial)

    evaluator = ShapleyImportanceEvaluator(seed=seed)
    param_importance_without_inf = evaluator.evaluate(study)

    # A trial with an inf value is added into the study manually.
    study.add_trial(
        create_trial(
            value=inf_value,
            params={"x1": 1.0, "x2": 1.0, "x3": 3.0, "x4": 0.1},
            distributions={
                "x1": FloatDistribution(low=0.1, high=3),
                "x2": FloatDistribution(low=0.1, high=3, log=True),
                "x3": IntDistribution(low=2, high=4, log=True),
                "x4": CategoricalDistribution([0.1, 1, 10]),
            },
        )
    )
    # Importance scores are calculated with a trial with an inf value.
    param_importance_with_inf = evaluator.evaluate(study)

    # The importance scores should be the same with and without the inf trial,
    # because the trial whose objective value is inf is ignored.
    assert param_importance_with_inf == param_importance_without_inf
Example 6
def test_warnings(
    tmpdir: Path,
    search_space: Optional[DictConfig],
    params: Optional[DictConfig],
    raise_warning: bool,
    msg: Optional[str],
) -> None:
    partial_sweeper = partial(
        OptunaSweeperImpl,
        sampler=RandomSampler(),
        direction=Direction.minimize,
        storage=None,
        study_name="test",
        n_trials=1,
        n_jobs=1,
        custom_search_space=None,
    )
    if search_space is not None:
        search_space = OmegaConf.create(search_space)
    if params is not None:
        params = OmegaConf.create(params)
    sweeper = partial_sweeper(search_space=search_space, params=params)
    if raise_warning:
        with warns(UserWarning, match=msg):
            sweeper._process_searchspace_config()
    else:
        sweeper._process_searchspace_config()
Example 7
    def __init__(
        self,
        *,
        candidates_func: Optional[
            Callable[
                [
                    "torch.Tensor",
                    "torch.Tensor",
                    Optional["torch.Tensor"],
                    "torch.Tensor",
                ],
                "torch.Tensor",
            ]
        ] = None,
        constraints_func: Optional[Callable[[FrozenTrial], Sequence[float]]] = None,
        n_startup_trials: int = 10,
        independent_sampler: Optional[BaseSampler] = None,
    ):
        _imports.check()

        self._candidates_func = candidates_func
        self._constraints_func = constraints_func
        self._independent_sampler = independent_sampler or RandomSampler()
        self._n_startup_trials = n_startup_trials

        self._study_id: Optional[int] = None
        self._search_space = IntersectionSearchSpace()
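
A construction sketch for the sampler above (hedged: the signature matches optuna.integration.BoTorchSampler, and the sketch assumes its public keyword-only arguments): until n_startup_trials trials finish, parameters come from the independent sampler.

import optuna
from optuna.integration import BoTorchSampler
from optuna.samplers import RandomSampler

# All arguments are keyword-only, as enforced by the bare `*` in the signature.
sampler = BoTorchSampler(n_startup_trials=10, independent_sampler=RandomSampler(seed=0))
study = optuna.create_study(direction="minimize", sampler=sampler)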
Example 8
    def _make_sampler(self):
        if self.sampler_method == 'random':
            sampler = RandomSampler(seed=self.seed)
        elif self.sampler_method == 'tpe':
            sampler = TPESampler(n_startup_trials=5, seed=self.seed)
        else:
            raise ValueError('Unknown sampler: {}'.format(self.sampler_method))
        return sampler
Example 9
def test_reseed_rng() -> None:
    independent_sampler = RandomSampler()
    sampler = BoTorchSampler(independent_sampler=independent_sampler)
    original_independent_sampler_seed = cast(
        RandomSampler, sampler._independent_sampler)._rng.seed

    sampler.reseed_rng()
    assert (original_independent_sampler_seed != cast(
        RandomSampler, sampler._independent_sampler)._rng.seed)
Example 10
def create_sampler(sampler_mode: str) -> BaseSampler:
    if sampler_mode == "random":
        return RandomSampler()
    elif sampler_mode == "tpe":
        return TPESampler()
    elif sampler_mode == "cmaes":
        return CmaEsSampler()
    else:
        assert False
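
A usage sketch for the factory above (hedged: it assumes the standard optuna.samplers imports of BaseSampler, RandomSampler, TPESampler, and CmaEsSampler, which the snippet omits):

import optuna
from optuna.samplers import BaseSampler, CmaEsSampler, RandomSampler, TPESampler

study = optuna.create_study(sampler=create_sampler("random"))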
Example 11
def test_multi_objective_trial_with_infinite_value_ignored(
        target_idx: int, inf_value: float, evaluator: BaseImportanceEvaluator,
        n_trial: int) -> None:
    def _multi_objective_function(trial: Trial) -> Tuple[float, float]:
        x1 = trial.suggest_float("x1", 0.1, 3)
        x2 = trial.suggest_float("x2", 0.1, 3, log=True)
        x3 = trial.suggest_float("x3", 2, 4, log=True)
        return x1, x2 * x3

    seed = 13
    target_name = "Objective Value"

    study = create_study(directions=["minimize", "minimize"],
                         sampler=RandomSampler(seed=seed))
    study.optimize(_multi_objective_function, n_trials=n_trial)

    # Create param importances info without inf value.
    info_without_inf = _get_importances_info(
        study,
        evaluator=evaluator,
        params=None,
        target=lambda t: t.values[target_idx],
        target_name=target_name,
    )

    # A trial with an inf value is added into the study manually.
    study.add_trial(
        create_trial(
            values=[inf_value, inf_value],
            params={
                "x1": 1.0,
                "x2": 1.0,
                "x3": 3.0
            },
            distributions={
                "x1": FloatDistribution(low=0.1, high=3),
                "x2": FloatDistribution(low=0.1, high=3, log=True),
                "x3": FloatDistribution(low=2, high=4, log=True),
            },
        ))

    # Create param importances info with inf value.
    info_with_inf = _get_importances_info(
        study,
        evaluator=evaluator,
        params=None,
        target=lambda t: t.values[target_idx],
        target_name=target_name,
    )

    # The obtained info instances should be the same with and without the inf trial,
    # because the trial whose objective value is inf is ignored.
    assert info_with_inf == info_without_inf
Example 12
def test_call_after_trial_of_base_sampler() -> None:
    base_sampler = RandomSampler()
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", optuna.exceptions.ExperimentalWarning)
        sampler = PartialFixedSampler(fixed_params={},
                                      base_sampler=base_sampler)
    study = optuna.create_study(sampler=sampler)
    with patch.object(base_sampler,
                      "after_trial",
                      wraps=base_sampler.after_trial) as mock_object:
        study.optimize(lambda _: 1.0, n_trials=1)
        assert mock_object.call_count == 1
Example 13
def test_mean_abs_shap_importance_evaluator_n_trees() -> None:
    # Assumes that `seed` can be fixed to reproduce identical results.

    study = create_study(sampler=RandomSampler(seed=0))
    study.optimize(objective, n_trials=3)

    evaluator = ShapleyImportanceEvaluator(n_trees=10, seed=0)
    param_importance = evaluator.evaluate(study)

    evaluator = ShapleyImportanceEvaluator(n_trees=20, seed=0)
    param_importance_different_n_trees = evaluator.evaluate(study)

    assert param_importance != param_importance_different_n_trees
Example 14
def test_fanova_importance_evaluator_max_depth() -> None:
    # Assumes that `seed` can be fixed to reproduce identical results.

    study = create_study(sampler=RandomSampler(seed=0))
    study.optimize(objective, n_trials=3)

    evaluator = FanovaImportanceEvaluator(max_depth=1, seed=0)
    param_importance = evaluator.evaluate(study)

    evaluator = FanovaImportanceEvaluator(max_depth=2, seed=0)
    param_importance_different_max_depth = evaluator.evaluate(study)

    assert param_importance != param_importance_different_max_depth
Example 15
def test_multi_objective_trial_with_infinite_value_ignored(
        target_idx: int, inf_value: float, evaluator: BaseImportanceEvaluator,
        n_trial: int) -> None:
    def _multi_objective_function(trial: Trial) -> Tuple[float, float]:
        x1 = trial.suggest_float("x1", 0.1, 3)
        x2 = trial.suggest_float("x2", 0.1, 3, log=True)
        x3 = trial.suggest_float("x3", 2, 4, log=True)
        return x1, x2 * x3

    seed = 13

    study = create_study(directions=["minimize", "minimize"],
                         sampler=RandomSampler(seed=seed))
    study.optimize(_multi_objective_function, n_trials=n_trial)

    # A figure is created without a trial with an inf value.
    plot_param_importances(study,
                           evaluator=evaluator,
                           target=lambda t: t.values[target_idx])
    with BytesIO() as byte_io:
        plt.savefig(byte_io)
        figure_without_inf = byte_io.getvalue()

    # A trial with an inf value is added into the study manually.
    study.add_trial(
        create_trial(
            values=[inf_value, inf_value],
            params={
                "x1": 1.0,
                "x2": 1.0,
                "x3": 3.0
            },
            distributions={
                "x1": FloatDistribution(low=0.1, high=3),
                "x2": FloatDistribution(low=0.1, high=3, log=True),
                "x3": FloatDistribution(low=2, high=4, log=True),
            },
        ))

    # A figure is created with a trial with an inf value.
    plot_param_importances(study,
                           evaluator=evaluator,
                           target=lambda t: t.values[target_idx])
    with BytesIO() as byte_io:
        plt.savefig(byte_io)
        figure_with_inf = byte_io.getvalue()

    # The obtained figures should be identical with and without the inf trial,
    # because the trial whose objective value is inf is ignored.
    assert len(figure_without_inf) > 0
    assert figure_without_inf == figure_with_inf
Example 16
    def __init__(
        self,
        consider_prior: bool = True,
        prior_weight: float = 1.0,
        consider_magic_clip: bool = True,
        consider_endpoints: bool = False,
        n_startup_trials: int = 10,
        n_ei_candidates: int = 24,
        gamma: Callable[[int], int] = default_gamma,
        weights: Callable[[int], np.ndarray] = default_weights,
        seed: Optional[int] = None,
        *,
        multivariate: bool = False,
        warn_independent_sampling: bool = True,
    ) -> None:

        self._parzen_estimator_parameters = _ParzenEstimatorParameters(
            consider_prior, prior_weight, consider_magic_clip,
            consider_endpoints, weights)
        self._prior_weight = prior_weight
        self._n_startup_trials = n_startup_trials
        self._n_ei_candidates = n_ei_candidates
        self._gamma = gamma
        self._weights = weights

        self._warn_independent_sampling = warn_independent_sampling
        self._rng = np.random.RandomState(seed)
        self._random_sampler = RandomSampler(seed=seed)

        self._multivariate = multivariate
        self._search_space = IntersectionSearchSpace()

        if multivariate:
            warnings.warn(
                "``multivariate`` option is an experimental feature."
                " The interface can change in the future.",
                ExperimentalWarning,
            )
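
A hedged sketch of opting into the experimental multivariate mode shown above while silencing the ExperimentalWarning that the constructor emits (the public class is optuna.samplers.TPESampler):

import warnings
import optuna
from optuna.exceptions import ExperimentalWarning
from optuna.samplers import TPESampler

with warnings.catch_warnings():
    warnings.simplefilter("ignore", ExperimentalWarning)
    sampler = TPESampler(multivariate=True, seed=42)
study = optuna.create_study(sampler=sampler)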
Example 17
def test_fixed_sampling() -> None:
    def objective(trial: Trial) -> float:
        x = trial.suggest_float("x", -10, 10)
        y = trial.suggest_float("y", -10, 10)
        return x**2 + y**2

    study0 = optuna.create_study()
    study0.sampler = RandomSampler(seed=42)
    study0.optimize(objective, n_trials=1)
    x_sampled0 = study0.trials[0].params["x"]

    # Fix parameter ``y`` as 0.
    study1 = optuna.create_study()
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", optuna.exceptions.ExperimentalWarning)
        study1.sampler = PartialFixedSampler(
            fixed_params={"y": 0}, base_sampler=RandomSampler(seed=42))
    study1.optimize(objective, n_trials=1)

    x_sampled1 = study1.trials[0].params["x"]
    y_sampled1 = study1.trials[0].params["y"]
    assert x_sampled1 == x_sampled0
    assert y_sampled1 == 0
Example 18
def test_fanova_importance_evaluator_with_target() -> None:
    # Assumes that `seed` can be fixed to reproduce identical results.

    study = create_study(sampler=RandomSampler(seed=0))
    study.optimize(objective, n_trials=3)

    evaluator = FanovaImportanceEvaluator(seed=0)
    param_importance = evaluator.evaluate(study)
    param_importance_with_target = evaluator.evaluate(
        study,
        target=lambda t: t.params["x1"] + t.params["x2"],
    )

    assert param_importance != param_importance_with_target
Example 19
def test_fanova_importance_evaluator_seed() -> None:
    study = create_study(sampler=RandomSampler(seed=0))
    study.optimize(objective, n_trials=3)

    evaluator = FanovaImportanceEvaluator(seed=2)
    param_importance = evaluator.evaluate(study)

    evaluator = FanovaImportanceEvaluator(seed=2)
    param_importance_same_seed = evaluator.evaluate(study)
    assert param_importance == param_importance_same_seed

    evaluator = FanovaImportanceEvaluator(seed=3)
    param_importance_different_seed = evaluator.evaluate(study)
    assert param_importance != param_importance_different_seed
Example 20
def test_botorch_n_startup_trials() -> None:
    independent_sampler = RandomSampler()
    sampler = BoTorchSampler(n_startup_trials=2, independent_sampler=independent_sampler)
    study = optuna.create_study(directions=["minimize", "maximize"], sampler=sampler)

    with patch.object(
        independent_sampler, "sample_independent", wraps=independent_sampler.sample_independent
    ) as mock_independent, patch.object(
        sampler, "sample_relative", wraps=sampler.sample_relative
    ) as mock_relative:
        study.optimize(
            lambda t: [t.suggest_float("x0", 0, 1), t.suggest_float("x1", 0, 1)], n_trials=3
        )
        assert mock_independent.call_count == 4  # 2 startup trials x 2 parameters each.
        assert mock_relative.call_count == 3
Example 21
    def __init__(
        self,
        consider_prior: bool = True,
        prior_weight: float = 1.0,
        consider_magic_clip: bool = True,
        consider_endpoints: bool = False,
        n_startup_trials: int = 10,
        n_ei_candidates: int = 24,
        gamma: Callable[[int], int] = default_gamma,
        weights: Callable[[int], np.ndarray] = default_weights,
        seed: Optional[int] = None,
    ) -> None:

        self._parzen_estimator_parameters = _ParzenEstimatorParameters(
            consider_prior, prior_weight, consider_magic_clip, consider_endpoints, weights
        )
        self._prior_weight = prior_weight
        self._n_startup_trials = n_startup_trials
        self._n_ei_candidates = n_ei_candidates
        self._gamma = gamma
        self._weights = weights

        self._rng = np.random.RandomState(seed)
        self._random_sampler = RandomSampler(seed=seed)
Example 22
    def _create_sampler(self, sampler_method: str) -> BaseSampler:
        # n_warmup_steps: Disable pruner until the trial reaches the given number of steps.
        if sampler_method == "random":
            sampler = RandomSampler(seed=self.seed)
        elif sampler_method == "tpe":
            # TODO: try with multivariate=True
            sampler = TPESampler(n_startup_trials=self.n_startup_trials, seed=self.seed)
        elif sampler_method == "skopt":
            # cf. https://scikit-optimize.github.io/#skopt.Optimizer
            # base_estimator options include "GP" (Gaussian process)
            # and "GBRT" (gradient-boosted regression trees).
            sampler = SkoptSampler(skopt_kwargs={"base_estimator": "GP", "acq_func": "gp_hedge"})
        else:
            raise ValueError(f"Unknown sampler: {sampler_method}")
        return sampler
Example 23
def test_reseed_rng() -> None:
    base_sampler = RandomSampler()
    study = optuna.create_study(sampler=base_sampler)
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", optuna.exceptions.ExperimentalWarning)
        sampler = PartialFixedSampler(fixed_params={"x": 0},
                                      base_sampler=study.sampler)
    original_seed = base_sampler._rng.seed

    with patch.object(base_sampler,
                      "reseed_rng",
                      wraps=base_sampler.reseed_rng) as mock_object:
        sampler.reseed_rng()
        assert mock_object.call_count == 1
        assert original_seed != base_sampler._rng.seed
Example 24
def test_get_info_importances_nonfinite_removed(
        inf_value: float, evaluator: BaseImportanceEvaluator,
        n_trials: int) -> None:
    def _objective(trial: Trial) -> float:
        x1 = trial.suggest_float("x1", 0.1, 3)
        x2 = trial.suggest_float("x2", 0.1, 3, log=True)
        x3 = trial.suggest_float("x3", 2, 4, log=True)
        return x1 + x2 * x3

    seed = 13
    target_name = "Objective Value"

    study = create_study(sampler=RandomSampler(seed=seed))
    study.optimize(_objective, n_trials=n_trials)

    # Create param importances info without inf value.
    info_without_inf = _get_importances_info(study,
                                             evaluator=evaluator,
                                             params=None,
                                             target=None,
                                             target_name=target_name)

    # A trial with an inf value is added into the study manually.
    study.add_trial(
        create_trial(
            value=inf_value,
            params={
                "x1": 1.0,
                "x2": 1.0,
                "x3": 3.0
            },
            distributions={
                "x1": FloatDistribution(low=0.1, high=3),
                "x2": FloatDistribution(low=0.1, high=3, log=True),
                "x3": FloatDistribution(low=2, high=4, log=True),
            },
        ))

    # Create param importances info with inf value.
    info_with_inf = _get_importances_info(study,
                                          evaluator=evaluator,
                                          params=None,
                                          target=None,
                                          target_name=target_name)

    # The obtained info instances should be the same with and without the inf trial,
    # because the trial whose objective value is inf is ignored.
    assert info_with_inf == info_without_inf
Example 25
    def testOptuna(self):
        from ray.tune.suggest.optuna import OptunaSearch
        from optuna.samplers import RandomSampler

        np.random.seed(1000)  # At least one nan, inf, -inf and float

        out = tune.run(
            _invalid_objective,
            search_alg=OptunaSearch(sampler=RandomSampler(seed=1234)),
            config=self.config,
            mode="max",
            num_samples=8,
            reuse_actors=False)

        best_trial = out.best_trial
        self.assertLessEqual(best_trial.config["report"], 2.0)
Example 26
    def __init__(self, argument, grid_search_space=None):
        self.name = ''
        self.argument = argument
        self.grid_search_space = grid_search_space

        if self.argument.sampler == "grid":
            assert self.grid_search_space is not None, "grid search spaceを指定してください"

            self.sampler = GridSampler(self.grid_search_space)
            self.n_trials = 1
            for value in self.grid_search_space.values():
                self.n_trials *= len(value)

            # トライアル回数制限


#            if self.n_trials > self.argument.n_trials:
#                self.n_trials = self.argument.n_trials

            self.obj_func_name = self.objective_grid
        elif self.argument.sampler == "random":
            self.sampler = RandomSampler(seed=self.argument.seed)
            self.n_trials = self.argument.n_trials
            self.obj_func_name = self.objective_no_grid
        else:
            self.sampler = TPESampler(**TPESampler.hyperopt_parameters(),
                                      seed=self.argument.seed)
            self.n_trials = self.argument.n_trials
            self.obj_func_name = self.objective_no_grid

        if self.n_trials == 1:
            try:
                mlflow.set_experiment(self.argument.experiment)
            except Exception as e:
                print(e)
        else:
            try:
                mlflow.set_experiment(
                    self.argument.experiment + "_" +
                    datetime.now().strftime('%Y%m%d_%H:%M:%S'))
            except Exception as e:
                print(e)

        self.study = optuna.create_study(sampler=self.sampler)
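
The grid-size loop above just computes the Cartesian-product cardinality of the search space; a compact equivalent (a sketch using math.prod, with a hypothetical grid):

import math

grid_search_space = {"lr": [1e-3, 1e-2], "batch_size": [32, 64, 128]}  # hypothetical grid
n_trials = math.prod(len(values) for values in grid_search_space.values())  # 2 * 3 = 6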
Example 27
def test_importance_evaluator_with_target(evaluator_init_func: Any) -> None:
    def objective(trial: Trial) -> float:
        x1 = trial.suggest_float("x1", 0.1, 3)
        x2 = trial.suggest_float("x2", 0.1, 3, log=True)
        x3 = trial.suggest_float("x3", 2, 4, log=True)
        return x1 + x2 * x3

    # Assumes that `seed` can be fixed to reproduce identical results.
    study = create_study(sampler=RandomSampler(seed=0))
    study.optimize(objective, n_trials=3)

    evaluator = evaluator_init_func(seed=0)
    param_importance = evaluator.evaluate(study)
    param_importance_with_target = evaluator.evaluate(
        study,
        target=lambda t: t.params["x1"] + t.params["x2"],
    )

    assert param_importance != param_importance_with_target
Example 28
def test_importance_evaluator_seed(evaluator_init_func: Any) -> None:
    def objective(trial: Trial) -> float:
        x1 = trial.suggest_float("x1", 0.1, 3)
        x2 = trial.suggest_float("x2", 0.1, 3, log=True)
        x3 = trial.suggest_float("x3", 2, 4, log=True)
        return x1 + x2 * x3

    study = create_study(sampler=RandomSampler(seed=0))
    study.optimize(objective, n_trials=3)

    evaluator = evaluator_init_func(seed=2)
    param_importance = evaluator.evaluate(study)

    evaluator = evaluator_init_func(seed=2)
    param_importance_same_seed = evaluator.evaluate(study)
    assert param_importance == param_importance_same_seed

    evaluator = evaluator_init_func(seed=3)
    param_importance_different_seed = evaluator.evaluate(study)
    assert param_importance != param_importance_different_seed
Example 29
def test_multi_objective_shap_importance_evaluator_with_infinite(
    target_idx: int, inf_value: float
) -> None:
    def multi_objective_function(trial: Trial) -> Tuple[float, float]:
        x1: float = trial.suggest_float("x1", 0.1, 3)
        x2: float = trial.suggest_float("x2", 0.1, 3, log=True)
        x3: int = trial.suggest_int("x3", 2, 4, log=True)
        x4 = trial.suggest_categorical("x4", [0.1, 1.0, 10.0])
        assert isinstance(x4, float)
        return (x1 + x2 * x3 + x4, x1 * x4)

    # This test ensures that trials with infinite values are ignored when calculating importance scores.
    n_trial = 10
    seed = 13

    # Importance scores are calculated without a trial with an inf value.
    study = create_study(directions=["minimize", "minimize"], sampler=RandomSampler(seed=seed))
    study.optimize(multi_objective_function, n_trials=n_trial)

    evaluator = ShapleyImportanceEvaluator(seed=seed)
    param_importance_without_inf = evaluator.evaluate(study, target=lambda t: t.values[target_idx])

    # A trial with an inf value is added into the study manually.
    study.add_trial(
        create_trial(
            values=[inf_value, inf_value],
            params={"x1": 1.0, "x2": 1.0, "x3": 3.0, "x4": 0.1},
            distributions={
                "x1": FloatDistribution(low=0.1, high=3),
                "x2": FloatDistribution(low=0.1, high=3, log=True),
                "x3": IntDistribution(low=2, high=4, log=True),
                "x4": CategoricalDistribution([0.1, 1, 10]),
            },
        )
    )
    # Importance scores are calculated with a trial with an inf value.
    param_importance_with_inf = evaluator.evaluate(study, target=lambda t: t.values[target_idx])

    # The importance scores should be the same with and without the inf trial,
    # because the trial whose objective value is inf is ignored.
    assert param_importance_with_inf == param_importance_without_inf
Example 30
    def testOptuna(self):
        from ray.tune.suggest.optuna import OptunaSearch
        from optuna.samplers import RandomSampler

        np.random.seed(1000)

        out = tune.run(_multi_objective,
                       search_alg=OptunaSearch(
                           sampler=RandomSampler(seed=1234),
                           metric=["a", "b", "c"],
                           mode=["max", "min", "max"],
                       ),
                       config=self.config,
                       num_samples=16,
                       reuse_actors=False)

        best_trial_a = out.get_best_trial("a", "max")
        self.assertGreaterEqual(best_trial_a.config["a"], 0.8)
        best_trial_b = out.get_best_trial("b", "min")
        self.assertLessEqual(best_trial_b.config["b"], 0.2)
        best_trial_c = out.get_best_trial("c", "max")
        self.assertGreaterEqual(best_trial_c.config["c"], 0.8)