The examples below show how optuna.samplers.PartialFixedSampler is used in practice: it fixes a chosen subset of parameters and delegates the remaining ones to a base sampler. The class is experimental, which is why every example suppresses ExperimentalWarning.

Example #1: reseed_rng is forwarded to the base sampler
import warnings
from unittest.mock import patch

import optuna
from optuna.samplers import PartialFixedSampler, RandomSampler


def test_reseed_rng() -> None:
    base_sampler = RandomSampler()
    study = optuna.create_study(sampler=base_sampler)
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", optuna.exceptions.ExperimentalWarning)
        sampler = PartialFixedSampler(fixed_params={"x": 0},
                                      base_sampler=study.sampler)
    original_seed = base_sampler._rng.seed

    with patch.object(base_sampler,
                      "reseed_rng",
                      wraps=base_sampler.reseed_rng) as mock_object:
        sampler.reseed_rng()
        assert mock_object.call_count == 1
        assert original_seed != base_sampler._rng.seed
Example #2: after_trial is forwarded to the base sampler
import warnings
from unittest.mock import patch

import optuna
from optuna.samplers import PartialFixedSampler, RandomSampler


def test_call_after_trial_of_base_sampler() -> None:
    base_sampler = RandomSampler()
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", optuna.exceptions.ExperimentalWarning)
        sampler = PartialFixedSampler(fixed_params={},
                                      base_sampler=base_sampler)
    study = optuna.create_study(sampler=sampler)
    with patch.object(base_sampler,
                      "after_trial",
                      wraps=base_sampler.after_trial) as mock_object:
        study.optimize(lambda _: 1.0, n_trials=1)
        assert mock_object.call_count == 1
Example #3: fixing a numerical parameter outside its range raises a UserWarning
import warnings

import optuna
import pytest
from optuna.samplers import PartialFixedSampler
from optuna.trial import Trial


# Parametrization assumed for illustration: any value outside [-1, 1] works.
@pytest.mark.parametrize("fixed_y", [-2, 2])
def test_out_of_the_range_numerical(fixed_y: int) -> None:
    def objective(trial: Trial) -> float:
        x = trial.suggest_int("x", -1, 1)
        y = trial.suggest_int("y", -1, 1)
        return x**2 + y**2

    # Numerical parameters may be fixed to an out-of-range value,
    # but a `UserWarning` is raised.
    study = optuna.create_study()
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", optuna.exceptions.ExperimentalWarning)
        study.sampler = PartialFixedSampler(fixed_params={"y": fixed_y},
                                            base_sampler=study.sampler)
    with pytest.warns(UserWarning):
        study.optimize(objective, n_trials=1)
    assert study.trials[0].params["y"] == fixed_y
Example #4: a float fixed for an int distribution is cast to int
import warnings

import optuna
from optuna.samplers import PartialFixedSampler
from optuna.trial import Trial


def test_float_to_int() -> None:
    def objective(trial: Trial) -> float:
        x = trial.suggest_int("x", -10, 10)
        y = trial.suggest_int("y", -10, 10)
        return x**2 + y**2

    fixed_y = 0.5

    # A value fixed for an int distribution is cast to int,
    # even if it is given as a float.
    study = optuna.create_study()
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", optuna.exceptions.ExperimentalWarning)
        study.sampler = PartialFixedSampler(fixed_params={"y": fixed_y},
                                            base_sampler=study.sampler)
    study.optimize(objective, n_trials=1)
    assert study.trials[0].params["y"] == int(fixed_y)
Example #5: fixing a categorical parameter outside its choices raises a ValueError
import warnings
from typing import cast

import optuna
import pytest
from optuna.samplers import PartialFixedSampler
from optuna.trial import Trial


def test_out_of_the_range_categorical() -> None:
    def objective(trial: Trial) -> float:
        x = trial.suggest_int("x", -1, 1)
        y = trial.suggest_categorical("y", [-1, 0, 1])
        y = cast(int, y)
        return x**2 + y**2

    fixed_y = 2

    # Categorical parameters cannot be fixed to a value outside their choices;
    # a `ValueError` is raised.
    study = optuna.create_study()
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", optuna.exceptions.ExperimentalWarning)
        study.sampler = PartialFixedSampler(fixed_params={"y": fixed_y},
                                            base_sampler=study.sampler)
    with pytest.raises(ValueError):
        study.optimize(objective, n_trials=1)
Example #6: fixing a single parameter partway through a study
import warnings
from typing import Callable

import optuna
import pytest
from optuna.samplers import BaseSampler, PartialFixedSampler, RandomSampler, TPESampler
from optuna.trial import Trial


# Parametrization assumed for illustration; any zero-argument sampler factory works.
@pytest.mark.parametrize("sampler_class", [RandomSampler, TPESampler])
def test_partial_fixed_sampling(sampler_class: Callable[[], BaseSampler]) -> None:

    study = optuna.create_study(sampler=sampler_class())

    def objective(trial: Trial) -> float:
        x = trial.suggest_float("x", -1, 1)
        y = trial.suggest_int("y", -1, 1)
        z = trial.suggest_float("z", -1, 1)
        return x + y + z

    # First trial.
    study.optimize(objective, n_trials=1)

    # Second trial: the parameter ``y`` is fixed to 0.
    fixed_params = {"y": 0}
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", optuna.exceptions.ExperimentalWarning)
        study.sampler = PartialFixedSampler(fixed_params, study.sampler)
    study.optimize(objective, n_trials=1)
    trial_params = study.trials[-1].params
    assert trial_params["y"] == fixed_params["y"]
Example #7: unfixed parameters follow the base sampler exactly
import warnings

import optuna
from optuna.samplers import PartialFixedSampler, RandomSampler
from optuna.trial import Trial


def test_fixed_sampling() -> None:
    def objective(trial: Trial) -> float:
        x = trial.suggest_float("x", -10, 10)
        y = trial.suggest_float("y", -10, 10)
        return x**2 + y**2

    study0 = optuna.create_study()
    study0.sampler = RandomSampler(seed=42)
    study0.optimize(objective, n_trials=1)
    x_sampled0 = study0.trials[0].params["x"]

    # Fix parameter ``y`` to 0. With the same seed, ``x`` must match study0's sample.
    study1 = optuna.create_study()
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", optuna.exceptions.ExperimentalWarning)
        study1.sampler = PartialFixedSampler(
            fixed_params={"y": 0}, base_sampler=RandomSampler(seed=42))
    study1.optimize(objective, n_trials=1)

    x_sampled1 = study1.trials[0].params["x"]
    y_sampled1 = study1.trials[0].params["y"]
    assert x_sampled1 == x_sampled0
    assert y_sampled1 == 0
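
The seven test examples above share one core pattern; the following minimal, self-contained sketch distills it (the toy objective is a placeholder):

import warnings

import optuna
from optuna.samplers import PartialFixedSampler


def objective(trial):
    x = trial.suggest_float("x", -1, 1)
    y = trial.suggest_int("y", -1, 1)
    return x**2 + y**2


study = optuna.create_study()
with warnings.catch_warnings():
    # PartialFixedSampler is experimental; constructing it emits an ExperimentalWarning.
    warnings.simplefilter("ignore", optuna.exceptions.ExperimentalWarning)
    # Fix ``y`` at 0; the base sampler keeps sampling ``x``.
    study.sampler = PartialFixedSampler(fixed_params={"y": 0}, base_sampler=study.sampler)
study.optimize(objective, n_trials=10)
assert all(t.params["y"] == 0 for t in study.trials)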
Example #8: a search driver that fixes the best params from a previous study
import json
import os

import joblib
import numpy as np
import optuna
from optuna.samplers import PartialFixedSampler, TPESampler

# c_f, main_utils, get_adapter_datasets_etc, evaluate and objective are
# project-specific helpers assumed to be importable from the surrounding package.


def main(cfg):
    experiment_path = os.path.join(cfg.experiment_path, cfg.experiment_name)
    if cfg.evaluate:
        assert cfg.evaluate in ["target_train_with_labels", "target_val_with_labels"]
        experiment_path = os.path.join(experiment_path, cfg.evaluate_trial)
        (
            framework,
            adapter,
            datasets,
            validator,
            saver,
            _,
            _,
            _,
        ) = get_adapter_datasets_etc(
            cfg, experiment_path, cfg.evaluate_validator, cfg.evaluate_target_domains
        )
        adapter = framework(adapter)
        evaluate(cfg, experiment_path, adapter, datasets, validator, saver)
    else:
        optuna.logging.set_verbosity(optuna.logging.WARNING)
        study_path = os.path.join(experiment_path, "study.pkl")
        plot_path = os.path.join(experiment_path, "plots")
        log_path = os.path.join(experiment_path, "trials.csv")

        if os.path.isdir(experiment_path) and os.path.isfile(study_path):
            study = joblib.load(study_path)
        else:
            c_f.makedir_if_not_there(experiment_path)
            c_f.makedir_if_not_there(plot_path)
            pruner = optuna.pruners.NopPruner()
            study = optuna.create_study(
                direction="maximize",
                pruner=pruner,
                sampler=TPESampler(n_startup_trials=cfg.n_startup_trials),
            )

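        # Optionally reuse the best parameters from a previous ("source") study:
        # PartialFixedSampler fixes them so this study only searches the rest.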
        num_fixed_params = 0
        if cfg.fixed_param_source:
            fp_source_path = cfg.fixed_param_source
            fp_source_best_trial_json = os.path.join(fp_source_path, "best_trial.json")
            if not os.path.isfile(fp_source_best_trial_json):
                raise FileNotFoundError(
                    "Fixed param source needs to be complete to use its best params"
                )
            fp_source_path = os.path.join(fp_source_path, "study.pkl")
            fp_source_study = joblib.load(fp_source_path)
            study.sampler = PartialFixedSampler(
                fp_source_study.best_params, study.sampler
            )
            num_fixed_params = len(fp_source_study.best_params)

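        # Count trials that finished with a value so a resumed study continues
        # from the correct trial index.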
        i = len([st for st in study.trials if st.value is not None])

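        # Reseed the sampler's RNG (e.g., so a restored study does not repeat
        # the previous run's random draws).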
        study.sampler.reseed_rng()

        while i < cfg.num_trials:
            study.optimize(
                lambda trial: objective(
                    cfg, experiment_path, trial, num_fixed_params=num_fixed_params
                ),
                n_trials=1,
                timeout=None,
                callbacks=[
                    main_utils.save_study(study_path),
                    main_utils.plot_visualizations(plot_path),
                    main_utils.save_dataframe(log_path),
                    main_utils.delete_suboptimal_models(experiment_path),
                ],
                gc_after_trial=True,
            )
            if study.trials[-1].value is not None:
                i += 1

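        # Replay the best trial's parameters via FixedTrial until the requested
        # number of successful (non-NaN) reproductions is reached.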
        i = main_utils.num_reproductions_complete(experiment_path)
        print("num_reproduce_complete", i)
        while i < cfg.num_reproduce:
            result = objective(
                cfg,
                experiment_path,
                optuna.trial.FixedTrial(study.best_trial.params),
                i,
                num_fixed_params=num_fixed_params,
            )
            if not np.isnan(result):
                i += 1

        best_json = {
            field: str(getattr(study.best_trial, field))
            for field in study.best_trial._ordered_fields
        }
        with open(os.path.join(experiment_path, "best_trial.json"), "w") as f:
            json.dump(best_json, f, indent=2)
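
Example #8 replays the best trial through optuna.trial.FixedTrial. A minimal sketch of that replay pattern, assuming a toy objective:

import optuna


def objective(trial):
    x = trial.suggest_float("x", -10, 10)
    return x**2


study = optuna.create_study()
study.optimize(objective, n_trials=5)

# FixedTrial feeds predetermined values to every suggest_* call, so the
# objective runs once with the best parameters and no sampling takes place.
best_value = objective(optuna.trial.FixedTrial(study.best_trial.params))
print(best_value)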