Example #1
0
def test_conditional_sample_independent(sampler_class: Callable[[], BaseSampler]) -> None:
    # Regression test for the failure reported in #2734.
    # See https://github.com/optuna/optuna/pull/2734#issuecomment-857649769.

    study = optuna.study.create_study(sampler=sampler_class())
    parent_dist = CategoricalDistribution(choices=["x", "y"])
    child_dist = CategoricalDistribution(choices=["a", "b"])

    # Seed two trials whose child parameter name depends on the parent choice.
    for parent_choice, child_choice in [("x", "a"), ("y", "b")]:
        study.add_trial(
            optuna.create_trial(
                params={"category": parent_choice, parent_choice: child_choice},
                distributions={"category": parent_dist, parent_choice: child_dist},
                value=0.1,
            )
        )

    # Sampling the parent and then the conditionally-named child must not crash.
    new_trial = _create_new_trial(study)
    category = study.sampler.sample_independent(study, new_trial, "category", parent_dist)
    assert category in ["x", "y"]
    sampled = study.sampler.sample_independent(study, new_trial, category, child_dist)
    assert sampled in ["a", "b"]
Example #2
0
def test_trials_dataframe_with_multi_objective_optimization_with_fail_and_pruned(
        attrs: Tuple[str, ...], multi_index: bool) -> None:
    # Neither FAIL nor PRUNED trials carry objective values, so every
    # "values" cell in the resulting dataframe must be missing.
    study = create_study(directions=["minimize", "maximize"])
    for state in (TrialState.FAIL, TrialState.PRUNED):
        study.add_trial(create_trial(state=state))
    df = study.trials_dataframe(attrs=attrs, multi_index=multi_index)

    for row in range(2):
        if multi_index:
            assert df.get("values")[0][row] is None
            assert df.get("values")[1][row] is None
        else:
            assert df.values_0[row] is None
            assert df.values_1[row] is None
Example #3
0
    def after_trial(
        self,
        study,
        trial,
        state,
        values,
    ):
        # Sampler callback run after each trial finishes.
        #
        # While warm starting, finished trials are accumulated into
        # ``self._source_trials``; once ``self._warm_starting_trials`` have
        # been collected, the wrapped sampler is replaced by a CmaEsSampler
        # warm-started from them.  Outside warm starting, the call is simply
        # forwarded to the wrapped sampler.
        if not self._warm_starting:
            return self._sampler.after_trial(study, trial, state, values)

        if len(self._source_trials) < self._warm_starting_trials:
            # During warm starting every finished trial is expected to be pruned.
            assert state == optuna.trial.TrialState.PRUNED

            self._source_trials.append(
                optuna.create_trial(
                    params=trial.params,
                    distributions=trial.distributions,
                    values=values,
                ))
        if len(self._source_trials) == self._warm_starting_trials:
            # Enough source trials collected: switch to the warm-started sampler
            # and leave warm-starting mode.
            self._sampler = optuna.samplers.CmaEsSampler(
                seed=self._seed + 1, source_trials=self._source_trials or None)
            self._warm_starting = False
        else:
            # Still collecting: forward the event to the current sampler.
            return self._sampler.after_trial(study, trial, state, values)
Example #4
0
    def __call__(self, study: "optuna.study.Study", trial: FrozenTrial) -> None:
        """Re-enqueue the failed ``trial`` as a new WAITING trial, up to the retry limit."""
        system_attrs = {"failed_trial": trial.number}

        # Update the new object with the values in the trial.system_attrs.
        # By doing this, if this failed trial is already a retry, the 'failed_trial'
        # value will be the first failed trial number.
        system_attrs.update(trial.system_attrs)

        # Count how many trials in the study already retried the same original trial.
        retries = sum(
            ("failed_trial", system_attrs["failed_trial"]) in t.system_attrs.items()
            for t in study.trials
        )

        # Stop retrying once the configured budget would be exceeded.
        if self._max_retry is not None and retries + 1 > self._max_retry:
            return

        study.add_trial(
            optuna.create_trial(
                state=optuna.trial.TrialState.WAITING,
                params=trial.params,
                distributions=trial.distributions,
                user_attrs=trial.user_attrs,
                system_attrs=system_attrs,
                # Intermediate values are carried over only when configured.
                intermediate_values=(
                    trial.intermediate_values if self._inherit_intermediate_values else None
                ),
            )
        )
Example #5
0
def test_should_raise_exception() -> None:
    # Each keyword combination below is invalid and must be rejected at
    # construction time: x0/sigma0/use_separable_cma conflict with
    # source_trials, and the last uses an unknown restart strategy.
    dummy_source_trials = [create_trial(value=i, state=TrialState.COMPLETE) for i in range(10)]

    invalid_kwargs = [
        {"x0": {"x": 0.1, "y": 0.1}, "source_trials": dummy_source_trials},
        {"sigma0": 0.1, "source_trials": dummy_source_trials},
        {"use_separable_cma": True, "source_trials": dummy_source_trials},
        {"restart_strategy": "invalid-restart-strategy"},
    ]
    for kwargs in invalid_kwargs:
        with pytest.raises(ValueError):
            optuna.samplers.CmaEsSampler(**kwargs)
Example #6
0
    def test_different_distributions(self) -> None:
        # Two trials whose "x0" distributions disagree: only "x1" survives in
        # the intersection, while the union keeps both versions of "x0".
        trial_specs = [
            (
                {
                    "x0": UniformDistribution(low=0, high=10),
                    "x1": UniformDistribution(low=0, high=10),
                },
                {"x0": 0.5, "x1": 0.5},
            ),
            (
                {
                    "x0": UniformDistribution(low=0, high=5),
                    "x1": UniformDistribution(low=0, high=10),
                },
                {"x0": 0.5, "x1": 0.5},
            ),
        ]
        trials = [
            create_trial(state=TrialState.COMPLETE, value=0, distributions=dists, params=values)
            for dists, values in trial_specs
        ]
        search_space = _SearchSpace()
        search_space.update(trials)

        self.assertEqual(len(search_space.intersection), 1)
        self.assertEqual(len(search_space.union), 3)
Example #7
0
def test_plot_slice_log_scale():
    # type: () -> None

    study = create_study()
    study.add_trial(
        create_trial(
            value=0.0,
            params={"x_linear": 1.0, "y_log": 1e-3},
            distributions={
                "x_linear": UniformDistribution(0.0, 3.0),
                "y_log": LogUniformDistribution(1e-5, 1.0),
            },
        )
    )

    # A log-distributed parameter gets a log axis; a linear one does not.
    assert plot_slice(study, params=["y_log"]).layout["xaxis_type"] == "log"
    assert plot_slice(study, params=["x_linear"]).layout["xaxis_type"] is None

    # With every parameter plotted, each subplot axis keeps its own scale.
    multi_figure = plot_slice(study)
    assert multi_figure.layout["xaxis_type"] is None
    assert multi_figure.layout["xaxis2_type"] == "log"
Example #8
0
def test_restore_optimizer_keeps_backward_compatibility() -> None:
    # Old storage format: the whole pickled optimizer lives in one system attr.
    sampler = optuna.samplers.CmaEsSampler()
    serialized = pickle.dumps(CMA(np.zeros(2), sigma=1.3)).hex()

    completed_trials = [
        create_trial(state=TrialState.COMPLETE, value=0.1),
        create_trial(
            state=TrialState.COMPLETE,
            value=0.1,
            system_attrs={"cma:optimizer": serialized, "cma:n_restarts": 1},
        ),
        create_trial(state=TrialState.COMPLETE, value=0.1),
    ]
    restored, n_restarts = sampler._restore_optimizer(completed_trials)
    assert isinstance(restored, CMA)
    assert n_restarts == 1
Example #9
0
def test_add_trial(storage_mode: str) -> None:
    # Adding one finished trial must register it, number it from zero,
    # and make its value the study's best value.
    with StorageSupplier(storage_mode) as storage:
        study = create_study(storage=storage)
        assert len(study.trials) == 0

        study.add_trial(create_trial(value=0.8))
        assert len(study.trials) == 1
        assert study.trials[0].number == 0
        assert study.best_value == 0.8
Example #10
0
def test_restore_optimizer_from_substrings() -> None:
    sampler = optuna.samplers.CmaEsSampler()
    serialized = pickle.dumps(CMA(np.zeros(10), sigma=1.3)).hex()

    # Current storage format: the pickled optimizer is split across
    # several system attrs.
    system_attrs: Dict[str, Any] = _split_optimizer_str(serialized)
    assert len(system_attrs) > 1
    system_attrs["cma:n_restarts"] = 1

    completed_trials = [
        create_trial(state=TrialState.COMPLETE, value=0.1),
        create_trial(state=TrialState.COMPLETE, value=0.1, system_attrs=system_attrs),
        create_trial(state=TrialState.COMPLETE, value=0.1),
    ]
    restored, n_restarts = sampler._restore_optimizer(completed_trials)
    assert isinstance(restored, CMA)
    assert n_restarts == 1
Example #11
0
def test_run_initialized(wandb: mock.MagicMock) -> None:

    wandb.sdk.wandb_run.Run = mock.MagicMock

    n_trials = 10
    wandb_kwargs = {
        "project": "optuna",
        "group": "summary",
        "job_type": "logging",
        "mode": "offline",
        "tags": ["test-tag"],
    }

    # Single-run mode: the run is initialized as soon as the callback is built.
    WeightsAndBiasesCallback(metric_name="mse", wandb_kwargs=wandb_kwargs, as_multirun=False)
    wandb.init.assert_called_once_with(**wandb_kwargs)

    wandbc = WeightsAndBiasesCallback(metric_name="mse", wandb_kwargs=wandb_kwargs, as_multirun=True)
    wandb.run = None

    study = optuna.create_study(direction="minimize")
    wrapped_objective = wandbc.track_in_wandb()(lambda t: 1.0)
    wandb.init.reset_mock()
    wrapped_objective(optuna.create_trial(value=1.0))

    # The decorated objective starts a run when none is active.
    wandb.init.assert_called_once_with(**wandb_kwargs)

    wandb.init.reset_mock()
    study.optimize(_objective_func, n_trials=n_trials, callbacks=[wandbc])

    # Multirun mode: one run per trial, each of which gets finished.
    wandb.init.assert_called_with(**wandb_kwargs)

    assert wandb.init.call_count == n_trials

    wandb.init().finish.assert_called()
    assert wandb.init().finish.call_count == n_trials
Example #12
0
def test_multi_objective_get_observation_pairs(
    int_value: int,
    categorical_value: optuna.distributions.CategoricalChoiceType,
    objective_value: float,
    multivariate: bool,
    constant_liar: bool,
) -> None:
    # Checks `_get_observation_pairs` on a two-objective study: each value pair
    # is reported as (-inf, [obj0, -obj1]) so that both directions
    # ("minimize", "maximize") become minimization internally.
    def objective(trial: optuna.trial.Trial) -> Tuple[float, float]:
        trial.suggest_int("x", int_value, int_value)
        trial.suggest_categorical("y", [categorical_value])
        return objective_value, objective_value

    sampler = TPESampler(seed=0, multivariate=multivariate, constant_liar=constant_liar)
    study = optuna.create_study(directions=["minimize", "maximize"], sampler=sampler)
    study.optimize(objective, n_trials=2)
    # Add a RUNNING trial on top of the two completed ones; the expected
    # outputs below contain only the two completed trials.
    study.add_trial(
        optuna.create_trial(
            state=optuna.trial.TrialState.RUNNING,
            params={"x": int_value, "y": categorical_value},
            distributions={
                "x": optuna.distributions.IntDistribution(int_value, int_value),
                "y": optuna.distributions.CategoricalDistribution([categorical_value]),
            },
        )
    )

    # Int parameter: raw values are reported.
    assert _tpe.sampler._get_observation_pairs(study, ["x"], multivariate, constant_liar) == (
        {"x": [int_value, int_value]},
        [(-float("inf"), [objective_value, -objective_value]) for _ in range(2)],
        None,
    )
    # Categorical parameter: values are reported as choice indices (here 0).
    assert _tpe.sampler._get_observation_pairs(study, ["y"], multivariate, constant_liar) == (
        {"y": [0, 0]},
        [(-float("inf"), [objective_value, -objective_value]) for _ in range(2)],
        None,
    )
    # Multivariate lookup over both parameters.
    assert _tpe.sampler._get_observation_pairs(study, ["x", "y"], True, constant_liar) == (
        {"x": [int_value, int_value], "y": [0, 0]},
        [(-float("inf"), [objective_value, -objective_value]) for _ in range(2)],
        None,
    )
    # Unknown parameter: None placeholders in the univariate case, but empty
    # containers when `multivariate` is enabled.
    assert _tpe.sampler._get_observation_pairs(study, ["z"], multivariate, constant_liar) == (
        (
            {"z": [None, None]},
            [(-float("inf"), [objective_value, -objective_value]) for _ in range(2)],
            None,
        )
        if not multivariate
        else ({"z": []}, [], None)
    )
Example #13
0
def test_is_log_scale():
    # type: () -> None

    study = create_study()
    # First trial only has the linear parameter.
    study.add_trial(
        create_trial(
            value=0.0,
            params={"param_linear": 1.0},
            distributions={"param_linear": UniformDistribution(0.0, 3.0)},
        )
    )
    # Second trial adds a log-distributed parameter alongside it.
    study.add_trial(
        create_trial(
            value=2.0,
            params={"param_linear": 2.0, "param_log": 1e-3},
            distributions={
                "param_linear": UniformDistribution(0.0, 3.0),
                "param_log": LogUniformDistribution(1e-5, 1.0),
            },
        )
    )
    assert _is_log_scale(study.trials, "param_log")
    assert not _is_log_scale(study.trials, "param_linear")
Example #14
0
def restart_from_checkpoint(study, trial):
    # Re-enqueue a trial with the stale trial's parameters so that the saved
    # checkpoint (if any) can be picked up via the "checkpoint_path" user attr.
    checkpoint_path = f"pytorch_checkpoint/{trial.number}/model.pt"
    user_attrs = copy.deepcopy(trial.user_attrs)
    if os.path.exists(checkpoint_path):
        user_attrs["checkpoint_path"] = checkpoint_path

    retry_trial = optuna.create_trial(
        state=optuna.trial.TrialState.WAITING,
        params=trial.params,
        distributions=trial.distributions,
        user_attrs=user_attrs,
        system_attrs=trial.system_attrs,
    )
    study.add_trial(retry_trial)
Example #15
0
    def __call__(self, study: "optuna.study.Study", trial: FrozenTrial) -> None:
        """Re-enqueue the failed ``trial`` as a new WAITING trial, tracking the retry chain.

        If the trial is itself a retry, the ``failed_trial`` and
        ``retry_history`` entries from its system attrs take precedence, so
        the chain always points back to the original failed trial.
        """
        system_attrs = {"failed_trial": trial.number, "retry_history": [], **trial.system_attrs}
        # Copy the history before appending: the list obtained from
        # trial.system_attrs is shared with the failed trial's own attributes,
        # and appending in place would silently mutate that trial's state.
        retry_history = list(system_attrs["retry_history"])  # type: ignore
        retry_history.append(trial.number)
        system_attrs["retry_history"] = retry_history
        # Stop once the chain has already used up the retry budget.
        if self._max_retry is not None and self._max_retry < len(retry_history):
            return

        study.add_trial(
            optuna.create_trial(
                state=optuna.trial.TrialState.WAITING,
                params=trial.params,
                distributions=trial.distributions,
                user_attrs=trial.user_attrs,
                system_attrs=system_attrs,
                # Intermediate values are carried over only when configured.
                intermediate_values=(
                    trial.intermediate_values if self._inherit_intermediate_values else None
                ),
            )
        )
Example #16
0
def test_color_map(direction: str) -> None:
    """The Pareto-front colormap follows trial.number and is never reversed."""
    study = create_study(directions=[direction, direction])
    for i in range(3):
        study.add_trial(
            create_trial(
                values=[float(i), float(i)],
                params={"param_a": 1.0, "param_b": 2.0},
                distributions={
                    "param_a": FloatDistribution(0.0, 3.0),
                    "param_b": FloatDistribution(0.0, 3.0),
                },
            )
        )

    # Since `plot_pareto_front`'s colormap depends on only trial.number,
    # `reversescale` is not set on the marker.
    # NOTE: the original asserted the misspelled key "reversecale", which can
    # never be a plotly marker property, so that check passed vacuously.
    marker = plot_pareto_front(study).data[0]["marker"]
    assert COLOR_SCALE == [v[1] for v in marker["colorscale"]]
    assert "reversescale" not in marker
Example #17
0
def test_add_trials(storage_mode: str) -> None:
    # add_trials([]) is a no-op; otherwise trials are appended in order and
    # renumbered from zero, including trials copied from another study.
    with StorageSupplier(storage_mode) as storage:
        study = create_study(storage=storage)
        assert len(study.trials) == 0

        study.add_trials([])
        assert len(study.trials) == 0

        study.add_trials([create_trial(value=i) for i in range(3)])
        assert len(study.trials) == 3
        for number, trial in enumerate(study.trials):
            assert trial.number == number
            assert trial.value == number

        other_study = create_study(storage=storage)
        other_study.add_trials(study.trials)
        assert len(other_study.trials) == 3
        for number, trial in enumerate(other_study.trials):
            assert trial.number == number
            assert trial.value == number
Example #18
0
def test_plot_parallel_coordinate():
    # type: () -> None
    # End-to-end check of `plot_parallel_coordinate`: dimension labels,
    # ranges and per-trial values for numeric and categorical parameters,
    # parameter selection, bad parameter names, and failed-trial filtering.

    # Test with no trial.
    study = create_study()
    figure = plot_parallel_coordinate(study)
    assert len(figure.data) == 0

    study = prepare_study_with_trials(with_c_d=False)

    # Test with a trial.
    figure = plot_parallel_coordinate(study)
    assert len(figure.data[0]["dimensions"]) == 3
    assert figure.data[0]["dimensions"][0]["label"] == "Objective Value"
    assert figure.data[0]["dimensions"][0]["range"] == (0.0, 2.0)
    assert figure.data[0]["dimensions"][0]["values"] == (0.0, 2.0, 1.0)
    assert figure.data[0]["dimensions"][1]["label"] == "param_a"
    assert figure.data[0]["dimensions"][1]["range"] == (1.0, 2.5)
    assert figure.data[0]["dimensions"][1]["values"] == (1.0, 2.5)
    assert figure.data[0]["dimensions"][2]["label"] == "param_b"
    assert figure.data[0]["dimensions"][2]["range"] == (0.0, 2.0)
    assert figure.data[0]["dimensions"][2]["values"] == (2.0, 0.0, 1.0)

    # Test with a trial to select parameter.
    figure = plot_parallel_coordinate(study, params=["param_a"])
    assert len(figure.data[0]["dimensions"]) == 2
    assert figure.data[0]["dimensions"][0]["label"] == "Objective Value"
    assert figure.data[0]["dimensions"][0]["range"] == (0.0, 2.0)
    assert figure.data[0]["dimensions"][0]["values"] == (0.0, 2.0, 1.0)
    assert figure.data[0]["dimensions"][1]["label"] == "param_a"
    assert figure.data[0]["dimensions"][1]["range"] == (1.0, 2.5)
    assert figure.data[0]["dimensions"][1]["values"] == (1.0, 2.5)

    # Test with wrong params that do not exist in trials
    with pytest.raises(ValueError, match="Parameter optuna does not exist in your study."):
        plot_parallel_coordinate(study, params=["optuna", "optuna"])

    # Ignore failed trials.
    def fail_objective(_):
        # type: (Trial) -> float

        raise ValueError

    study = create_study()
    study.optimize(fail_objective, n_trials=1, catch=(ValueError,))
    figure = plot_parallel_coordinate(study)
    assert len(figure.data) == 0

    # Test with categorical params that cannot be converted to numeral.
    # Categorical choices are encoded as indices, with the original labels
    # exposed through the "ticktext" of each dimension.
    study_categorical_params = create_study()
    study_categorical_params.add_trial(
        create_trial(
            value=0.0,
            params={"category_a": "preferred", "category_b": "net",},
            distributions={
                "category_a": CategoricalDistribution(("preferred", "opt")),
                "category_b": CategoricalDistribution(("net", "una")),
            },
        )
    )
    study_categorical_params.add_trial(
        create_trial(
            value=2.0,
            params={"category_a": "opt", "category_b": "una",},
            distributions={
                "category_a": CategoricalDistribution(("preferred", "opt")),
                "category_b": CategoricalDistribution(("net", "una")),
            },
        )
    )
    figure = plot_parallel_coordinate(study_categorical_params)
    assert len(figure.data[0]["dimensions"]) == 3
    assert figure.data[0]["dimensions"][0]["label"] == "Objective Value"
    assert figure.data[0]["dimensions"][0]["range"] == (0.0, 2.0)
    assert figure.data[0]["dimensions"][0]["values"] == (0.0, 2.0)
    assert figure.data[0]["dimensions"][1]["label"] == "category_a"
    assert figure.data[0]["dimensions"][1]["range"] == (0, 1)
    assert figure.data[0]["dimensions"][1]["values"] == (0, 1)
    assert figure.data[0]["dimensions"][1]["ticktext"] == (["preferred", 0], ["opt", 1])
    assert figure.data[0]["dimensions"][2]["label"] == "category_b"
    assert figure.data[0]["dimensions"][2]["range"] == (0, 1)
    assert figure.data[0]["dimensions"][2]["values"] == (0, 1)
    assert figure.data[0]["dimensions"][2]["ticktext"] == (["net", 0], ["una", 1])
Example #19
0
# Load existing screenshots.
# Search space for the character-creation controls: categorical choices for
# the discrete options, and a 0-12 integer range for each slider.
distributions = {
    "Body Type": CategoricalDistribution(choices=("Male", "Female")),
    "Head": CategoricalDistribution(choices=list(range(20))),
    "Brow Height": IntUniformDistribution(0, 12),
    "Brow Depth": IntUniformDistribution(0, 12),
    "Eyeline": IntUniformDistribution(0, 12),
    "Eye Spacing": IntUniformDistribution(0, 12),
    "Nose Width": IntUniformDistribution(0, 12),
    "Nose Height": IntUniformDistribution(0, 12),
    "Nose Bridge": IntUniformDistribution(0, 12),
    "Mouth Height": IntUniformDistribution(0, 12),
    "Cheeks": IntUniformDistribution(0, 12),
    "Jawline": IntUniformDistribution(0, 12),
}

# Seed the study with one pre-evaluated trial per saved screenshot: the trial
# value is the face distance between that screenshot and the target face.
for filename, screenshot in tqdm(screenshot_folder.items(), ascii=True):
    params = filename_to_params(filename)
    encoding = get_face_encoding(screenshot)
    distance = get_face_distance(encoding, target_encoding)
    trial = optuna.create_trial(params=params,
                                distributions=distributions,
                                value=distance)
    study.add_trial(trial)

# Continue optimizing from the seeded trials.
study.optimize(objective, n_trials=10)

print()
print("Best settings:")
print_params(study.best_params)