Example 1
        optuna.multi_objective.samplers.NSGAIIMultiObjectiveSampler,
    ],
)


@parametrize_sampler
@pytest.mark.parametrize(
    "distribution",
    [
        UniformDistribution(-1.0, 1.0),
        UniformDistribution(0.0, 1.0),
        UniformDistribution(-1.0, 0.0),
        LogUniformDistribution(1e-7, 1.0),
        DiscreteUniformDistribution(-10, 10, 0.1),
        DiscreteUniformDistribution(-10.2, 10.2, 0.1),
        IntUniformDistribution(-10, 10),
        IntUniformDistribution(0, 10),
        IntUniformDistribution(-10, 0),
        IntUniformDistribution(-10, 10, 2),
        IntUniformDistribution(0, 10, 2),
        IntUniformDistribution(-10, 0, 2),
        CategoricalDistribution((1, 2, 3)),
        CategoricalDistribution(("a", "b", "c")),
        CategoricalDistribution((1, "a")),
    ],
)
def test_sample_independent(sampler_class: Callable[[],
                                                    BaseMultiObjectiveSampler],
                            distribution: UniformDistribution) -> None:
    study = optuna.multi_objective.study.create_study(["minimize", "maximize"],
                                                      sampler=sampler_class())
Example 2
    def suggest_int(self,
                    name: str,
                    low: int,
                    high: int,
                    step: int = 1,
                    log: bool = False) -> int:
        """Suggest a value for the integer parameter.

        The value is sampled from the integers in :math:`[\\mathsf{low}, \\mathsf{high}]`.

        Example:

            Suggest the number of trees in `RandomForestClassifier <https://scikit-learn.org/
            stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html>`_.

            .. testcode::

                import numpy as np
                from sklearn.datasets import load_iris
                from sklearn.ensemble import RandomForestClassifier
                from sklearn.model_selection import train_test_split

                import optuna

                X, y = load_iris(return_X_y=True)
                X_train, X_valid, y_train, y_valid = train_test_split(X, y)

                def objective(trial):
                    n_estimators = trial.suggest_int('n_estimators', 50, 400)
                    clf = RandomForestClassifier(n_estimators=n_estimators, random_state=0)
                    clf.fit(X_train, y_train)
                    return clf.score(X_valid, y_valid)

                study = optuna.create_study(direction='maximize')
                study.optimize(objective, n_trials=3)

        Args:
            name:
                A parameter name.
            low:
                Lower endpoint of the range of suggested values. ``low`` is included in the range.
            high:
                Upper endpoint of the range of suggested values. ``high`` is included in the range.
            step:
                A step of discretization.

                .. note::
                    Note that :math:`\\mathsf{high}` is modified if the range is not divisible by
                    :math:`\\mathsf{step}`. Please check the warning messages to find the changed
                    values.

                .. note::
                    The method returns one of the values in the sequence
                    :math:`\\mathsf{low}, \\mathsf{low} + \\mathsf{step}, \\mathsf{low} + 2 *
                    \\mathsf{step}, \\dots, \\mathsf{low} + k * \\mathsf{step} \\le
                    \\mathsf{high}`, where :math:`k` denotes an integer.

                .. note::
                    The ``step != 1`` and ``log`` arguments cannot be used at the same time.
                    To set the ``step`` argument :math:`\\mathsf{step} \\ge 2`, set the
                    ``log`` argument to ``False``.
            log:
                A flag to sample the value from the log domain or not.

                .. note::
                    If ``log`` is true, the range of suggested values is first divided into
                    grid points of width 1. The range is then converted to a log domain,
                    from which a value is uniformly sampled. The sampled value is converted
                    back to the original domain and rounded to the nearest grid point
                    defined in the first step, and that grid point becomes the suggested
                    value. For example, if `low = 2` and `high = 8`, the range of suggested
                    values is `[2, 3, 4, 5, 6, 7, 8]`, and lower values tend to be sampled
                    more often than higher values.

                .. note::
                    The ``step != 1`` and ``log`` arguments cannot be used at the same time.
                    To set the ``log`` argument to ``True``, set the ``step`` argument to 1.

        Raises:
            :exc:`ValueError`:
                If ``step != 1`` and ``log = True`` are specified.
        """

        if step != 1:
            if log:
                raise ValueError(
                    "The parameter `step != 1` is not supported when `log` is True."
                    "The specified `step` is {}.".format(step))
            else:
                distribution = IntUniformDistribution(
                    low=low, high=high, step=step
                )  # type: Union[IntUniformDistribution, IntLogUniformDistribution]
        else:
            if log:
                distribution = IntLogUniformDistribution(low=low, high=high)
            else:
                distribution = IntUniformDistribution(low=low,
                                                      high=high,
                                                      step=step)

        self._check_distribution(name, distribution)

        return int(self._suggest(name, distribution))
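A minimal usage sketch (not part of the original source) illustrating the constraint documented above: ``step`` controls the discretization grid, ``log`` switches to log-domain sampling, and combining ``step != 1`` with ``log=True`` raises ``ValueError``.

import optuna


def objective(trial):
    # Values 0, 2, 4, ..., 10 (step-discretized integer parameter).
    x = trial.suggest_int("x", 0, 10, step=2)
    # Log-domain sampling; lower values are sampled more often.
    y = trial.suggest_int("y", 1, 1024, log=True)
    # trial.suggest_int("z", 1, 100, step=2, log=True)  # would raise ValueError
    return x + y


study = optuna.create_study()
study.optimize(objective, n_trials=5)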
Example 3
def test_intersection_search_space() -> None:
    search_space = optuna.samplers.IntersectionSearchSpace()
    study = optuna.create_study()

    # No trial.
    assert search_space.calculate(study) == {}
    assert search_space.calculate(
        study) == optuna.samplers.intersection_search_space(study)

    # First trial.
    study.optimize(
        lambda t: t.suggest_uniform("y", -3, 3) + t.suggest_int("x", 0, 10),
        n_trials=1)
    assert search_space.calculate(study) == {
        "x": IntUniformDistribution(low=0, high=10),
        "y": UniformDistribution(low=-3, high=3),
    }
    assert search_space.calculate(
        study) == optuna.samplers.intersection_search_space(study)

    # Returning sorted `OrderedDict` instead of `dict`.
    assert search_space.calculate(study, ordered_dict=True) == OrderedDict([
        ("x", IntUniformDistribution(low=0, high=10)),
        ("y", UniformDistribution(low=-3, high=3)),
    ])
    assert search_space.calculate(
        study, ordered_dict=True) == optuna.samplers.intersection_search_space(
            study, ordered_dict=True)

    # Second trial (only 'y' parameter is suggested in this trial).
    study.optimize(lambda t: t.suggest_uniform("y", -3, 3), n_trials=1)
    assert search_space.calculate(study) == {
        "y": UniformDistribution(low=-3, high=3)
    }
    assert search_space.calculate(
        study) == optuna.samplers.intersection_search_space(study)

    # Failed or pruned trials are not considered in the calculation of
    # an intersection search space.
    def objective(trial: Trial, exception: Exception) -> float:

        trial.suggest_uniform("z", 0, 1)
        raise exception

    study.optimize(lambda t: objective(t, RuntimeError()),
                   n_trials=1,
                   catch=(RuntimeError, ))
    study.optimize(lambda t: objective(t, optuna.TrialPruned()), n_trials=1)
    assert search_space.calculate(study) == {
        "y": UniformDistribution(low=-3, high=3)
    }
    assert search_space.calculate(
        study) == optuna.samplers.intersection_search_space(study)

    # If two parameters have the same name but different distributions,
    # those are regarded as different parameters.
    study.optimize(lambda t: t.suggest_uniform("y", -1, 1), n_trials=1)
    assert search_space.calculate(study) == {}
    assert search_space.calculate(
        study) == optuna.samplers.intersection_search_space(study)

    # The search space remains empty once it is empty.
    study.optimize(
        lambda t: t.suggest_uniform("y", -3, 3) + t.suggest_int("x", 0, 10),
        n_trials=1)
    assert search_space.calculate(study) == {}
    assert search_space.calculate(
        study) == optuna.samplers.intersection_search_space(study)
Example 4
    def to_optuna(self):
        """Return the equivalent Optuna distribution."""
        if self.prior != 'log':
            return IntUniformDistribution(low=self.low, high=self.high)
        else:
            return IntLogUniformDistribution(low=self.low, high=self.high)
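The method above expects an object exposing `low`, `high`, and `prior` attributes. A minimal self-contained sketch (the class name and constructor are illustrative, not from the original) showing how such a range maps onto the two Optuna distributions:

from optuna.distributions import IntLogUniformDistribution
from optuna.distributions import IntUniformDistribution


class IntRange:
    """Hypothetical integer search range with an optional 'log' prior."""

    def __init__(self, low, high, prior='uniform'):
        self.low = low
        self.high = high
        self.prior = prior

    def to_optuna(self):
        """Return the equivalent Optuna distribution."""
        if self.prior != 'log':
            return IntUniformDistribution(low=self.low, high=self.high)
        return IntLogUniformDistribution(low=self.low, high=self.high)


# Usage: a log-scaled range becomes an IntLogUniformDistribution.
print(IntRange(1, 1024, prior='log').to_optuna())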
Example 5
@mark.parametrize(
    "input, expected",
    [
        (
            {
                "type": "categorical",
                "choices": [1, 2, 3]
            },
            CategoricalDistribution([1, 2, 3]),
        ),
        ({
            "type": "int",
            "low": 0,
            "high": 10
        }, IntUniformDistribution(0, 10)),
        (
            {
                "type": "int",
                "low": 0,
                "high": 10,
                "step": 2
            },
            IntUniformDistribution(0, 10, step=2),
        ),
        ({
            "type": "int",
            "low": 0,
            "high": 5
        }, IntUniformDistribution(0, 5)),
        (
Example 6
from typing import Any

import pytest

from optuna._transform import _SearchSpaceTransform
from optuna.distributions import BaseDistribution
from optuna.distributions import CategoricalDistribution
from optuna.distributions import DiscreteUniformDistribution
from optuna.distributions import IntLogUniformDistribution
from optuna.distributions import IntUniformDistribution
from optuna.distributions import LogUniformDistribution
from optuna.distributions import UniformDistribution


@pytest.mark.parametrize(
    "param,distribution",
    [
        (0, IntUniformDistribution(0, 3)),
        (1, IntLogUniformDistribution(1, 10)),
        (2, IntUniformDistribution(0, 10, step=2)),
        (0.0, UniformDistribution(0, 3)),
        (1.0, LogUniformDistribution(1, 10)),
        (0.2, DiscreteUniformDistribution(0, 1, q=0.2)),
        ("foo", CategoricalDistribution(["foo"])),
        ("bar", CategoricalDistribution(["foo", "bar", "baz"])),
    ],
)
def test_search_space_transform_shapes_dtypes(param: Any, distribution: BaseDistribution) -> None:
    trans = _SearchSpaceTransform({"x0": distribution})
    trans_params = trans.transform({"x0": param})

    if isinstance(distribution, CategoricalDistribution):
        expected_bounds_shape = (len(distribution.choices), 2)
Example 7
def test_group_decomposed_search_space() -> None:
    search_space = _GroupDecomposedSearchSpace()
    study = create_study()

    # No trial.
    assert search_space.calculate(study).search_spaces == []

    # A single parameter.
    study.optimize(lambda t: t.suggest_int("x", 0, 10), n_trials=1)
    assert search_space.calculate(study).search_spaces == [{
        "x":
        IntUniformDistribution(low=0, high=10)
    }]

    # Disjoint parameters.
    study.optimize(
        lambda t: t.suggest_int("y", 0, 10) + t.suggest_float("z", -3, 3),
        n_trials=1)
    assert search_space.calculate(study).search_spaces == [
        {
            "x": IntUniformDistribution(low=0, high=10)
        },
        {
            "y": IntUniformDistribution(low=0, high=10),
            "z": UniformDistribution(low=-3, high=3),
        },
    ]

    # Parameters which include one of the search spaces in the group.
    study.optimize(
        lambda t: t.suggest_int("y", 0, 10)
        + t.suggest_float("z", -3, 3)
        + t.suggest_float("u", 1e-2, 1e2, log=True)
        + bool(t.suggest_categorical("v", ["A", "B", "C"])),
        n_trials=1,
    )
    assert search_space.calculate(study).search_spaces == [
        {
            "x": IntUniformDistribution(low=0, high=10)
        },
        {
            "z": UniformDistribution(low=-3, high=3),
            "y": IntUniformDistribution(low=0, high=10),
        },
        {
            "u": LogUniformDistribution(low=1e-2, high=1e2),
            "v": CategoricalDistribution(choices=["A", "B", "C"]),
        },
    ]

    # A parameter which is included in one of the search spaces in the group.
    study.optimize(lambda t: t.suggest_float("u", 1e-2, 1e2, log=True),
                   n_trials=1)
    assert search_space.calculate(study).search_spaces == [
        {
            "x": IntUniformDistribution(low=0, high=10)
        },
        {
            "y": IntUniformDistribution(low=0, high=10),
            "z": UniformDistribution(low=-3, high=3),
        },
        {
            "u": LogUniformDistribution(low=1e-2, high=1e2)
        },
        {
            "v": CategoricalDistribution(choices=["A", "B", "C"])
        },
    ]

    # Parameters whose intersection with one of the search spaces in the group is not empty.
    study.optimize(
        lambda t: t.suggest_int("y", 0, 10) + t.suggest_int("w", 2, 8, log=True),
        n_trials=1,
    )
    assert search_space.calculate(study).search_spaces == [
        {
            "x": IntUniformDistribution(low=0, high=10)
        },
        {
            "y": IntUniformDistribution(low=0, high=10)
        },
        {
            "z": UniformDistribution(low=-3, high=3)
        },
        {
            "u": LogUniformDistribution(low=1e-2, high=1e2)
        },
        {
            "v": CategoricalDistribution(choices=["A", "B", "C"])
        },
        {
            "w": IntLogUniformDistribution(low=2, high=8)
        },
    ]

    search_space = _GroupDecomposedSearchSpace()
    study = create_study()

    # Failed or pruned trials are not considered in the calculation of
    # an intersection search space.
    def objective(trial: Trial, exception: Exception) -> float:

        trial.suggest_float("a", 0, 1)
        raise exception

    study.optimize(lambda t: objective(t, RuntimeError()),
                   n_trials=1,
                   catch=(RuntimeError, ))
    study.optimize(lambda t: objective(t, TrialPruned()), n_trials=1)
    assert search_space.calculate(study).search_spaces == []

    # If two parameters have the same name but different distributions,
    # the first one takes priority.
    study.optimize(lambda t: t.suggest_float("a", -1, 1), n_trials=1)
    study.optimize(lambda t: t.suggest_float("a", 0, 1), n_trials=1)
    assert search_space.calculate(study).search_spaces == [{
        "a":
        UniformDistribution(low=-1, high=1)
    }]
Example 8
def test_search_space_group() -> None:
    search_space_group = _SearchSpaceGroup()

    # No search space.
    assert search_space_group.search_spaces == []

    # No distributions.
    search_space_group.add_distributions({})
    assert search_space_group.search_spaces == []

    # Add a single distribution.
    search_space_group.add_distributions(
        {"x": IntUniformDistribution(low=0, high=10)})
    assert search_space_group.search_spaces == [{
        "x":
        IntUniformDistribution(low=0, high=10)
    }]

    # Add the same single distribution.
    search_space_group.add_distributions(
        {"x": IntUniformDistribution(low=0, high=10)})
    assert search_space_group.search_spaces == [{
        "x":
        IntUniformDistribution(low=0, high=10)
    }]

    # Add disjoint distributions.
    search_space_group.add_distributions({
        "y":
        IntUniformDistribution(low=0, high=10),
        "z":
        UniformDistribution(low=-3, high=3),
    })
    assert search_space_group.search_spaces == [
        {
            "x": IntUniformDistribution(low=0, high=10)
        },
        {
            "y": IntUniformDistribution(low=0, high=10),
            "z": UniformDistribution(low=-3, high=3),
        },
    ]

    # Add distributions which include one of the search spaces in the group.
    search_space_group.add_distributions({
        "y":
        IntUniformDistribution(low=0, high=10),
        "z":
        UniformDistribution(low=-3, high=3),
        "u":
        LogUniformDistribution(low=1e-2, high=1e2),
        "v":
        CategoricalDistribution(choices=["A", "B", "C"]),
    })
    assert search_space_group.search_spaces == [
        {
            "x": IntUniformDistribution(low=0, high=10)
        },
        {
            "y": IntUniformDistribution(low=0, high=10),
            "z": UniformDistribution(low=-3, high=3),
        },
        {
            "u": LogUniformDistribution(low=1e-2, high=1e2),
            "v": CategoricalDistribution(choices=["A", "B", "C"]),
        },
    ]

    # Add a distribution which is included in one of the search spaces in the group.
    search_space_group.add_distributions(
        {"u": LogUniformDistribution(low=1e-2, high=1e2)})
    assert search_space_group.search_spaces == [
        {
            "x": IntUniformDistribution(low=0, high=10)
        },
        {
            "y": IntUniformDistribution(low=0, high=10),
            "z": UniformDistribution(low=-3, high=3),
        },
        {
            "u": LogUniformDistribution(low=1e-2, high=1e2)
        },
        {
            "v": CategoricalDistribution(choices=["A", "B", "C"])
        },
    ]

    # Add distributions whose intersection with one of the search spaces in the group is not empty.
    search_space_group.add_distributions({
        "y":
        IntUniformDistribution(low=0, high=10),
        "w":
        IntLogUniformDistribution(low=2, high=8),
    })
    assert search_space_group.search_spaces == [
        {
            "x": IntUniformDistribution(low=0, high=10)
        },
        {
            "y": IntUniformDistribution(low=0, high=10)
        },
        {
            "z": UniformDistribution(low=-3, high=3)
        },
        {
            "u": LogUniformDistribution(low=1e-2, high=1e2)
        },
        {
            "v": CategoricalDistribution(choices=["A", "B", "C"])
        },
        {
            "w": IntLogUniformDistribution(low=2, high=8)
        },
    ]

    # Add distributions which include some of the search spaces in the group.
    search_space_group.add_distributions({
        "y":
        IntUniformDistribution(low=0, high=10),
        "w":
        IntLogUniformDistribution(low=2, high=8),
        "t":
        UniformDistribution(low=10, high=100),
    })
    assert search_space_group.search_spaces == [
        {
            "x": IntUniformDistribution(low=0, high=10)
        },
        {
            "y": IntUniformDistribution(low=0, high=10)
        },
        {
            "z": UniformDistribution(low=-3, high=3)
        },
        {
            "u": LogUniformDistribution(low=1e-2, high=1e2)
        },
        {
            "v": CategoricalDistribution(choices=["A", "B", "C"])
        },
        {
            "w": IntLogUniformDistribution(low=2, high=8)
        },
        {
            "t": UniformDistribution(low=10, high=100)
        },
    ]
Example 9
def main():
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawTextHelpFormatter,
        prog='%s %s' % (__script_name__, __version__),
        description='Optimize parameter values of a game agent using optuna framework.',
        epilog='%(prog)s')
    parser.add_argument('--engine', required=True,
                        help='Engine filename or engine path and filename.')
    parser.add_argument('--resign-movecount', required=False,
                        help='Number of consecutive moves before the game is adjudicated as a loss.\n'
                             'This should be used together with --resign-score option. Example:\n'
                             '--resign-movecount 10 --resign-score 700\n'
                             'Will terminate the game when there are 10 successive scores of -700 or worse.')
    parser.add_argument('--resign-score', required=False,
                        help='Score in centipawns at which the game is considered resignable.\n'
                             'This should be used together with --resign-movecount option.')
    parser.add_argument('--trials', required=False, type=int,
                        help='Trials to try, default=1000.',
                        default=1000)
    parser.add_argument('--concurrency', required=False, type=int,
                        help='Number of game matches to run concurrently, default=1.',
                        default=1)
    parser.add_argument('--games-per-trial', required=False, type=int,
                        help='Number of games per trial, default=32.\n'
                        'This should be an even number.', default=32)
    parser.add_argument('--study-name', required=False, type=str,
                        default='default_study_name',
                        help='The name of study. This can be used to resume\n'
                             'study sessions, default=default_study_name.')
    parser.add_argument('--base-time-sec', required=False, type=int,
                        help='Base time in sec for time control, default=5.',
                        default=5)
    parser.add_argument('--inc-time-sec', required=False, type=float,
                        help='Increment time in sec for time control, default=0.05.',
                        default=0.05)
    parser.add_argument('--depth', required=False, type=int,
                        help='The maximum search depth that the engine is'
                             ' allowed, default=1000.\n'
                             'Example:\n'
                             'python tuner.py --depth 6 ...\n'
                             'If depth is high, say 24, and you want this depth\n'
                             'to always be respected, increase the base time'
                             ' control.\n'
                             'tuner.py --depth 24 --base-time-sec 300 ...',
                        default=1000)
    parser.add_argument('--nodes', required=False, type=int,
                        help='The maximum search nodes that the engine is'
                             ' allowed.\n'
                             'Example:\n'
                             'python tuner.py --nodes 1000 ...\n'
                             'Time and depth control will not be followed.')
    parser.add_argument('--opening-file', required=True, type=str,
                        help='Start opening filename in pgn, fen or epd format.\n'
                             'If match manager is cutechess, you can use pgn, fen\n'
                             'or epd format. The format is hard-coded currently.\n'
                             'You have to modify the code.')
    parser.add_argument('--opening-format', required=False, type=str,
                        help='Can be pgn or epd for the cutechess match manager,\n'
                             'default is pgn. For duel.py there is no need as it will use epd or fen.',
                        default='pgn')
    parser.add_argument('--variant', required=False, type=str,
                        help='Game variant, default=normal.', default='normal')
    parser.add_argument('--pgn-output', required=False, type=str,
                        help='Output pgn filename, default=optuna_games.pgn.',
                        default='optuna_games.pgn')
    parser.add_argument('--plot', action='store_true', help='A flag to output plots in png.')
    parser.add_argument('--initial-best-value', required=False, type=float,
                        help='The initial best value for the initial best\n'
                             'parameter values, default=0.5.', default=0.5)
    parser.add_argument('--save-plots-every-trial', required=False, type=int,
                        help='Save plots every n trials, default=10.',
                        default=10)
    parser.add_argument('--fix-base-param', action='store_true',
                        help='A flag to fix the parameters of the base engine.\n'
                             'It will use the init or default parameter values.')
    parser.add_argument('--match-manager', required=False, type=str,
                        help='The application that handles the engine match,'
                             ' default=cutechess.',
                        default='cutechess')
    parser.add_argument('--match-manager-path', required=True,
                        help='Match manager path and/or filename. Example:\n'
                             'cutechess:\n'
                             '--match-manager-path c:/chess/tourney_manager/cutechess/cutechess-cli.exe\n'
                             'duel.py for xboard engines:\n'
                             '--match-manager-path python c:/chess/tourney_manager/duel/duel.py\n'
                             'or\n'
                             '--match-manager-path c:/python3/python c:/chess/tourney_manager/duel/duel.py')
    parser.add_argument('--protocol', required=False, type=str,
                        help='The protocol that the engine supports, can be'
                             ' uci or cecp, default=uci.',
                        default='uci')
    parser.add_argument('--sampler', required=False, nargs='*', action='append',
                        metavar=('name=', 'option_name='),
                        help='The sampler to be used in the study, default name=tpe.\n'
                             'name can be tpe or cmaes or skopt, examples:\n'
                             '--sampler name=tpe ei_samples=50 ...\n'
                             '  default ei_samples=24\n'
                             '--sampler name=tpe multivariate=true ...\n'
                             '  default multivariate is false.\n'
                             '--sampler name=cmaes ...\n'
                             '--sampler name=skopt acquisition_function=LCB ...\n'
                             '  default acquisition_function=gp_hedge\n'
                             '  Can be LCB or EI or PI or gp_hedge\n'
                             '  Example to explore, with LCB and kappa, high kappa would explore, low would exploit:\n'
                             '  --sampler name=skopt acquisition_function=LCB kappa=10000\n'
                             '  Example to exploit, with EI or PI and xi, high xi would explore, low would exploit:\n'
                             '  --sampler name=skopt acquisition_function=EI xi=0.0001\n'
                             '  Note: negative xi does not work with PI, but will work with EI.\n'
                             '  Ref.: https://scikit-optimize.github.io/stable/auto_examples/exploration-vs-exploitation.html#sphx-glr-auto-examples-exploration-vs-exploitation-py\n'
                             '  skopt has base_estimator options namely: GP, RF, ET and GBRT, default is GP.\n'
                             '  GP=Gaussian Process, RF=Random Forest, ET=Extra Tree, GBRT=Gradient Boosted Regressor Tree.\n'
                             '  Example:\n'
                             '  --sampler name=skopt base_estimator=GBRT acquisition_function=EI ...\n')
    parser.add_argument('--threshold-pruner', required=False, nargs='*', action='append',
                        metavar=('result=', 'games='),
                        help='A trial pruner used to prune or stop unpromising'
                             ' trials.\n'
                             'Example:\n'
                             'tuner.py --threshold-pruner result=0.45 games=50 interval=1 ...\n'
                             'Assuming games per trial is 100, after 50 games, check\n'
                             'the score of the match, if this is below 0.45, then\n'
                             'prune the trial or stop the engine match. Get new param\n'
                             'from optimizer and start a new trial.\n'
                             'Default values:\n'
                             'result=0.25, games=games_per_trial/2, interval=1\n'
                             'Example:\n'
                             'tuner.py --threshold-pruner ...',
                        default=None)
    parser.add_argument('--input-param', required=True, type=str,
                        help='The parameters that will be optimized.\n'
                             'Example 1 with 1 parameter:\n'
                             '--input-param \"{\'pawn\': {\'default\': 92,'
                             ' \'min\': 90, \'max\': 120, \'step\': 2}}\"\n'
                             'Example 2 with 2 parameters:\n'
                             '--input-param \"{\'pawn\': {\'default\': 92,'
                             ' \'min\': 90, \'max\': 120, \'step\': 2},'
                             ' \'knight\': {\'default\': 300, \'min\': 250,'
                             ' \'max\': 350, \'step\': 2}}\"\n'
                             'Example 3 with 1 parameter but float value:\n'
                             '--input-param \"{\'CPuct\': {\'default\': 0.5,'
                             ' \'min\': 0.1, \'max\': 3.0, \'step\': 0.05, \'type\': \'float\'}}\"'
                        )
    parser.add_argument('-v', '--version', action='version', version=f'{__version__}')
    parser.add_argument('--common-param', required=False, type=str,
                        help='The parameters that will be sent to both test and base engines.\n'
                             'Make sure that this param is not included in the input-param.\n'
                             'Example:\n'
                             '--common-param \"{\'RookOpenFile\': 92, \'KnightOutpost\': 300}\"')

    args = parser.parse_args()

    trials = args.trials
    init_value = args.initial_best_value
    save_plots_every_trial = args.save_plots_every_trial
    fix_base_param = args.fix_base_param
    common_param = args.common_param

    if common_param is not None:
        common_param = ast.literal_eval(common_param)

    # Number of games should be even for a fair engine match.
    games_per_trial = args.games_per_trial
    games_per_trial += 1 if (args.games_per_trial % 2) != 0 else 0
    rounds = games_per_trial//2

    good_result_cnt = 0

    study_name = args.study_name
    storage_file = f'{study_name}.db'

    logger.info(f'{__script_name__} {__version__}')
    logger.info(f'trials: {trials}, games_per_trial: {rounds * 2}, sampler: {args.sampler}\n')

    # Convert the input param string to a dict of dict and sort by key.
    input_param = ast.literal_eval(args.input_param)
    input_param = OrderedDict(sorted(input_param.items()))
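    # Illustration (not part of the original script): with
    #   --input-param "{'pawn': {'default': 92, 'min': 90, 'max': 120, 'step': 2}}"
    # ast.literal_eval() above yields a dict of dicts keyed by parameter name:
    #   {'pawn': {'default': 92, 'min': 90, 'max': 120, 'step': 2}}
    # Sorting by key makes the parameter order deterministic across study sessions.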

    logger.info(f'input param: {input_param}\n')
    init_param = Objective.set_param(input_param)

    # Adjust save_plots_every_trial if trials is lower than it so
    # that max_cycle is 1 or more and studies can continue. The plot
    # will be generated after the study.
    if trials < save_plots_every_trial:
        save_plots_every_trial = trials

    max_cycle = trials // save_plots_every_trial
    n_trials = save_plots_every_trial
    cycle = 0

    # Define sampler to use, default is TPE.
    sampler = Objective.get_sampler(args.sampler)

    # ThresholdPruner as trial pruner, if result of a match is below result
    # threshold after games threshold then prune the trial. Get new param
    # from optimizer and continue with the next trial.
    # --threshold-pruner result=0.45 games=50 --games-per-trial 100 ...
    pruner, th_pruner = Objective.get_pruner(args.threshold_pruner, games_per_trial)

    logger.info('Starting optimization ...')

    while cycle < max_cycle:
        cycle += 1

        # Define study.
        study = optuna.create_study(study_name=study_name,
                                    direction='maximize',
                                    storage=f'sqlite:///{storage_file}',
                                    load_if_exists=True, sampler=sampler,
                                    pruner=pruner)

        # Get the best value from previous study session.
        best_param, best_value, is_study = {}, 0.0, False
        try:
            best_value = study.best_value
            is_study = True
        except ValueError:
            logger.warning('Warning, best value from previous trial is not found!')
        except:
            logger.exception('Unexpected error: %s', sys.exc_info()[0])
            raise
        logger.info(f'study best value: {best_value}')

        # Get the best param values from previous study session.
        try:
            best_param = copy.deepcopy(study.best_params)
        except ValueError:
            logger.warning('Warning, best param from previous trial is not found!')
        except:
            logger.exception('Unexpected error: %s', sys.exc_info()[0])
            raise
        logger.info(f'study best param: {best_param}')

        old_trial_num = len(study.trials)

        # Get the good result count before we resume the study.
        if is_panda_ok and not fix_base_param and is_study:
            df = study.trials_dataframe(attrs=('value', 'state'))
            for index, row in df.iterrows():
                if row['value'] > init_value and row['state'] == 'COMPLETE':
                    good_result_cnt += 1

        # If there is no trial recorded yet we will initialize our study
        # with default values from the engine.
        if not is_study:
            distri = {}
            init_trial_value = init_value

            for k, v in input_param.items():
                if 'type' in v and v['type'] == 'float':
                    distri.update({k: DiscreteUniformDistribution(v['min'], v['max'], v['step'])})
                else:
                    distri.update({k: IntUniformDistribution(v['min'], v['max'], v['step'])})

            init_trial = optuna.trial.create_trial(
                params=copy.deepcopy(init_param),
                distributions=copy.deepcopy(distri),
                value=init_trial_value,
            )
            study.add_trial(init_trial)

            best_param = study.best_params
            best_value = study.best_value

        # Begin param optimization.
        # https://optuna.readthedocs.io/en/stable/reference/generated/optuna.study.Study.html#optuna.study.Study.optimize
        study.optimize(Objective(args.engine, input_param, best_param,
                                 best_value, init_param, init_value,
                                 args.variant, args.opening_file,
                                 args.opening_format, old_trial_num,
                                 args.pgn_output, args.nodes,
                                 args.base_time_sec, args.inc_time_sec,
                                 rounds, args.concurrency,
                                 args.protocol, fix_base_param,
                                 args.match_manager, args.match_manager_path,
                                 good_result_cnt,
                                 args.depth, games_per_trial, th_pruner,
                                 common_param, args.resign_movecount,
                                 args.resign_score),
                       n_trials=n_trials)

        # Create and save plots after this study session is completed.
        save_plots(study, study_name, input_param, args.plot)

        # Build pandas dataframe, print and save to csv file.
        if is_panda_ok:
            df = study.trials_dataframe(attrs=('number', 'value', 'params',
                                               'state'))
            logger.info(f'{df.to_string(index=False)}\n')
            df.to_csv(f'{study_name}.csv', index=False)

        # Show the best param, value and trial number.
        logger.info(f'study best param: {study.best_params}')
        logger.info(f'study best value: {study.best_value}')
        logger.info(f'study best trial number: {study.best_trial.number}\n')

        # Output for match manager.
        option_output = ''
        for k, v in study.best_params.items():
            option_output += f'option.\'{k}\'={v} '
        logger.info(f'{option_output}\n')
Example 10
            screenshot = screenshot_folder[filename]

            encoding = get_face_encoding(screenshot)
            distance = get_face_distance(encoding, target_encoding)
            return distance
        except Exception:
            traceback.print_exc()


study = optuna.create_study()

# Load existing screenshots
distributions = {
    "Body Type": CategoricalDistribution(choices=("Male", "Female")),
    "Head": CategoricalDistribution(choices=list(range(20))),
    "Brow Height": IntUniformDistribution(0, 12),
    "Brow Depth": IntUniformDistribution(0, 12),
    "Eyeline": IntUniformDistribution(0, 12),
    "Eye Spacing": IntUniformDistribution(0, 12),
    "Nose Width": IntUniformDistribution(0, 12),
    "Nose Height": IntUniformDistribution(0, 12),
    "Nose Bridge": IntUniformDistribution(0, 12),
    "Mouth Height": IntUniformDistribution(0, 12),
    "Cheeks": IntUniformDistribution(0, 12),
    "Jawline": IntUniformDistribution(0, 12),
}

for filename, screenshot in tqdm(screenshot_folder.items(), ascii=True):
    params = filename_to_params(filename)
    encoding = get_face_encoding(screenshot)
    distance = get_face_distance(encoding, target_encoding)
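    # Hypothetical continuation (not in the original snippet): register each
    # existing screenshot as a finished trial so the sampler can learn from it,
    # assuming `filename_to_params` returns a dict keyed like `distributions`.
    # The `optuna.trial.create_trial` pattern mirrors Example 9 above.
    study.add_trial(
        optuna.trial.create_trial(
            params=params,
            distributions=distributions,
            value=distance,
        )
    )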