Example #1
def test_fixed_trial_suggest_int() -> None:

    trial = FixedTrial({"x": 1})
    assert trial.suggest_int("x", 0, 10) == 1

    with pytest.raises(ValueError):
        trial.suggest_int("y", 0, 10)
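These snippets assume pytest and FixedTrial are imported at module scope (import pytest; from optuna.trial import FixedTrial). For context, here is a minimal sketch of what FixedTrial is for, following the pattern in Optuna's documentation (the objective below is illustrative):

from optuna.trial import FixedTrial

def objective(trial):
    x = trial.suggest_float("x", -100.0, 100.0)
    y = trial.suggest_categorical("y", [-1, 0, 1])
    return x ** 2 + y

# Evaluate the objective once with pinned parameters - no Study and
# no sampling. Unknown parameter names raise ValueError, as the tests show.
assert objective(FixedTrial({"x": 2.0, "y": -1})) == 3.0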
Example #2
def test_suggest_loguniform() -> None:

    trial = FixedTrial({"x": 0.99})
    assert trial.suggest_loguniform("x", 0.1, 1.0) == 0.99

    with pytest.raises(ValueError):
        trial.suggest_loguniform("y", 0.0, 1.0)
Example #3
def test_fixed_trial_suggest_discrete_uniform() -> None:

    trial = FixedTrial({"x": 0.9})
    assert trial.suggest_discrete_uniform("x", 0.0, 1.0, 0.1) == 0.9

    with pytest.raises(ValueError):
        trial.suggest_discrete_uniform("y", 0.0, 1.0, 0.1)
Example #4
def test_fixed_trial_suggest_uniform() -> None:

    trial = FixedTrial({"x": 1.0})
    assert trial.suggest_uniform("x", -100.0, 100.0) == 1.0

    with pytest.raises(ValueError):
        trial.suggest_uniform("y", -100.0, 100.0)
Example #5
def test_fixed_trial_report():
    # type: () -> None

    # FixedTrial ignores reported values.
    trial = FixedTrial({})
    trial.report(1.0, 1)
    trial.report(2.0, 2)
Example #6
def test_fixed_trial_suggest_int_log():
    # type: () -> None

    trial = FixedTrial({"x": 1})
    assert trial.suggest_int("x", 1, 10, log=True) == 1

    with pytest.raises(ValueError):
        trial.suggest_int("y", 1, 10, log=True)
Example #7
def test_fixed_trial_suggest_uniform():
    # type: () -> None

    trial = FixedTrial({'x': 1.})
    assert trial.suggest_uniform('x', -100., 100.) == 1.

    with pytest.raises(ValueError):
        trial.suggest_uniform('y', -100., 100.)
Example #8
def test_fixed_trial_params() -> None:

    params = {"x": 1}
    trial = FixedTrial(params)
    assert trial.params == {}

    assert trial.suggest_uniform("x", 0, 10) == 1
    assert trial.params == params
Example #9
def test_fixed_trial_suggest_int():
    # type: () -> None

    trial = FixedTrial({'x': 1})
    assert trial.suggest_int('x', 0, 10) == 1

    with pytest.raises(ValueError):
        trial.suggest_int('y', 0, 10)
Example #10
def test_fixed_trial_suggest_discrete_uniform():
    # type: () -> None

    trial = FixedTrial({'x': 0.9})
    assert trial.suggest_discrete_uniform('x', 0., 1., 0.1) == 0.9

    with pytest.raises(ValueError):
        trial.suggest_discrete_uniform('y', 0., 1., 0.1)
Example #11
def test_fixed_trial_number() -> None:

    params = {"x": 1}
    trial = FixedTrial(params, 2)
    assert trial.number == 2

    trial = FixedTrial(params)
    assert trial.number == 0
Example #12
def test_fixed_trial_suggest_categorical():
    # type: () -> None

    trial = FixedTrial({'x': 1})
    assert trial.suggest_categorical('x', [0, 1, 2, 3]) == 1

    with pytest.raises(ValueError):
        trial.suggest_categorical('y', [0, 1, 2, 3])
Example #13
def test_fixed_trial_params():
    # type: () -> None

    params = {'x': 1}
    trial = FixedTrial(params)
    assert trial.params == {}

    assert trial.suggest_uniform('x', 0, 10) == 1
    assert trial.params == params
Example #14
def _create_trial(
    trial_type: type,
    params: Optional[Dict[str, Any]] = None,
    distributions: Optional[Dict[str, BaseDistribution]] = None,
) -> BaseTrial:
    if params is None:
        params = {"x": 10}
    assert params is not None
    if distributions is None:
        distributions = {"x": FloatDistribution(5, 12)}
    assert distributions is not None

    if trial_type == FixedTrial:
        return FixedTrial(params)
    elif trial_type == FrozenTrial:
        trial = create_trial(value=0.2,
                             params=params,
                             distributions=distributions)
        trial.number = 0
        return trial
    elif trial_type == Trial:
        study = create_study()
        study.enqueue_trial(params)
        return study.ask()
    else:
        assert False
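A hypothetical use of this helper in a parametrized test (the test itself is an assumption; it presumes pytest and the trial classes are imported as in the helper above):

@pytest.mark.parametrize("trial_type", [FixedTrial, FrozenTrial, Trial])
def test_suggest_returns_fixed_value(trial_type: type) -> None:
    trial = _create_trial(trial_type)
    # All three trial types return the pre-set value for "x".
    assert trial.suggest_float("x", 5, 12) == 10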
Example #15
def model_fn(model_dir):
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # Rebuild the tuned architecture by replaying the saved
    # hyperparameters through a FixedTrial, then restore the weights.
    params = torch.load(os.path.join(model_dir, 'params.pth'))
    model = define_model(FixedTrial(params)).to(device)
    with open(os.path.join(model_dir, 'model.pth'), 'rb') as f:
        model.load_state_dict(torch.load(f))
    return model
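For context, the params.pth consumed above would be written at the end of hyperparameter search; a minimal training-side sketch (save_artifacts is a hypothetical helper; the file names mirror the example):

import os
import torch

def save_artifacts(study, model, model_dir):
    # Persist the best hyperparameters and the trained weights so that
    # model_fn can rebuild the network via FixedTrial(params).
    torch.save(study.best_params, os.path.join(model_dir, 'params.pth'))
    torch.save(model.state_dict(), os.path.join(model_dir, 'model.pth'))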
Example #16
def model_fn(model_dir):
    """
    This function is called by the Chainer container during hosting on
    SageMaker, with values populated by the hosting environment.
    
    This function loads models written during training into `model_dir`.

    Args:
        model_dir (str): path to the directory containing the saved model artifacts

    Returns:
        a loaded Chainer model
    
    For more on `model_fn`, please visit the sagemaker-python-sdk repository:
    https://github.com/aws/sagemaker-python-sdk
    
    For more on the Chainer container, please visit the sagemaker-chainer-containers repository:
    https://github.com/aws/sagemaker-chainer-containers
    """

    from optuna.trial import FixedTrial

    chainer.config.train = False
    # Replay the saved hyperparameters through a FixedTrial to rebuild
    # the tuned architecture, then restore the trained weights.
    params = np.load(os.path.join(model_dir, 'params.npz'))['arr_0'].item()
    model = L.Classifier(create_model(FixedTrial(params)))
    serializers.load_npz(os.path.join(model_dir, 'model.npz'), model)
    return model.predictor
Example #17
def test_fixed_trial_suggest_categorical():
    # type: () -> None

    # Integer categories.
    trial = FixedTrial({'x': 1})
    assert trial.suggest_categorical('x', [0, 1, 2, 3]) == 1

    with pytest.raises(ValueError):
        trial.suggest_categorical('y', [0, 1, 2, 3])

    # String categories.
    trial = FixedTrial({'x': 'baz'})
    assert trial.suggest_categorical('x', ['foo', 'bar', 'baz']) == 'baz'

    with pytest.raises(ValueError):
        trial.suggest_categorical('y', ['foo', 'bar', 'baz'])
Example #18
def example():
    """
    In this example we search for the best hyperparameters for a Support
    Vector Machine classifier on the breast cancer dataset.

    We split the data into training and test sets (we do not need a separate
    validation set, because POCStudy performs cross-validation internally).

    We run the optimization for 60 seconds. You may see some
    ConvergenceWarning messages from SVC; don't worry, those trials are
    usually pruned anyway. The number of explored points depends on how fast
    your machine is.

    We use the best params to train the model on the whole train set and
    score it on the test set.

    Finally we plot the optimization history. For the first 10 trials, Optuna
    performs random search to warm up TPE; afterwards it focuses on exploring
    the most promising areas. Observe that after the 15th iteration, multiple
    trials are pruned.
    """

    # Remove next 3 lines if you want to see some SVC warnings.
    import warnings
    from sklearn.exceptions import ConvergenceWarning
    warnings.filterwarnings(action='ignore', category=ConvergenceWarning)

    # Load and optionally split data.
    x, y = load_breast_cancer(return_X_y=True)
    dataset = Dataset(x, y, 'sklearn_breast_cancer')
    splitter = StratifiedSplitter(train_ratio=0.7,
                                  val_ratio=0.0,
                                  random_state=2020)
    split = splitter.split(dataset)

    # Instantiate objective with train data.
    # You can pass a list of scorers - the first one will be used for
    # optimization and the rest just for monitoring.
    objective = MultiLevelSVMObjective(
        split.train, scorer=['accuracy', 'f1', 'recall', 'precision'])

    # Create study and run optimization (internally it will use 5-fold CV)
    study = POCStudy(objective)
    study.optimize(timeout_seconds=60, num_trials=None)

    # Now you can use best trial params to create a model...
    best_params = study.best_trial.params
    print('Creating model with best params:', best_params)
    best_trial = FixedTrial(best_params)
    best_model = objective.create_model(best_trial)

    # ... which you can fit and score.
    best_model.fit(X=split.train.x, y=split.train.y)
    test_score = best_model.score(X=split.test.x, y=split.test.y)
    print(f'test_score: {test_score:.3g}')

    # Plot optimization history.
    print('Plot will hopefully open in a browser.')
    fig = optuna.visualization.plot_optimization_history(study)
    fig.show()
    print('Done.')
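Note the pattern at the end of this example: study.best_trial.params is replayed through FixedTrial so that the same objective.create_model code path builds both the search-time candidates and the final model, avoiding a second, hand-written model constructor.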
Example #19
    def best_models(self, return_preds=False):
        fixed_trial = FixedTrial(self.study.best_params)
        loss, models, cv_preds = self.objective(fixed_trial, return_model=True)

        if return_preds:
            return models, cv_preds
        return models
Example #20
def test_suggest_float() -> None:

    trial = FixedTrial({"x": 1.0})
    assert trial.suggest_float("x", -100.0, 100.0) == 1.0

    with pytest.raises(ValueError):
        trial.suggest_float("x", -100, 100, step=10, log=True)

    with pytest.raises(ValueError):
        trial.suggest_uniform("y", -100.0, 100.0)
Example #21
def test_suggest_int_log() -> None:

    trial = FixedTrial({"x": 1})
    assert trial.suggest_int("x", 1, 10, log=True) == 1

    with pytest.raises(ValueError):
        trial.suggest_int("x", 1, 10, step=2, log=True)

    with pytest.raises(ValueError):
        trial.suggest_int("y", 1, 10, log=True)
Example #22
def test_fixed_trial_user_attrs():
    # type: () -> None

    trial = FixedTrial({'x': 1})
    trial.set_user_attr('data', 'MNIST')
    assert trial.user_attrs['data'] == 'MNIST'
Example #23
def test_fixed_trial_system_attrs():
    # type: () -> None

    trial = FixedTrial({'x': 1})
    trial.set_system_attr('system_message', 'test')
    assert trial.system_attrs['system_message'] == 'test'
Example #24
def test_fixed_trial_datetime_start():
    # type: () -> None

    params = {'x': 1}
    trial = FixedTrial(params)
    assert trial.datetime_start is not None
Example #25
def test_fixed_trial_user_attrs() -> None:

    trial = FixedTrial({"x": 1})
    trial.set_user_attr("data", "MNIST")
    assert trial.user_attrs["data"] == "MNIST"
Example #26
def test_fixed_trial_system_attrs() -> None:

    trial = FixedTrial({"x": 1})
    trial.set_system_attr("system_message", "test")
    assert trial.system_attrs["system_message"] == "test"
Example #27
def test_fixed_trial_should_prune() -> None:

    # FixedTrial never prunes trials.
    assert FixedTrial({}).should_prune() is False
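Because report() is a no-op and should_prune() always returns False, a pruning-aware objective can be replayed through FixedTrial without modification; a minimal sketch (the objective is illustrative):

import optuna
from optuna.trial import FixedTrial

def objective(trial):
    lr = trial.suggest_float("lr", 1e-5, 1e-1, log=True)
    score = 0.0
    for step in range(10):
        score = lr * step  # stand-in for a real intermediate metric
        trial.report(score, step)
        if trial.should_prune():
            raise optuna.TrialPruned()
    return score

# With FixedTrial the loop always runs to completion.
objective(FixedTrial({"lr": 0.01}))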
Example #28
def test_fixed_trial_datetime_start() -> None:

    params = {"x": 1}
    trial = FixedTrial(params)
    assert trial.datetime_start is not None
Example #29
def test_fixed_trial_should_prune():
    # type: () -> None

    # FixedTrial never prunes trials.
    assert FixedTrial({}).should_prune() is False
    assert FixedTrial({}).should_prune(1) is False
Example #30
def test_fixed_trial_suggest_categorical() -> None:

    # Integer categories.
    trial = FixedTrial({"x": 1})
    assert trial.suggest_categorical("x", [0, 1, 2, 3]) == 1

    with pytest.raises(ValueError):
        trial.suggest_categorical("y", [0, 1, 2, 3])

    # String categories.
    trial = FixedTrial({"x": "baz"})
    assert trial.suggest_categorical("x", ["foo", "bar", "baz"]) == "baz"

    # Unknown parameter.
    with pytest.raises(ValueError):
        trial.suggest_categorical("y", ["foo", "bar", "baz"])

    # Not in choices.
    with pytest.raises(ValueError):
        trial.suggest_categorical("x", ["foo", "bar"])

    # Unknown parameter and bad category type.
    with pytest.warns(UserWarning):
        with pytest.raises(ValueError):  # Must come after `pytest.warns` to catch failures.
            trial.suggest_categorical("x", [{"foo": "bar"}])  # type: ignore