Example #1
def test_bayesian_save_reload(tmp_dir):
    hps = hp_module.HyperParameters()
    hps.Choice("a", [1, 2], default=1)
    hps.Choice("b", [3, 4], default=3)
    hps.Choice("c", [5, 6], default=5)
    hps.Choice("d", [7, 8], default=7)
    hps.Choice("e", [9, 0], default=9)
    oracle = bo_module.BayesianOptimizationOracle(objective=kt.Objective(
        "score", "max"),
                                                  max_trials=20,
                                                  hyperparameters=hps)
    oracle._set_project_dir(tmp_dir, "untitled")

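    # Run a few trials so the oracle has state to save.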
    for _ in range(3):
        trial = oracle.create_trial("tuner_id")
        oracle.update_trial(trial.trial_id, {"score": 1.0})
        oracle.end_trial(trial.trial_id, "COMPLETED")

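    # Save the oracle, then rebuild it and reload the saved state.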
    oracle.save()
    oracle = bo_module.BayesianOptimizationOracle(objective=kt.Objective(
        "score", "max"),
                                                  max_trials=20,
                                                  hyperparameters=hps)
    oracle._set_project_dir(tmp_dir, "untitled")
    oracle.reload()

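    # The reloaded oracle should keep producing new trials.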
    for _ in range(3):
        trial = oracle.create_trial("tuner_id")
        oracle.update_trial(trial.trial_id, {"score": 1.0})
        oracle.end_trial(trial.trial_id, "COMPLETED")

    assert len(oracle.trials) == 6
Example #2
def test_base_tuner(tmp_dir):
    class MyTuner(base_tuner.BaseTuner):
        def run_trial(self, trial, x):
            model = self.hypermodel.build(trial.hyperparameters)
            self.oracle.update_space(trial.hyperparameters)
            score = model(x)
            self.oracle.update_trial(trial.trial_id, metrics={"score": score})

        def get_best_models(self, num_models=1):
            best_trials = self.oracle.get_best_trials(num_models)
            models = [
                self.hypermodel.build(t.hyperparameters) for t in best_trials
            ]
            return models

    def build_model(hp):
        class MyModel(object):
            def __init__(self):
                self.factor = hp.Float("a", 0, 10)

            def __call__(self, x):
                return self.factor * x

        return MyModel()

    oracle = keras_tuner.tuners.randomsearch.RandomSearchOracle(
        objective=keras_tuner.Objective("score", "max"), max_trials=5)
    tuner = MyTuner(oracle=oracle, hypermodel=build_model, directory=tmp_dir)
    tuner.search(1.0)
    models = tuner.get_best_models(5)

    # Check that scoring of the model was done correctly.
    models_by_factor = sorted(models, key=lambda m: m.factor, reverse=True)
    assert models[0] == models_by_factor[0]
Example #3
def test_bayesian_oracle_maximize(tmp_dir):
    hps = hp_module.HyperParameters()
    hps.Int("a", -100, 100)

    oracle = bo_module.BayesianOptimizationOracle(
        objective=kt.Objective("score", direction="max"),
        max_trials=20,
        hyperparameters=hps,
        num_initial_points=2,
    )
    oracle._set_project_dir(tmp_dir, "untitled")

    # Make examples with high 'a' and high score.
    for i in range(5):
        trial = trial_module.Trial(hyperparameters=hps.copy())
        trial.hyperparameters.values["a"] = 10 * i
        trial.score = i
        trial.status = "COMPLETED"
        oracle.trials[trial.trial_id] = trial

    # Make examples with low 'a' and low score
    for i in range(5):
        trial = trial_module.Trial(hyperparameters=hps.copy())
        trial.hyperparameters.values["a"] = -10 * i
        trial.score = -i
        trial.status = "COMPLETED"
        oracle.trials[trial.trial_id] = trial

    trial = oracle.create_trial("tuner0")
    assert trial.status == "RUNNING"
    # Assert that the oracle suggests hps it thinks will maximize.
    assert trial.hyperparameters.get("a") > 0
Example #4
def test_sklearn_real_data(tmp_dir):
    tuner = kt.SklearnTuner(
        oracle=kt.oracles.BayesianOptimization(objective=kt.Objective(
            "score", "max"),
                                               max_trials=10),
        hypermodel=build_model,
        scoring=metrics.make_scorer(metrics.accuracy_score),
        cv=model_selection.StratifiedKFold(5),
        directory=tmp_dir,
    )

    x, y = datasets.load_iris(return_X_y=True)
    x_train, x_test, y_train, y_test = model_selection.train_test_split(
        x, y, test_size=0.2)

    tuner.search(x_train, y_train)

    best_models = tuner.get_best_models(10)
    best_model = best_models[0]
    worst_model = best_models[9]
    best_model_score = best_model.score(x_test, y_test)
    worst_model_score = worst_model.score(x_test, y_test)

    assert best_model_score > 0.8
    assert best_model_score >= worst_model_score
Example #5
def test_sklearn_pipeline(tmp_dir):
    tuner = kt.SklearnTuner(
        oracle=kt.oracles.BayesianOptimization(objective=kt.Objective(
            "score", "max"),
                                               max_trials=10),
        hypermodel=build_pipeline,
        directory=tmp_dir,
    )

    x = np.random.uniform(size=(50, 10))
    y = np.random.randint(0, 2, size=(50, ))
    sample_weight = np.random.uniform(0.1, 1, size=(50, ))
    tuner.search(x, y, sample_weight=sample_weight)

    assert len(tuner.oracle.trials) == 10

    best_trial = tuner.oracle.get_best_trials()[0]
    assert best_trial.status == "COMPLETED"
    assert best_trial.score is not None
    assert best_trial.best_step == 0
    assert best_trial.metrics.exists("score")

    # Make sure best pipeline can be reloaded.
    best_pipeline = tuner.get_best_models()[0]
    best_pipeline.score(x, y)
Example #6
def test_sklearn_cv_with_groups(tmp_dir):
    tuner = kt.SklearnTuner(
        oracle=kt.oracles.BayesianOptimization(objective=kt.Objective(
            "score", "max"),
                                               max_trials=10),
        hypermodel=build_model,
        cv=model_selection.GroupKFold(5),
        directory=tmp_dir,
    )

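    # GroupKFold splits on per-sample group labels passed via search().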
    x = np.random.uniform(size=(50, 10))
    y = np.random.randint(0, 2, size=(50, ))
    groups = np.random.randint(0, 5, size=(50, ))
    tuner.search(x, y, groups=groups)

    assert len(tuner.oracle.trials) == 10

    best_trial = tuner.oracle.get_best_trials()[0]
    assert best_trial.status == "COMPLETED"
    assert best_trial.score is not None
    assert best_trial.best_step == 0
    assert best_trial.metrics.exists("score")

    # Make sure best model can be reloaded.
    best_model = tuner.get_best_models()[0]
    best_model.score(x, y)
Example #7
def test_sklearn_additional_metrics(tmp_dir):
    tuner = kt.SklearnTuner(
        oracle=kt.oracles.BayesianOptimization(objective=kt.Objective(
            "score", "max"),
                                               max_trials=10),
        hypermodel=build_model,
        metrics=[metrics.balanced_accuracy_score, metrics.recall_score],
        directory=tmp_dir,
    )

    x = np.random.uniform(size=(50, 10))
    y = np.random.randint(0, 2, size=(50, ))
    tuner.search(x, y)

    assert len(tuner.oracle.trials) == 10

    best_trial = tuner.oracle.get_best_trials()[0]
    assert best_trial.status == "COMPLETED"
    assert best_trial.score is not None
    assert best_trial.best_step == 0
    assert best_trial.metrics.exists("score")
    assert best_trial.metrics.exists("balanced_accuracy_score")
    assert best_trial.metrics.exists("recall_score")

    # Make sure best model can be reloaded.
    best_model = tuner.get_best_models()[0]
    best_model.score(x, y)
Example #8
def test_sklearn_custom_scoring_and_cv(tmp_dir):
    tuner = kt.SklearnTuner(
        oracle=kt.oracles.BayesianOptimization(objective=kt.Objective(
            "score", "max"),
                                               max_trials=10),
        hypermodel=build_model,
        scoring=metrics.make_scorer(metrics.balanced_accuracy_score),
        cv=model_selection.StratifiedKFold(5),
        directory=tmp_dir,
    )

    x = np.random.uniform(size=(50, 10))
    y = np.random.randint(0, 2, size=(50, ))
    tuner.search(x, y)

    assert len(tuner.oracle.trials) == 10

    best_trial = tuner.oracle.get_best_trials()[0]
    assert best_trial.status == "COMPLETED"
    assert best_trial.score is not None
    assert best_trial.best_step == 0
    assert best_trial.metrics.exists("score")

    # Make sure best model can be reloaded.
    best_model = tuner.get_best_models()[0]
    best_model.score(x, y)
Example #9
def _test_get_best_trials():
    hps = kt.HyperParameters()
    hps.Int("a", 0, 100, default=5)
    hps.Int("b", 0, 100, default=6)
    oracle = randomsearch.RandomSearchOracle(
        objective=kt.Objective("score", direction="max"),
        max_trials=10,
        hyperparameters=hps,
    )
    oracle._set_project_dir(tmp_dir, "untitled")
    tuner_id = os.environ["KERASTUNER_TUNER_ID"]
    if "chief" in tuner_id:
        oracle_chief.start_server(oracle)
    else:
        client = oracle_client.OracleClient(oracle)
        trial_scores = {}
        for score in range(10):
            trial = client.create_trial(tuner_id)
            assert trial.status == "RUNNING"
            assert "a" in trial.hyperparameters.values
            assert "b" in trial.hyperparameters.values
            trial_id = trial.trial_id
            client.update_trial(trial_id, {"score": score})
            client.end_trial(trial_id)
            trial_scores[trial_id] = score
        best_trials = client.get_best_trials(3)
        best_scores = [t.score for t in best_trials]
        assert best_scores == [9, 8, 7]
        # Check that trial_ids are correctly mapped to scores.
        for t in best_trials:
            assert trial_scores[t.trial_id] == t.score
Example #10
def _test_base_tuner():
    def build_model(hp):
        return hp.Int("a", 1, 100)

    tuner = SimpleTuner(
        oracle=kt.oracles.RandomSearch(objective=kt.Objective(
            "score", "max"),
                                       max_trials=10),
        hypermodel=build_model,
        directory=tmp_dir,
    )
    tuner.search()

    # Only the worker reaches this point; the chief's server runs until the
    # thread stops.
    assert dist_utils.has_chief_oracle()
    assert not dist_utils.is_chief_oracle()
    assert isinstance(tuner.oracle,
                      kt.distribute.oracle_client.OracleClient)

    barrier.wait(60)

    # The "model" here is just a score.
    scores = tuner.get_best_models(10)
    assert len(scores)
    assert scores == sorted(copy.copy(scores), reverse=True)
Example #11
def test_hyperparameters_added(tmp_dir):
    hps = hp_module.HyperParameters()
    hps.Int("a", -100, 100)

    oracle = bo_module.BayesianOptimizationOracle(
        objective=kt.Objective("score", direction="max"),
        max_trials=20,
        hyperparameters=hps,
        num_initial_points=2,
    )
    oracle._set_project_dir(tmp_dir, "untitled")

    # Populate initial trials.
    for i in range(10):
        trial = trial_module.Trial(hyperparameters=hps.copy())
        trial.hyperparameters.values["a"] = 10 * i
        trial.score = i
        trial.status = "COMPLETED"
        oracle.trials[trial.trial_id] = trial

    # A new trial discovered a new hp and synced to oracle.hyperparameters.
    new_hps = hp_module.HyperParameters()
    new_hps.Float("b", 3.2, 6.4, step=0.2, default=3.6)
    new_hps.Boolean("c", default=True)
    oracle.update_space(new_hps)

    # Make a new trial, it should have b set.
    trial = oracle.create_trial("tuner0")
    assert trial.status == "RUNNING"
    assert "b" in trial.hyperparameters.values
    assert "c" in trial.hyperparameters.values
Example #12
def test_metric_direction_inferred_from_objective(tmp_dir):
    oracle = keras_tuner.tuners.randomsearch.RandomSearchOracle(
        objective=keras_tuner.Objective("a", "max"), max_trials=1
    )
    oracle._set_project_dir(tmp_dir, "untitled_project")
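    # A metric reported under a "max" objective is tracked as maximizing.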
    trial = oracle.create_trial("tuner0")
    oracle.update_trial(trial.trial_id, {"a": 1})
    trial = oracle.get_trial(trial.trial_id)
    assert trial.metrics.get_direction("a") == "max"

    oracle = keras_tuner.tuners.randomsearch.RandomSearchOracle(
        objective=keras_tuner.Objective("a", "min"), max_trials=1
    )
    oracle._set_project_dir(tmp_dir, "untitled_project2")
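    # The same metric under a "min" objective is tracked as minimizing.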
    trial = oracle.create_trial("tuner0")
    oracle.update_trial(trial.trial_id, {"a": 1})
    trial = oracle.get_trial(trial.trial_id)
    assert trial.metrics.get_direction("a") == "min"
Example #13
def test_sklearn_deprecation_warning(tmp_dir):
    with pytest.deprecated_call():
        kt.tuners.Sklearn(
            oracle=kt.oracles.BayesianOptimization(objective=kt.Objective(
                "score", "max"),
                                                   max_trials=10),
            hypermodel=build_model,
            directory=tmp_dir,
        )
Example #14
def test_save_before_result(tmp_dir):
    hps = hp_module.HyperParameters()
    hps.Choice("a", [1, 2], default=1)
    hps.Int("b", 3, 10, default=3)
    hps.Float("c", 0, 1, 0.1, default=0)
    hps.Fixed("d", 7)
    hps.Choice("e", [9, 0], default=9)
    oracle = bo_module.BayesianOptimizationOracle(objective=kt.Objective(
        "score", "max"),
                                                  max_trials=10,
                                                  hyperparameters=hps)
    oracle._set_project_dir(tmp_dir, "untitled")
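    # Saving must work even before any trial has reported a result.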
    oracle.populate_space(str(1))
    oracle.save()
Example #15
def test_sklearn_tuner_with_df(tmp_dir):
    tuner = kt.SklearnTuner(
        oracle=kt.oracles.BayesianOptimization(objective=kt.Objective(
            "score", "max"),
                                               max_trials=10),
        hypermodel=build_model,
        directory=tmp_dir,
    )

    x = pd.DataFrame(np.random.uniform(size=(50, 10)))
    y = pd.DataFrame(np.random.randint(0, 2, size=(50, )))
    tuner.search(x, y)

    assert len(tuner.oracle.trials) == 10
Example #16
def test_sklearn_not_install_error(tmp_dir):
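    # Temporarily hide the sklearn module to exercise the ImportError path.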
    sklearn_module = kt.tuners.sklearn_tuner.sklearn
    kt.tuners.sklearn_tuner.sklearn = None

    with pytest.raises(ImportError, match="Please install sklearn"):
        kt.SklearnTuner(
            oracle=kt.oracles.BayesianOptimization(objective=kt.Objective(
                "score", "max"),
                                                   max_trials=10),
            hypermodel=build_model,
            directory=tmp_dir,
        )

    kt.tuners.sklearn_tuner.sklearn = sklearn_module
Example #17
def tuner_fn(fn_args: tfx.components.FnArgs) -> tfx.components.TunerFnResult:
    """Build the tuner using the KerasTuner API.

  Args:
    fn_args: Holds args as name/value pairs.
      - working_dir: working dir for tuning.
      - train_files: List of file paths containing training tf.Example data.
      - eval_files: List of file paths containing eval tf.Example data.
      - train_steps: number of train steps.
      - eval_steps: number of eval steps.
      - schema_path: optional schema of the input data.
      - transform_graph_path: optional transform graph produced by TFT.

  Returns:
    A namedtuple containing the following:
      - tuner: A BaseTuner that will be used for tuning.
      - fit_kwargs: Args to pass to the tuner's run_trial function for fitting
                    the model, e.g., the training and validation datasets.
                    Required args depend on the tuner implementation above.
  """
    # RandomSearch is a subclass of keras_tuner.Tuner which inherits from
    # BaseTuner.
    tuner = keras_tuner.RandomSearch(_make_keras_model,
                                     max_trials=6,
                                     hyperparameters=_get_hyperparameters(),
                                     allow_new_entries=False,
                                     objective=keras_tuner.Objective(
                                         'val_sparse_categorical_accuracy',
                                         'max'),
                                     directory=fn_args.working_dir,
                                     project_name='penguin_tuning')

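    # Build the train and eval datasets using the TFT transform graph.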
    transform_graph = tft.TFTransformOutput(fn_args.transform_graph_path)

    train_dataset = base.input_fn(fn_args.train_files, fn_args.data_accessor,
                                  transform_graph, base.TRAIN_BATCH_SIZE)

    eval_dataset = base.input_fn(fn_args.eval_files, fn_args.data_accessor,
                                 transform_graph, base.EVAL_BATCH_SIZE)

    return tfx.components.TunerFnResult(tuner=tuner,
                                        fit_kwargs={
                                            'x': train_dataset,
                                            'validation_data': eval_dataset,
                                            'steps_per_epoch':
                                            fn_args.train_steps,
                                            'validation_steps':
                                            fn_args.eval_steps
                                        })
Example #18
def test_objective_formats():
    obj = keras_tuner.engine.oracle._format_objective("accuracy")
    assert obj == keras_tuner.Objective("accuracy", "max")

    obj = keras_tuner.engine.oracle._format_objective(
        keras_tuner.Objective("score", "min"))
    assert obj == keras_tuner.Objective("score", "min")

    obj = keras_tuner.engine.oracle._format_objective([
        keras_tuner.Objective("score", "max"),
        keras_tuner.Objective("loss", "min")
    ])
    assert obj == [
        keras_tuner.Objective("score", "max"),
        keras_tuner.Objective("loss", "min"),
    ]

    obj = keras_tuner.engine.oracle._format_objective(["accuracy", "loss"])
    assert obj == [
        keras_tuner.Objective("accuracy", "max"),
        keras_tuner.Objective("loss", "min"),
    ]
Example #19
def _test_get_space():
    hps = kt.HyperParameters()
    hps.Int("a", 0, 10, default=3)
    oracle = randomsearch.RandomSearchOracle(
        objective=kt.Objective("score", "max"),
        max_trials=10,
        hyperparameters=hps,
    )
    oracle._set_project_dir(tmp_dir, "untitled")
    tuner_id = os.environ["KERASTUNER_TUNER_ID"]
    if "chief" in tuner_id:
        oracle_chief.start_server(oracle)
    else:
        client = oracle_client.OracleClient(oracle)
        retrieved_hps = client.get_space()
        assert retrieved_hps.values == {"a": 3}
        assert len(retrieved_hps.space) == 1
Example #20
def test_bayesian_oracle_with_zero_y(tmp_dir):
    hps = hp_module.HyperParameters()
    hps.Choice("a", [1, 2], default=1)
    hps.Int("b", 3, 10, default=3)
    hps.Float("c", 0, 1, 0.1, default=0)
    hps.Fixed("d", 7)
    hps.Choice("e", [9, 0], default=9)
    oracle = bo_module.BayesianOptimizationOracle(
        objective=kt.Objective("score", "max"),
        max_trials=20,
        num_initial_points=2,
        hyperparameters=hps,
    )
    oracle._set_project_dir(tmp_dir, "untitled")
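    # Every trial reports a score of zero; the oracle should still run.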
    for i in range(5):
        trial = oracle.create_trial(str(i))
        oracle.update_trial(trial.trial_id, {"score": 0})
        oracle.end_trial(trial.trial_id, "COMPLETED")
Example #21
def test_distributed_optimization(tmp_dir):

    hps = hp_module.HyperParameters()
    hps.Int("a", 0, 10)
    hps.Float("b", -1, 1, step=0.1)
    hps.Float("c", 1e-5, 1e-2, sampling="log")

    def evaluate(hp):
        # Minimum at a=4, b=1, c=1e-3 with score=-1
        return (abs(hp["a"] - 4) - hp["b"] +
                0.1 * abs(3 + math.log(hp["c"], 10)))

    oracle = bo_module.BayesianOptimizationOracle(objective=kt.Objective(
        "score", "min"),
                                                  hyperparameters=hps,
                                                  max_trials=60)
    oracle._set_project_dir(tmp_dir, "untitled")

    tuners = 4

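    # Simulate 4 tuners requesting and reporting trials in parallel.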
    for _ in range(10):
        trials = []
        for i in range(tuners):
            trial = oracle.create_trial("tuner_" + str(i))
            trials.append(trial)
        for trial in trials:
            oracle.update_trial(trial.trial_id,
                                {"score": evaluate(trial.hyperparameters)})
        for trial in trials:
            oracle.end_trial(trial.trial_id, "COMPLETED")

    atol, rtol = 1e-1, 1e-1
    best_trial = oracle.get_best_trials()[0]
    best_hps = best_trial.hyperparameters

    # The minimum is not always found but it is always close.
    assert best_trial.score < -0.8, best_hps.values
    assert np.isclose(best_hps["a"], 4, atol=atol, rtol=rtol)
    assert np.isclose(best_hps["b"], 1, atol=atol, rtol=rtol)

    # For log-scale param, just check that the order of magnitude is correct.
    log_best_c = math.log(best_hps["c"], 10)
    assert -4 < log_best_c < -2
Example #22
def _test_update_space():
    oracle = randomsearch.RandomSearchOracle(objective=kt.Objective(
        "score", "max"),
                                             max_trials=10)
    oracle._set_project_dir(tmp_dir, "untitled")
    tuner_id = os.environ["KERASTUNER_TUNER_ID"]
    if "chief" in tuner_id:
        oracle_chief.start_server(oracle)
    else:
        client = oracle_client.OracleClient(oracle)

        hps = kt.HyperParameters()
        hps.Int("a", 0, 10, default=5)
        hps.Choice("b", [1, 2, 3])
        client.update_space(hps)

        retrieved_hps = client.get_space()
        assert len(retrieved_hps.space) == 2
        assert retrieved_hps.values["a"] == 5
        assert retrieved_hps.values["b"] == 1
Example #23
def _test_end_trial():
    hps = kt.HyperParameters()
    hps.Int("a", 0, 10, default=5)
    oracle = randomsearch.RandomSearchOracle(
        objective=kt.Objective("score", "max"),
        max_trials=10,
        hyperparameters=hps,
    )
    oracle._set_project_dir(tmp_dir, "untitled")
    tuner_id = os.environ["KERASTUNER_TUNER_ID"]
    if "chief" in tuner_id:
        oracle_chief.start_server(oracle)
    else:
        client = oracle_client.OracleClient(oracle)
        trial = client.create_trial(tuner_id)
        trial_id = trial.trial_id
        client.update_trial(trial_id, {"score": 1}, step=2)
        client.end_trial(trial_id, "INVALID")
        updated_trial = client.get_trial(trial_id)
        assert updated_trial.status == "INVALID"
Example #24
def _test_create_trial():
    hps = kt.HyperParameters()
    hps.Int("a", 0, 10, default=5)
    hps.Choice("b", [1, 2, 3])
    oracle = randomsearch.RandomSearchOracle(
        objective=kt.Objective("score", "max"),
        max_trials=10,
        hyperparameters=hps,
    )
    oracle._set_project_dir(tmp_dir, "untitled")
    tuner_id = os.environ["KERASTUNER_TUNER_ID"]
    if "chief" in tuner_id:
        oracle_chief.start_server(oracle)
    else:
        client = oracle_client.OracleClient(oracle)
        trial = client.create_trial(tuner_id)
        assert trial.status == "RUNNING"
        a = trial.hyperparameters.get("a")
        assert 0 <= a <= 10
        b = trial.hyperparameters.get("b")
        assert b in {1, 2, 3}
Example #25
def test_hyperband_oracle_bracket_configs(tmp_dir):
    oracle = hyperband_module.HyperbandOracle(
        objective=kt.Objective("score", "max"),
        hyperband_iterations=1,
        max_epochs=8,
        factor=2,
    )
    oracle._set_project_dir(tmp_dir, "untitled")

    # 8, 4, 2, 1 starting epochs.
    assert oracle._get_num_brackets() == 4

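    # Largest bracket: sizes 8 -> 4 -> 2 -> 1 with epochs 1 -> 2 -> 4 -> 8.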
    assert oracle._get_num_rounds(bracket_num=3) == 4
    assert oracle._get_size(bracket_num=3, round_num=0) == 8
    assert oracle._get_epochs(bracket_num=3, round_num=0) == 1
    assert oracle._get_size(bracket_num=3, round_num=3) == 1
    assert oracle._get_epochs(bracket_num=3, round_num=3) == 8

    assert oracle._get_num_rounds(bracket_num=0) == 1
    assert oracle._get_size(bracket_num=0, round_num=0) == 4
    assert oracle._get_epochs(bracket_num=0, round_num=0) == 8
Example #26
def _test_update_trial():
    hps = kt.HyperParameters()
    hps.Int("a", 0, 10, default=5)
    oracle = randomsearch.RandomSearchOracle(
        objective=kt.Objective("score", "max"),
        max_trials=10,
        hyperparameters=hps,
    )
    oracle._set_project_dir(tmp_dir, "untitled")
    tuner_id = os.environ["KERASTUNER_TUNER_ID"]
    if "chief" in tuner_id:
        oracle_chief.start_server(oracle)
    else:
        client = oracle_client.OracleClient(oracle)
        trial = client.create_trial(tuner_id)
        assert "score" not in trial.metrics.metrics
        trial_id = trial.trial_id
        client.update_trial(trial_id, {"score": 1}, step=2)
        updated_trial = client.get_trial(trial_id)
        assert updated_trial.metrics.get_history("score") == [
            metrics_tracking.MetricObservation([1], step=2)
        ]
Example #27
def test_step_respected(tmp_dir):
    hps = hp_module.HyperParameters()
    hps.Float("c", 0, 10, step=3)
    oracle = bo_module.BayesianOptimizationOracle(
        objective=kt.Objective("score", direction="max"),
        max_trials=20,
        hyperparameters=hps,
        num_initial_points=2,
    )
    oracle._set_project_dir(tmp_dir, "untitled")

    # Populate initial trials.
    for i in range(10):
        trial = trial_module.Trial(hyperparameters=hps.copy())
        trial.hyperparameters.values["c"] = 3.0
        trial.score = i
        trial.status = "COMPLETED"
        oracle.trials[trial.trial_id] = trial

    trial = oracle.create_trial("tuner0")
    # Check that oracle respects the `step` param.
    assert trial.hyperparameters.get("c") in {0, 3, 6, 9}
Example #28
def test_simple_sklearn_tuner(tmp_dir):
    class SimpleSklearnTuner(base_tuner.BaseTuner):
        def run_trial(self, trial, x, y, validation_data):
            model = self.hypermodel.build(trial.hyperparameters)
            model.fit(x, y)
            x_val, y_val = validation_data
            score = model.score(x_val, y_val)
            self.oracle.update_trial(trial.trial_id, {"score": score})
            self.save_model(trial.trial_id, model)

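        # Persist each trial's model with pickle so load_model can restore it.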
        def save_model(self, trial_id, model, step=0):
            fname = os.path.join(self.get_trial_dir(trial_id), "model.pickle")
            with open(fname, "wb") as f:
                pickle.dump(model, f)

        def load_model(self, trial):
            fname = os.path.join(self.get_trial_dir(trial.trial_id),
                                 "model.pickle")
            with open(fname, "rb") as f:
                return pickle.load(f)

    def sklearn_build_fn(hp):
        c = hp.Float("c", 1e-4, 10)
        return linear_model.LogisticRegression(C=c)

    tuner = SimpleSklearnTuner(
        oracle=keras_tuner.tuners.randomsearch.RandomSearchOracle(
            objective=keras_tuner.Objective("score", "max"), max_trials=2),
        hypermodel=sklearn_build_fn,
        directory=tmp_dir,
    )
    tuner.search(TRAIN_INPUTS,
                 TRAIN_TARGETS,
                 validation_data=(VAL_INPUTS, VAL_TARGETS))
    models = tuner.get_best_models(2)
    score0 = models[0].score(VAL_INPUTS, VAL_TARGETS)
    score1 = models[1].score(VAL_INPUTS, VAL_TARGETS)
    assert score0 >= score1
Example #29
def test_float_optimization(tmp_dir):
    def build_model(hp):
        # Maximum at a=-1, b=1, c=1, d=0 with score=3
        return -1 * hp["a"]**3 + hp["b"]**3 + hp["c"] - abs(hp["d"])

    class PolynomialTuner(kt.engine.base_tuner.BaseTuner):
        def run_trial(self, trial):
            hps = trial.hyperparameters
            score = self.hypermodel.build(hps)
            self.oracle.update_trial(trial.trial_id, {"score": score})

    hps = hp_module.HyperParameters()
    hps.Float("a", -1, 1)
    hps.Float("b", -1, 1)
    hps.Float("c", -1, 1)
    hps.Float("d", -1, 1)

    tuner = PolynomialTuner(
        hypermodel=build_model,
        oracle=kt.oracles.BayesianOptimization(
            objective=kt.Objective("score", "max"),
            hyperparameters=hps,
            max_trials=50,
        ),
        directory=tmp_dir,
    )

    tuner.search()

    atol, rtol = 1e-1, 1e-1
    best_trial = tuner.oracle.get_best_trials()[0]
    best_hps = best_trial.hyperparameters

    assert np.isclose(best_trial.score, 3, atol=atol, rtol=rtol)
    assert np.isclose(best_hps["a"], -1, atol=atol, rtol=rtol)
    assert np.isclose(best_hps["b"], 1, atol=atol, rtol=rtol)
    assert np.isclose(best_hps["c"], 1, atol=atol, rtol=rtol)
    assert np.isclose(best_hps["d"], 0, atol=atol, rtol=rtol)
Example #30
def test_hyperband_oracle_one_sweep_single_thread(tmp_dir):
    hp = kt.HyperParameters()
    hp.Float("a", -100, 100)
    hp.Float("b", -100, 100)
    oracle = hyperband_module.HyperbandOracle(
        hyperparameters=hp,
        objective=kt.Objective("score", "max"),
        hyperband_iterations=1,
        max_epochs=9,
        factor=3,
    )
    oracle._set_project_dir(tmp_dir, "untitled")

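    # Walk every bracket, round, and model slot in a single sweep.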
    score = 0
    for bracket_num in reversed(range(oracle._get_num_brackets())):
        for round_num in range(oracle._get_num_rounds(bracket_num)):
            for model_num in range(oracle._get_size(bracket_num, round_num)):
                trial = oracle.create_trial("tuner0")
                assert trial.status == "RUNNING"
                score += 1
                oracle.update_trial(trial.trial_id, {"score": score})
                oracle.end_trial(trial.trial_id, status="COMPLETED")
            assert len(oracle._brackets[0]["rounds"][round_num]) == oracle._get_size(
                bracket_num, round_num
            )
        assert len(oracle._brackets) == 1

    # Iteration should now be complete.
    trial = oracle.create_trial("tuner0")
    assert trial.status == "STOPPED", oracle.hyperband_iterations
    assert len(oracle.ongoing_trials) == 0

    # Brackets should all be finished and removed.
    assert len(oracle._brackets) == 0

    best_trial = oracle.get_best_trials()[0]
    assert best_trial.score == score