Example no. 1
0
def test_bayesian_save_reload(tmp_dir):
    hps = hp_module.HyperParameters()
    hps.Choice("a", [1, 2], default=1)
    hps.Choice("b", [3, 4], default=3)
    hps.Choice("c", [5, 6], default=5)
    hps.Choice("d", [7, 8], default=7)
    hps.Choice("e", [9, 0], default=9)
    oracle = bo_module.BayesianOptimizationOracle(
        objective=kt.Objective("score", "max"), max_trials=20, hyperparameters=hps
    )
    oracle._set_project_dir(tmp_dir, "untitled")

    for _ in range(3):
        trial = oracle.create_trial("tuner_id")
        oracle.update_trial(trial.trial_id, {"score": 1.0})
        oracle.end_trial(trial.trial_id, "COMPLETED")

    oracle.save()
    oracle = bo_module.BayesianOptimizationOracle(
        objective=kt.Objective("score", "max"), max_trials=20, hyperparameters=hps
    )
    oracle._set_project_dir(tmp_dir, "untitled")
    oracle.reload()

    for trial_id in range(3):
        trial = oracle.create_trial("tuner_id")
        oracle.update_trial(trial.trial_id, {"score": 1.0})
        oracle.end_trial(trial.trial_id, "COMPLETED")

    assert len(oracle.trials) == 6
Example no. 2
0
    def tuner_objective(self) -> kerastuner.Objective:
        """Returns the target objective of the tuner."""

        if self.num_classes == 2:
            return kerastuner.Objective('val_auc', 'max')
        else:
            return kerastuner.Objective('val_accuracy', 'max')
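
For these objective names to resolve, the model compiled by this class has to log matching metrics ('auc'/'val_auc' and 'accuracy'/'val_accuracy'). A minimal sketch of such a compile step, assuming a plain Keras binary classifier (the model itself is not part of the original snippet):

import tensorflow as tf

# Hypothetical compile call: naming the AUC metric 'auc' makes Keras report
# 'val_auc' on validation data, which is what the Objective above refers to.
model = tf.keras.Sequential([tf.keras.layers.Dense(1, activation='sigmoid')])
model.compile(
    optimizer='adam',
    loss='binary_crossentropy',
    metrics=[tf.keras.metrics.AUC(name='auc'), 'accuracy'],
)
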
Example no. 3
0
 def tuner_compile(self):
     if self.verbose: print('Compiling kerastuner trainer...')
     def tunable_model(hp):
         self.model.compile(optimizer=self.optimizer['tuner'](hp),
                            loss=self.loss,
                            metrics=self._basic_metrics())
         return self.model  
     
     if self.FLAGS['tune']['by_metric'] == 'acc':
         objective = kerastuner.Objective("val_probs_acc", direction="max")
     elif self.FLAGS['tune']['by_metric'] == 'ece':
         objective = kerastuner.Objective("val_probs_ece", direction="min")
     elif 'val' in self.FLAGS['tune']['by_metric']:
         obj_name = self.FLAGS['tune']['by_metric']
         # ECE (calibration error) is minimized; any other validation metric is
         # assumed to be maximized so that `direction` is always defined.
         direction = "min" if 'ece' in obj_name else "max"
         objective = kerastuner.Objective(obj_name, direction=direction)
                     
     else:
         bymetric = self.FLAGS['tune']['by_metric']
         raise ValueError(f'unknown by_metric={bymetric}')
     
     self.tuner = kerastuner.tuners.RandomSearch(tunable_model,
                                                 objective=objective,
                                                 max_trials=self.FLAGS['tune']['num_trials'],
                                                 executions_per_trial=1,
                                                 directory=self.FLAGS['tune']['dir'],
                                                 project_name=self.FLAGS['tune']['subdir'])
Example no. 4
0
def test_bayesian_save_reload(tmp_dir):
    hps = hp_module.HyperParameters()
    hps.Choice('a', [1, 2], default=1)
    hps.Choice('b', [3, 4], default=3)
    hps.Choice('c', [5, 6], default=5)
    hps.Choice('d', [7, 8], default=7)
    hps.Choice('e', [9, 0], default=9)
    oracle = bo_module.BayesianOptimizationOracle(objective=kt.Objective(
        'score', 'max'),
                                                  max_trials=20,
                                                  hyperparameters=hps)
    oracle._set_project_dir(tmp_dir, 'untitled')

    for _ in range(3):
        trial = oracle.create_trial('tuner_id')
        oracle.update_trial(trial.trial_id, {'score': 1.})
        oracle.end_trial(trial.trial_id, "COMPLETED")

    oracle.save()
    oracle = bo_module.BayesianOptimizationOracle(objective=kt.Objective(
        'score', 'max'),
                                                  max_trials=20,
                                                  hyperparameters=hps)
    oracle._set_project_dir(tmp_dir, 'untitled')
    oracle.reload()

    for trial_id in range(3):
        trial = oracle.create_trial('tuner_id')
        oracle.update_trial(trial.trial_id, {'score': 1.})
        oracle.end_trial(trial.trial_id, "COMPLETED")

    assert len(oracle.trials) == 6
Example no. 5
0
def get_tuner(model_builder):
    global exp_id
    try:
      shutil.rmtree(f'hp_tuning_{exp_id}')
    except FileNotFoundError:
      pass
    return kt.Hyperband(model_builder,
                        objective=kt.Objective('val_AuPR', direction='max'),
                        max_epochs=epochs,
                        executions_per_trial=3,
                        directory=f'hp_tuning_{exp_id}',
                        project_name='initial_model')
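
'val_AuPR' is not a built-in Keras metric name, so the model_builder passed in here presumably compiles an AUC metric under that custom name. A minimal sketch of such a builder, assuming a binary classifier (layer sizes, loss, and the tuned hyperparameter are illustrative, not from the original):

import tensorflow as tf

def model_builder(hp):
    # Hypothetical hypermodel: naming the PR-AUC metric 'AuPR' makes Keras log
    # 'val_AuPR', which matches the Hyperband objective above.
    model = tf.keras.Sequential([
        tf.keras.layers.Dense(hp.Int('units', 16, 128, step=16), activation='relu'),
        tf.keras.layers.Dense(1, activation='sigmoid'),
    ])
    model.compile(optimizer='adam',
                  loss='binary_crossentropy',
                  metrics=[tf.keras.metrics.AUC(curve='PR', name='AuPR')])
    return model
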
Example no. 6
0
def test_metric_direction_inferred_from_objective(tmp_dir):
    oracle = kerastuner.tuners.randomsearch.RandomSearchOracle(
        objective=kerastuner.Objective('a', 'max'), max_trials=1)
    oracle._set_project_dir(tmp_dir, 'untitled_project')
    trial = oracle.create_trial('tuner0')
    oracle.update_trial(trial.trial_id, {'a': 1})
    trial = oracle.get_trial(trial.trial_id)
    assert trial.metrics.get_direction('a') == 'max'

    oracle = kerastuner.tuners.randomsearch.RandomSearchOracle(
        objective=kerastuner.Objective('a', 'min'), max_trials=1)
    oracle._set_project_dir(tmp_dir, 'untitled_project2')
    trial = oracle.create_trial('tuner0')
    oracle.update_trial(trial.trial_id, {'a': 1})
    trial = oracle.get_trial(trial.trial_id)
    assert trial.metrics.get_direction('a') == 'min'
Example no. 7
0
    def _test_base_tuner():
        def build_model(hp):
            return hp.Int("a", 1, 100)

        tuner = SimpleTuner(
            oracle=kt.oracles.RandomSearch(objective=kt.Objective(
                "score", "max"),
                                           max_trials=10),
            hypermodel=build_model,
            directory=tmp_dir,
        )
        tuner.search()

        # Only the worker makes it to this point; the server runs until the thread stops.
        assert dist_utils.has_chief_oracle()
        assert not dist_utils.is_chief_oracle()
        assert isinstance(tuner.oracle,
                          kt.distribute.oracle_client.OracleClient)

        barrier.wait(60)

        # Model is just a score.
        scores = tuner.get_best_models(10)
        assert len(scores)
        assert scores == sorted(copy.copy(scores), reverse=True)
Example no. 8
0
def test_float_optimization(tmp_dir):
    def build_model(hp):
        # Maximum at a=-1, b=1, c=1, d=0 with score=3
        return -1 * hp['a']**3 + hp['b']**3 + hp['c'] - abs(hp['d'])

    class PolynomialTuner(kt.engine.base_tuner.BaseTuner):
        def run_trial(self, trial):
            hps = trial.hyperparameters
            score = self.hypermodel.build(hps)
            self.oracle.update_trial(trial.trial_id, {'score': score})

    hps = hp_module.HyperParameters()
    hps.Float('a', -1, 1)
    hps.Float('b', -1, 1)
    hps.Float('c', -1, 1)
    hps.Float('d', -1, 1)

    tuner = PolynomialTuner(hypermodel=build_model,
                            oracle=kt.oracles.BayesianOptimization(
                                objective=kt.Objective('score', 'max'),
                                hyperparameters=hps,
                                max_trials=50),
                            directory=tmp_dir)

    tuner.search()

    atol, rtol = 1e-2, 1e-2
    best_trial = tuner.oracle.get_best_trials()[0]
    best_hps = best_trial.hyperparameters

    assert np.isclose(best_trial.score, 3, atol=atol, rtol=rtol)
    assert np.isclose(best_hps['a'], -1, atol=atol, rtol=rtol)
    assert np.isclose(best_hps['b'], 1, atol=atol, rtol=rtol)
    assert np.isclose(best_hps['c'], 1, atol=atol, rtol=rtol)
    assert np.isclose(best_hps['d'], 0, atol=atol, rtol=rtol)
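
As a quick sanity check on the comment inside build_model, the claimed optimum does evaluate to a score of 3:

# Plugging a=-1, b=1, c=1, d=0 into the polynomial above:
assert -1 * (-1) ** 3 + 1 ** 3 + 1 - abs(0) == 3
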
Example no. 9
0
def test_bayesian_oracle_maximize(tmp_dir):
    hps = hp_module.HyperParameters()
    hps.Int('a', -100, 100)

    oracle = bo_module.BayesianOptimizationOracle(objective=kt.Objective(
        'score', direction='max'),
                                                  max_trials=20,
                                                  hyperparameters=hps,
                                                  num_initial_points=2)
    oracle._set_project_dir(tmp_dir, 'untitled')

    # Make examples with high 'a' and high score.
    for i in range(5):
        trial = trial_module.Trial(hyperparameters=hps.copy())
        trial.hyperparameters.values['a'] = 10 * i
        trial.score = i
        trial.status = 'COMPLETED'
        oracle.trials[trial.trial_id] = trial

    # Make examples with low 'a' and low score
    for i in range(5):
        trial = trial_module.Trial(hyperparameters=hps.copy())
        trial.hyperparameters.values['a'] = -10 * i
        trial.score = -i
        trial.status = 'COMPLETED'
        oracle.trials[trial.trial_id] = trial

    trial = oracle.create_trial('tuner0')
    assert trial.status == 'RUNNING'
    # Assert that the oracle suggests hps it thinks will maximize.
    assert trial.hyperparameters.get('a') > 0
Example no. 10
0
def test_sklearn_cv_with_groups(tmp_dir):
    tuner = kt.SklearnTuner(
        oracle=kt.oracles.BayesianOptimization(objective=kt.Objective(
            "score", "max"),
                                               max_trials=10),
        hypermodel=build_model,
        cv=model_selection.GroupKFold(5),
        directory=tmp_dir,
    )

    x = np.random.uniform(size=(50, 10))
    y = np.random.randint(0, 2, size=(50, ))
    groups = np.random.randint(0, 5, size=(50, ))
    tuner.search(x, y, groups=groups)

    assert len(tuner.oracle.trials) == 10

    best_trial = tuner.oracle.get_best_trials()[0]
    assert best_trial.status == "COMPLETED"
    assert best_trial.score is not None
    assert best_trial.best_step == 0
    assert best_trial.metrics.exists("score")

    # Make sure best model can be reloaded.
    best_model = tuner.get_best_models()[0]
    best_model.score(x, y)
Example no. 11
0
def test_sklearn_pipeline(tmp_dir):
    tuner = kt.SklearnTuner(
        oracle=kt.oracles.BayesianOptimization(objective=kt.Objective(
            "score", "max"),
                                               max_trials=10),
        hypermodel=build_pipeline,
        directory=tmp_dir,
    )

    x = np.random.uniform(size=(50, 10))
    y = np.random.randint(0, 2, size=(50, ))
    sample_weight = np.random.uniform(0.1, 1, size=(50, ))
    tuner.search(x, y, sample_weight=sample_weight)

    assert len(tuner.oracle.trials) == 10

    best_trial = tuner.oracle.get_best_trials()[0]
    assert best_trial.status == "COMPLETED"
    assert best_trial.score is not None
    assert best_trial.best_step == 0
    assert best_trial.metrics.exists("score")

    # Make sure best pipeline can be reloaded.
    best_pipeline = tuner.get_best_models()[0]
    best_pipeline.score(x, y)
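
The sklearn tests above rely on build_model and build_pipeline hypermodels defined elsewhere in the test module. A minimal sketch of what such hypermodels could look like; the estimator choices and hyperparameter ranges are assumptions, not the originals:

from sklearn import ensemble, pipeline, preprocessing

def build_model(hp):
    # Hypothetical hypermodel returning a plain sklearn estimator.
    return ensemble.RandomForestClassifier(
        n_estimators=hp.Int('n_estimators', 10, 50, step=10),
        max_depth=hp.Int('max_depth', 3, 10),
    )

def build_pipeline(hp):
    # Hypothetical hypermodel returning a sklearn Pipeline; the tuner accepts
    # anything that implements fit/predict/score.
    return pipeline.Pipeline([
        ('scale', preprocessing.StandardScaler()),
        ('clf', build_model(hp)),
    ])
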
Example no. 12
0
def launch_tuner(max_trial, idata, experiment_name, nn_type):
    log_file = experiment_name + '/log_tune_model.log'
    log_file = open(log_file, "a")
    sys.stdout = log_file

    tf.debugging.set_log_device_placement(False)
    strategy = tf.distribute.MirroredStrategy()
    print('Number of devices: {}'.format(strategy.num_replicas_in_sync))

    with strategy.scope():
        hp = kt.HyperParameters()
        hp.Int('len_slice', 50, 1000, 100, default=200)
        hp.Int('shift', 10, 500, 10, default=50)

        if nn_type == 'AlexNet':
            hp.Int('batch_size', 10, 70, 10, default=30)
            build_model = build_model_alexnet
        elif nn_type == 'CNN2':
            hp.Int('batch_size', 10, 70, 10, default=30)
            build_model = build_model_cnntwo
        elif nn_type == 'ConvRNN':
            hp.Int('batch_size', 10, 70, 10, default=30)
            build_model = build_model_convrnn
        elif nn_type == 'ResNet':
            hp.Int('batch_size', 2, 20, 2, default=10)
            build_model = build_model_resnet
        elif nn_type == 'CNNConv2D':
            hp.Int('batch_size', 10, 70, 10, default=30)
            build_model = build_model_cnnconvtwod
        else:
            raise ValueError(f'Unknown model type: {nn_type}')

    tuner_rs = MyTuner(oracle=kt.oracles.BayesianOptimization(
        objective=kt.Objective('val_acc', 'max'),
        max_trials=max_trial,
        hyperparameters=hp),
                       hypermodel=build_model,
                       overwrite=True)

    tuner_rs.search_space_summary()

    tuner_rs.search(idata=idata)

    tuner_rs.results_summary()

    best_model = tuner_rs.get_best_models(num_models=1)[0]
    model_file = experiment_name + '/tuned_model'
    best_model.save(model_file)
    file_model_graph = experiment_name + '/' + nn_type + '_graph.png'
    plot_model(best_model, show_shapes=True, to_file=file_model_graph)

    results = tuner_rs.oracle.get_best_trials(
        num_trials=1)[0].hyperparameters.values
    len_slice = results['len_slice']
    shift = results['shift']

    X_train, X_test, y_train, y_test, input_shape, nb_slice = data_preparation_nn(
        idata, len_slice, shift)
    test_metrics(best_model, y_test, X_test, experiment_name)
    log_file.close()
Example no. 13
0
def test_base_tuner(tmp_dir):
    class MyTuner(base_tuner.BaseTuner):
        def run_trial(self, trial, x):
            model = self.hypermodel.build(trial.hyperparameters)
            self.oracle.update_space(trial.hyperparameters)
            score = model(x)
            self.oracle.update_trial(trial.trial_id, metrics={'score': score})

        def get_best_models(self, num_models=1):
            best_trials = self.oracle.get_best_trials(num_models)
            models = [
                self.hypermodel.build(t.hyperparameters) for t in best_trials
            ]
            return models

    def build_model(hp):
        class MyModel(object):
            def __init__(self):
                self.factor = hp.Float('a', 0, 10)

            def __call__(self, x):
                return self.factor * x

        return MyModel()

    oracle = kerastuner.tuners.randomsearch.RandomSearchOracle(
        objective=kerastuner.Objective('score', 'max'), max_trials=5)
    tuner = MyTuner(oracle=oracle, hypermodel=build_model, directory=tmp_dir)
    tuner.search(1.0)
    models = tuner.get_best_models(5)

    # Check that scoring of the model was done correctly.
    models_by_factor = sorted(models, key=lambda m: m.factor, reverse=True)
    assert models[0] == models_by_factor[0]
Example no. 14
0
 def tuneHP(self, hyperModel, X_train, X_test, y_train, y_test, tuner_epochs=50, tuner_batch_size=10000, tuner_mode=0):
     if tuner_mode == 0:
         tuner = kt.Hyperband(hyperModel,
                              objective=kt.Objective("auc", direction="max"),  # ['loss', 'auc', 'accuracy', 'val_loss', 'val_auc', 'val_accuracy']
                              max_epochs=200,
                              hyperband_iterations=3,
                              factor=3,
                              seed=seed_value,
                              directory='tuning',
                              project_name='model_hyperband_1',
                              overwrite=True)
     elif tuner_mode == 1:
         tuner = kt.BayesianOptimization(hyperModel,
                                         objective='val_loss',
                                         max_trials=100,
                                         seed=seed_value,
                                         directory='tuning',
                                         project_name='model_bayesian_1',
                                         overwrite=True)
     elif tuner_mode == 2:
         tuner = kt.RandomSearch(hyperModel,
                                 objective='val_loss',
                                 max_trials=1000,
                                 seed=seed_value,
                                 directory='tuning',
                                 project_name='model_random_1',
                                 overwrite=True)
     else:
         raise ValueError('Invalid tuner mode')
     tuner.search(X_train, y_train, epochs=tuner_epochs, batch_size=tuner_batch_size, validation_data=(X_test, y_test), verbose=0)
     # tuner.search(X_train, y_train, epochs=tuner_epochs, validation_data=(X_test, y_test), verbose=1)
     best_hps = tuner.get_best_hyperparameters(num_trials=1)[0]
     tuner.search_space_summary()
     return best_hps, tuner
Example no. 15
0
def tuner_fn(fn_args: FnArgs) -> TunerFnResult:
    train_files = fn_args.train_files
    eval_files = fn_args.eval_files

    tf_transform_output = tft.TFTransformOutput(fn_args.transform_graph_path)

    hparams = _get_hyperparameters()

    tuner = kerastuner.Hyperband(hypermodel=_build_keras_model,
                                 hyperparameters=hparams,
                                 objective=kerastuner.Objective(
                                     'binary_accuracy', 'max'),
                                 factor=3,
                                 max_epochs=2,
                                 directory=fn_args.working_dir,
                                 project_name='ftfx:simple_e2e')

    train_dataset = _input_fn(train_files, tf_transform_output)
    eval_dataset = _input_fn(eval_files, tf_transform_output)

    return TunerFnResult(tuner=tuner,
                         fit_kwargs={
                             'x': train_dataset,
                             'validation_data': eval_dataset,
                             'steps_per_epoch': fn_args.train_steps,
                             'validation_steps': fn_args.eval_steps
                         })
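
_get_hyperparameters() is not shown in this snippet; in TFX tuner examples it usually returns a pre-declared search space that the tuner then samples from. A minimal sketch of such a helper, with hyperparameter names and ranges that are assumptions rather than the originals:

import kerastuner

def _get_hyperparameters() -> kerastuner.HyperParameters:
    # Hypothetical search space handed to the Hyperband tuner above.
    hp = kerastuner.HyperParameters()
    hp.Choice('learning_rate', [1e-2, 1e-3, 1e-4], default=1e-3)
    hp.Int('num_layers', 1, 3, default=2)
    return hp
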
Example no. 16
0
def test_sklearn_custom_scoring_and_cv(tmp_dir):
    tuner = sklearn_tuner.Sklearn(
        oracle=kt.oracles.BayesianOptimization(objective=kt.Objective(
            'score', 'max'),
                                               max_trials=10),
        hypermodel=build_model,
        scoring=metrics.make_scorer(metrics.balanced_accuracy_score),
        cv=model_selection.StratifiedKFold(5),
        directory=tmp_dir)

    x = np.random.uniform(size=(50, 10))
    y = np.random.randint(0, 2, size=(50, ))
    tuner.search(x, y)

    assert len(tuner.oracle.trials) == 10

    best_trial = tuner.oracle.get_best_trials()[0]
    assert best_trial.status == 'COMPLETED'
    assert best_trial.score is not None
    assert best_trial.best_step == 0
    assert best_trial.metrics.exists('score')

    # Make sure best model can be reloaded.
    best_model = tuner.get_best_models()[0]
    best_model.score(x, y)
Example no. 17
0
def test_sklearn_additional_metrics(tmp_dir):
    tuner = sklearn_tuner.Sklearn(
        oracle=kt.oracles.BayesianOptimization(objective=kt.Objective(
            'score', 'max'),
                                               max_trials=10),
        hypermodel=build_model,
        metrics=[metrics.balanced_accuracy_score, metrics.recall_score],
        directory=tmp_dir)

    x = np.random.uniform(size=(50, 10))
    y = np.random.randint(0, 2, size=(50, ))
    tuner.search(x, y)

    assert len(tuner.oracle.trials) == 10

    best_trial = tuner.oracle.get_best_trials()[0]
    assert best_trial.status == 'COMPLETED'
    assert best_trial.score is not None
    assert best_trial.best_step == 0
    assert best_trial.metrics.exists('score')
    assert best_trial.metrics.exists('balanced_accuracy_score')
    assert best_trial.metrics.exists('recall_score')

    # Make sure best model can be reloaded.
    best_model = tuner.get_best_models()[0]
    best_model.score(x, y)
Example no. 18
0
def test_sklearn_real_data(tmp_dir):
    tuner = kt.SklearnTuner(
        oracle=kt.oracles.BayesianOptimization(objective=kt.Objective(
            "score", "max"),
                                               max_trials=10),
        hypermodel=build_model,
        scoring=metrics.make_scorer(metrics.accuracy_score),
        cv=model_selection.StratifiedKFold(5),
        directory=tmp_dir,
    )

    x, y = datasets.load_iris(return_X_y=True)
    x_train, x_test, y_train, y_test = model_selection.train_test_split(
        x, y, test_size=0.2)

    tuner.search(x_train, y_train)

    best_models = tuner.get_best_models(10)
    best_model = best_models[0]
    worst_model = best_models[9]
    best_model_score = best_model.score(x_test, y_test)
    worst_model_score = worst_model.score(x_test, y_test)

    assert best_model_score > 0.8
    assert best_model_score >= worst_model_score
Example no. 19
0
def test_hyperparameters_added(tmp_dir):
    hps = hp_module.HyperParameters()
    hps.Int('a', -100, 100)

    oracle = bo_module.BayesianOptimizationOracle(objective=kt.Objective(
        'score', direction='max'),
                                                  max_trials=20,
                                                  hyperparameters=hps,
                                                  num_initial_points=2)
    oracle._set_project_dir(tmp_dir, 'untitled')

    # Populate initial trials.
    for i in range(10):
        trial = trial_module.Trial(hyperparameters=hps.copy())
        trial.hyperparameters.values['a'] = 10 * i
        trial.score = i
        trial.status = 'COMPLETED'
        oracle.trials[trial.trial_id] = trial

    # Update the space.
    new_hps = hp_module.HyperParameters()
    new_hps.Float('b', 3.2, 6.4, step=0.2, default=3.6)
    new_hps.Boolean('c', default=True)
    oracle.update_space(new_hps)

    # Make a new trial; it should have 'b' and 'c' set.
    trial = oracle.create_trial('tuner0')
    assert trial.status == 'RUNNING'
    assert 'b' in trial.hyperparameters.values
    assert 'c' in trial.hyperparameters.values
Example no. 20
0
 def _test_get_best_trials():
     hps = kt.HyperParameters()
     hps.Int('a', 0, 100, default=5)
     hps.Int('b', 0, 100, default=6)
     oracle = randomsearch.RandomSearchOracle(objective=kt.Objective(
         'score', direction='max'),
                                              max_trials=10,
                                              hyperparameters=hps)
     oracle._set_project_dir(tmp_dir, 'untitled')
     tuner_id = os.environ['KERASTUNER_TUNER_ID']
     if 'chief' in tuner_id:
         oracle_chief.start_server(oracle)
     else:
         client = oracle_client.OracleClient(oracle)
         trial_scores = {}
         for score in range(10):
             trial = client.create_trial(tuner_id)
             assert trial.status == "RUNNING"
             assert 'a' in trial.hyperparameters.values
             assert 'b' in trial.hyperparameters.values
             trial_id = trial.trial_id
             client.update_trial(trial_id, {'score': score})
             client.end_trial(trial_id)
             trial_scores[trial_id] = score
         best_trials = client.get_best_trials(3)
         best_scores = [t.score for t in best_trials]
         assert best_scores == [9, 8, 7]
         # Check that trial_ids are correctly mapped to scores.
         for t in best_trials:
             assert trial_scores[t.trial_id] == t.score
Example no. 21
0
def test_hyperparameters_added(tmp_dir):
    hps = hp_module.HyperParameters()
    hps.Int("a", -100, 100)

    oracle = bo_module.BayesianOptimizationOracle(
        objective=kt.Objective("score", direction="max"),
        max_trials=20,
        hyperparameters=hps,
        num_initial_points=2,
    )
    oracle._set_project_dir(tmp_dir, "untitled")

    # Populate initial trials.
    for i in range(10):
        trial = trial_module.Trial(hyperparameters=hps.copy())
        trial.hyperparameters.values["a"] = 10 * i
        trial.score = i
        trial.status = "COMPLETED"
        oracle.trials[trial.trial_id] = trial

    # Update the space.
    new_hps = hp_module.HyperParameters()
    new_hps.Float("b", 3.2, 6.4, step=0.2, default=3.6)
    new_hps.Boolean("c", default=True)
    oracle.update_space(new_hps)

    # Make a new trial; it should have "b" and "c" set.
    trial = oracle.create_trial("tuner0")
    assert trial.status == "RUNNING"
    assert "b" in trial.hyperparameters.values
    assert "c" in trial.hyperparameters.values
Example no. 22
0
def test_metric_direction_inferred_from_objective(tmp_dir):
    oracle = kerastuner.tuners.randomsearch.RandomSearchOracle(
        objective=kerastuner.Objective("a", "max"), max_trials=1)
    oracle._set_project_dir(tmp_dir, "untitled_project")
    trial = oracle.create_trial("tuner0")
    oracle.update_trial(trial.trial_id, {"a": 1})
    trial = oracle.get_trial(trial.trial_id)
    assert trial.metrics.get_direction("a") == "max"

    oracle = kerastuner.tuners.randomsearch.RandomSearchOracle(
        objective=kerastuner.Objective("a", "min"), max_trials=1)
    oracle._set_project_dir(tmp_dir, "untitled_project2")
    trial = oracle.create_trial("tuner0")
    oracle.update_trial(trial.trial_id, {"a": 1})
    trial = oracle.get_trial(trial.trial_id)
    assert trial.metrics.get_direction("a") == "min"
Example no. 23
0
def test_bayesian_oracle_maximize(tmp_dir):
    hps = hp_module.HyperParameters()
    hps.Int("a", -100, 100)

    oracle = bo_module.BayesianOptimizationOracle(
        objective=kt.Objective("score", direction="max"),
        max_trials=20,
        hyperparameters=hps,
        num_initial_points=2,
    )
    oracle._set_project_dir(tmp_dir, "untitled")

    # Make examples with high 'a' and high score.
    for i in range(5):
        trial = trial_module.Trial(hyperparameters=hps.copy())
        trial.hyperparameters.values["a"] = 10 * i
        trial.score = i
        trial.status = "COMPLETED"
        oracle.trials[trial.trial_id] = trial

    # Make examples with low 'a' and low score
    for i in range(5):
        trial = trial_module.Trial(hyperparameters=hps.copy())
        trial.hyperparameters.values["a"] = -10 * i
        trial.score = -i
        trial.status = "COMPLETED"
        oracle.trials[trial.trial_id] = trial

    trial = oracle.create_trial("tuner0")
    assert trial.status == "RUNNING"
    # Assert that the oracle suggests hps it thinks will maximize.
    assert trial.hyperparameters.get("a") > 0
Example no. 24
0
def tuner_fn(fn_args: FnArgs) -> TunerFnResult:
    """Build the tuner using the CloudTuner API.

  Args:
    fn_args: Holds args as name/value pairs. See
      https://www.tensorflow.org/tfx/api_docs/python/tfx/components/trainer/fn_args_utils/FnArgs.
      - transform_graph_path: optional transform graph produced by TFT.
      - custom_config: An optional dictionary passed to the component. In this
        example, it contains the dict ai_platform_tuning_args.
      - working_dir: working dir for tuning.
      - train_files: List of file paths containing training tf.Example data.
      - eval_files: List of file paths containing eval tf.Example data.
      - train_steps: number of train steps.
      - eval_steps: number of eval steps.

  Returns:
    A namedtuple containing the following:
      - tuner: A BaseTuner that will be used for tuning.
      - fit_kwargs: Args to pass to tuner's run_trial function for fitting the
                    model, e.g., the training and validation dataset. Required
                    args depend on the above tuner's implementation.
  """
    transform_graph = tft.TFTransformOutput(fn_args.transform_graph_path)

    # CloudTuner is a subclass of kerastuner.Tuner which inherits from
    # BaseTuner.
    tuner = CloudTuner(
        _build_keras_model,
        # The project/region configurations for Cloud Vizier service and its trial
        # executions. Note: this example uses the same configuration as the
        # CAIP Training service for distributed tuning flock management to view
        # all of the pipeline's jobs and resources in the same project. It can
        # also be configured separately.
        project_id=fn_args.custom_config['ai_platform_tuning_args']['project'],
        region=fn_args.custom_config['ai_platform_tuning_args']['region'],
        objective=kerastuner.Objective('val_sparse_categorical_accuracy',
                                       'max'),
        hyperparameters=_get_hyperparameters(),
        max_trials=8,  # Optional.
        directory=fn_args.working_dir)

    train_dataset = _input_fn(fn_args.train_files,
                              fn_args.data_accessor,
                              transform_graph,
                              batch_size=_TRAIN_BATCH_SIZE)

    eval_dataset = _input_fn(fn_args.eval_files,
                             fn_args.data_accessor,
                             transform_graph,
                             batch_size=_EVAL_BATCH_SIZE)

    return TunerFnResult(tuner=tuner,
                         fit_kwargs={
                             'x': train_dataset,
                             'validation_data': eval_dataset,
                             'steps_per_epoch': fn_args.train_steps,
                             'validation_steps': fn_args.eval_steps
                         })
Example no. 25
0
def test_sklearn_deprecation_warning(tmp_dir):
    with pytest.deprecated_call():
        kt.tuners.Sklearn(
            oracle=kt.oracles.BayesianOptimization(objective=kt.Objective(
                "score", "max"),
                                                   max_trials=10),
            hypermodel=build_model,
            directory=tmp_dir,
        )
Example no. 26
0
 def pretune(self):
     # instantiate Hyperband tuner
     self.tuner = kt.Hyperband(self.hyper_model,
                               objective=kt.Objective(self.objective,
                                                      direction='max'),
                               max_epochs=self.epochs,
                               factor=3,
                               directory=self.objective,
                               project_name='hypertune')
Example no. 27
0
def get_tuner(which_tuner, input_shape, exp_name: str):
    tuners = {
        'hyperband':
        Hyperband(HyperPhenomenet(input_shape),
                  objective=kerastuner.Objective("val_precision",
                                                 direction="max"),
                  directory='hyperband_' + exp_name,
                  project_name=exp_name,
                  max_epochs=100),
        'random_search':
        RandomSearch(HyperPhenomenet(input_shape),
                     objective=kerastuner.Objective("val_precision",
                                                    direction="max"),
                     directory='keras_tuner_' + exp_name,
                     project_name=exp_name,
                     max_trials=100)
    }
    return tuners[which_tuner]
Example no. 28
0
def try_net(params):
    """Run a random experiment for particular params and data.

    Arguments:
        params -- dictionary of parameters for experiment

    Returns:
        None

    Side effects:
        Changes params dict
        Saves files
    """
    # SET UP NETWORK
    deep_koop = net.DeepKoopmanHyperModel(params['input_dim'],
                                          params['len_time'],
                                          params['num_shifts'],
                                          params['delta_t'])

    tuner = kerastuner.tuners.Hyperband(
        deep_koop,
        objective=kerastuner.Objective("val_prediction_loss", direction="min"),
        max_epochs=params['num_passes_per_file'],
        directory=params['data_name'],
        project_name=params['folder_name'],
        executions_per_trial=3,
        seed=42,
    )

    data_train_tensor = helperfns.load_training_data(params['data_name'],
                                                     params['data_train_len'],
                                                     params['len_time'],
                                                     params['num_shifts'])
    data_val_tensor = helperfns.load_eval_data(params['data_name'],
                                               params['len_time'],
                                               params['num_shifts'])

    reduce_lr = tf.keras.callbacks.ReduceLROnPlateau(
        monitor='val_prediction_loss', patience=10)
    stop_early = tf.keras.callbacks.EarlyStopping(
        monitor='val_prediction_loss', patience=15, restore_best_weights=True)

    tuner.search_space_summary()

    tuner.search(
        x=data_train_tensor,
        y=data_train_tensor,
        validation_data=(data_val_tensor, data_val_tensor),
        shuffle=True,
        epochs=params['num_passes_per_file'],
        batch_size=params["batch_size"],
        callbacks=[reduce_lr, stop_early],
    )

    tuner.results_summary()
Example no. 29
0
def tuner_fn(fn_args: TrainerFnArgs) -> TunerFnResult:
    """Build the tuner using the KerasTuner API.
  Args:
    fn_args: Holds args as name/value pairs.
      - working_dir: working dir for tuning.
      - train_files: List of file paths containing training tf.Example data.
      - eval_files: List of file paths containing eval tf.Example data.
      - train_steps: number of train steps.
      - eval_steps: number of eval steps.
      - schema_path: optional schema of the input data.
      - transform_graph_path: optional transform graph produced by TFT.
  Returns:
    A namedtuple containing the following:
      - tuner: A BaseTuner that will be used for tuning.
      - fit_kwargs: Args to pass to tuner's run_trial function for fitting the
                    model, e.g., the training and validation dataset. Required
                    args depend on the above tuner's implementation.
  """
    transform_graph = tft.TFTransformOutput(fn_args.transform_graph_path)

    # Construct a build_keras_model_fn that just takes hyperparams from get_hyperparameters as input.
    build_keras_model_fn = functools.partial(
        _build_keras_model, tf_transform_output=transform_graph)

    # BayesianOptimization is a subclass of kerastuner.Tuner which inherits from BaseTuner.
    tuner = kerastuner.BayesianOptimization(
        build_keras_model_fn,
        max_trials=10,
        hyperparameters=_get_hyperparameters(),
        # New entries allowed for n_units hyperparameter construction conditional on n_layers selected.
        #       allow_new_entries=True,
        #       tune_new_entries=True,
        objective=kerastuner.Objective('val_sparse_categorical_accuracy',
                                       'max'),
        directory=fn_args.working_dir,
        project_name='covertype_tuning')

    train_dataset = _input_fn(fn_args.train_files,
                              fn_args.data_accessor,
                              transform_graph,
                              batch_size=TRAIN_BATCH_SIZE)

    eval_dataset = _input_fn(fn_args.eval_files,
                             fn_args.data_accessor,
                             transform_graph,
                             batch_size=EVAL_BATCH_SIZE)

    return TunerFnResult(tuner=tuner,
                         fit_kwargs={
                             'x': train_dataset,
                             'validation_data': eval_dataset,
                             'steps_per_epoch': fn_args.train_steps,
                             'validation_steps': fn_args.eval_steps
                         })
Example no. 30
0
def test_sklearn_not_install_error(tmp_dir):
    sklearn_module = sklearn_tuner.sklearn
    sklearn_tuner.sklearn = None
    with pytest.raises(ImportError, match="Please install sklearn"):
        sklearn_tuner.Sklearn(
            oracle=kt.oracles.BayesianOptimization(objective=kt.Objective(
                "score", "max"),
                                                   max_trials=10),
            hypermodel=build_model,
            directory=tmp_dir,
        )
    sklearn_tuner.sklearn = sklearn_module