# Example #1: hyperopt conditional search space with hp.choice
    # Unpack the (case_label, value) tuple produced by the hp.choice space below.
    case, val = args
    if case == 'case 1':
        # 'case 1': the loss is the sampled value itself.
        return val
    else:
        # Any other label (here 'case 2'): the loss is the squared value.
        return val ** 2

# define a search space
# hp.choice picks one of the two (label, sub-space) tuples; the nested
# hp.lognormal / hp.uniform parameters are only sampled when their branch
# of the choice is selected (a conditional search space).
space = hpo.hp.choice('a',
    [
        ('case 1', 1 + hpo.hp.lognormal('c1', 0, 1)),
        ('case 2', hpo.hp.uniform('c2', -10, 10))
    ])

# minimize the objective over the space
# NOTE(review): nu_fmin is not defined in this chunk — presumably a wrapper
# around hyperopt's fmin; confirm its signature against its definition.
trials = hpo.Trials()
best = nu_fmin(objective, space, algo=hpo.tpe.suggest, max_evals=100, trials=trials)

print(best)
# print(best)
# # best hyperparameter values -> {'a': 1, 'c2': 0.01420615366247227}
# print(space_eval(space, best))
# # -> ('case 2', 0.01420615366247227}
# trials.best_trial['result']
# # -> best loss value
# print(trials.results[0]['loss'])
# # -> 1.3010621119448424
# print(trials.vals)
# # -> the sampled hyperparameter values
# print(trials.results)
# # -> the loss values
# print(trials.results[0]['loss'])
# Example #2: tuning a Keras model's epochs/batch_size with hyperopt
                  metrics=['acc'])

    # data setup: tiny slices so each evaluation runs quickly
    # NOTE(review): x_train/y_train/x_test/y_test come from outside this
    # chunk — confirm their shapes against the data-loading code.
    x_val = x_train[:2]
    partial_x_train = x_train[2:3]
    y_val = y_train[:2]
    partial_y_train = y_train[2:3]

    # training: epochs and batch_size are drawn from the hyperopt-sampled params dict
    history = model.fit(partial_x_train,
                        partial_y_train,
                        epochs=params['epochs'],
                        batch_size=params['batch_size'],
                        validation_data=(x_val, y_val))

    loss, acc = model.evaluate(x_test[:1], y_test[:1])

    # hyperopt minimizes 'loss', so return negated accuracy to maximize accuracy
    return {'loss': -acc, 'status': hpo.STATUS_OK}


trials = hpo.Trials()
# NOTE(review): unlike Example #1, "hello" is passed as the first positional
# argument before objective. If nu_fmin forwards its arguments to hyperopt's
# fmin(fn, space, algo, max_evals, trials), this shifts every argument by one
# and is a bug — confirm against nu_fmin's definition.
best = nu_fmin("hello",
               objective,
               space,
               algo=hpo.tpe.suggest,
               max_evals=50,
               trials=trials)
print("====================hps====================")
print(trials.vals)
print("====================best===================")
print(best)