示例#1
0
def test_1d():
    """GPyOpt should locate the global maximum (~4) of a 1-D test function."""
    def obj_func(x):
        # Global maximum of 4 is at x=4.
        envelope = 4. * numpy.exp(-(x - 4.)**2 / 10.)
        return envelope * numpy.cos(1.5 * (x - 4.))**2

    parameters = [sherpa.Continuous('x1', [0., 7.])]

    algorithm = GPyOpt(max_concurrent=1,
                       max_num_trials=50,
                       model_type='GP',
                       acquisition_type='EI')
    study = sherpa.Study(algorithm=algorithm,
                         parameters=parameters,
                         lower_is_better=False,
                         disable_dashboard=True)

    for trial in study:
        print("Trial {}:\t{}".format(trial.id, trial.parameters))
        fval = obj_func(trial.parameters['x1'])
        print("Function Value: {}".format(fval))
        study.add_observation(trial=trial, iteration=1, objective=fval)
        study.finalize(trial, status='COMPLETED')

    best = study.get_best_result()
    print(best)
    assert numpy.isclose(best['Objective'], 4., atol=0.2)
def test_bayes_opt():
    """Run GPyOpt wrapped in SequentialTesting on a noisy parabola."""
    def f(x, sd=1):
        # Parabola with minimum 10 at x=3, plus optional Gaussian noise.
        y = (x - 3) ** 2 + 10.
        if sd == 0:
            return y
        noise = numpy.random.normal(loc=0., scale=sd,
                                    size=numpy.array(x).shape)
        return y + noise

    parameters = [sherpa.Continuous('x', [1, 6])]

    inner = GPyOpt(max_num_trials=10)
    wrapper = SequentialTesting(algorithm=inner,
                                K=10,
                                n=(3, 6, 9),
                                P=0.5)
    study = sherpa.Study(algorithm=wrapper,
                         parameters=parameters,
                         lower_is_better=True,
                         disable_dashboard=True)

    for trial in study:
        study.add_observation(trial,
                              iteration=1,
                              objective=f(trial.parameters['x']))
        study.finalize(trial)
        print(study.results)
示例#3
0
def test_types_are_correct(parameters, results):
    """Suggested values should come back with their native Python types."""
    gpyopt = GPyOpt(max_concurrent=1)
    suggestion = gpyopt.get_suggestion(parameters, results, True)
    expected_types = {'dropout': float,
                      'lr': float,
                      'num_hidden': int,
                      'activation': str}
    for name, expected in expected_types.items():
        assert isinstance(suggestion[name], expected)
示例#4
0
def test_bayesopt_batch(parameters, results, transforms):
    """A batch request with max_concurrent=10 should yield a (10, 5) array."""
    gpyopt = GPyOpt(max_concurrent=10)
    search_space = gpyopt._initialize_domain(parameters, transforms)
    X, y = GPyOpt._prepare_data_for_bayes_opt(parameters, results, transforms)
    candidates = gpyopt._generate_bayesopt_batch(search_space, X, y,
                                                 lower_is_better=True)
    assert candidates.shape == (10, 5)
示例#5
0
def test_noisy_parabola():
    """GPyOpt wrapped in Repeat (with aggregation) on a noisy parabola."""
    def f(x, sd=1):
        # Minimum of 10 at x=3; Gaussian noise is added unless sd == 0.
        y = (x - 3)**2 + 10.
        if sd == 0:
            return y
        return y + numpy.random.normal(
            loc=0., scale=sd, size=numpy.array(x).shape)

    parameters = [sherpa.Continuous('x1', [0., 7.])]

    inner = GPyOpt(max_concurrent=1,
                   max_num_trials=5,
                   model_type='GP',
                   acquisition_type='EI')
    repeated = Repeat(algorithm=inner, num_times=3, agg=True)
    study = sherpa.Study(algorithm=repeated,
                         parameters=parameters,
                         lower_is_better=True,
                         disable_dashboard=True)

    for trial in study:
        fval = f(trial.parameters['x1'], sd=1)
        study.add_observation(trial=trial, iteration=1, objective=fval)
        study.finalize(trial, status='COMPLETED')

    print(study.results.query("Status=='COMPLETED'"))
示例#6
0
def test_1d_minimize():
    """GPyOpt seeded with explicit initial points should find the minimizer.

    The objective is the negated 1-D test function; its global minimum of
    -4 is at x=4, and the study runs with ``lower_is_better=True``.
    """
    def obj_func(x):
        # Global minimum of -4 is at x=4 (negated version of the
        # function used in test_1d).
        return -4. * numpy.exp(-(x - 4.)**2 / 10.) * numpy.cos(1.5 *
                                                               (x - 4.))**2

    parameters = [sherpa.Continuous('x1', [0., 7.])]

    # Seed the optimization with two explicit starting points so the GP
    # model has observations before acquisition-driven suggestions begin.
    bayesian_optimization = GPyOpt(max_concurrent=1,
                                   max_num_trials=12,
                                   model_type='GP',
                                   acquisition_type='EI',
                                   initial_data_points=[{'x1': 2},
                                                        {'x1': 5}],
                                   num_initial_data_points=2)

    study = sherpa.Study(algorithm=bayesian_optimization,
                         parameters=parameters,
                         lower_is_better=True,
                         disable_dashboard=True)

    for trial in study:
        print("Trial {}:\t{}".format(trial.id, trial.parameters))

        fval = obj_func(trial.parameters['x1'])
        print("Function Value: {}".format(fval))
        study.add_observation(trial=trial, iteration=1, objective=fval)
        study.finalize(trial, status='COMPLETED')
    rval = study.get_best_result()
    print(rval)

    assert numpy.isclose(rval['x1'], 4., atol=0.1)
示例#7
0
def test_overall():
    """get_suggestion should run cleanly over a growing results history."""
    gpyopt = GPyOpt(max_concurrent=1)
    (parameters, results,
     lower_is_better) = sherpa.algorithms.get_sample_results_and_params()

    for i in range(51):
        # Feed only the trials observed so far, simulating a live study.
        history = results.loc[results['Trial-ID'] < i, :]
        suggestion = gpyopt.get_suggestion(parameters, history,
                                           lower_is_better)
        print(suggestion)
示例#8
0
def test_bayesopt_batch(parameters, results):
    """A batch of 10 suggestions over a 4-dimensional domain."""
    gpyopt = GPyOpt(max_concurrent=10)
    gpyopt.domain = gpyopt._initialize_domain(parameters)
    gpyopt.lower_is_better = True
    X, y, y_var = GPyOpt._prepare_data_for_bayes_opt(parameters, results)
    search_space = gpyopt._initialize_domain(parameters)
    candidates = gpyopt._generate_bayesopt_batch(X, y,
                                                 lower_is_better=True,
                                                 domain=search_space)
    assert candidates.shape == (10, 4)
示例#9
0
def test_get_best_pred(lower_is_better, expected_best):
    """get_best_pred should return the model's predicted-best parameters."""
    grid = numpy.linspace(0, 1, 10)
    results = pandas.DataFrame({
        'x': grid,
        'Objective': grid,
        'Status': ['COMPLETED'] * 10,
    })
    params = [sherpa.Continuous('x', [0, 1])]
    algorithm = GPyOpt(num_initial_data_points=2)
    # Prime the algorithm with one suggestion call before querying its
    # best prediction.
    algorithm.get_suggestion(results=results,
                             parameters=params,
                             lower_is_better=lower_is_better)
    best_params = algorithm.get_best_pred(results=results,
                                          parameters=params,
                                          lower_is_better=lower_is_better)
    assert best_params['x'] == expected_best
示例#10
0
def test_mixed_dtype():
    """Choice parameters should keep their declared Python types."""
    algorithm = GPyOpt(max_num_trials=4)
    parameters = [
        sherpa.Choice('param_int', [0, 1]),
        sherpa.Choice('param_float', [0.1, 1.1]),
    ]
    study = sherpa.Study(
        parameters=parameters,
        algorithm=algorithm,
        lower_is_better=True,
        disable_dashboard=True,
    )
    for trial in study:
        study.add_observation(trial, iteration=0, objective=0)
        study.finalize(trial)
    # `is` checks the exact type deliberately: a numpy scalar (or bool for
    # the int case) must not slip through, so isinstance would be too loose.
    assert type(trial.parameters['param_int']) is int
    assert type(trial.parameters['param_float']) is float
示例#11
0
def test_3d():
    """Optimize a mixed space: continuous x, string choice y, discrete z.

    The continuous part has its global minimum of -4 at x=4 (the negated
    1-D test function); the -int(y)*z term is minimized by the largest y
    and z, so the best point is x≈4, y="1", z=5.
    """
    def obj_func(x, y, z):
        # Parameter types must survive the round trip through the study.
        assert isinstance(x, float)
        assert isinstance(y, str)
        assert isinstance(z, int)
        # Continuous part: global minimum of -4 at x=4.
        return -4. * numpy.exp(-(x - 4.)**2 / 10.) * numpy.cos(
            1.5 * (x - 4.))**2 - int(y) * z

    parameters = [
        sherpa.Continuous('x', [0., 7.]),
        sherpa.Choice('y', ["-1", "0", "1"]),
        sherpa.Discrete('z', [1, 5])
    ]

    bayesian_optimization = GPyOpt(max_concurrent=1,
                                   max_num_trials=100,
                                   model_type='GP',
                                   acquisition_type='EI')

    study = sherpa.Study(algorithm=bayesian_optimization,
                         parameters=parameters,
                         lower_is_better=True,
                         disable_dashboard=True)

    for trial in study:
        print("Trial {}:\t{}".format(trial.id, trial.parameters))

        fval = obj_func(**trial.parameters)
        print("Function Value: {}".format(fval))
        study.add_observation(trial=trial, iteration=1, objective=fval)
        study.finalize(trial, status='COMPLETED')
    rval = study.get_best_result()
    print(rval)

    assert numpy.isclose(rval['x'], 4., atol=0.1)
    # y is declared as a *string* Choice (obj_func asserts isinstance(y, str)),
    # so the best value is the string "1", not the integer 1.
    assert rval['y'] == "1"
    assert rval['z'] == 5
示例#12
0
def test_3d():
    """Optimize over continuous x, integer choice y, and discrete z."""
    def obj_func(x, y, z):
        # Continuous part has its global minimum of -4 at x=4; the -y*z
        # term is minimized by the largest y and z.
        dip = -4. * numpy.exp(-(x - 4.)**2 / 10.)
        return dip * numpy.cos(1.5 * (x - 4.))**2 - y * z

    parameters = [
        sherpa.Continuous('x', [0., 7.]),
        sherpa.Choice('y', [-1, 0, 1]),
        sherpa.Discrete('z', [1, 5])
    ]

    algorithm = GPyOpt(max_concurrent=1,
                       max_num_trials=100,
                       model_type='GP',
                       acquisition_type='EI')

    study = sherpa.Study(algorithm=algorithm,
                         parameters=parameters,
                         lower_is_better=True,
                         disable_dashboard=True)

    for trial in study:
        print("Trial {}:\t{}".format(trial.id, trial.parameters))

        fval = obj_func(**trial.parameters)
        print("Function Value: {}".format(fval))
        study.add_observation(trial=trial, iteration=1, objective=fval)
        study.finalize(trial, status='COMPLETED')

    best = study.get_best_result()
    print(best)

    assert numpy.isclose(best['x'], 4., atol=0.1)
    assert best['y'] == 1
    assert best['z'] == 5


# def test_noisy_parabola():
#     def f(x, sd=1):
#         y = (x - 3) ** 2 + 10.
#         if sd == 0:
#             return y
#         else:
#             return y + numpy.random.normal(loc=0., scale=sd,
#                                            size=numpy.array(x).shape)
#
#     parameters = [sherpa.Continuous('x1', [0., 7.])]
#
#     bayesian_optimization = GPyOpt(max_concurrent=1,
#                                    max_num_trials=20,
#                                    model_type='GP',
#                                    acquisition_type='EI')
#     rep = Repeat(algorithm=bayesian_optimization,
#                  num_times=5)
#     study = sherpa.Study(algorithm=rep,
#                          parameters=parameters,
#                          lower_is_better=True,
#                          disable_dashboard=True)
#
#     for trial in study:
#         print("Trial {}:\t{}".format(trial.id, trial.parameters))
#
#         fval = f(trial.parameters['x1'], sd=1)
#         print("Function Value: {}".format(fval))
#         study.add_observation(trial=trial,
#                               iteration=1,
#                               objective=fval)
#         study.finalize(trial, status='COMPLETED')
#     rval = study.get_best_result()
#     print(rval)
#     # assert numpy.sqrt((rval['Objective'] - 3.)**2) < 0.2

#
# if __name__ == '__main__':
#     test_noisy_parabola()