예제 #1
0
def sherpaopt():
    """Tune DNNGP hyperparameters (bias/weight variances, depth) with Sherpa.

    Uses population-based training with a median stopping rule, runs up to
    100 trials, then prints the best result and the total optimization time.
    """
    train, targ, test = data_prep()
    # Seed configuration used to construct the GP once; the per-trial
    # hyperparameters are passed to GPerror below.
    sigb, sigw, layers = 0.35204672, 2.1220488, 87
    gp = DNNGP(train, targ[1][:-targ[2]], test, sigb, sigw, layers)

    t0 = time.time()
    parameters = [sherpa.Discrete(name='layers', range=[2, 100]),
                  sherpa.Continuous(name='bias', range=[0, 5]),
                  sherpa.Continuous(name='weight', range=[.1, 2.09])]
    bayesopt = sherpa.algorithms.PopulationBasedTraining(4)
    stop = sherpa.algorithms.MedianStoppingRule(0, 1)
    study = sherpa.Study(parameters=parameters,
                         algorithm=bayesopt,
                         stopping_rule=stop,
                         lower_is_better=True,
                         disable_dashboard=True)

    # FIX: the original called study.get_suggestion() here and bound the
    # result to `train`, clobbering the training data and consuming a trial
    # that was never observed or finalized. That stray call is removed.

    for trial in study:
        print('still going: ', trial.id)
        for iteration in range(1):
            error = GPerror(gp, targ, trial.parameters["bias"],
                            trial.parameters["weight"],
                            trial.parameters["layers"])
            study.add_observation(trial=trial,
                                  iteration=iteration,
                                  objective=error)
        study.finalize(trial)
        if trial.id == 100:  # set to around ~200
            break
    print(study.get_best_result())
    print("Time Optimizing: ", (time.time() - t0)/60, " minutes")
예제 #2
0
def run_example(FLAGS):
    """Launch a parallel Sherpa study using GPyOpt Bayesian optimization.

    Trials run ``fashion_mlp.py`` as subprocesses on the local machine;
    results land in a timestamped output directory.
    """
    hp_space = [
        sherpa.Continuous('learning_rate', [1e-5, 5e-1], 'log'),
        sherpa.Continuous('decay', [1e-8, 1e-2], 'log'),
        sherpa.Continuous('momentum', [0., 0.99]),
        sherpa.Continuous('dropout', [0.0001, 0.7]),
        sherpa.Ordinal('batch_size', [32, 64, 128, 256]),
    ]

    search = bayesian_optimization.GPyOpt(max_concurrent=FLAGS.concurrent,
                                          model_type='GP',
                                          acquisition_type='EI',
                                          max_num_trials=100)

    out_dir = './output_gpyopt_{}'.format(time.strftime("%Y-%m-%d--%H-%M-%S"))
    # Trials are dispatched on the local machine.
    rval = sherpa.optimize(parameters=hp_space,
                           algorithm=search,
                           dashboard_port=FLAGS.port,
                           lower_is_better=False,
                           command='python fashion_mlp.py',
                           scheduler=LocalScheduler(),
                           verbose=0,
                           max_concurrent=FLAGS.concurrent,
                           output_dir=out_dir)
예제 #3
0
def test_branin():
    """BayesianOptimization should locate the Branin-Hoo global minimum."""

    def branin(x1, x2):
        # Branin-Hoo function; global minimum 0.397887 at (-pi, 12.275),
        # (pi, 2.275), and (9.42478, 2.475).
        b = 5.1 / (4 * math.pi ** 2)
        c = 5 / math.pi
        t = 1 / (8 * math.pi)
        quad = (x2 - b * x1 ** 2 + c * x1 - 6) ** 2
        return quad + 10 * (1 - t) * math.cos(x1) + 10

    space = [sherpa.Continuous('x1', [-5., 10.]),
             sherpa.Continuous('x2', [0., 15.])]

    algorithm = BayesianOptimization(num_grid_points=2, max_num_trials=50,
                                     fine_tune=True)
    study = sherpa.Study(algorithm=algorithm,
                         parameters=space,
                         lower_is_better=True,
                         disable_dashboard=True)

    for trial in study:
        print("Trial {}:\t{}".format(trial.id, trial.parameters))
        objective = branin(trial.parameters['x1'], trial.parameters['x2'])
        print("Branin-Hoo: {}".format(objective))
        study.add_observation(trial=trial, iteration=1, objective=objective)
        study.finalize(trial, status='COMPLETED')

    best = study.get_best_result()
    print(best)
    assert np.isclose(best['Objective'], 0.397887, rtol=1e-3)
예제 #4
0
def run_example(FLAGS):
    """Run a parallel Sherpa study using Population Based Training.

    Trials run ``fashion_mlp.py`` as local subprocesses; results go to a
    timestamped output directory.
    """
    parameters = [
        sherpa.Continuous('learning_rate', [1e-5, 5e-1], 'log'),
        sherpa.Continuous('decay', [1e-8, 1e-2], 'log'),
        sherpa.Continuous('momentum', [0., 0.99]),
        sherpa.Continuous('dropout', [0.0001, 0.7]),
        sherpa.Ordinal('batch_size', [32, 64, 128, 256])
    ]

    # FIX: the original bound the algorithm to both `algorithm` and an
    # unused alias `alg`; the redundant alias is dropped.
    algorithm = sherpa.algorithms.PopulationBasedTraining(
        num_generations=26,
        population_size=100,
        # Wider bounds for the perturbable parameters — presumably these let
        # PBT drift outside the sampling ranges above; confirm against the
        # sherpa PBT documentation.
        parameter_range={
            'learning_rate': [1e-10, 9e-1],
            'decay': [1e-10, 9e-1]
        },
        perturbation_factors=(0.8, 1.2))

    # Run on local machine.
    scheduler = LocalScheduler()

    rval = sherpa.optimize(parameters=parameters,
                           algorithm=algorithm,
                           dashboard_port=FLAGS.port,
                           lower_is_better=False,
                           command='python fashion_mlp.py',
                           scheduler=scheduler,
                           verbose=0,
                           max_concurrent=FLAGS.concurrent,
                           output_dir='./output_pbt_{}'.format(
                               time.strftime("%Y-%m-%d--%H-%M-%S")))
예제 #5
0
def test_convex():
    """BayesianOptimization should find the minimum of a convex bowl."""

    def convex(x1, x2):
        # Minimum value 0.1 at (x1, x2) = (3, 5).
        return (x1 - 3.) ** 2 + (x2 - 5.) ** 2 + 0.1

    space = [
        sherpa.Continuous('x1', [-5., 10.]),
        sherpa.Continuous('x2', [0., 15.])
    ]

    algorithm = BayesianOptimization(num_grid_points=2,
                                     max_num_trials=50,
                                     fine_tune=True)
    study = sherpa.Study(algorithm=algorithm,
                         parameters=space,
                         lower_is_better=True,
                         disable_dashboard=True)

    for trial in study:
        print("Trial {}:\t{}".format(trial.id, trial.parameters))
        value = convex(trial.parameters['x1'], trial.parameters['x2'])
        print("Function Value: {}".format(value))
        study.add_observation(trial=trial, iteration=1, objective=value)
        study.finalize(trial, status='COMPLETED')

    best = study.get_best_result()
    print(best)
    assert np.isclose(best['Objective'], 0.1, rtol=1e-3)
예제 #6
0
def parameters():
    """Return the Sherpa search space for the network hyperparameters."""
    return [
        sherpa.Continuous('dropout', [0., 0.5]),
        sherpa.Continuous('lr', [1e-7, 1e-1], 'log'),
        sherpa.Choice('activation', ['relu', 'tanh', 'sigmoid']),
        sherpa.Discrete('num_hidden', [100, 300]),
    ]
예제 #7
0
def build_sherpa_augmentations_space():
    """Return the Sherpa search space for image-augmentation settings."""
    return [
        sherpa.Continuous(name='width_shift', range=[0.0, 0.2]),
        sherpa.Continuous(name='height_shift', range=[0.0, 0.2]),
        sherpa.Continuous(name='zoom', range=[0.0, 0.3]),
        sherpa.Choice(name='horizontal_flip', range=[False, True]),
        sherpa.Discrete(name='rotation', range=[0, 30]),
    ]
예제 #8
0
def build_sherpa_parameter_space():
    """Return the Sherpa search space for the model architecture and optimizer."""
    return [
        sherpa.Ordinal(name='depth', range=[2, 3]),
        sherpa.Discrete(name='dense_neurons', range=[100, 164]),
        sherpa.Discrete(name='init_filters', range=[8, 32]),
        sherpa.Choice(name='use_batchnorm', range=[False, True]),
        sherpa.Continuous(name='dropout', range=[0.35, 0.55]),
        sherpa.Ordinal(name='batch_size', range=[512, 1024]),
        sherpa.Continuous(name='learning_rate', range=[0.005, 0.01]),
        sherpa.Continuous(name='beta1', range=[0.45, 0.55]),
        sherpa.Continuous(name='beta2', range=[0.95, 1.0]),
    ]
예제 #9
0
파일: runner.py 프로젝트: hamogu/sherpa-1
def run_example(FLAGS):
    """Run a parallel Sherpa study over training hyperparameters.

    The search algorithm is chosen by ``FLAGS.algorithm`` (GPyOpt Bayesian
    optimization, local search, or random search); trials are dispatched
    either to an SGE queue or to the local machine.
    """
    parameters = [
        sherpa.Continuous('lrinit', [0.001, 0.1], 'log'),
        sherpa.Continuous('momentum', [0., 0.99]),
        sherpa.Continuous('lrdecay', [1e-7, 1e-2], 'log'),
        sherpa.Continuous('dropout', [0., 0.5]),
    ]

    if FLAGS.algorithm == 'BayesianOptimization':
        print('Running GPyOpt')
        alg = bayesian_optimization.GPyOpt(
            max_concurrent=FLAGS.max_concurrent,
            model_type='GP_MCMC',
            acquisition_type='EI_MCMC',
            max_num_trials=150)
    elif FLAGS.algorithm == 'LocalSearch':
        print('Running Local Search')
        seed = {'lrinit': 0.038, 'momentum': 0.92,
                'lrdecay': 0.0001, 'dropout': 0.}
        alg = sherpa.algorithms.LocalSearch(seed_configuration=seed,
                                            perturbation_factors=(0.9, 1.1))
    else:
        print('Running Random Search')
        alg = sherpa.algorithms.RandomSearch(max_num_trials=150)

    if FLAGS.sge:
        assert FLAGS.env, "For SGE use, you need to set an environment path."
        # Submit each trial to the SGE queue with the configured
        # project/queue/resource options.
        opt = '-N MNISTExample -P {} -q {} -l {}'.format(
            FLAGS.P, FLAGS.q, FLAGS.l)
        sched = SGEScheduler(environment=FLAGS.env, submit_options=opt)
    else:
        sched = LocalScheduler()  # run trials as local subprocesses

    rval = sherpa.optimize(parameters=parameters,
                           algorithm=alg,
                           lower_is_better=True,
                           filename='trial.py',
                           output_dir='output_{}'.format(FLAGS.studyname),
                           scheduler=sched,
                           max_concurrent=FLAGS.max_concurrent)
    print('Best results:')
    print(rval)
예제 #10
0
def run_example(FLAGS):
    """Random-search over log-scaled training hyperparameters in parallel.

    Each configuration is run 25 times and the repeats are aggregated
    (``agg=True``) before the search algorithm sees the results.
    """
    parameters = [
        sherpa.Continuous('log10_learning_rate', [-5, 0]),
        sherpa.Continuous('log2_batch_size', [5, 8]),
        sherpa.Continuous('log2_n_steps', [4, 11]),
        sherpa.Continuous('x_n_opt_epochs', [0, 7]),
        sherpa.Continuous('log10_entcoeff', [-8, -1]),
        sherpa.Continuous('x_gamma', [-4, -1]),
        sherpa.Continuous('cliprange', [0.1, 0.4]),
        sherpa.Continuous('lam', [0.8, 1.0]),
    ]

    search = sherpa.algorithms.RandomSearch(max_num_trials=300)
    search = sherpa.algorithms.Repeat(search, 25, agg=True)

    results = sherpa.optimize(parameters=parameters,
                              algorithm=search,
                              lower_is_better=False,
                              filename='trial.py',
                              scheduler=LocalScheduler(),
                              verbose=0,
                              max_concurrent=FLAGS.max_concurrent,
                              disable_dashboard=True,
                              output_dir='./output_{}'.format(
                                  time.strftime("%Y-%m-%d--%H-%M-%S")))
    print('Best results:')
    print(results)
예제 #11
0
def test_transformers():
    """Exercise the design-space transformers used by BayesianOptimization."""
    # Choice: one-hot encoding, columns ordered by the parameter's range.
    choice = sherpa.Choice('choice', ['a', 'b', 'c', 'd'])
    tf = BayesianOptimization.ChoiceTransformer(choice)
    onehot = tf.transform(['d', 'c', 'b', 'a'])
    assert np.all(onehot == np.flip(np.eye(4), axis=0))
    assert all(tf.reverse(onehot) == np.array(['d', 'c', 'b', 'a']))

    # Continuous: linear scaling onto [0, 1].
    cont = sherpa.Continuous('continuous', [0., 0.4])
    tf = BayesianOptimization.ContinuousTransformer(cont)
    scaled = tf.transform([0.2, 0.4, 0.])
    assert np.all(scaled == np.array([0.5, 1.0, 0.0]))
    assert np.all(tf.reverse(scaled) == np.array([0.2, 0.4, 0.]))

    # Continuous with log scale: scaling is linear in log10-space.
    logcont = sherpa.Continuous('continuous-log', [0.00001, 0.1], 'log')
    tf = BayesianOptimization.ContinuousTransformer(logcont)
    print(tf.transform([0.01]))
    logscaled = tf.transform([0.0001, 0.001, 0.01])
    assert np.all(logscaled == np.array([0.25, 0.5, 0.75]))
    print(tf.reverse(logscaled))
    assert np.all(tf.reverse(logscaled) == np.array([0.0001, 0.001, 0.01]))

    # Discrete: linear scaling; reverse maps back onto integers.
    disc = sherpa.Discrete('discrete', [0, 12])
    tf = BayesianOptimization.DiscreteTransformer(disc)
    dscaled = tf.transform([3, 6, 9])
    assert np.all(dscaled == np.array([0.25, 0.5, 0.75]))
    assert np.all(tf.reverse(dscaled) == np.array([3, 6, 9]))
    assert np.all(tf.reverse([0.2, 0.3, 0.4]) == np.array([2, 4, 5]))

    # Discrete with log scale.
    logdisc = sherpa.Discrete('discrete-log', [10, 100000], 'log')
    tf = BayesianOptimization.DiscreteTransformer(logdisc)
    values = [10, 100, 1000, 10000, 100000]
    lscaled = tf.transform(values)
    assert np.all(lscaled == np.array([0., 0.25, 0.5, 0.75, 1.]))
    assert np.all(tf.reverse(lscaled) == np.array(values))
예제 #12
0
def test_random_search():
    """RandomSearch honors max_num_trials and the repeat count."""
    parameters = [
        sherpa.Continuous('a', [0, 1]),
        sherpa.Choice('b', ['x', 'y', 'z'])
    ]

    # repeat=10: each unique configuration is served 10 times in a row.
    searcher = sherpa.algorithms.RandomSearch(max_num_trials=10, repeat=10)
    previous = {}
    for _ in range(10):
        first = searcher.get_suggestion(parameters=parameters)
        assert first != previous
        for _ in range(9):
            previous = searcher.get_suggestion(parameters=parameters)
            assert first == previous
    # Budget exhausted: 10 configs x 10 repeats.
    assert searcher.get_suggestion(parameters=parameters) is None

    # repeat=1: every suggestion is a fresh configuration.
    searcher = sherpa.algorithms.RandomSearch(max_num_trials=10, repeat=1)
    previous = {}
    for _ in range(10):
        current = searcher.get_suggestion(parameters=parameters)
        assert current != previous
        previous = current
    assert searcher.get_suggestion(parameters=parameters) is None

    # No trial limit: suggestions keep coming and keep changing.
    searcher = sherpa.algorithms.RandomSearch()
    previous = {}
    for _ in range(1000):
        current = searcher.get_suggestion(parameters=parameters)
        assert current != previous
        previous = current
예제 #13
0
파일: long_tests.py 프로젝트: ml-lab/sherpa
def test_user_code_fails(test_dir):
    """sherpa.optimize raises a RuntimeWarning when the trial script fails."""
    parameters = [
        sherpa.Choice(name="param_a", range=[1, 2, 3]),
        sherpa.Continuous(name="param_b", range=[0, 1])
    ]
    algorithm = sherpa.algorithms.RandomSearch(max_num_trials=3)
    scheduler = sherpa.schedulers.LocalScheduler()

    # Write the (failing) user script into the test directory.
    script_path = os.path.join(test_dir, "test.py")
    with open(script_path, 'w') as f:
        f.write(testscript2)

    with pytest.warns(RuntimeWarning):
        results = sherpa.optimize(filename=script_path,
                                  lower_is_better=True,
                                  algorithm=algorithm,
                                  parameters=parameters,
                                  output_dir=test_dir,
                                  scheduler=scheduler,
                                  max_concurrent=1,
                                  db_port=27000)
예제 #14
0
def test_1d():
    """BayesianOptimization should find the 1-D global maximum of 4 at x=4."""

    def objective(x):
        # Damped cosine bump with global maximum 4 at x = 4.
        return 4. * np.exp(-(x - 4.) ** 2 / 10.) * np.cos(1.5 * (x - 4.)) ** 2

    space = [sherpa.Continuous('x1', [0., 7.])]

    algorithm = BayesianOptimization(num_grid_points=5,
                                     max_num_trials=50,
                                     fine_tune=False)
    study = sherpa.Study(algorithm=algorithm,
                         parameters=space,
                         lower_is_better=False,
                         disable_dashboard=True)

    for trial in study:
        print("Trial {}:\t{}".format(trial.id, trial.parameters))
        value = objective(trial.parameters['x1'])
        print("Function Value: {}".format(value))
        study.add_observation(trial=trial, iteration=1, objective=value)
        study.finalize(trial, status='COMPLETED')

    best = study.get_best_result()
    assert np.isclose(best['Objective'], 4.)
예제 #15
0
def test_local_search():
    """LocalSearch perturbs one parameter at a time around the best config."""
    parameters = [
        sherpa.Continuous('cont', [0, 1]),
        sherpa.Ordinal('ord', [1, 2, 3])
    ]

    seed = {'cont': 0.5, 'ord': 2}
    alg = sherpa.algorithms.LocalSearch(seed_configuration=seed)

    study = sherpa.Study(parameters=parameters,
                         algorithm=alg,
                         lower_is_better=True,
                         disable_dashboard=True)

    def mock_objective(p):
        # Minimized by small 'cont' and large 'ord'.
        return p['cont'] / p['ord']

    # Initial suggestion must be the seed itself.
    t = study.get_suggestion()
    tlist = [t]
    tbest = t
    assert t.parameters == seed
    study.add_observation(t,
                          objective=mock_objective(t.parameters),
                          iteration=1)
    study.finalize(t)

    # The next suggestion perturbs exactly one of the two parameters.
    t = study.get_suggestion()
    tlist.append(t)
    if mock_objective(t.parameters) < mock_objective(tbest.parameters):
        tbest = t
    study.add_observation(t,
                          objective=mock_objective(t.parameters),
                          iteration=1)
    study.finalize(t)
    if t.parameters['ord'] == 2:
        # 'cont' was perturbed: changed, but by less than 0.2.
        assert t.parameters['cont'] != 0.5
        assert abs(t.parameters['cont'] - 0.5) < 0.2
    else:
        # 'ord' was perturbed to a neighboring value; 'cont' untouched.
        assert t.parameters['cont'] == 0.5
        # FIX: this check was missing its `assert` and was a no-op expression.
        assert t.parameters['ord'] in [1, 3]

    # Do more iterations.
    for i in range(50):
        t = study.get_suggestion()
        assert t.parameters['ord'] in [1, 2, 3]
        assert t.parameters['cont'] >= 0.0
        assert t.parameters['cont'] <= 1.0
        # All new suggestions should be based on tbest: at most one
        # parameter differs from the current best configuration.
        assert t.parameters['ord'] == tbest.parameters['ord'] \
               or t.parameters['cont'] == tbest.parameters['cont']
        tlist.append(t)
        if mock_objective(t.parameters) < mock_objective(tbest.parameters):
            tbest = t
        study.add_observation(t,
                              objective=mock_objective(t.parameters),
                              iteration=1)
        study.finalize(t)
def test_results_aggregation():
    """SequentialTesting aggregates repeats before the algorithm sees results."""
    parameters = [sherpa.Continuous('myparam', [0, 1])]

    class CheckingAlg(Algorithm):
        # Verifies incoming results are aggregated: mean objective 0 and the
        # standard error of the sample {-1, 0, 1}.
        def get_suggestion(self, parameters, results, lower_is_better):
            if results is not None and len(results) > 0:
                print(results)
                assert 'ObjectiveStdErr' in results.columns
                assert (results.loc[:, 'Objective'] == 0.).all()
                expected = numpy.sqrt(numpy.var([-1, 0, 1], ddof=1) / (3 - 1))
                assert (numpy.isclose(results.loc[:, 'ObjectiveStdErr'],
                                      expected)).all()
            return {'myparam': numpy.random.random()}

    wrapper = SequentialTesting(algorithm=CheckingAlg(),
                                K=10,
                                n=(3, 6, 9),
                                P=0.5)
    study = sherpa.Study(algorithm=wrapper,
                         parameters=parameters,
                         lower_is_better=True,
                         disable_dashboard=True)
    # Objectives cycle through -1, 0, 1 so each aggregate has mean 0.
    for trial in study:
        study.add_observation(trial,
                              iteration=1,
                              objective=trial.id % 3 - 1)
        study.finalize(trial)
        print(study.results)
        print(study.results)
def test_bayes_opt():
    """GPyOpt wrapped in SequentialTesting runs on a noisy parabola."""

    def noisy_parabola(x, sd=1):
        # Parabola with minimum 10 at x = 3, plus Gaussian noise (sd > 0).
        y = (x - 3) ** 2 + 10.
        if sd == 0:
            return y
        return y + numpy.random.normal(loc=0., scale=sd,
                                       size=numpy.array(x).shape)

    parameters = [sherpa.Continuous('x', [1, 6])]

    seq = SequentialTesting(algorithm=GPyOpt(max_num_trials=10),
                            K=10,
                            n=(3, 6, 9),
                            P=0.5)
    study = sherpa.Study(algorithm=seq,
                         parameters=parameters,
                         lower_is_better=True,
                         disable_dashboard=True)
    for trial in study:
        study.add_observation(trial,
                              iteration=1,
                              objective=noisy_parabola(trial.parameters['x']))
        study.finalize(trial)
        print(study.results)
def test_wait():
    """SequentialTesting returns 'WAIT' while stage trials are outstanding."""
    parameters = [sherpa.Continuous('myparam', [0, 1])]
    searcher = sherpa.algorithms.RandomSearch()
    seq = SequentialTesting(algorithm=searcher,
                            K=10,
                            n=(3, 6, 9),
                            P=0.5)
    study = sherpa.Study(algorithm=seq,
                         parameters=parameters,
                         lower_is_better=True,
                         disable_dashboard=True)

    def observe(t):
        # Noisy objective centered on the parameter value itself.
        study.add_observation(
            t,
            iteration=1,
            objective=t.parameters['myparam'] + numpy.random.normal(scale=0.01))
        study.finalize(t)

    # Complete all but the last trial of the first stage (K * n[0] total).
    for _ in range(10 * 3 - 1):
        trial = study.get_suggestion()
        print(trial.id, trial.parameters, "{}/{}".format(seq.k, seq.K[seq.t]),
              "{}/{}".format(seq.t, seq.T))
        observe(trial)

    trial = study.get_suggestion()
    assert trial.parameters['stage'] == 1

    # With that trial still open, the algorithm asks the caller to wait.
    waittrial = study.get_suggestion()
    assert waittrial == 'WAIT'
    observe(trial)

    trial = study.get_suggestion()
    assert trial.parameters['stage'] == 2
def test_parallel():
    """SequentialTesting works end-to-end with a parallel local scheduler."""
    workdir = tempfile.mkdtemp(dir=".")

    parameters = [sherpa.Continuous('myparam', [0, 1])]
    seq = SequentialTesting(algorithm=sherpa.algorithms.RandomSearch(),
                            K=3,
                            n=(3, 6, 9),
                            P=0.5,
                            verbose=True)

    scheduler = sherpa.schedulers.LocalScheduler()

    # Write the trial script into the temporary working directory.
    script = os.path.join(workdir, "test.py")
    with open(script, 'w') as f:
        f.write(testscript)

    try:
        results = sherpa.optimize(parameters=parameters,
                                  algorithm=seq,
                                  lower_is_better=True,
                                  command="python {}".format(script),
                                  output_dir=workdir,
                                  scheduler=scheduler,
                                  max_concurrent=2,
                                  verbose=1,
                                  disable_dashboard=True)
    finally:
        # Always clean up the temp directory, even if optimization fails.
        shutil.rmtree(workdir)
예제 #20
0
def get_study():
    """Build a random-search Sherpa study with median-rule early stopping."""
    space = [
        sherpa.Ordinal("conv3d_num_filters", [16, 32, 64, 128]),
        sherpa.Ordinal("conv3d_kernel_size", [(3, 5, 5), (5, 5, 5), (5, 7, 7)]),
        sherpa.Discrete("encoder_rnn_num_layers", [1, 3]),
        sherpa.Continuous("encoder_rnn_dropout", [0.0, 0.3]),
        sherpa.Continuous("lr", [2e-4, 4e-3], scale="log"),
    ]
    return sherpa.Study(
        parameters=space,
        algorithm=sherpa.algorithms.RandomSearch(max_num_trials=16),
        stopping_rule=sherpa.algorithms.MedianStoppingRule(min_iterations=8,
                                                           min_trials=4),
        lower_is_better=True,
    )
예제 #21
0
def test_1d():
    """GPyOpt should approach the 1-D global maximum of 4 at x=4."""

    def objective(x):
        # Damped cosine bump; global maximum 4 at x = 4.
        return 4. * numpy.exp(-(x - 4.) ** 2 / 10.) * \
            numpy.cos(1.5 * (x - 4.)) ** 2

    space = [sherpa.Continuous('x1', [0., 7.])]

    algorithm = GPyOpt(max_concurrent=1,
                       max_num_trials=50,
                       model_type='GP',
                       acquisition_type='EI')
    study = sherpa.Study(algorithm=algorithm,
                         parameters=space,
                         lower_is_better=False,
                         disable_dashboard=True)

    for trial in study:
        print("Trial {}:\t{}".format(trial.id, trial.parameters))
        value = objective(trial.parameters['x1'])
        print("Function Value: {}".format(value))
        study.add_observation(trial=trial, iteration=1, objective=value)
        study.finalize(trial, status='COMPLETED')

    best = study.get_best_result()
    print(best)
    assert numpy.isclose(best['Objective'], 4., atol=0.2)
예제 #22
0
def test_optimize_mix():
    """_maximize handles a mixed continuous/choice/discrete design space."""
    parameters = [
        sherpa.Continuous('continuous', [0., 1]),
        sherpa.Choice('choice', [1, 2, 3, 4]),
        sherpa.Choice('choice2', [1, 2, 3]),
        sherpa.Discrete('discrete', [0, 100])
    ]

    def fun(x):
        # Maximum 7 at design point [0.5, 0, 0, 0, 1, 0, 0, 1, 0.5]:
        # the parabolas peak at 0.5 for the scaled continuous/discrete
        # columns and the dot products favor the last one-hot column.
        cont = -1. * (x[0] - 0.5) ** 2
        ch = np.dot(x[1:5], np.array([1, 2, 3, 4]))
        ch2 = np.dot(x[5:8], np.array([1, 2, 3]))
        discr = -1. * (x[-1] - 0.5) ** 2
        return cont + ch + ch2 + discr

    bo = BayesianOptimization()
    bo.num_candidates = 100

    candidates = bo._generate_candidates(parameters)
    design = bo._to_design(candidates, parameters)

    optimized, values = bo._maximize(design, fun)
    print(optimized[values.argmax()])
    print(values.max())
    assert np.all(
        np.isclose(optimized[values.argmax()],
                   np.array([0.5, 0., 0., 0., 1., 0., 0., 1., 0.5])))
예제 #23
0
def test_repeat_grid_search():
    """Repeat(GridSearch, 3) yields every grid point exactly three times."""
    parameters = [
        sherpa.Choice('a', [1, 2]),
        sherpa.Choice('b', ['a', 'b']),
        sherpa.Continuous('c', [1, 4])
    ]

    alg = sherpa.algorithms.Repeat(algorithm=sherpa.algorithms.GridSearch(),
                                   num_times=3)

    # Drain the algorithm until it stops producing suggestions.
    seen = []
    while True:
        suggestion = alg.get_suggestion(parameters)
        if not suggestion:
            break
        seen.append((suggestion['a'], suggestion['b'], suggestion['c']))

    grid = [(1, 'a', 2.0), (1, 'a', 3.0), (1, 'b', 2.0),
            (1, 'b', 3.0), (2, 'a', 2.0), (2, 'a', 3.0),
            (2, 'b', 2.0), (2, 'b', 3.0)]
    expected = list(
        itertools.chain.from_iterable(
            itertools.repeat(point, 3) for point in grid))

    print(sorted(expected))
    print(sorted(seen))
    assert sorted(expected) == sorted(seen)
예제 #24
0
def test_noisy_parabola():
    """GPyOpt with Repeat(agg=True) runs on a noisy parabola objective."""

    def noisy_parabola(x, sd=1):
        # Minimum 10 at x = 3; Gaussian noise added unless sd == 0.
        y = (x - 3) ** 2 + 10.
        if sd == 0:
            return y
        return y + numpy.random.normal(loc=0., scale=sd,
                                       size=numpy.array(x).shape)

    parameters = [sherpa.Continuous('x1', [0., 7.])]

    gpyopt = GPyOpt(max_concurrent=1,
                    max_num_trials=5,
                    model_type='GP',
                    acquisition_type='EI')
    # Each configuration is evaluated three times and the repeats aggregated.
    repeated = Repeat(algorithm=gpyopt, num_times=3, agg=True)
    study = sherpa.Study(algorithm=repeated,
                         parameters=parameters,
                         lower_is_better=True,
                         disable_dashboard=True)

    for trial in study:
        value = noisy_parabola(trial.parameters['x1'], sd=1)
        study.add_observation(trial=trial, iteration=1, objective=value)
        study.finalize(trial, status='COMPLETED')
    print(study.results.query("Status=='COMPLETED'"))
예제 #25
0
def test_optimize():
    """_maximize recovers the optimum for continuous and discrete spaces."""
    # Continuous parameter: maximum of -(x - 0.5)^2 is at x = 0.5.
    parameters = [sherpa.Continuous('continuous', [0., 1])]

    bo = BayesianOptimization()
    bo.num_candidates = 100

    candidates = bo._generate_candidates(parameters)
    design = bo._to_design(candidates, parameters)

    objective = lambda x: -1 * (x - 0.5) ** 2
    optimized, values = bo._maximize(design, objective)
    assert np.isclose(optimized[values.argmax()][0], 0.5)

    # Discrete parameter: same parabola in the scaled [0, 1] design space.
    parameters = [sherpa.Discrete('discrete', [0, 100])]

    bo = BayesianOptimization()
    bo.num_candidates = 100

    candidates = bo._generate_candidates(parameters)
    design = bo._to_design(candidates, parameters)

    objective = lambda x: -1. * (x - 0.5) ** 2
    optimized, values = bo._maximize(design, objective)
    assert np.isclose(optimized[values.argmax()][0], 0.5)
def test_overall_larger_is_better():
    """With lower_is_better=False the max-objective config reaches the last stage."""
    parameters = [sherpa.Continuous('myparam', [0, 10]),
                  sherpa.Discrete('myparam2', [0, 10])]
    seq = SequentialTesting(algorithm=sherpa.algorithms.RandomSearch(),
                            K=10,
                            n=(3, 6, 9),
                            P=0.5)
    study = sherpa.Study(algorithm=seq,
                         parameters=parameters,
                         lower_is_better=False,
                         disable_dashboard=True)

    # Objective is myparam plus small noise, so larger myparam should win.
    for trial in study:
        print(trial.id, trial.parameters, "{}/{}".format(seq.k, seq.K[seq.t]),
              "{}/{}".format(seq.t, seq.T))
        noisy = trial.parameters['myparam'] + numpy.random.normal(scale=1.)
        study.add_observation(trial, iteration=1, objective=noisy)
        study.finalize(trial)

    completed = study.results.query("Status == 'COMPLETED'")
    # The best configuration must survive into the final stage (stage 2).
    assert completed.myparam.max() in \
        completed[completed.stage == 2].myparam.unique()
예제 #27
0
def test_design():
    """Candidate generation and design-matrix round-tripping are consistent."""
    parameters = [
        sherpa.Choice('choice', ['a', 'b', 'c', 'd']),
        sherpa.Continuous('continuous', [0., 0.4]),
        sherpa.Discrete('discrete', [0, 12])
    ]

    bo = BayesianOptimization()
    bo.num_candidates = 100

    candidates = bo._generate_candidates(parameters)
    assert len(candidates) == bo.num_candidates
    assert len(candidates.columns) == len(parameters)

    # 6 design columns (presumably 4 one-hot for the choice + continuous +
    # discrete), each scaled into [0, 1].
    design = bo._to_design(candidates, parameters)
    assert design.shape == (bo.num_candidates, 6)
    for col in range(design.shape[1]):
        assert (design[:, col] >= 0.).all() and (design[:, col] <= 1.).all()

    # Full-matrix round trip reproduces the candidate frame exactly.
    roundtrip = bo._from_design(design)
    pd.testing.assert_frame_equal(roundtrip, candidates)

    # Single-row round trip matches the first candidate.
    row_dict = bo._from_design(design[0]).iloc[0].to_dict()
    candidate_dict = candidates.iloc[0].to_dict()
    assert row_dict['choice'] == candidate_dict['choice']
    assert row_dict['discrete'] == candidate_dict['discrete']
    assert np.isclose(row_dict['continuous'], candidate_dict['continuous'])
예제 #28
0
def test_repeat_results_aggregation():
    """Repeat(agg=True) must hand the wrapped algorithm aggregated result rows.

    Each suggested setting is observed three times with objectives -1, 0, 1,
    so the aggregated Objective is 0 for every row and the standard error is
    a fixed, known constant.
    """
    parameters = [sherpa.Continuous('myparam', [0, 1])]

    class CheckingAlgorithm(sherpa.algorithms.Algorithm):
        # Tell Repeat that this algorithm can consume repeated/aggregated trials.
        allows_repetition = True

        def get_suggestion(self, parameters, results, lower_is_better):
            if results is not None and len(results) > 0:
                print(results)
                # Aggregation must add spread statistics to the results frame.
                assert 'ObjectiveStdErr' in results.columns
                assert 'ObjectiveVar' in results.columns
                # Objectives -1, 0, 1 average to zero for every setting.
                assert (results.loc[:, 'Objective'] == 0.).all()
                expected_stderr = numpy.sqrt(numpy.var([-1, 0, 1], ddof=1) / (3 - 1))
                assert (numpy.isclose(results.loc[:, 'ObjectiveStdErr'],
                                      expected_stderr)).all()
            return {'myparam': numpy.random.random()}

    repeat = sherpa.algorithms.Repeat(algorithm=CheckingAlgorithm(),
                                      num_times=3,
                                      agg=True)
    study = sherpa.Study(algorithm=repeat,
                         parameters=parameters,
                         lower_is_better=True,
                         disable_dashboard=True)
    for trial in study:
        # trial.id % 3 - 1 cycles -1, 0, 1 over the three repeats of each setting.
        study.add_observation(trial,
                              iteration=1,
                              objective=trial.id%3-1)
        study.finalize(trial)
        print(study.results)
        if trial.id > 10:
            break
class TestLocalSearch:
    """Behavioral checks for LocalSearch's seed handling and perturbation steps."""

    @pytest.mark.parametrize(
        "parameter,seed,expected",
        [(sherpa.Ordinal('p', [0, 1, 2, 3, 4]), {'p': 2}, [1, 3]),
         (sherpa.Continuous('p', [0, 1]), {'p': 0.5}, [0.5*0.8, 0.5*1.2]),
         (sherpa.Discrete('p', [0, 10]), {'p': 5}, [4, 6]),
         (sherpa.Choice('p', [0, 1, 2, 3, 4]), {'p': 2}, [0, 1, 3, 4])])
    def test_seed_and_first_suggestion(self, parameter, seed, expected):
        """First trial replays the seed; second trial is a one-step perturbation."""
        study = get_local_search_study_lower_is_better([parameter], seed)

        first = study.get_suggestion()
        assert first.parameters['p'] == seed['p']
        study.add_observation(first, objective=first.parameters['p'], iteration=1)
        study.finalize(first)

        second = study.get_suggestion()
        assert second.parameters['p'] in expected

    @pytest.mark.parametrize(
        "parameter,seed,expected",
        [(sherpa.Ordinal('p', [0, 1, 2, 3, 4]), {'p': 2}, [0, 1]),
         (sherpa.Continuous('p', [0, 1]), {'p': 0.5}, [0.5*(0.8), 0.5*(0.8)**2]),
         (sherpa.Discrete('p', [0, 10]), {'p': 5}, [int(5*(0.8)), int(5*(0.8)**2)]),
         (sherpa.Choice('p', [0, 1, 2]), {'p': 2}, [0])])
    def test_expected_value_after_three_iterations(self, parameter, seed, expected):
        """After three trials, the best objective is one of the reachable low values."""
        study = get_local_search_study_lower_is_better([parameter], seed)
        for trial in study:
            study.add_observation(trial, objective=trial.parameters['p'], iteration=1)
            study.finalize(trial)
            if trial.id == 3:
                break

        assert study.get_best_result()['Objective'] in expected

    @pytest.mark.parametrize(
        "param1,seed1,param2,seed2",
        [(sherpa.Ordinal('p1', [0, 1, 2, 3, 4]), {'p1': 2},
          sherpa.Continuous('p2', [0, 1]), {'p2': 0.5})])
    def test_only_one_parameter_is_perturbed_at_a_time(self, param1, seed1, param2, seed2):
        """A perturbation step leaves at least one parameter at its seed value."""
        seed = dict(seed1, **seed2)
        study = get_local_search_study_lower_is_better([param1, param2], seed=seed)

        trial = study.get_suggestion()
        study.add_observation(trial, objective=1, iteration=1)
        study.finalize(trial)

        trial = study.get_suggestion()
        # Equivalent to: not all parameters differ from their seed (De Morgan).
        assert any(value == seed[name]
                   for name, value in trial.parameters.items())
# Example #30
def test_1d_minimize():
    """GPyOpt Bayesian optimization should locate a 1-D global minimum.

    The objective -4*exp(-(x-4)^2/10)*cos(1.5*(x-4))^2 is non-positive with
    its global minimum of -4 at x = 4; with lower_is_better=True and a
    12-trial budget the study should converge close to that point.
    """
    def obj_func(x):
        # Global minimum of -4 is at x=4. (The original comment claimed a
        # "global maximum of 4", but the leading -4 factor makes the function
        # non-positive; the study minimizes it.)
        return -4. * numpy.exp(-(x - 4.)**2 / 10.) * numpy.cos(1.5 *
                                                               (x - 4.))**2

    parameters = [sherpa.Continuous('x1', [0., 7.])]

    # Seed the GP with two fixed observations so the run is reasonably stable.
    bayesian_optimization = GPyOpt(max_concurrent=1,
                                   max_num_trials=12,
                                   model_type='GP',
                                   acquisition_type='EI',
                                   initial_data_points=[{
                                       'x1': 2
                                   }, {
                                       'x1': 5
                                   }],
                                   num_initial_data_points=2)

    study = sherpa.Study(algorithm=bayesian_optimization,
                         parameters=parameters,
                         lower_is_better=True,
                         disable_dashboard=True)

    for trial in study:
        print("Trial {}:\t{}".format(trial.id, trial.parameters))

        fval = obj_func(trial.parameters['x1'])
        print("Function Value: {}".format(fval))
        study.add_observation(trial=trial, iteration=1, objective=fval)
        study.finalize(trial, status='COMPLETED')
    rval = study.get_best_result()
    print(rval)

    # The optimizer should land within 0.1 of the true minimizer x = 4.
    assert numpy.isclose(rval['x1'], 4., atol=0.1)