# Common imports assumed by the snippets below; the scraped examples omit
# them, and exact import paths may vary across sherpa versions:
import collections
import math
import os
import time
from unittest import mock

import numpy
import numpy as np
import pandas
import pytest

import sherpa
import sherpa.algorithms
import sherpa.schedulers
from sherpa.algorithms import (Algorithm, BayesianOptimization, GPyOpt,
                               Repeat, SequentialTesting)
from sherpa.algorithms import bayesian_optimization


def test_pbt_ordinal():
    parameters = [sherpa.Ordinal(name='param_a', range=[-1, 0, 1])]

    algorithm = sherpa.algorithms.PopulationBasedTraining(num_generations=2,
                                                          population_size=10)

    study = sherpa.Study(parameters=parameters,
                         algorithm=algorithm,
                         lower_is_better=True,
                         disable_dashboard=True)

    for _ in range(10):
        trial = study.get_suggestion()
        print("Trial-ID={}".format(trial.id))
        print(trial.parameters)
        print()
        study.add_observation(trial=trial, iteration=1, objective=trial.parameters['param_a']*0.1)
        study.finalize(trial=trial,
                       status='COMPLETED')

    for _ in range(10):
        trial = study.get_suggestion()
        print("Trial-ID={}".format(trial.id))
        print(trial.parameters)
        print()
        assert trial.parameters['param_a'] in (-1, 0, 1)
        study.add_observation(trial=trial, iteration=1, objective=trial.parameters['param_a']*0.1)
        study.finalize(trial=trial,
                       status='COMPLETED')
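
Later examples drive the Study through its iterator protocol instead of calling get_suggestion/finalize by hand. A minimal sketch of the equivalent loop (RandomSearch stands in for PBT purely for illustration):

def iterator_loop_sketch():
    parameters = [sherpa.Ordinal(name='param_a', range=[-1, 0, 1])]
    algorithm = sherpa.algorithms.RandomSearch(max_num_trials=5)
    study = sherpa.Study(parameters=parameters,
                         algorithm=algorithm,
                         lower_is_better=True,
                         disable_dashboard=True)
    # Iterating a Study yields trials until the algorithm is exhausted.
    for trial in study:
        study.add_observation(trial=trial, iteration=1,
                              objective=trial.parameters['param_a'] * 0.1)
        study.finalize(trial=trial, status='COMPLETED')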
Example 2
def test_convex():
    def convex(x1, x2):
        # Global minimum is at x1=3., x2=5.
        return (x1 - 3.)**2 + (x2 - 5.)**2 + 0.1

    parameters = [
        sherpa.Continuous('x1', [-5., 10.]),
        sherpa.Continuous('x2', [0., 15.])
    ]

    bayesian_optimization = BayesianOptimization(num_grid_points=2,
                                                 max_num_trials=50,
                                                 fine_tune=True)
    study = sherpa.Study(algorithm=bayesian_optimization,
                         parameters=parameters,
                         lower_is_better=True,
                         disable_dashboard=True)

    for trial in study:
        print("Trial {}:\t{}".format(trial.id, trial.parameters))

        fval = convex(trial.parameters['x1'], trial.parameters['x2'])
        print("Function Value: {}".format(fval))
        study.add_observation(trial=trial, iteration=1, objective=fval)
        study.finalize(trial, status='COMPLETED')

    rval = study.get_best_result()
    print(rval)
    assert np.isclose(rval['Objective'], 0.1, rtol=1e-3)
Example 3
def test_1d():
    def obj_func(x):
        # Global maximum of 4 is at x=4
        return 4. * np.exp(-(x - 4.) ** 2 / 10.) * np.cos(1.5 * (x - 4.)) ** 2

    parameters = [sherpa.Continuous('x1', [0., 7.])]

    bayesian_optimization = BayesianOptimization(num_grid_points=5,
                                                 max_num_trials=50,
                                                 fine_tune=False)
    study = sherpa.Study(algorithm=bayesian_optimization,
                         parameters=parameters,
                         lower_is_better=False,
                         disable_dashboard=True)

    for trial in study:
        print("Trial {}:\t{}".format(trial.id, trial.parameters))

        fval = obj_func(trial.parameters['x1'])
        print("Function Value: {}".format(fval))
        study.add_observation(trial=trial,
                              iteration=1,
                              objective=fval)
        study.finalize(trial, status='COMPLETED')
    rval = study.get_best_result()
    assert np.isclose(rval['Objective'], 4.)
Example 4
def test_1d():
    def obj_func(x):
        # Global maximum of 4 is at x=4
        return 4. * numpy.exp(-(x - 4.)**2 / 10.) * numpy.cos(1.5 *
                                                              (x - 4.))**2

    parameters = [sherpa.Continuous('x1', [0., 7.])]

    bayesian_optimization = GPyOpt(max_concurrent=1,
                                   max_num_trials=50,
                                   model_type='GP',
                                   acquisition_type='EI')
    study = sherpa.Study(algorithm=bayesian_optimization,
                         parameters=parameters,
                         lower_is_better=False,
                         disable_dashboard=True)

    for trial in study:
        print("Trial {}:\t{}".format(trial.id, trial.parameters))

        fval = obj_func(trial.parameters['x1'])
        print("Function Value: {}".format(fval))
        study.add_observation(trial=trial, iteration=1, objective=fval)
        study.finalize(trial, status='COMPLETED')
    rval = study.get_best_result()
    print(rval)
    assert numpy.isclose(rval['Objective'], 4., atol=0.2)
Example 5
def test_user_code_fails(test_dir):

    tempdir = test_dir

    parameters = [
        sherpa.Choice(name="param_a", range=[1, 2, 3]),
        sherpa.Continuous(name="param_b", range=[0, 1])
    ]

    algorithm = sherpa.algorithms.RandomSearch(max_num_trials=3)

    study = sherpa.Study(parameters=parameters,
                         algorithm=algorithm,
                         lower_is_better=True)
    db_port = 27000
    scheduler = sherpa.schedulers.LocalScheduler()

    filename = os.path.join(tempdir, "test.py")
    with open(filename, 'w') as f:
        f.write(testscript2)

    with pytest.warns(RuntimeWarning):
        results = sherpa.optimize(filename=filename,
                                  study=study,
                                  output_dir=tempdir,
                                  scheduler=scheduler,
                                  max_concurrent=1,
                                  db_port=db_port)
Example 6
def sherpaopt():
    train, targ, test = data_prep()
    sigb, sigw, layers = 0.35204672, 2.1220488, 87
    gp = DNNGP(train, targ[1][:-targ[2]], test, sigb, sigw, layers)

    t0 = time.time()
    parameters = [sherpa.Discrete(name='layers', range=[2, 100]),
                  sherpa.Continuous(name='bias', range=[0, 5]),
                  sherpa.Continuous(name='weight', range=[.1, 2.09])]
    algorithm = sherpa.algorithms.PopulationBasedTraining(4)
    stop = sherpa.algorithms.MedianStoppingRule(0, 1)
    study = sherpa.Study(parameters=parameters,
                         algorithm=algorithm,
                         stopping_rule=stop,
                         lower_is_better=True,
                         disable_dashboard=True)

    for trial in study:
        print('still going:', trial.id)
        for iteration in range(1):
            error = GPerror(gp, targ, trial.parameters["bias"],
                            trial.parameters["weight"],
                            trial.parameters["layers"])
            study.add_observation(trial=trial,
                                  iteration=iteration,
                                  objective=error)
        study.finalize(trial)
        if trial.id == 100:  # set to around ~200
            break
    print(study.get_best_result())
    print("Time Optimizing: ", (time.time() - t0)/60, " minutes")
Example 7
def test_noisy_parabola():
    def f(x, sd=1):
        y = (x - 3)**2 + 10.
        if sd == 0:
            return y
        else:
            return y + numpy.random.normal(
                loc=0., scale=sd, size=numpy.array(x).shape)

    parameters = [sherpa.Continuous('x1', [0., 7.])]

    bayesian_optimization = GPyOpt(max_concurrent=1,
                                   max_num_trials=5,
                                   model_type='GP',
                                   acquisition_type='EI')
    rep = Repeat(algorithm=bayesian_optimization, num_times=3, agg=True)
    study = sherpa.Study(algorithm=rep,
                         parameters=parameters,
                         lower_is_better=True,
                         disable_dashboard=True)

    for trial in study:
        # print("Trial {}:\t{}".format(trial.id, trial.parameters))

        fval = f(trial.parameters['x1'], sd=1)
        # print("Function Value: {}".format(fval))
        study.add_observation(trial=trial, iteration=1, objective=fval)
        study.finalize(trial, status='COMPLETED')
    # rval = study.get_best_result()
    # print(rval)
    print(study.results.query("Status=='COMPLETED'"))
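Example 8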
def get_local_search_study_lower_is_better(params, seed):
    alg = sherpa.algorithms.LocalSearch(seed_configuration=seed)

    study = sherpa.Study(parameters=params, algorithm=alg,
                         lower_is_better=True,
                         disable_dashboard=True)
    return study
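
A hypothetical caller of this factory; the parameter list and seed below are illustrative, not from the original test suite:

def local_search_factory_usage_sketch():
    params = [sherpa.Continuous('x', [0., 1.])]
    study = get_local_search_study_lower_is_better(params, seed={'x': 0.5})
    # LocalSearch suggests the seed configuration first (see test_local_search below).
    trial = study.get_suggestion()
    study.add_observation(trial, iteration=1, objective=trial.parameters['x'])
    study.finalize(trial)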
Example 9
def test_study():
    mock_algorithm = mock.MagicMock()
    mock_algorithm.get_suggestion.return_value = {'a': 1, 'b': 2}
    mock_stopping_rule = mock.MagicMock()

    s = sherpa.Study(parameters=get_test_parameters(),
                     algorithm=mock_algorithm,
                     stopping_rule=mock_stopping_rule,
                     lower_is_better=True)

    t = s.get_suggestion()
    assert t.id == 1
    assert t.parameters == {'a': 1, 'b': 2}
    mock_algorithm.get_suggestion.assert_called_with(s.parameters, s.results,
                                                     s.lower_is_better)

    s.add_observation(trial=t, iteration=1, objective=0.1,
                      context={'other_metrics': 0.2})
    s.add_observation(trial=t, iteration=2, objective=0.01,
                      context={'other_metrics': 0.02})
    s.finalize(trial=t, status='COMPLETED')

    expected_df = pandas.DataFrame(collections.OrderedDict(
        [('Trial-ID', [1, 1, 1]),
         ('Status', ['INTERMEDIATE', 'INTERMEDIATE', 'COMPLETED']),
         ('Iteration', [1, 2, 2]),
         ('a', [1, 1, 1]),
         ('b', [2, 2, 2]),
         ('Objective', [0.1, 0.01, 0.01]),
         ('other_metrics', [0.2, 0.02, 0.02])]
    ))

    assert s.results.equals(expected_df)
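Example 10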
def test_wait():
    parameters = [sherpa.Continuous('myparam', [0, 1])]
    rs = sherpa.algorithms.RandomSearch()
    gs = SequentialTesting(algorithm=rs,
                           K=10,
                           n=(3, 6, 9),
                           P=0.5)
    study = sherpa.Study(algorithm=gs,
                         parameters=parameters,
                         lower_is_better=True,
                         disable_dashboard=True)

    for _ in range(10*3 - 1):
        trial = study.get_suggestion()
        print(trial.id, trial.parameters, "{}/{}".format(gs.k, gs.K[gs.t]),
              "{}/{}".format(gs.t, gs.T))
        study.add_observation(trial,
                              iteration=1,
                              objective=trial.parameters['myparam'] + numpy.random.normal(
                                  scale=0.01))
        study.finalize(trial)

    trial = study.get_suggestion()
    assert trial.parameters['stage'] == 1

    waittrial = study.get_suggestion()
    assert waittrial == 'WAIT'
    study.add_observation(trial,
                          iteration=1,
                          objective=trial.parameters['myparam'] + numpy.random.normal(
                              scale=0.01))
    study.finalize(trial)

    trial = study.get_suggestion()
    assert trial.parameters['stage'] == 2
Example 11
def test_repeat_results_aggregation():
    parameters = [sherpa.Continuous('myparam', [0, 1])]

    class MyAlg(sherpa.algorithms.Algorithm):
        allows_repetition = True
        def get_suggestion(self, parameters, results, lower_is_better):
            if results is not None and len(results) > 0:
                print(results)
                assert 'ObjectiveStdErr' in results.columns
                assert 'ObjectiveVar' in results.columns
                assert (results.loc[:, 'Objective'] == 0.).all()
                exp_std_err = numpy.sqrt(numpy.var([-1, 0, 1], ddof=1) / (3 - 1))
                assert (numpy.isclose(results.loc[:, 'ObjectiveStdErr'], exp_std_err)).all()
            return {'myparam': numpy.random.random()}


    alg = MyAlg()
    gs = sherpa.algorithms.Repeat(algorithm=alg,
                                  num_times=3,
                                  agg=True)
    study = sherpa.Study(algorithm=gs,
                         parameters=parameters,
                         lower_is_better=True,
                         disable_dashboard=True)
    for trial in study:
        study.add_observation(trial,
                              iteration=1,
                              objective=trial.id % 3 - 1)  # 1->0, 2->1, 3->-1, 4->0, ...
        study.finalize(trial)
        print(study.results)
        if trial.id > 10:
            break
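Example 12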
def test_results_aggregation():
    parameters = [sherpa.Continuous('myparam', [0, 1])]

    class MyAlg(Algorithm):
        def get_suggestion(self, parameters, results, lower_is_better):
            if results is not None and len(results) > 0:
                print(results)
                assert 'ObjectiveStdErr' in results.columns
                assert (results.loc[:, 'Objective'] == 0.).all()
                exp_std_err = numpy.sqrt(numpy.var([-1, 0, 1], ddof=1) / (3 - 1))
                assert (numpy.isclose(results.loc[:, 'ObjectiveStdErr'], exp_std_err)).all()
            return {'myparam': numpy.random.random()}


    alg = MyAlg()
    gs = SequentialTesting(algorithm=alg,
                           K=10,
                           n=(3, 6, 9),
                           P=0.5)
    study = sherpa.Study(algorithm=gs,
                         parameters=parameters,
                         lower_is_better=True,
                         disable_dashboard=True)
    for trial in study:
        study.add_observation(trial,
                              iteration=1,
                              objective=trial.id % 3 - 1)
        study.finalize(trial)
        print(study.results)
Example 13
    def __init__(self, pop, log_dir):
        parameters = [
            # Auto Encoder Parameters
            sherpa.Discrete('hidden_units_AE', range=[2, 8]),
            sherpa.Discrete('lstm_units_AE', range=[2, 16]),
            sherpa.Continuous('dropout_AE', range=[0, 0.5]),
            sherpa.Continuous('lr_AE', range=[0.000001, 0.1], scale='log'),
            # Detector Parameters
            sherpa.Discrete('hidden_units_DET', range=[2, 8]),
            sherpa.Discrete('lstm_units_DET', range=[2, 8]),
            sherpa.Continuous('dropout_DET', range=[0, 0.5]),
            sherpa.Continuous('leaky_alpha_DET', range=[0.01, 0.4]),
            sherpa.Continuous('lr_DET', range=[0.000001, 0.1], scale='log'),
            # GAN parameters
            sherpa.Continuous('lr_GAN', range=[0.000001, 0.1], scale='log')
        ]

        # Set an evolutionary algorithm for parameter search, enforce early stopping
        algorithm = sherpa.algorithms.PopulationBasedTraining(
            population_size=pop)
        rule = sherpa.algorithms.MedianStoppingRule(min_iterations=5,
                                                    min_trials=1)
        self.study = sherpa.Study(parameters,
                                  algorithm,
                                  lower_is_better=True,
                                  stopping_rule=rule,
                                  dashboard_port=9800)

        self.logs_dir = log_dir
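Example 14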
def test_bayes_opt():
    def f(x, sd=1):
        y = (x - 3) ** 2 + 10.
        if sd == 0:
            return y
        else:
            return y + numpy.random.normal(loc=0., scale=sd,
                                           size=numpy.array(x).shape)

    parameters = [sherpa.Continuous('x', [1, 6])]

    alg = GPyOpt(max_num_trials=10)
    gs = SequentialTesting(algorithm=alg,
                           K=10,
                           n=(3, 6, 9),
                           P=0.5)
    study = sherpa.Study(algorithm=gs,
                         parameters=parameters,
                         lower_is_better=True,
                         disable_dashboard=True)
    for trial in study:
        study.add_observation(trial,
                              iteration=1,
                              objective=f(trial.parameters['x']))
        study.finalize(trial)
        print(study.results)
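Example 15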
def test_overall_larger_is_better():
    parameters = [sherpa.Continuous('myparam', [0, 10]),
                  sherpa.Discrete('myparam2', [0, 10])]
    rs = sherpa.algorithms.RandomSearch()
    gs = SequentialTesting(algorithm=rs,
                           K=10,
                           n=(3, 6, 9),
                           P=0.5)
    study = sherpa.Study(algorithm=gs,
                         parameters=parameters,
                         lower_is_better=False,
                         disable_dashboard=True)

    for trial in study:
        print(trial.id, trial.parameters, "{}/{}".format(gs.k, gs.K[gs.t]),
              "{}/{}".format(gs.t, gs.T))

        study.add_observation(trial,
                              iteration=1,
                              objective=trial.parameters[
                                            'myparam'] + numpy.random.normal(
                                  scale=1.))
        study.finalize(trial)

    completed = study.results.query("Status == 'COMPLETED'")
    assert completed.myparam.max() in completed[completed.stage == 2].myparam.unique()
Example 16
def test_branin():
    def branin(x1, x2):
        # Global minimum 0.397887 at (-pi, 12.275), (pi, 2.275),
        # and (9.42478, 2.475)
        a = 1
        b = 5.1/(4*math.pi**2)
        c = 5/math.pi
        r = 6
        s = 10
        t = 1/(8*math.pi)
        return a*(x2 - b*x1**2 + c*x1 - r)**2 + s*(1-t)*math.cos(x1)+s

    parameters = [sherpa.Continuous('x1', [-5., 10.]),
                  sherpa.Continuous('x2', [0., 15.])]

    bayesian_optimization = BayesianOptimization(num_grid_points=2,
                                                 max_num_trials=50,
                                                 fine_tune=True)
    study = sherpa.Study(algorithm=bayesian_optimization,
                         parameters=parameters,
                         lower_is_better=True,
                         disable_dashboard=True)

    for trial in study:
        print("Trial {}:\t{}".format(trial.id, trial.parameters))

        fval = branin(trial.parameters['x1'], trial.parameters['x2'])
        print("Branin-Hoo: {}".format(fval))
        study.add_observation(trial=trial,
                              iteration=1,
                              objective=fval)
        study.finalize(trial, status='COMPLETED')
    rval = study.get_best_result()
    print(study.get_best_result())
    assert np.isclose(rval['Objective'], 0.397887, rtol=1e-3)
Example 17
def test_local_search():
    parameters = [
        sherpa.Continuous('cont', [0, 1]),
        sherpa.Ordinal('ord', [1, 2, 3])
    ]

    seed = {'cont': 0.5, 'ord': 2}
    alg = sherpa.algorithms.LocalSearch(seed_configuration=seed)

    study = sherpa.Study(parameters=parameters,
                         algorithm=alg,
                         lower_is_better=True,
                         disable_dashboard=True)

    def mock_objective(p):
        return p['cont'] / p['ord']

    # Initial suggestion.
    t = study.get_suggestion()
    tlist = [t]
    tbest = t
    assert t.parameters == seed
    study.add_observation(t,
                          objective=mock_objective(t.parameters),
                          iteration=1)
    study.finalize(t)

    # Perform a suggestion.
    t = study.get_suggestion()
    tlist.append(t)
    if mock_objective(t.parameters) < mock_objective(tbest.parameters):
        tbest = t
    study.add_observation(t,
                          objective=mock_objective(t.parameters),
                          iteration=1)
    study.finalize(t)
    if t.parameters['ord'] == 2:
        assert t.parameters['cont'] != 0.5
        assert abs(t.parameters['cont'] - 0.5) < 0.2
    else:
        assert t.parameters['cont'] == 0.5
        assert t.parameters['ord'] in [1, 3]

    # Do more iterations.
    for i in range(50):
        t = study.get_suggestion()
        #print(t.parameters)
        assert t.parameters['ord'] in [1, 2, 3]
        assert t.parameters['cont'] >= 0.0
        assert t.parameters['cont'] <= 1.0
        # All new suggestions should be based on tbest.
        assert t.parameters['ord'] == tbest.parameters['ord'] \
               or t.parameters['cont'] == tbest.parameters['cont']
        tlist.append(t)
        if mock_objective(t.parameters) < mock_objective(tbest.parameters):
            tbest = t
        study.add_observation(t,
                              objective=mock_objective(t.parameters),
                              iteration=1)
        study.finalize(t)
Example 18
def test_1d_minimize():
    def obj_func(x):
        # Negated objective: the global minimum of -4 is at x=4.
        return -4. * numpy.exp(-(x - 4.)**2 / 10.) * numpy.cos(1.5 *
                                                               (x - 4.))**2

    parameters = [sherpa.Continuous('x1', [0., 7.])]

    bayesian_optimization = GPyOpt(max_concurrent=1,
                                   max_num_trials=12,
                                   model_type='GP',
                                   acquisition_type='EI',
                                   initial_data_points=[{
                                       'x1': 2
                                   }, {
                                       'x1': 5
                                   }],
                                   num_initial_data_points=2)

    study = sherpa.Study(algorithm=bayesian_optimization,
                         parameters=parameters,
                         lower_is_better=True,
                         disable_dashboard=True)

    for trial in study:
        print("Trial {}:\t{}".format(trial.id, trial.parameters))

        fval = obj_func(trial.parameters['x1'])
        print("Function Value: {}".format(fval))
        study.add_observation(trial=trial, iteration=1, objective=fval)
        study.finalize(trial, status='COMPLETED')
    rval = study.get_best_result()
    print(rval)

    # bounds = [{'name': 'x', 'type': 'continuous', 'domain': (0, 7)}]
    # Xinit = numpy.array([2, 5]).reshape(-1, 1)
    # yinit = obj_func(Xinit)
    # myBopt = gpyopt_package.methods.BayesianOptimization(f=obj_func,
    #                                              # function to optimize
    #                                              domain=bounds,
    #                                              # box-constraints of the problem
    #                                              acquisition_type='EI',
    #                                              X=Xinit,
    #                                              y=yinit,
    #                                              initial_design_numdata=0,
    #                                              initial_design_type='random',
    #                                              evaluator_type='local_penalization',
    #                                              batch_size=1,
    #                                              maximize=True,
    #                                              exact_feval=False)
    # # Run the optimization
    # max_iter = 10  # evaluation budget
    # max_time = 60  # time budget
    # eps = 10e-6  # Minimum allows distance between the las two observations
    #
    # myBopt.run_optimization(max_iter, max_time, eps)
    # print(myBopt.get_evaluations())

    assert numpy.isclose(rval['x1'], 4., atol=0.1)
Example 19
def test_pbt():
    parameters = [sherpa.Continuous(name='param_a', range=[0, 1])]

    algorithm = sherpa.algorithms.PopulationBasedTraining(
        population_size=20, parameter_range={'param_a': [0., 1.2]})

    study = sherpa.Study(parameters=parameters,
                         algorithm=algorithm,
                         lower_is_better=True,
                         disable_dashboard=True)

    for _ in range(20):
        trial = study.get_suggestion()
        print("Trial-ID={}".format(trial.id))
        print(trial.parameters)
        print()
        study.add_observation(trial=trial,
                              iteration=1,
                              objective=trial.id * 0.1)
        study.finalize(trial=trial, status='COMPLETED')

    for _ in range(20):
        trial = study.get_suggestion()
        print("Trial-ID={}".format(trial.id))
        print(trial.parameters)
        print()
        parent_param = study.results.loc[study.results['Trial-ID'] == int(
            trial.parameters['load_from'])]['param_a'].iloc[0]
        print(parent_param)
        assert (trial.parameters['param_a'] == 0.8 * parent_param
                or trial.parameters['param_a'] == 1.0 * parent_param
                or trial.parameters['param_a'] == 1.2 * parent_param
                or trial.parameters['param_a'] == 0.
                or trial.parameters['param_a'] == 1.2)
        assert int(trial.parameters['load_from']) <= 10
        study.add_observation(trial=trial,
                              iteration=1,
                              objective=trial.id * 0.1)
        study.finalize(trial=trial, status='COMPLETED')

    for _ in range(20):
        trial = study.get_suggestion()
        print("Trial-ID={}".format(trial.id))
        print(trial.parameters)
        print()
        parent_param = study.results.loc[study.results['Trial-ID'] == int(
            trial.parameters['load_from'])]['param_a'].iloc[0]
        assert (trial.parameters['param_a'] == 0.8 * parent_param
                or trial.parameters['param_a'] == 1.0 * parent_param
                or trial.parameters['param_a'] == 1.2 * parent_param
                or trial.parameters['param_a'] == 0.
                or trial.parameters['param_a'] == 1.2)
        # assert int(trial.parameters['load_from']) <= 27
        study.add_observation(trial=trial,
                              iteration=1,
                              objective=trial.id * 0.1)
        study.finalize(trial=trial, status='COMPLETED')
Example 20
def get_test_study():
    mock_algorithm = mock.MagicMock()
    mock_algorithm.get_suggestion.return_value = {'a': 1, 'b': 2}
    mock_stopping_rule = mock.MagicMock()

    s = sherpa.Study(parameters=get_test_parameters(),
                     algorithm=mock_algorithm,
                     stopping_rule=mock_stopping_rule,
                     lower_is_better=True)

    return s
Example 21
def get_mock_study():
    mock_algorithm = mock.MagicMock()
    mock_algorithm.get_suggestion.return_value = {'a': 1, 'b': 2}
    mock_stopping_rule = mock.MagicMock()

    return sherpa.Study(parameters=[
        sherpa.Discrete('a', [1, 2]),
        sherpa.Choice('b', [2, 5, 7])
    ],
                        algorithm=mock_algorithm,
                        stopping_rule=mock_stopping_rule,
                        lower_is_better=True,
                        disable_dashboard=True)
Example 22
def test_get_best_result():
    parameters = [sherpa.Choice('a', [1, 2, 3])]
    gs = sherpa.algorithms.GridSearch()
    study = sherpa.Study(parameters=parameters, algorithm=gs,
                         lower_is_better=True,
                         disable_dashboard=True)

    objectives = [1.1, 1.2, 1.3]

    for obj, trial in zip(objectives, study):
        study.add_observation(trial, objective=obj)
        study.finalize(trial)

    assert study.get_best_result()['a'] == 1
Example 23
def test_repeat_get_best_result_called_midway():
    parameters = [sherpa.Choice('a', [1, 2, 3])]
    gs = sherpa.algorithms.GridSearch()
    gs = sherpa.algorithms.Repeat(algorithm=gs, num_times=3)
    study = sherpa.Study(parameters=parameters, algorithm=gs,
                         lower_is_better=True,
                         disable_dashboard=True)

    objectives = [2.1, 2.2, 2.3, 9., 0.1, 9.1, 1.1, 1.2, 1.3]
    expected = [None, None, 1, 1, 1, 1, 1, 1, 3]

    for exp, obj, trial in zip(expected, objectives, study):
        study.add_observation(trial, objective=obj)
        study.finalize(trial)
        assert study.get_best_result().get('a') == exp
Example 24
def test_chain():
    parameters = [sherpa.Continuous('a', [0, 1]),
                  sherpa.Choice('b', ['x', 'y', 'z'])]
    algorithm = sherpa.algorithms.Chain(algorithms=[sherpa.algorithms.GridSearch(num_grid_points=2),
                                                    sherpa.algorithms.RandomSearch(max_num_trials=10)])
    study = sherpa.Study(parameters=parameters, algorithm=algorithm,
                         lower_is_better=True,
                         disable_dashboard=True)

    for trial in study:
        if trial.id < 7:
            assert trial.parameters['a'] in [0, 1]
            assert trial.parameters['b'] == ['x', 'y', 'z'][trial.id % 3 - 1]
        else:
            assert trial.parameters['a'] not in [0, 1]
Example 25
def get_study():
    parameters = [
        sherpa.Ordinal("conv3d_num_filters", [16, 32, 64, 128]),
        sherpa.Ordinal("conv3d_kernel_size", [(3, 5, 5), (5, 5, 5), (5, 7, 7)]),
        sherpa.Discrete("encoder_rnn_num_layers", [1, 3]),
        sherpa.Continuous("encoder_rnn_dropout", [0.0, 0.3]),
        sherpa.Continuous("lr", [2e-4, 4e-3], scale="log"),
    ]
    algorithm = sherpa.algorithms.RandomSearch(max_num_trials=16)
    stopping_rule = sherpa.algorithms.MedianStoppingRule(min_iterations=8, min_trials=4)
    return sherpa.Study(
        parameters=parameters,
        algorithm=algorithm,
        lower_is_better=True,
        stopping_rule=stopping_rule,
    )
Example 26
def test_mixed_dtype():
    algorithm = GPyOpt(max_num_trials=4)
    parameters = [
        sherpa.Choice('param_int', [0, 1]),
        sherpa.Choice('param_float', [0.1, 1.1]),
    ]
    study = sherpa.Study(
        parameters=parameters,
        algorithm=algorithm,
        lower_is_better=True,
        disable_dashboard=True,
    )
    for trial in study:
        study.add_observation(trial, iteration=0, objective=0)
        study.finalize(trial)
    assert type(trial.parameters['param_int']) == int
    assert type(trial.parameters['param_float']) == float
Example 27
def test_genetic():
    """
    Since genetic algorithms are stochastic we will check for average improvements while testing new configurations
    """
    parameters = [
        sherpa.Ordinal(name='param_a',
                       range=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]),
        sherpa.Ordinal(
            name='param_b',
            range=['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j']),
        sherpa.Ordinal(name='param_c',
                       range=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]),
    ]

    algorithm = sherpa.algorithms.Genetic()

    study = sherpa.Study(parameters=parameters,
                         algorithm=algorithm,
                         lower_is_better=False,
                         disable_dashboard=True)
    mean_values = []
    for _ in range(500):
        results = study.results
        if results.shape[0] > 0:
            results = results[results['Status'] == 'COMPLETED']
            mean_values.append(results['Objective'].mean())
        trial = study.get_suggestion()
        print("Trial-ID={}".format(trial.id))
        print(trial.parameters)
        print()
        study.add_observation(trial=trial,
                              iteration=1,
                              objective=trial.parameters['param_a'] * 0.1 +
                              trial.parameters['param_c'] * 0.1)
        study.finalize(trial=trial, status='COMPLETED')
    ascending = 0
    for pos in range(len(mean_values) - 1):
        if mean_values[pos + 1] > mean_values[pos]:
            ascending += 1
    print(ascending / len(mean_values))
    assert ascending / len(
        mean_values
    ) > 0.7, "The mean Objective should improve at least 70% of the time a new result is added"

Example 28
    def build(self, model, f_in=None, f_mod=None, f_log=True, f_stats=True):
        f_in = models.path(model, "train.in") if not f_in else f_in
        f_mod = models.path(model, "model.%s" %
                            self.ext()) if not f_mod else f_mod
        if f_log is True:
            f_log = models.path(model, "train.log")
        f_stats = False  # TODO

        algorithm = sherpa.algorithms.GPyOpt()
        study = sherpa.Study(parameters=PARAMETERS,
                             algorithm=algorithm,
                             lower_is_better=True)
        best_obj = None
        results = {}
        pids = []

        ##log.disable()
        start = time.time()
        for trial in study:
            print(trial)
            (obj, context, pid, result, learner,
             model0) = self.observe(trial.parameters, model, **self.crossval)
            print("observed.")
            if best_obj is None or obj < best_obj:
                best_obj = obj
                best_learner = learner
                best_model = model0
            results.update(result)
            pids.append(pid)
            #redirect.finish(*redir)
            ##log.enable()
            log.text("| %s = %s" % (learner.desc(), obj))
            ##log.disable()
            #redir = redirect.start(f_log)
            study.add_observation(trial=trial, objective=obj, context=context)
            study.finalize(trial)
            if time.time() - start > self.tunetime:
                break
            print("done.")

        #redirect.finish(*redir)
        ##log.enable()
        os.system(
            "cp %s/model.%s %s" %
            (models.path(best_model), best_learner.ext(), models.path(model)))
        expres.dump.solved(pids=pids, results=results, **self.crossval)
Example 29
    def hyper_param(self, epochs):
        parameters = [
            sherpa.Continuous('learning_rate', [1e-4, 1e-2]),
            sherpa.Discrete('num_units', [32, 128]),
            sherpa.Choice('activation', ['relu', 'tanh', 'sigmoid'])  # 'tanh' assumed; 'adam' is an optimizer, not an activation
        ]
        algorithm = bayesian_optimization.GPyOpt(max_num_trials=50)
        study = sherpa.Study(parameters=parameters,
                             algorithm=algorithm,
                             lower_is_better=False)
        x_test = self._label / 300
        y_test = self._label

        for trial in study:
            lr = trial.parameters['learning_rate']
            num_units = trial.parameters['num_units']
            act = trial.parameters['activation']

            # Rebuild the model for each trial so the sampled hyperparameters
            # take effect; building it once up front with hard-coded values
            # would leave the trial parameters unused.
            # `optimizers` is assumed to come from tensorflow.keras.
            model = models.Sequential()
            model.add(
                layers.Embedding(self.totalwords,
                                 64,
                                 input_length=maxSequenceLen - 1))
            model.add(layers.LSTM(num_units, activation=act))
            model.add(layers.Dense(self.totalwords, activation='softmax'))
            model.compile(loss='categorical_crossentropy',
                          optimizer=optimizers.Adam(learning_rate=lr),
                          metrics=['accuracy'])

            for i in range(epochs):
                model.fit(self.predictors, self._label, batch_size=self.batch)
                loss, accuracy = model.evaluate(x_test, y_test)
                study.add_observation(trial=trial,
                                      iteration=i,
                                      objective=accuracy,
                                      context={'loss': loss})

                if study.should_trial_stop(trial):
                    break
            study.finalize(trial=trial)
            print(study.get_best_result())
Example 30
def create_study(algorithm, parameters, lower_is_better=False, algo_params=None):
    algo_params = algo_params or {}
    # Handle algorithm input: accept an algorithm name or an instance.
    if isinstance(algorithm, str):
        # Look up the algorithm class by name and instantiate it.
        algorithm = dict_retrieve(SHERPA_ALGORITHM_MAPPER, algorithm)(**algo_params)
    elif isinstance(algorithm, sherpa.algorithms.core.Algorithm):
        # Already an Algorithm instance; use it as-is.
        pass
    else:
        raise ValueError(
            "algorithm must be of type sherpa.algorithms.core.Algorithm "
            "or str ('bayes' or 'random')"
        )
    print(parameters)
    study = sherpa.Study(
        parameters=parameters,
        algorithm=algorithm,
        lower_is_better=lower_is_better,
        disable_dashboard=True  # dashboard disabled since it doesn't work on Windows
    )
    return study
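
Hypothetical usage of create_study; SHERPA_ALGORITHM_MAPPER and its 'random' key are assumptions about the surrounding module, which is not shown here:

# Sketch only: assumes SHERPA_ALGORITHM_MAPPER maps 'random' to
# sherpa.algorithms.RandomSearch.
params = [sherpa.Continuous('lr', [1e-4, 1e-1], scale='log')]
study = create_study('random', params, lower_is_better=True,
                     algo_params={'max_num_trials': 10})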