def test_get_best_result_sample_lower():
    parameters = [sherpa.Choice('a', [0, 1]),
                  sherpa.Choice('b', [3, 4])]

    results_df = pandas.DataFrame(collections.OrderedDict(
        [('Trial-ID', list(range(1, 13))),
         ('Status', [sherpa.TrialStatus.COMPLETED] * 12),
         ('stage', [1] * 8 + [2] * 4),
         ('a', [0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0]),
         ('b', [3, 3, 4, 4, 3, 3, 4, 4, 3, 3, 4, 4]),
         ('Objective', [1., 1.1, 2.1, 2.2, 5., 5.1, 6., 6.1, 1., 1.1, 2.1, 2.2])]
    ))

    rs = sherpa.algorithms.RandomSearch()
    gs = SequentialTesting(algorithm=rs,
                           K=4,
                           n=(3, 6, 9),
                           P=0.5,
                           sample_best=True)

    s = set()
    for _ in range(20):
        best_config = gs.get_best_result(parameters, results_df,
                                         lower_is_better=True)
        s.add(tuple(sorted(best_config.items())))
        
    # check that over 20 tries each of the two remaining final-stage
    # configs gets sampled at least once
    assert len(s) == 2
def test_repeat_grid_search():
    parameters = [sherpa.Choice('a', [1, 2]),
                  sherpa.Choice('b', ['a', 'b'])]

    alg = sherpa.algorithms.GridSearch()
    alg = sherpa.algorithms.Repeat(algorithm=alg, num_times=3)

    suggestion = alg.get_suggestion(parameters)
    seen = list()

    while suggestion != sherpa.AlgorithmState.DONE:
        seen.append((suggestion['a'], suggestion['b']))
        suggestion = alg.get_suggestion(parameters)

    expected_params = [(1, 'a'),
                       (1, 'b'),
                       (2, 'a'),
                       (2, 'b')]

    expected = list(itertools.chain.from_iterable(
        itertools.repeat(x, 3) for x in expected_params))

    print(sorted(expected))
    print(sorted(seen))

    assert sorted(expected) == sorted(seen)
def test_prep_df_for_linreg():
    parameters = [sherpa.Choice('a', [0, 1]),
                  sherpa.Choice('b', [3, 4])]

    configs = [{'a': 0, 'b': 3}, {'a': 0, 'b': 4}]

    results_df = pandas.DataFrame(collections.OrderedDict(
        [('Trial-ID', list(range(1, 9))),
         ('Status', [sherpa.TrialStatus.COMPLETED] * 8),
         ('stage', [1] * 8),
         ('a', [0, 0, 0, 0, 1, 1, 1, 1]),
         ('b', [3, 3, 4, 4, 3, 3, 4, 4]),
         ('Objective', [1., 1.1, 2., 2.1, 5., 5.1, 6., 6.1])]
    ))

    rs = sherpa.algorithms.RandomSearch()
    gs = SequentialTesting(algorithm=rs,
                           K=4,
                           n=(3, 6, 9),
                           P=0.5)

    r = gs._prep_df_for_linreg(parameters, results_df, configs, lower_is_better=True)
    cols = sorted(r.columns.tolist())

    r_expected = results_df.loc[results_df.Objective < 5.]
    r_expected = r_expected.assign(Rank=pandas.Series([1, 1, 2, 2]))
    print(r)
    print(r_expected)

    assert r.loc[:, cols].equals(r_expected.loc[:, cols])
def test_get_best_configs_larger_is_better():
    parameters = [sherpa.Choice('a', [0, 1]),
                  sherpa.Choice('b', [3, 4])]

    configs = [{'a': 0, 'b': 3},
               {'a': 0, 'b': 4},
               {'a': 1, 'b': 3},
               {'a': 1, 'b': 4}]

    results_df = pandas.DataFrame(collections.OrderedDict(
        [('Trial-ID', list(range(1, 9))),
         ('Status', [sherpa.TrialStatus.COMPLETED] * 8),
         ('stage', [1] * 8),
         ('a', [0, 0, 0, 0, 1, 1, 1, 1]),
         ('b', [3, 3, 4, 4, 3, 3, 4, 4]),
         ('Objective', [1., 1.1, 1.1, 1.2, 6., 6.1, 6., 6.1])]
    ))

    rs = sherpa.algorithms.RandomSearch()
    gs = SequentialTesting(algorithm=rs,
                           K=4,
                           n=(3, 6, 9),
                           P=0.5)

    best_configs = gs._get_best_configs(parameters, results_df, configs,
                                        lower_is_better=False,
                                        alpha=0.05)
    print(best_configs)
    print(configs[2:])
    assert best_configs == configs[2:]
def test_optimize_mix():
    # Test a mixture of parameter types: continuous, choice, and discrete
    parameters = [
        sherpa.Continuous('continuous', [
            0.,
            1,
        ]),
        sherpa.Choice('choice', [1, 2, 3, 4]),
        sherpa.Choice('choice2', [1, 2, 3]),
        sherpa.Discrete('discrete', [0, 100])
    ]

    # In design space the function has its maximum of 7 at
    # x = [0.5, 0, 0, 0, 1, 0, 0, 1, 0.5]
    def fun(x):
        cont = -1. * (x[0] - 0.5)**2
        ch = np.dot(x[1:5], np.array([1, 2, 3, 4]))
        ch2 = np.dot(x[5:8], np.array([1, 2, 3]))
        discr = -1. * (x[-1] - 0.5)**2
        return cont + ch + ch2 + discr

    bayesian_optimization = BayesianOptimization()
    bayesian_optimization.num_candidates = 100

    candidates = bayesian_optimization._generate_candidates(parameters)
    X = bayesian_optimization._to_design(candidates, parameters)

    Xoptimized, fun_values = bayesian_optimization._maximize(X, fun)
    # print(Xoptimized)
    # print(fun_values)
    print(Xoptimized[fun_values.argmax()])
    print(fun_values.max())
    assert np.all(
        np.isclose(Xoptimized[fun_values.argmax()],
                   np.array([0.5, 0., 0., 0., 1., 0., 0., 1., 0.5])))
def test_grid_search_repeat():
    parameters = [
        sherpa.Choice('a', [1, 2]),
        sherpa.Choice('b', ['a', 'b']),
        sherpa.Continuous('c', [1, 4])
    ]

    alg = sherpa.algorithms.GridSearch(repeat=3)

    suggestion = alg.get_suggestion(parameters)
    seen = list()

    while suggestion:
        seen.append((suggestion['a'], suggestion['b'], suggestion['c']))
        suggestion = alg.get_suggestion(parameters)

    expected_params = [(1, 'a', 2.0), (1, 'a', 3.0), (1, 'b', 2.0),
                       (1, 'b', 3.0), (2, 'a', 2.0), (2, 'a', 3.0),
                       (2, 'b', 2.0), (2, 'b', 3.0)]

    expected = list(
        itertools.chain.from_iterable(
            itertools.repeat(x, 3) for x in expected_params))

    print(sorted(expected))
    print(sorted(seen))

    assert sorted(expected) == sorted(seen)
    def get_sherpa_parameter(self, name, good_values_only=False):
        if good_values_only:
            return sherpa.Choice(name=name, range=self.good_values)
        elif self.is_categorical:
            return sherpa.Choice(name=name, range=self.categories)
        elif self.type == int:
            return sherpa.Discrete(name=name,
                                   range=self.range,
                                   scale='log' if self.log_scale else 'linear')
        else:
            return sherpa.Continuous(
                name=name,
                range=self.range,
                scale='log' if self.log_scale else 'linear')
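# get_sherpa_parameter above is a method of a parameter-description class that
# is not shown here. A minimal sketch of what each branch returns; the names
# and ranges below are illustrative assumptions, not part of the original class:
def demo_branch_outputs():
    return [
        sherpa.Discrete(name='num_units', range=[16, 1024], scale='log'),         # self.type == int
        sherpa.Continuous(name='learning_rate', range=[1e-5, 1e-1], scale='log'), # float-typed
        sherpa.Choice(name='optimizer', range=['adam', 'sgd']),                   # is_categorical
    ]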
def test_random_search():
    parameters = [
        sherpa.Continuous('a', [0, 1]),
        sherpa.Choice('b', ['x', 'y', 'z'])
    ]
    rs = sherpa.algorithms.RandomSearch(max_num_trials=10, repeat=10)
    config_repeat = {}

    for i in range(10):
        config = rs.get_suggestion(parameters=parameters)
        assert config != config_repeat
        for j in range(9):
            config_repeat = rs.get_suggestion(parameters=parameters)
            assert config == config_repeat

    assert rs.get_suggestion(parameters=parameters) is None

    rs = sherpa.algorithms.RandomSearch(max_num_trials=10, repeat=1)
    last_config = {}

    for i in range(10):
        config = rs.get_suggestion(parameters=parameters)
        assert config != last_config
        last_config = config

    assert rs.get_suggestion(parameters=parameters) is None

    rs = sherpa.algorithms.RandomSearch()
    last_config = {}

    for _ in range(1000):
        config = rs.get_suggestion(parameters=parameters)
        assert config != last_config
        last_config = config
def test_design():
    parameters = [
        sherpa.Choice('choice', ['a', 'b', 'c', 'd']),
        sherpa.Continuous('continuous', [0., 0.4]),
        sherpa.Discrete('discrete', [0, 12])
    ]

    bayesian_optimization = BayesianOptimization()
    bayesian_optimization.num_candidates = 100

    candidates = bayesian_optimization._generate_candidates(parameters)
    assert len(candidates) == bayesian_optimization.num_candidates
    assert len(candidates.columns) == len(parameters)

    X = bayesian_optimization._to_design(candidates, parameters)
    assert X.shape == (bayesian_optimization.num_candidates, 6)
    for j in range(X.shape[1]):
        assert (X[:, j] >= 0.).all() and (X[:, j] <= 1.).all()

    df = bayesian_optimization._from_design(X)
    pd.testing.assert_frame_equal(df, candidates)

    row = bayesian_optimization._from_design(X[0])
    row_dict = row.iloc[0].to_dict()
    candidates_dict = candidates.iloc[0].to_dict()
    assert row_dict['choice'] == candidates_dict['choice']
    assert row_dict['discrete'] == candidates_dict['discrete']
    assert np.isclose(row_dict['continuous'], candidates_dict['continuous'])
def test_user_code_fails(test_dir):

    tempdir = test_dir

    parameters = [
        sherpa.Choice(name="param_a", range=[1, 2, 3]),
        sherpa.Continuous(name="param_b", range=[0, 1])
    ]

    algorithm = sherpa.algorithms.RandomSearch(max_num_trials=3)

    db_port = 27000
    scheduler = sherpa.schedulers.LocalScheduler()

    filename = os.path.join(tempdir, "test.py")
    with open(filename, 'w') as f:
        f.write(testscript2)

    with pytest.warns(RuntimeWarning):
        results = sherpa.optimize(filename=filename,
                                  lower_is_better=True,
                                  algorithm=algorithm,
                                  parameters=parameters,
                                  output_dir=tempdir,
                                  scheduler=scheduler,
                                  max_concurrent=1,
                                  db_port=db_port)
class TestLocalSearch:
    @pytest.mark.parametrize("parameter,seed,expected",
                             [(sherpa.Ordinal('p', [0, 1, 2, 3, 4]), {'p': 2}, [1, 3]),
                              (sherpa.Continuous('p', [0, 1]), {'p': 0.5}, [0.5*0.8, 0.5*1.2]),
                              (sherpa.Discrete('p', [0, 10]), {'p': 5}, [4, 6]),
                              (sherpa.Choice('p', [0, 1, 2, 3, 4]), {'p': 2}, [0, 1, 3, 4])])
    def test_seed_and_first_suggestion(self, parameter, seed, expected):
        study = get_local_search_study_lower_is_better([parameter],
                                                       seed)
        trial = study.get_suggestion()
        assert trial.parameters['p'] == seed['p']
        study.add_observation(trial, objective=trial.parameters['p'], iteration=1)
        study.finalize(trial)

        trial = study.get_suggestion()
        assert trial.parameters['p'] in expected

    @pytest.mark.parametrize("parameter,seed,expected",
                             [(sherpa.Ordinal('p', [0, 1, 2, 3, 4]), {'p': 2}, [0, 1]),
                              (sherpa.Continuous('p', [0, 1]), {'p': 0.5}, [0.5*(0.8), 0.5*(0.8)**2]),
                              (sherpa.Discrete('p', [0, 10]), {'p': 5}, [int(5*(0.8)), int(5*(0.8)**2)]),
                              (sherpa.Choice('p', [0, 1, 2]), {'p': 2}, [0])])
    def test_expected_value_after_three_iterations(self, parameter, seed, expected):
        study = get_local_search_study_lower_is_better([parameter],
                                                       seed)
        for trial in study:
            study.add_observation(trial, objective=trial.parameters['p'], iteration=1)
            study.finalize(trial)
            if trial.id == 3:
                break

        assert study.get_best_result()['Objective'] in expected

    @pytest.mark.parametrize("param1,seed1,param2,seed2", [(sherpa.Ordinal('p1', [0, 1, 2, 3, 4]), {'p1': 2},
                                                            sherpa.Continuous('p2', [0, 1]), {'p2': 0.5})])
    def test_only_one_parameter_is_perturbed_at_a_time(self, param1, seed1, param2, seed2):
        seed = dict(seed1, **seed2)
        study = get_local_search_study_lower_is_better([param1, param2],
                                                       seed=seed)
        trial = study.get_suggestion()
        study.add_observation(trial, objective=1, iteration=1)
        study.finalize(trial)

        trial = study.get_suggestion()
        # at least one parameter must keep its seed value, i.e. only one
        # parameter is perturbed at a time
        assert any(
            param_value == seed[param_name] for param_name, param_value in
            trial.parameters.items())
def test_mixed_dtype():
    algorithm = GPyOpt(max_num_trials=4)
    parameters = [
        sherpa.Choice('param_int', [0, 1]),
        sherpa.Choice('param_float', [0.1, 1.1]),
    ]
    study = sherpa.Study(
        parameters=parameters,
        algorithm=algorithm,
        lower_is_better=True,
        disable_dashboard=True,
    )
    for trial in study:
        study.add_observation(trial, iteration=0, objective=0)
        study.finalize(trial)
    assert type(trial.parameters['param_int']) == int
    assert type(trial.parameters['param_float']) == float
def parameters():
    parameters = [
        sherpa.Continuous('dropout', [0., 0.5]),
        sherpa.Continuous('lr', [1e-7, 1e-1], 'log'),
        sherpa.Choice('activation', ['relu', 'tanh', 'sigmoid']),
        sherpa.Discrete('num_hidden', [100, 300])
    ]
    return parameters
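# A minimal sketch of consuming this search space; RandomSearch and the dummy
# objective below are assumptions for illustration:
def demo_random_search_over_parameters():
    study = sherpa.Study(parameters=parameters(),
                         algorithm=sherpa.algorithms.RandomSearch(max_num_trials=5),
                         lower_is_better=True,
                         disable_dashboard=True)
    for trial in study:
        # use the suggested dropout value itself as a stand-in objective
        study.add_observation(trial, objective=trial.parameters['dropout'],
                              iteration=1)
        study.finalize(trial)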
def test_grid_search():
    parameters = [
        sherpa.Choice('a', [1, 2]),
        sherpa.Choice('b', ['a', 'b']),
        sherpa.Continuous('c', [1, 4])
    ]

    alg = sherpa.algorithms.GridSearch()

    suggestion = alg.get_suggestion(parameters)
    seen = set()

    while suggestion:
        seen.add((suggestion['a'], suggestion['b'], suggestion['c']))
        suggestion = alg.get_suggestion(parameters)

    assert seen == {(1, 'a', 2.0), (1, 'a', 3.0), (1, 'b', 2.0), (1, 'b', 3.0),
                    (2, 'a', 2.0), (2, 'a', 3.0), (2, 'b', 2.0), (2, 'b', 3.0)}
def build_sherpa_augmentations_space():
    params = [
        sherpa.Continuous(name='width_shift', range=[0.0, 0.2]),
        sherpa.Continuous(name='height_shift', range=[0.0, 0.2]),
        sherpa.Continuous(name='zoom', range=[0.0, 0.3]),
        sherpa.Choice(name='horizontal_flip', range=[False, True]),
        sherpa.Discrete(name='rotation', range=[0, 30])
    ]
    return params
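# One way this space could drive Keras data augmentation; the mapping of each
# parameter onto an ImageDataGenerator keyword argument is an assumption, not
# part of the original code:
from tensorflow.keras.preprocessing.image import ImageDataGenerator

def datagen_from_trial(trial_parameters):
    return ImageDataGenerator(width_shift_range=trial_parameters['width_shift'],
                              height_shift_range=trial_parameters['height_shift'],
                              zoom_range=trial_parameters['zoom'],
                              horizontal_flip=trial_parameters['horizontal_flip'],
                              rotation_range=trial_parameters['rotation'])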
def test_strip_add_choice():
    parameters = [sherpa.Choice('choice', ['a', 'b', 'c', 'd']),
                  sherpa.Continuous('continuous', [0., 0.4]),
                  sherpa.Choice('choice2', [1, 2, 3]),
                  sherpa.Discrete('discrete', [0, 12])]

    bayesian_optimization = BayesianOptimization()
    bayesian_optimization.num_candidates = 5

    candidates = bayesian_optimization._generate_candidates(parameters)

    X = bayesian_optimization._to_design(candidates, parameters)
    for i, row in enumerate(X):
        print(row)
        x, args = bayesian_optimization._strip_choice(row)
        # print("x: ", x)
        # print("args: ", args)
        new_row = bayesian_optimization._add_choice(x, *args)
        print(new_row)
        assert np.all(row == new_row)
def parameter_mask_setter(parameters, parameter_class):
    '''
    Creates an optimizable Boolean mask for each of the given parameters.
    :param parameters: iterable of parameter names
    :param parameter_class: sherpa parameter class used to derive the name suffix
    :return: dict mapping each parameter name to a sherpa.Choice over [True, False]
    '''
    type_sufix = parameter_class_sufix_mapper(parameter_class)
    return {
        parameter: sherpa.Choice(parameter + type_sufix, [True, False])
        for parameter in parameters
    }
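# Expected shape of the result, assuming parameter_class_sufix_mapper (defined
# elsewhere) returns a suffix such as '_choice' for sherpa.Choice:
#   parameter_mask_setter(['lr', 'dropout'], sherpa.Choice)
#   -> {'lr': sherpa.Choice('lr_choice', [True, False]),
#       'dropout': sherpa.Choice('dropout_choice', [True, False])}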
def get_mock_study():
    mock_algorithm = mock.MagicMock()
    mock_algorithm.get_suggestion.return_value = {'a': 1, 'b': 2}
    mock_stopping_rule = mock.MagicMock()

    return sherpa.Study(parameters=[
        sherpa.Discrete('a', [1, 2]),
        sherpa.Choice('b', [2, 5, 7])
    ],
                        algorithm=mock_algorithm,
                        stopping_rule=mock_stopping_rule,
                        lower_is_better=True,
                        disable_dashboard=True)
def build_sherpa_parameter_space():
    params = [
        sherpa.Ordinal(name='depth', range=[2,3]),
        sherpa.Discrete(name='dense_neurons', range=[100, 164]),
        sherpa.Discrete(name='init_filters', range=[8,32]),
        sherpa.Choice(name='use_batchnorm', range=[False, True]),
        sherpa.Continuous(name='dropout', range=[0.35, 0.55]),
        sherpa.Ordinal(name='batch_size', range=[512, 1024]),
        sherpa.Continuous(name='learning_rate', range=[0.005, 0.01]),
        sherpa.Continuous(name='beta1', range=[0.45, 0.55]),
        sherpa.Continuous(name='beta2', range=[0.95, 1.0])
    ]
    return params
def test_get_best_result_larger():
    parameters = [sherpa.Choice('a', [0, 1]),
                  sherpa.Choice('b', [3, 4])]

    results_df = pandas.DataFrame(collections.OrderedDict(
        [('Trial-ID', list(range(1, 9))),
         ('Status', [sherpa.TrialStatus.COMPLETED] * 8),
         ('stage', [1] * 8),
         ('a', [0, 0, 0, 0, 1, 1, 1, 1]),
         ('b', [3, 3, 4, 4, 3, 3, 4, 4]),
         ('Objective', [1., 1.1, 2.1, 2.2, 5., 5.1, 6., 6.1])]
    ))

    rs = sherpa.algorithms.RandomSearch()
    gs = SequentialTesting(algorithm=rs,
                           K=4,
                           n=(3, 6, 9),
                           P=0.5)

    best_config = gs.get_best_result(parameters, results_df,
                                      lower_is_better=False)
    assert best_config == {'a': 1, 'b': 4, 'MeanObjective': 6.05}
def test_get_best_result():
    parameters = [sherpa.Choice('a', [1,2,3])]
    gs = sherpa.algorithms.GridSearch()
    study = sherpa.Study(parameters=parameters, algorithm=gs,
                         lower_is_better=True,
                         disable_dashboard=True)

    objectives = [1.1,1.2,1.3]

    for obj, trial in zip(objectives, study):
        study.add_observation(trial, objective=obj)
        study.finalize(trial)

    assert study.get_best_result()['a'] == 1
def test_repeat_rs():
    parameters = [sherpa.Continuous('a', [0, 1]),
                  sherpa.Choice('b', ['x', 'y', 'z'])]
    rs = sherpa.algorithms.RandomSearch(max_num_trials=10)
    rs = sherpa.algorithms.Repeat(algorithm=rs, num_times=10)
    config_repeat = {}

    for i in range(10):
        config = rs.get_suggestion(parameters=parameters)
        assert config != config_repeat
        for j in range(9):
            config_repeat = rs.get_suggestion(parameters=parameters)
            assert config == config_repeat

    assert rs.get_suggestion(parameters=parameters) == sherpa.AlgorithmState.DONE
def test_chain():
    parameters = [sherpa.Continuous('a', [0, 1]),
                  sherpa.Choice('b', ['x', 'y', 'z'])]
    algorithm = sherpa.algorithms.Chain(algorithms=[sherpa.algorithms.GridSearch(num_grid_points=2),
                                                    sherpa.algorithms.RandomSearch(max_num_trials=10)])
    study = sherpa.Study(parameters=parameters, algorithm=algorithm,
                         lower_is_better=True,
                         disable_dashboard=True)

    for trial in study:
        if trial.id < 7:
            assert trial.parameters['a'] in [0, 1]
            assert trial.parameters['b'] == ['x', 'y', 'z'][trial.id%3-1]
        else:
            assert trial.parameters['a'] not in [0, 1]
def test_repeat_get_best_result_called_midway():
    parameters = [sherpa.Choice('a', [1,2,3])]
    gs = sherpa.algorithms.GridSearch()
    gs = sherpa.algorithms.Repeat(algorithm=gs, num_times=3)
    study = sherpa.Study(parameters=parameters, algorithm=gs,
                         lower_is_better=True,
                         disable_dashboard=True)

    objectives = [2.1,2.2,2.3, 9., 0.1, 9.1, 1.1,1.2,1.3]
    expected = [None, None, 1, 1, 1, 1, 1, 1, 3]

    for exp, obj, trial in zip(expected, objectives, study):
        study.add_observation(trial, objective=obj)
        study.finalize(trial)
        assert study.get_best_result().get('a') == exp
def test_transformers():
    parameter = sherpa.Choice('choice', ['a', 'b', 'c', 'd'])
    transformer = BayesianOptimization.ChoiceTransformer(parameter)
    assert np.all(
        transformer.transform(['d', 'c', 'b', 'a']) == np.flip(np.eye(4),
                                                               axis=0))

    assert all(
        transformer.reverse(transformer.transform(['d', 'c', 'b', 'a'])) ==
        np.array(['d', 'c', 'b', 'a']))

    parameter = sherpa.Continuous('continuous', [0., 0.4])
    transformer = BayesianOptimization.ContinuousTransformer(parameter)
    assert np.all(
        transformer.transform([0.2, 0.4, 0.]) == np.array([0.5, 1.0, 0.0]))
    assert np.all(
        transformer.reverse(transformer.transform([0.2, 0.4, 0.])) == np.array(
            [0.2, 0.4, 0.]))

    parameter = sherpa.Continuous('continuous-log', [0.00001, 0.1], 'log')
    transformer = BayesianOptimization.ContinuousTransformer(parameter)
    print(transformer.transform([0.01]))
    assert np.all(
        transformer.transform([0.0001, 0.001, 0.01]) == np.array(
            [0.25, 0.5, 0.75]))
    print(transformer.reverse(transformer.transform([0.0001, 0.001, 0.01])))
    assert np.all(
        transformer.reverse(transformer.transform([0.0001, 0.001, 0.01])) ==
        np.array([0.0001, 0.001, 0.01]))

    parameter = sherpa.Discrete('discrete', [0, 12])
    transformer = BayesianOptimization.DiscreteTransformer(parameter)
    assert np.all(
        transformer.transform([3, 6, 9]) == np.array([0.25, 0.5, 0.75]))
    assert np.all(
        transformer.reverse(transformer.transform([3, 6, 9])) == np.array(
            [3, 6, 9]))
    assert np.all(transformer.reverse([0.2, 0.3, 0.4]) == np.array([2, 4, 5]))

    parameter = sherpa.Discrete('discrete-log', [10, 100000], 'log')
    transformer = BayesianOptimization.DiscreteTransformer(parameter)
    assert np.all(
        transformer.transform([10, 100, 1000, 10000, 100000]) == np.array(
            [0., 0.25, 0.5, 0.75, 1.]))
    assert np.all(
        transformer.reverse(
            transformer.transform([10, 100, 1000, 10000, 100000])) == np.array(
                [10, 100, 1000, 10000, 100000]))
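# The log-scale expectations above follow from mapping log10(value) linearly
# onto [0, 1]; a minimal re-derivation (base-10 logs are an assumption
# consistent with the asserted values):
def demo_log_transform():
    low, high = np.log10(0.00001), np.log10(0.1)    # -5.0 and -1.0
    return (np.log10(0.001) - low) / (high - low)   # 0.5, matching the assert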
def test_grid_search():
    parameters = [sherpa.Choice('choice', ['a', 'b']),
                  sherpa.Continuous('continuous', [2, 3])]

    alg = sherpa.algorithms.GridSearch(num_grid_points=2)

    suggestion = alg.get_suggestion(parameters)
    seen = set()

    while suggestion != sherpa.AlgorithmState.DONE:
        seen.add((suggestion['choice'], suggestion['continuous']))
        suggestion = alg.get_suggestion(parameters)

    assert seen == {('a', 2.0),
                    ('a', 3.0),
                    ('b', 2.0),
                    ('b', 3.0)}
    def hyper_param(self, epochs):
        parameters = [
            sherpa.Continuous('learning_rate', [1e-4, 1e-2]),
            sherpa.Discrete('num_units', [32, 128]),
            sherpa.Choice('activation', ['relu', 'tanh', 'sigmoid'])
        ]
        algorithm = bayesian_optimization.GPyOpt(max_num_trials=50)
        study = sherpa.Study(parameters=parameters,
                             algorithm=algorithm,
                             lower_is_better=False)
        x_test = self._label / 300
        y_test = self._label

        for trial in study:
            lr = trial.parameters['learning_rate']
            num_units = trial.parameters['num_units']
            act = trial.parameters['activation']

            # Build a fresh model for each trial so the suggested
            # hyperparameters are actually applied; optimizers is assumed
            # imported alongside models/layers (e.g. from tensorflow.keras)
            model = models.Sequential()
            model.add(
                layers.Embedding(self.totalwords,
                                 64,
                                 input_length=maxSequenceLen - 1))
            model.add(layers.LSTM(num_units, activation=act))
            model.add(layers.Dense(self.totalwords, activation='softmax'))
            model.compile(loss='categorical_crossentropy',
                          optimizer=optimizers.Adam(learning_rate=lr),
                          metrics=['accuracy'])

            for i in range(epochs):
                model.fit(self.predictors, self._label, batch_size=self.batch)
                loss, accuracy = model.evaluate(x_test, y_test)
                study.add_observation(trial=trial,
                                      iteration=i,
                                      objective=accuracy,
                                      context={'loss': loss})

                if study.should_trial_stop(trial):
                    break
            study.finalize(trial=trial)
            print(study.get_best_result())
def test_repeat_wait_for_completion():
    parameters = [
        sherpa.Continuous('a', [0, 1]),
        sherpa.Choice('b', ['x', 'y', 'z'])
    ]
    rs = sherpa.algorithms.RandomSearch(max_num_trials=10)
    rs = sherpa.algorithms.Repeat(algorithm=rs,
                                  num_times=10,
                                  wait_for_completion=True)
    study = sherpa.Study(parameters=parameters,
                         algorithm=rs,
                         lower_is_better=True,
                         disable_dashboard=True)

    for i in range(10):
        tnew = study.get_suggestion()
        print(tnew.parameters)
        assert isinstance(tnew.parameters, dict)
        config = tnew.parameters
        study.add_observation(tnew, objective=float(i), iteration=1)
        study.finalize(tnew)

        for j in range(9):
            t = study.get_suggestion()
            config_repeat = t.parameters
            assert config == config_repeat

            if j < 8:
                study.add_observation(t, objective=float(i), iteration=1)
                study.finalize(t)

        # Obtained 10/10 repeats for the configuration, but haven't added
        # results for the last one. Obtaining a new suggestion we expect WAIT.
        twait = study.get_suggestion()
        assert twait == sherpa.AlgorithmState.WAIT
        study.add_observation(t, objective=float(i), iteration=1)
        study.finalize(t)

    tdone = study.get_suggestion()
    assert tdone is None
def test_random_search():
    parameters = [sherpa.Continuous('a', [0, 1]),
                  sherpa.Choice('b', ['x', 'y', 'z'])]

    rs = sherpa.algorithms.RandomSearch(max_num_trials=10)
    last_config = {}

    for i in range(10):
        config = rs.get_suggestion(parameters=parameters)
        assert config != last_config
        last_config = config

    assert rs.get_suggestion(parameters=parameters) == sherpa.AlgorithmState.DONE


    rs = sherpa.algorithms.RandomSearch()
    last_config = {}

    for _ in range(1000):
        config = rs.get_suggestion(parameters=parameters)
        assert config != last_config
        last_config = config
def test_3d():
    def obj_func(x, y, z):
        assert isinstance(x, float)
        assert isinstance(y, str)
        assert isinstance(z, int)
        # The x-dependent term attains its minimum of -4 at x=4; with
        # y="1" and z=5 the global minimum is -9
        return -4. * numpy.exp(-(x - 4.)**2 / 10.) * numpy.cos(
            1.5 * (x - 4.))**2 - int(y) * z

    parameters = [
        sherpa.Continuous('x', [0., 7.]),
        sherpa.Choice('y', ["-1", "0", "1"]),
        sherpa.Discrete('z', [1, 5])
    ]

    bayesian_optimization = GPyOpt(max_concurrent=1,
                                   max_num_trials=100,
                                   model_type='GP',
                                   acquisition_type='EI')

    study = sherpa.Study(algorithm=bayesian_optimization,
                         parameters=parameters,
                         lower_is_better=True,
                         disable_dashboard=True)

    for trial in study:
        print("Trial {}:\t{}".format(trial.id, trial.parameters))

        fval = obj_func(**trial.parameters)
        print("Function Value: {}".format(fval))
        study.add_observation(trial=trial, iteration=1, objective=fval)
        study.finalize(trial, status='COMPLETED')
    rval = study.get_best_result()
    print(rval)

    assert numpy.isclose(rval['x'], 4., atol=0.1)
    assert rval['y'] == 1
    assert rval['z'] == 5