Example #1
# Assumed imports for this excerpt; fmin is taken to come from the project's
# hpo module (used as hpo.fmin in the later example).
import numpy as np
import pytest
import scipy.stats as st

from hpo import fmin


def test_simple_hpo():

    def f(args):
        x = args['x']
        return x * x

    s = {'x': {'dist': st.uniform(loc=-10., scale=20.), 'lo': -10., 'hi': 10.}}
    trials = []

    # Test fmin and the ability to continue adding to an existing trials list
    best = fmin(loss_fn=f, space=s, max_evals=40, trials=trials)
    best = fmin(loss_fn=f, space=s, max_evals=10, trials=trials)

    assert len(trials) == 50, "HPO continuation trials not working"
    
    # Test verbose flag
    best = fmin(loss_fn=f, space=s, max_evals=10, trials=trials)

    yarray = np.array([tr['loss'] for tr in trials])
    np.testing.assert_array_less(yarray, 100.)

    xarray = np.array([tr['x'] for tr in trials])
    np.testing.assert_array_less(np.abs(xarray), 10.)

    assert best['loss'] < 100., "HPO out of range"
    assert np.abs(best['x']) < 10., "HPO out of range"

    # Test unknown distributions: a 'dist' entry that is not a scipy frozen
    # distribution should raise a ValueError
    s2 = {'x': {'dist': 'normal', 'mu': 0., 'sigma': 1.}}
    trials2 = []
    with pytest.raises(ValueError) as excinfo:
        best2 = fmin(loss_fn=f, space=s2, max_evals=40, trials=trials2)
    assert "Unknown distribution type for variable" in str(excinfo.value)

    s3 = {'x': {'dist': st.norm(loc=0., scale=1.)}}
    trials3 = []
    best3 = fmin(loss_fn=f, space=s3, max_evals=40, trials=trials3)
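
# The unknown-distribution branch above relies on fmin rejecting any 'dist'
# entry that is not a scipy frozen distribution. A rough sketch of that kind of
# validation (hypothetical helper name, not the actual hpo implementation):
def _sample_space(space, rng=None):
    # Draw one value per variable; anything without an rvs() method is rejected.
    sample = {}
    for name, spec in space.items():
        dist = spec['dist']
        if not hasattr(dist, 'rvs'):
            raise ValueError("Unknown distribution type for variable %s" % name)
        value = dist.rvs(random_state=rng)
        # Clip to the declared bounds when they are given.
        if 'lo' in spec:
            value = max(value, spec['lo'])
        if 'hi' in spec:
            value = min(value, spec['hi'])
        sample[name] = value
    return sample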
Example #2
# Assumed context for this excerpt: numpy and the project's hpo module are the
# only imports needed here; branin_wrapper(), space, and the x1/x2 bounds are
# defined earlier in the full script.
import numpy as np

import hpo

# The trials object is just a list that stores the samples generated and the
# corresponding function values at those sample points.
trials = []

# Maximum number of samples that will be generated.
# This is the maximum number of function evaluations that will be performed.
n_hpo_samples = 100

# Call the fmin function that does the optimization.
# The function to be optimized should take in a dictionary. You will probably
# need to wrap your function to do this (see branin() and branin_wrapper()).
# You can pass in a non-empty trials object as well e.g. from a previous
# fmin run. We just append to the trials object and will use existing data
# in our optimization.
print("Starting optimization through hpo")
best = hpo.fmin(loss_fn=branin_wrapper, space=space,
                max_evals=n_hpo_samples, trials=trials)

# Print out the best value obtained through HPO
print("Best obtained through HPO (", n_hpo_samples, " samples) = ",
       best['x1'], best['x2'], "; min value = ", best['loss'])
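
# The comments above refer to branin() and branin_wrapper(); the wrapper adapts
# the two-argument Branin function to fmin's dictionary-based convention. A
# sketch of what that pair might look like (the standard Branin-Hoo benchmark;
# the actual definitions elsewhere in this script may differ):
def branin(x1, x2):
    # Branin-Hoo test function; its global minimum value is about 0.397887.
    a = 1.0
    b = 5.1 / (4.0 * np.pi ** 2)
    c = 5.0 / np.pi
    r = 6.0
    s = 10.0
    t = 1.0 / (8.0 * np.pi)
    return a * (x2 - b * x1 ** 2 + c * x1 - r) ** 2 + s * (1.0 - t) * np.cos(x1) + s


def branin_wrapper(args):
    # fmin passes a dictionary of sampled values; unpack it for branin().
    return branin(args['x1'], args['x2'])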

#####################################
# Optimization through grid search
#####################################

# Divide the space into a uniform grid (meshgrid)
n = 200
x1 = np.linspace(x1lo, x1hi, n)
x2 = np.linspace(x2lo, x2hi, n)
x1_grid, x2_grid = np.meshgrid(x1, x2)
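
# The excerpt ends after building the grid. A plausible continuation, assuming
# branin() broadcasts over numpy arrays as in the sketch above, evaluates every
# grid point and reports the best one for comparison with the HPO result:
z_grid = branin(x1_grid, x2_grid)
imin = np.unravel_index(np.argmin(z_grid), z_grid.shape)
print("Best obtained through grid search (", n * n, " samples) = ",
      x1_grid[imin], x2_grid[imin], "; min value = ", z_grid[imin])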