Example No. 1
def test_distributions():
    # test that the distributions come out right

    # XXX: test more distributions
    bandit = base.Bandit({
        'loss':
        hp_loguniform('lu', -2, 2) +
        hp_qloguniform('qlu', np.log(1 + 0.01), np.log(20), 2) +
        hp_quniform('qu', -4.999, 5, 1) + hp_uniform('u', 0, 10)
    })
    algo = base.Random(bandit)
    trials = base.Trials()
    exp = base.Experiment(trials, algo)
    exp.catch_bandit_exceptions = False
    N = 1000
    exp.run(N)
    assert len(trials) == N
    idxs, vals = base.miscs_to_idxs_vals(trials.miscs)
    print(list(idxs.keys()))

    COUNTMAX = 130
    COUNTMIN = 70

    # -- loguniform
    log_lu = np.log(vals['lu'])
    assert len(log_lu) == N
    assert -2 < np.min(log_lu)
    assert np.max(log_lu) < 2
    h = np.histogram(log_lu)[0]
    print(h)
    assert np.all(COUNTMIN < h)
    assert np.all(h < COUNTMAX)

    # -- quantized log uniform
    qlu = vals['qlu']
    assert np.all(np.fmod(qlu, 2) == 0)
    assert np.min(qlu) == 2
    assert np.max(qlu) == 20
    bc_qlu = np.bincount(np.asarray(qlu).astype(int))
    assert bc_qlu[2] > bc_qlu[4] > bc_qlu[6] > bc_qlu[8]

    # -- quantized uniform
    qu = vals['qu']
    assert np.min(qu) == -5
    assert np.max(qu) == 5
    assert np.all(np.fmod(qu, 1) == 0)
    bc_qu = np.bincount(np.asarray(qu).astype('int') + 5)
    assert np.all(40 < bc_qu), bc_qu  # XXX: how to get the distribution flat
    # with new rounding rule?
    assert np.all(bc_qu < 125), bc_qu
    assert np.all(bc_qu < COUNTMAX)

    # -- uniform
    u = vals['u']
    assert np.min(u) > 0
    assert np.max(u) < 10
    h = np.histogram(u)[0]
    print(h)
    assert np.all(COUNTMIN < h)
    assert np.all(h < COUNTMAX)
Example No. 2
def test_distributions():
    # test that the distributions come out right

    # XXX: test more distributions
    bandit = base.Bandit({
                'loss': hp_loguniform('lu', -2, 2) +
                    hp_qloguniform('qlu', np.log(1 + 0.01), np.log(20), 2) +
                    hp_quniform('qu', -4.999, 5, 1) +
                    hp_uniform('u', 0, 10)})
    algo = base.Random(bandit)
    trials = base.Trials()
    exp = base.Experiment(trials, algo)
    exp.catch_bandit_exceptions = False
    N = 1000
    exp.run(N)
    assert len(trials) == N
    idxs, vals = base.miscs_to_idxs_vals(trials.miscs)
    print(list(idxs.keys()))

    COUNTMAX = 130
    COUNTMIN = 70

    # -- loguniform
    log_lu = np.log(vals['lu'])
    assert len(log_lu) == N
    assert -2 < np.min(log_lu)
    assert np.max(log_lu) < 2
    h = np.histogram(log_lu)[0]
    print(h)
    assert np.all(COUNTMIN < h)
    assert np.all(h < COUNTMAX)

    # -- quantized log uniform
    qlu = vals['qlu']
    assert np.all(np.fmod(qlu, 2) == 0)
    assert np.min(qlu) == 2
    assert np.max(qlu) == 20
    bc_qlu = np.bincount(np.asarray(qlu).astype(int))
    assert bc_qlu[2] > bc_qlu[4] > bc_qlu[6] > bc_qlu[8]

    # -- quantized uniform
    qu = vals['qu']
    assert np.min(qu) == -5
    assert np.max(qu) == 5
    assert np.all(np.fmod(qu, 1) == 0)
    bc_qu = np.bincount(np.asarray(qu).astype('int') + 5)
    assert np.all(40 < bc_qu), bc_qu  # XXX: how to get the distribution flat
    # with new rounding rule?
    assert np.all(bc_qu < 125), bc_qu
    assert np.all(bc_qu < COUNTMAX)

    # -- uniform
    u = vals['u']
    assert np.min(u) > 0
    assert np.max(u) < 10
    h = np.histogram(u)[0]
    print(h)
    assert np.all(COUNTMIN < h)
    assert np.all(h < COUNTMAX)
Example No. 3
def many_dists():
    a = hp_choice('a', [0, 1, 2])
    b = hp_randint('b', 10)
    c = hp_uniform('c', 4, 7)
    d = hp_loguniform('d', -2, 0)
    e = hp_quniform('e', 0, 10, 3)
    f = hp_qloguniform('f', 0, 3, 2)
    g = hp_normal('g', 4, 7)
    h = hp_lognormal('h', -2, 2)
    i = hp_qnormal('i', 0, 10, 2)
    j = hp_qlognormal('j', 0, 2, 1)
    z = a + b + c + d + e + f + g + h + i + j
    return {'loss': scope.float(scope.log(1e-12 + z**2))}
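A minimal usage sketch, not part of the original example: the dictionary returned by many_dists() can be handed straight to hyperopt's fmin with random search, in the same style as the test functions on this page. It assumes the many_dists definition above with its hp_* imports; the fmin, rand and Trials names are standard hyperopt entry points, the objective simply returns the already-computed 'loss' value, and max_evals is an arbitrary choice here.

from hyperopt import fmin, rand, Trials

def run_many_dists(max_evals=50):
    # Hypothetical driver; the space already evaluates to {'loss': <float>} per trial.
    trials = Trials()
    fmin(lambda d: d['loss'],      # return the loss as a plain float
         space=many_dists(),
         algo=rand.suggest,        # plain random search, as in the tests on this page
         trials=trials,
         max_evals=max_evals)      # a seeded rstate can also be passed for reproducibility
    return trials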
Example No. 4
def many_dists():
    a=hp_choice('a', [0, 1, 2])
    b=hp_randint('b', 10)
    c=hp_uniform('c', 4, 7)
    d=hp_loguniform('d', -2, 0)
    e=hp_quniform('e', 0, 10, 3)
    f=hp_qloguniform('f', 0, 3, 2)
    g=hp_normal('g', 4, 7)
    h=hp_lognormal('h', -2, 2)
    i=hp_qnormal('i', 0, 10, 2)
    j=hp_qlognormal('j', 0, 2, 1)
    z = a + b + c + d + e + f + g + h + i + j
    return {'loss': scope.float(scope.log(1e-12 + z ** 2))}
Example No. 5
def pipeline_extension(prefix, X, n_patches, max_filters):
    assert max_filters > 16
    f_layer = new_fbncc_layer(prefix, X, n_patches,
            n_filters=s_int(
                hp_qloguniform('%sfb_nfilters' % prefix,
                    np.log(8.01), np.log(max_filters), q=16)),
            size=rfilter_size('%sfb_size' % prefix, 3, 8),
            )

    p_layer = partial(slm_lpool,
            stride=hp_choice('%sp_stride' % prefix, [1, 2]),
            order=hp_choice('%sp_order' % prefix,
                [1, 2, hp_lognormal('%sp_order_real' % prefix,
                    mu=np.log(1), sigma=np.log(3))]),
            ker_size=rfilter_size('%sp_size' % prefix, 2, 8))

    return [f_layer, p_layer]
Example No. 6
def test_distributions():
    # test that the distributions come out right

    # XXX: test more distributions
    space = {
        'loss': (
            hp_loguniform('lu', -2, 2) +
            hp_qloguniform('qlu', np.log(1 + 0.01), np.log(20), 2) +
            hp_quniform('qu', -4.999, 5, 1) +
            hp_uniform('u', 0, 10)),
        'status': 'ok'}
    trials = base.Trials()
    N = 1000
    fmin(lambda x: x,
        space=space,
        algo=rand.suggest,
        trials=trials,
        max_evals=N,
        rstate=np.random.RandomState(124),
        catch_eval_exceptions=False)
    assert len(trials) == N
    idxs, vals = base.miscs_to_idxs_vals(trials.miscs)
    print(list(idxs.keys()))

    COUNTMAX = 130
    COUNTMIN = 70

    # -- loguniform
    log_lu = np.log(vals['lu'])
    assert len(log_lu) == N
    assert -2 < np.min(log_lu)
    assert np.max(log_lu) < 2
    h = np.histogram(log_lu)[0]
    print(h)
    assert np.all(COUNTMIN < h)
    assert np.all(h < COUNTMAX)

    # -- quantized log uniform
    qlu = vals['qlu']
    assert np.all(np.fmod(qlu, 2) == 0)
    assert np.min(qlu) == 2
    assert np.max(qlu) == 20
    bc_qlu = np.bincount(np.asarray(qlu).astype(int))
    assert bc_qlu[2] > bc_qlu[4] > bc_qlu[6] > bc_qlu[8]

    # -- quantized uniform
    qu = vals['qu']
    assert np.min(qu) == -5
    assert np.max(qu) == 5
    assert np.all(np.fmod(qu, 1) == 0)
    bc_qu = np.bincount(np.asarray(qu).astype('int') + 5)
    assert np.all(40 < bc_qu), bc_qu  # XXX: how to get the distribution flat
    # with new rounding rule?
    assert np.all(bc_qu < 125), bc_qu
    assert np.all(bc_qu < COUNTMAX)

    # -- uniform
    u = vals['u']
    assert np.min(u) > 0
    assert np.max(u) < 10
    h = np.histogram(u)[0]
    print(h)
    assert np.all(COUNTMIN < h)
    assert np.all(h < COUNTMAX)
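An aside, not part of the scraped examples: because the search space above is just a pyll expression, a single configuration can also be drawn directly with hyperopt.pyll.stochastic.sample, without running fmin. The snippet below is a hypothetical illustration, assuming the same hp_* imports and numpy as np.

from hyperopt.pyll.stochastic import sample

space = {
    'loss': (hp_loguniform('lu', -2, 2) +
             hp_qloguniform('qlu', np.log(1 + 0.01), np.log(20), 2) +
             hp_quniform('qu', -4.999, 5, 1) +
             hp_uniform('u', 0, 10)),
    'status': 'ok',
}
# One random draw of the space; prints something like {'loss': 12.3, 'status': 'ok'}.
print(sample(space))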
Example No. 7
def test_distributions():
    # test that the distributions come out right

    # XXX: test more distributions
    space = {
        'loss': (hp_loguniform('lu', -2, 2) +
                 hp_qloguniform('qlu', np.log(1 + 0.01), np.log(20), 2) +
                 hp_quniform('qu', -4.999, 5, 1) + hp_uniform('u', 0, 10)),
        'status':
        'ok'
    }
    trials = base.Trials()
    N = 1000
    fmin(lambda x: x,
         space=space,
         algo=rand.suggest,
         trials=trials,
         max_evals=N,
         rstate=np.random.RandomState(124),
         catch_eval_exceptions=False)
    assert len(trials) == N
    idxs, vals = base.miscs_to_idxs_vals(trials.miscs)
    print(list(idxs.keys()))

    COUNTMAX = 130
    COUNTMIN = 70

    # -- loguniform
    log_lu = np.log(vals['lu'])
    assert len(log_lu) == N
    assert -2 < np.min(log_lu)
    assert np.max(log_lu) < 2
    h = np.histogram(log_lu)[0]
    print(h)
    assert np.all(COUNTMIN < h)
    assert np.all(h < COUNTMAX)

    # -- quantized log uniform
    qlu = vals['qlu']
    assert np.all(np.fmod(qlu, 2) == 0)
    assert np.min(qlu) == 2
    assert np.max(qlu) == 20
    bc_qlu = np.bincount(np.asarray(qlu).astype(int))
    assert bc_qlu[2] > bc_qlu[4] > bc_qlu[6] > bc_qlu[8]

    # -- quantized uniform
    qu = vals['qu']
    assert np.min(qu) == -5
    assert np.max(qu) == 5
    assert np.all(np.fmod(qu, 1) == 0)
    bc_qu = np.bincount(np.asarray(qu).astype('int') + 5)
    assert np.all(40 < bc_qu), bc_qu  # XXX: how to get the distribution flat
    # with new rounding rule?
    assert np.all(bc_qu < 125), bc_qu
    assert np.all(bc_qu < COUNTMAX)

    # -- uniform
    u = vals['u']
    assert np.min(u) > 0
    assert np.max(u) < 10
    h = np.histogram(u)[0]
    print(h)
    assert np.all(COUNTMIN < h)
    assert np.all(h < COUNTMAX)
import copy
from collections import OrderedDict
import numpy as np

try:
    from hyperopt.pyll import scope
except ImportError:
    print('Trying standalone pyll')
    from pyll import scope
from hyperopt.pyll_utils import hp_uniform, hp_loguniform, hp_quniform, hp_qloguniform
from hyperopt.pyll_utils import hp_normal, hp_lognormal, hp_qnormal, hp_qlognormal
from hyperopt.pyll_utils import hp_choice


num_filters1 = scope.int(hp_qloguniform('num_filters1', np.log(16), np.log(96), q=16))
filter1_size = scope.int(hp_quniform('filter1_shape', 2, 12, 1))

num_filters2 = scope.int(hp_qloguniform('num_filters2', np.log(16), np.log(96), q=16))
filter2_size = scope.int(hp_quniform('filter2_shape', 2, 12, 1))

num_filters3 = scope.int(hp_qloguniform('num_filters3', np.log(16), np.log(96), q=16))
filter3_size = scope.int(hp_quniform('filter3_shape', 2, 9, 1))

num_filters4 = scope.int(hp_qloguniform('num_filters4', np.log(16), np.log(64), q=16))
filter4_size = scope.int(hp_quniform('filter4_shape', 2, 9, 1))

pool1_sizex = scope.int(hp_quniform('pool1_sizex', 2, 5, 1))
pool1_type = hp_choice('pool1_type', ['max', 'avg', hp_uniform('pool_order_1', 1, 12)])

pool2_sizex = scope.int(hp_quniform('pool2_sizex', 2, 5, 1))
pool2_type = hp_choice('pool2_type', ['max', 'avg', hp_uniform('pool_order_2', 1, 4)])
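A small hypothetical sketch, not in the original file: the expressions above are pyll graphs, so they can be grouped into a dictionary and sampled with hyperopt.pyll.stochastic.sample to see one concrete layer configuration.

from hyperopt.pyll.stochastic import sample

# Group the first convolution/pooling layer's hyperparameters (names as defined above).
layer1_space = {
    'num_filters1': num_filters1,
    'filter1_size': filter1_size,
    'pool1_sizex': pool1_sizex,
    'pool1_type': pool1_type,
}
# One random draw, e.g. {'filter1_size': 7, 'num_filters1': 48, 'pool1_sizex': 3, 'pool1_type': 'max'}.
print(sample(layer1_space))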