def test_distributions(): # test that the distributions come out right # XXX: test more distributions bandit = base.Bandit({ 'loss': hp_loguniform('lu', -2, 2) + hp_qloguniform('qlu', np.log(1 + 0.01), np.log(20), 2) + hp_quniform('qu', -4.999, 5, 1) + hp_uniform('u', 0, 10) }) algo = base.Random(bandit) trials = base.Trials() exp = base.Experiment(trials, algo) exp.catch_bandit_exceptions = False N = 1000 exp.run(N) assert len(trials) == N idxs, vals = base.miscs_to_idxs_vals(trials.miscs) print idxs.keys() COUNTMAX = 130 COUNTMIN = 70 # -- loguniform log_lu = np.log(vals['lu']) assert len(log_lu) == N assert -2 < np.min(log_lu) assert np.max(log_lu) < 2 h = np.histogram(log_lu)[0] print h assert np.all(COUNTMIN < h) assert np.all(h < COUNTMAX) # -- quantized log uniform qlu = vals['qlu'] assert np.all(np.fmod(qlu, 2) == 0) assert np.min(qlu) == 2 assert np.max(qlu) == 20 bc_qlu = np.bincount(qlu) assert bc_qlu[2] > bc_qlu[4] > bc_qlu[6] > bc_qlu[8] # -- quantized uniform qu = vals['qu'] assert np.min(qu) == -5 assert np.max(qu) == 5 assert np.all(np.fmod(qu, 1) == 0) bc_qu = np.bincount(np.asarray(qu).astype('int') + 5) assert np.all(40 < bc_qu), bc_qu # XXX: how to get the distribution flat # with new rounding rule? assert np.all(bc_qu < 125), bc_qu assert np.all(bc_qu < COUNTMAX) # -- uniform u = vals['u'] assert np.min(u) > 0 assert np.max(u) < 10 h = np.histogram(u)[0] print h assert np.all(COUNTMIN < h) assert np.all(h < COUNTMAX)
def opt_q_uniform(target):
    """Quadratic loss centred at `target` over a quantized-uniform x,
    plus zero-mean unit Gaussian noise from a fixed-seed rng.
    """
    rng = np.random.RandomState(123)
    x = hp_quniform('x', 1.01, 10, 1)
    squared_error = (x - target) ** 2
    noise = scope.normal(0, 1, rng=rng)
    return {
        'loss': squared_error + noise,
        'status': STATUS_OK,
    }
def test_distributions(): # test that the distributions come out right # XXX: test more distributions bandit = base.Bandit({ 'loss': hp_loguniform('lu', -2, 2) + hp_qloguniform('qlu', np.log(1 + 0.01), np.log(20), 2) + hp_quniform('qu', -4.999, 5, 1) + hp_uniform('u', 0, 10)}) algo = base.Random(bandit) trials = base.Trials() exp = base.Experiment(trials, algo) exp.catch_bandit_exceptions = False N = 1000 exp.run(N) assert len(trials) == N idxs, vals = base.miscs_to_idxs_vals(trials.miscs) print idxs.keys() COUNTMAX = 130 COUNTMIN = 70 # -- loguniform log_lu = np.log(vals['lu']) assert len(log_lu) == N assert -2 < np.min(log_lu) assert np.max(log_lu) < 2 h = np.histogram(log_lu)[0] print h assert np.all(COUNTMIN < h) assert np.all(h < COUNTMAX) # -- quantized log uniform qlu = vals['qlu'] assert np.all(np.fmod(qlu, 2) == 0) assert np.min(qlu) == 2 assert np.max(qlu) == 20 bc_qlu = np.bincount(qlu) assert bc_qlu[2] > bc_qlu[4] > bc_qlu[6] > bc_qlu[8] # -- quantized uniform qu = vals['qu'] assert np.min(qu) == -5 assert np.max(qu) == 5 assert np.all(np.fmod(qu, 1) == 0) bc_qu = np.bincount(np.asarray(qu).astype('int') + 5) assert np.all(40 < bc_qu), bc_qu # XXX: how to get the distribution flat # with new rounding rule? assert np.all(bc_qu < 125), bc_qu assert np.all(bc_qu < COUNTMAX) # -- uniform u = vals['u'] assert np.min(u) > 0 assert np.max(u) < 10 h = np.histogram(u)[0] print h assert np.all(COUNTMIN < h) assert np.all(h < COUNTMAX)
def many_dists():
    """Search space exercising one of every hp_* distribution; the loss is a
    scalar function of their sum.
    """
    # -- discrete choices and integers
    a = hp_choice('a', [0, 1, 2])
    b = hp_randint('b', 10)
    # -- continuous uniform / log-uniform
    c = hp_uniform('c', 4, 7)
    d = hp_loguniform('d', -2, 0)
    # -- quantized uniform / log-uniform
    e = hp_quniform('e', 0, 10, 3)
    f = hp_qloguniform('f', 0, 3, 2)
    # -- normal / log-normal and their quantized forms
    g = hp_normal('g', 4, 7)
    h = hp_lognormal('h', -2, 2)
    i = hp_qnormal('i', 0, 10, 2)
    j = hp_qlognormal('j', 0, 2, 1)
    z = a + b + c + d + e + f + g + h + i + j
    # 1e-12 keeps the log argument strictly positive when z is near zero.
    return {'loss': scope.float(scope.log(1e-12 + z ** 2))}
def many_dists():
    """Build a search space that draws from every hp_* distribution family
    and combines the draws into one scalar loss.
    """
    a = hp_choice('a', [0, 1, 2])          # categorical
    b = hp_randint('b', 10)                # integer in [0, 10)
    c = hp_uniform('c', 4, 7)              # uniform
    d = hp_loguniform('d', -2, 0)          # log-uniform
    e = hp_quniform('e', 0, 10, 3)         # quantized uniform
    f = hp_qloguniform('f', 0, 3, 2)       # quantized log-uniform
    g = hp_normal('g', 4, 7)               # normal
    h = hp_lognormal('h', -2, 2)           # log-normal
    i = hp_qnormal('i', 0, 10, 2)          # quantized normal
    j = hp_qlognormal('j', 0, 2, 1)        # quantized log-normal
    z = a + b + c + d + e + f + g + h + i + j
    # Small epsilon guards the log against z == 0.
    return {'loss': scope.float(scope.log(1e-12 + z ** 2))}
def test_distributions():
    """Run random search for N evaluations and check that the sampled values
    of each hp_* distribution have the right support and rough shape.
    """
    # XXX: test more distributions
    space = {
        'loss': (
            hp_loguniform('lu', -2, 2) +
            hp_qloguniform('qlu', np.log(1 + 0.01), np.log(20), 2) +
            hp_quniform('qu', -4.999, 5, 1) +
            hp_uniform('u', 0, 10)),
        'status': 'ok'}
    trials = base.Trials()
    N = 1000
    fmin(lambda x: x,
         space=space,
         algo=rand.suggest,
         trials=trials,
         max_evals=N,
         rstate=np.random.RandomState(124),
         catch_eval_exceptions=False)
    assert len(trials) == N
    idxs, vals = base.miscs_to_idxs_vals(trials.miscs)
    print(list(idxs.keys()))
    # Per-bin tolerance around the expected count of 100 (1000 samples,
    # 10 histogram bins).
    COUNTMAX = 130
    COUNTMIN = 70

    # -- loguniform
    log_lu = np.log(vals['lu'])
    assert len(log_lu) == N
    assert -2 < np.min(log_lu)
    assert np.max(log_lu) < 2
    h = np.histogram(log_lu)[0]
    print(h)
    assert np.all(COUNTMIN < h)
    assert np.all(h < COUNTMAX)

    # -- quantized log uniform
    qlu = vals['qlu']
    assert np.all(np.fmod(qlu, 2) == 0)
    assert np.min(qlu) == 2
    assert np.max(qlu) == 20
    # np.bincount requires a non-negative integer array; qlu arrives as
    # floats (exact multiples of 2), so cast before counting.
    bc_qlu = np.bincount(np.asarray(qlu).astype('int'))
    assert bc_qlu[2] > bc_qlu[4] > bc_qlu[6] > bc_qlu[8]

    # -- quantized uniform
    qu = vals['qu']
    assert np.min(qu) == -5
    assert np.max(qu) == 5
    assert np.all(np.fmod(qu, 1) == 0)
    bc_qu = np.bincount(np.asarray(qu).astype('int') + 5)
    assert np.all(40 < bc_qu), bc_qu  # XXX: how to get the distribution flat
    # with new rounding rule?
    assert np.all(bc_qu < 125), bc_qu
    assert np.all(bc_qu < COUNTMAX)

    # -- uniform
    u = vals['u']
    assert np.min(u) > 0
    assert np.max(u) < 10
    h = np.histogram(u)[0]
    print(h)
    assert np.all(COUNTMIN < h)
    assert np.all(h < COUNTMAX)
def test_distributions():
    """Sample N points with fmin + rand.suggest and verify the support and
    approximate histogram of each hp_* distribution.
    """
    # XXX: test more distributions
    space = {
        'loss': (hp_loguniform('lu', -2, 2) +
                 hp_qloguniform('qlu', np.log(1 + 0.01), np.log(20), 2) +
                 hp_quniform('qu', -4.999, 5, 1) +
                 hp_uniform('u', 0, 10)),
        'status': 'ok',
    }
    trials = base.Trials()
    N = 1000
    fmin(lambda x: x,
         space=space,
         algo=rand.suggest,
         trials=trials,
         max_evals=N,
         rstate=np.random.RandomState(124),
         catch_eval_exceptions=False)
    assert len(trials) == N
    idxs, vals = base.miscs_to_idxs_vals(trials.miscs)
    print(list(idxs.keys()))
    # Loose per-bin bounds for a 10-bin histogram of 1000 samples (mean 100).
    COUNTMAX = 130
    COUNTMIN = 70

    # -- loguniform
    log_lu = np.log(vals['lu'])
    assert len(log_lu) == N
    assert -2 < np.min(log_lu)
    assert np.max(log_lu) < 2
    h = np.histogram(log_lu)[0]
    print(h)
    assert np.all(COUNTMIN < h)
    assert np.all(h < COUNTMAX)

    # -- quantized log uniform
    qlu = vals['qlu']
    assert np.all(np.fmod(qlu, 2) == 0)
    assert np.min(qlu) == 2
    assert np.max(qlu) == 20
    # Cast to int: np.bincount rejects float input, and qlu values are
    # floats that are exact multiples of 2.
    bc_qlu = np.bincount(np.asarray(qlu).astype('int'))
    assert bc_qlu[2] > bc_qlu[4] > bc_qlu[6] > bc_qlu[8]

    # -- quantized uniform
    qu = vals['qu']
    assert np.min(qu) == -5
    assert np.max(qu) == 5
    assert np.all(np.fmod(qu, 1) == 0)
    bc_qu = np.bincount(np.asarray(qu).astype('int') + 5)
    assert np.all(40 < bc_qu), bc_qu  # XXX: how to get the distribution flat
    # with new rounding rule?
    assert np.all(bc_qu < 125), bc_qu
    assert np.all(bc_qu < COUNTMAX)

    # -- uniform
    u = vals['u']
    assert np.min(u) > 0
    assert np.max(u) < 10
    h = np.histogram(u)[0]
    print(h)
    assert np.all(COUNTMIN < h)
    assert np.all(h < COUNTMAX)
import copy from collections import OrderedDict import numpy as np try: from hyperopt.pyll import scope except ImportError: print 'Trying standalone pyll' from pyll import scope from hyperopt.pyll_utils import hp_uniform, hp_loguniform, hp_quniform, hp_qloguniform from hyperopt.pyll_utils import hp_normal, hp_lognormal, hp_qnormal, hp_qlognormal from hyperopt.pyll_utils import hp_choice num_filters1 = scope.int(hp_qloguniform('num_filters1',np.log(16), np.log(96), q=16)) filter1_size = scope.int(hp_quniform('filter1_shape', 2, 12, 1)) num_filters2 = scope.int(hp_qloguniform('num_filters2',np.log(16), np.log(96), q=16)) filter2_size = scope.int(hp_quniform('filter2_shape', 2, 12, 1)) num_filters3 = scope.int(hp_qloguniform('num_filters3',np.log(16), np.log(96), q=16)) filter3_size = scope.int(hp_quniform('filter3_shape', 2, 9, 1)) num_filters4 = scope.int(hp_qloguniform('num_filters4',np.log(16), np.log(64), q=16)) filter4_size = scope.int(hp_quniform('filter4_shape', 2, 9, 1)) pool1_sizex = scope.int(hp_quniform('pool1_sizex', 2, 5, 1)) pool1_type = hp_choice('pool1_type', ['max', 'avg', hp_uniform('pool_order_1', 1, 12)]) pool2_sizex = scope.int(hp_quniform('pool2_sizex', 2, 5, 1)) pool2_type = hp_choice('pool2_type', ['max', 'avg', hp_uniform('pool_order_2', 1, 4)])
def rfilter_size(label, smin, smax, q=1):
    """Return an integer size from smin to smax inclusive with equal prob
    """
    # Widen both ends by half a quantum (plus a tiny epsilon on the low end)
    # so that after quantization every integer in [smin, smax] gets an equal
    # slice of probability mass.
    low = smin - q / 2.0 + 1e-5
    high = smax + q / 2.0
    return s_int(hp_quniform(label, low, high, q))
import copy from collections import OrderedDict import numpy as np try: from hyperopt.pyll import scope except ImportError: print 'Trying standalone pyll' from pyll import scope from hyperopt.pyll_utils import hp_uniform, hp_loguniform, hp_quniform, hp_qloguniform from hyperopt.pyll_utils import hp_normal, hp_lognormal, hp_qnormal, hp_qlognormal from hyperopt.pyll_utils import hp_choice num_filters1 = scope.int(hp_quniform('num_filters1', 32, 128, 16)) filter1_size = scope.int(hp_quniform('filter1_shape', 5, 12, 1)) num_filters2 = scope.int(hp_quniform('num_filters2', 64, 400, 16)) filter2_size = scope.int(hp_quniform('filter2_shape', 4, 7, 1)) num_filters3 = scope.int(hp_quniform('num_filters3', 64, 400, 16)) filter3_size = scope.int(hp_quniform('filter3_shape', 3, 5, 1)) num_filters4 = scope.int(hp_quniform('num_filters4', 64, 400, 16)) filter4_size = scope.int(hp_quniform('filter4_shape', 3, 4, 1)) num_filters5 = scope.int(hp_quniform('num_filters5', 64, 400, 16)) filter5_size = scope.int(hp_quniform('filter5_shape', 2, 3, 1)) pool1_sizex = scope.int(hp_quniform('pool1_sizex', 2, 4, 1)) pool1_type = hp_choice('pool1_type', ['max', 'avg', hp_uniform('pool_order_1', 1, 4)])
def opt_q_uniform(target):
    """Objective: squared distance of quantized-uniform x from `target`,
    perturbed by unit Gaussian noise drawn from a fixed-seed rng.
    """
    rng = np.random.RandomState(123)
    x = hp_quniform('x', 1.01, 10, 1)
    loss_expr = (x - target) ** 2 + scope.normal(0, 1, rng=rng)
    return {'loss': loss_expr, 'status': STATUS_OK}
def opt_q_uniform(target):
    """Noisy quadratic loss centred at `target` over a quantized-uniform x."""
    x = hp_quniform('x', 1.01, 10, 1)
    distance = x - target
    noise = scope.normal(0, 1)
    return {'loss': distance ** 2 + noise}
def opt_q_uniform(target):
    """Quadratic-plus-noise objective over a quantized-uniform variable x."""
    x = hp_quniform('x', 1.01, 10, 1)
    squared_err = (x - target) ** 2
    return {'loss': squared_err + scope.normal(0, 1)}