def opt_q_uniform(target):
    """Quantized-uniform search space with a noisy squared-error loss.

    The optimum is the quantized value of ``x`` closest to ``target``;
    unit-variance Gaussian noise (seeded rng) is added to the loss.
    """
    rng = np.random.default_rng(123)
    x = hp.quniform("x", 1.01, 10, 1)
    noisy_loss = (x - target) ** 2 + scope.normal(0, 1, rng=rng)
    return {"loss": noisy_loss, "status": STATUS_OK}
def opt_q_uniform(target):
    """Search over a quantized-uniform ``x``; loss is (x - target)^2 plus noise."""
    rng = np.random.RandomState(123)
    x = hp_quniform('x', 1.01, 10, 1)
    squared_error = (x - target)**2
    return {'loss': squared_error + scope.normal(0, 1, rng=rng),
            'status': STATUS_OK}
def test_repeatable():
    """Sampling with identical RNG seeds is deterministic; different seeds differ."""
    u = scope.uniform(0, 1)
    aa = as_apply(
        dict(u=u, n=scope.normal(5, 0.1), l=[0, 1, scope.one_of(2, 3), u]))
    first = sample(aa, np.random.RandomState(3))
    second = sample(aa, np.random.RandomState(3))
    other_seed = sample(aa, np.random.RandomState(4))
    assert first == second
    assert first != other_seed
def test_repeatable():
    """Equal seeds must reproduce the same sample; a new seed must not."""
    u = scope.uniform(0, 1)
    spec = dict(
        u=u,
        n=scope.normal(5, 0.1),
        l=[0, 1, scope.one_of(2, 3), u],
    )
    aa = as_apply(spec)
    # Draw twice with seed 3 and once with seed 4, in that order.
    draws = [sample(aa, np.random.RandomState(seed)) for seed in (3, 3, 4)]
    assert draws[0] == draws[1]
    assert draws[0] != draws[2]
def test_sample():
    """Sampled values must land in the ranges implied by the spec.

    Checks range bounds for the uniform/normal draws, that the shared node
    ``u`` produces the same value in both places, and that the choice node
    picks one of its declared options.
    """
    u = scope.uniform(0, 1)
    aa = as_apply(
        dict(u=u, n=scope.normal(5, 0.1), l=[0, 1, scope.one_of(2, 3), u]))
    # Fixed: was a Python 2 `print aa` statement — a SyntaxError on Python 3.
    print(aa)
    dd = sample(aa, np.random.RandomState(3))
    assert 0 < dd['u'] < 1
    assert 4 < dd['n'] < 6
    assert dd['u'] == dd['l'][3]
    assert dd['l'][:2] == (0, 1)
    assert dd['l'][2] in (2, 3)
def test_sample():
    """Verify sampled values fall inside the bounds declared by the spec."""
    u = scope.uniform(0, 1)
    spec = {"u": u, "n": scope.normal(5, 0.1), "l": [0, 1, scope.one_of(2, 3), u]}
    aa = as_apply(spec)
    print(aa)
    dd = sample(aa, np.random.default_rng(3))
    assert 0 < dd["u"] < 1
    assert 4 < dd["n"] < 6
    # The shared node `u` must yield one consistent value everywhere it appears.
    assert dd["u"] == dd["l"][3]
    assert dd["l"][:2] == (0, 1)
    assert dd["l"][2] in (2, 3)
def test_sample():
    """Sampling the spec yields values in the expected ranges.

    Also checks that the shared node ``u`` is consistent across the dict and
    the list, and that the choice node returns one of its options.
    """
    u = scope.uniform(0, 1)
    aa = as_apply(dict(
        u=u,
        n=scope.normal(5, 0.1),
        l=[0, 1, scope.one_of(2, 3), u]))
    # Fixed: `print aa` was Python 2 syntax and fails to parse on Python 3.
    print(aa)
    dd = sample(aa, np.random.RandomState(3))
    assert 0 < dd['u'] < 1
    assert 4 < dd['n'] < 6
    assert dd['u'] == dd['l'][3]
    assert dd['l'][:2] == (0, 1)
    assert dd['l'][2] in (2, 3)
def n_arms(N=2):
    """N-armed bandit where each arm pays a Gaussian reward.

    Arm 0 has mean -1 (all others mean 0), so arm 0 is the correct choice.
    """
    rng = np.random.RandomState(123)
    x = hp.choice('x', [0, 1])
    mus = as_apply([-1] + [0] * (N - 1))
    sigmas = as_apply([1] * N)
    result = {
        'loss': scope.normal(mus[x], sigmas[x], rng=rng),
        'loss_variance': 1.0,
        'status': base.STATUS_OK,
    }
    return result
def n_arms(N=2):
    """Gaussian bandit with ``N`` arms; arm 0 (mean -1) is optimal."""
    rng = np.random.default_rng(123)
    x = hp.choice("x", [0, 1])
    # Means: -1 for arm 0, 0 for the remaining N-1 arms; unit stdev for all.
    means = as_apply([-1] + [0] * (N - 1))
    stdevs = as_apply([1] * N)
    return {
        "loss": scope.normal(means[x], stdevs[x], rng=rng),
        "loss_variance": 1.0,
        "status": base.STATUS_OK,
    }
def gauss_wave2():
    """Noisy Gaussian bump with an optional negative-cosine modulation.

    The immediate local max samples x from the spec with the modulation
    switched off; the better solution shifts x slightly, turns the
    modulation on and raises its amplitude toward 1.
    """
    rng = np.random.RandomState(123)
    var = .1
    x = hp.uniform('x', -20, 20)
    amp = hp.uniform('amp', 0, 1)
    noise = scope.normal(0, var, rng=rng)
    bump = 2 * scope.exp(-(old_div(x, 5.0)) ** 2)
    t = noise + bump
    return {'loss': - hp.choice('hf', [t, t + scope.sin(x) * amp]),
            'loss_variance': var,
            'status': base.STATUS_OK}
def gauss_wave2():
    """GaussWave variant: noisy score plus an optional sinusoidal term.

    Immediate local max: sample x from the spec with the sinusoidal term
    off. Better: move x slightly aside, enable the term, and push the
    amplitude up to 1.
    """
    rng = np.random.default_rng(123)
    var = 0.1
    x = hp.uniform("x", -20, 20)
    amp = hp.uniform("amp", 0, 1)
    gaussian_part = 2 * scope.exp(-((old_div(x, 5.0))**2))
    t = scope.normal(0, var, rng=rng) + gaussian_part
    return {
        "loss": -hp.choice("hf", [t, t + scope.sin(x) * amp]),
        "loss_variance": var,
        "status": base.STATUS_OK,
    }
def opt_q_uniform(target):
    """Quantized-uniform search whose noisy loss is smallest near ``target``."""
    rng = np.random.RandomState(123)
    x = hp.quniform('x', 1.01, 10, 1)
    loss_expr = (x - target) ** 2 + scope.normal(0, 1, rng=rng)
    return {'loss': loss_expr, 'status': STATUS_OK}
def opt_q_uniform(target):
    """Loss is squared distance of the quantized ``x`` from ``target``, plus noise."""
    x = hp_quniform('x', 1.01, 10, 1)
    err = (x - target)**2
    return {'loss': err + scope.normal(0, 1)}
def opt_q_uniform(target):
    """Quantized-uniform objective: (x - target)^2 plus unseeded unit noise."""
    x = hp_quniform('x', 1.01, 10, 1)
    noisy = (x - target) ** 2 + scope.normal(0, 1)
    return {'loss': noisy}