def test_sampling_random_state():
    """Log / reverse-log sampling stays in bounds and round-trips exactly."""
    log_hp = hp_module.Float('f', 1e-3, 1e3, sampling='log')
    draw = log_hp.random_sample()
    assert log_hp.min_value <= draw <= log_hp.max_value

    def to_unit_log(value, lo, hi):
        # Map value in [lo, hi] onto [0, 1] using a log scale.
        return math.log(value / lo) / math.log(hi / lo)

    lo, hi = 1e-10, 1e10
    original = 1e-1
    scaled = to_unit_log(original, lo, hi)
    # `_log_sample` must be the exact inverse of the scaling above.
    assert np.allclose(original, hp_module._log_sample(scaled, lo, hi))

    rev_hp = hp_module.Float('f', 1e-3, 1e3, sampling='reverse_log')
    draw = rev_hp.random_sample()
    assert rev_hp.min_value <= draw <= rev_hp.max_value

    def to_unit_reverse_log(value, lo, hi):
        # Reverse-log mapping of value in [lo, hi] onto [0, 1].
        return 1 - math.log((hi + lo - value) / lo) / math.log(hi / lo)

    original = 1e5
    scaled = to_unit_reverse_log(original, lo, hi)
    # `_reverse_log_sample` must invert the reverse-log scaling.
    assert np.allclose(original, hp_module._reverse_log_sample(scaled, lo, hi))
def test_Float():
    """Float hyperparameter: config round-trip, defaults, and sample bounds."""
    # With an explicit step.
    stepped = hp_module.Float(
        'linear', min_value=0.5, max_value=9.5, step=0.1, default=9.)
    stepped = hp_module.Float.from_config(stepped.get_config())
    assert stepped.default == 9.
    assert 0.5 <= stepped.random_sample() <= 9.5
    assert isinstance(stepped.random_sample(), float)
    # Seeded sampling is deterministic.
    assert stepped.random_sample(123) == stepped.random_sample(123)

    # Without a step.
    unstepped = hp_module.Float('linear', min_value=0.5, max_value=6.5, default=2.)
    unstepped = hp_module.Float.from_config(unstepped.get_config())
    assert unstepped.default == 2.
    assert 0.5 <= unstepped.random_sample() < 6.5
    assert isinstance(unstepped.random_sample(), float)
    assert unstepped.random_sample(123) == unstepped.random_sample(123)

    # No explicit default: falls back to min_value.
    no_default = hp_module.Float('linear', min_value=0.5, max_value=9.5, step=0.1)
    assert no_default.default == 0.5
def test_bayesian_oracle_with_zero_y(tmp_dir):
    """The oracle must tolerate a constant-zero objective across many trials."""
    space = [
        hp_module.Choice('a', [1, 2], default=1),
        hp_module.Int('b', 3, 10, default=3),
        hp_module.Float('c', 0, 1, 0.1, default=0),
        hp_module.Fixed('d', 7),
        hp_module.Choice('e', [9, 0], default=9),
    ]
    oracle = bo_module.BayesianOptimizationOracle()
    for trial_id in map(str, range(100)):
        oracle.populate_space(trial_id, space)
        # Every trial reports the same score of zero.
        oracle.result(trial_id, 0)
def test_save_before_result(tmp_dir):
    """Saving the oracle before any result is reported must not fail."""
    space = [
        hp_module.Choice('a', [1, 2], default=1),
        hp_module.Int('b', 3, 10, default=3),
        hp_module.Float('c', 0, 1, 0.1, default=0),
        hp_module.Fixed('d', 7),
        hp_module.Choice('e', [9, 0], default=9),
    ]
    oracle = bo_module.BayesianOptimizationOracle()
    trial_id = str(1)
    oracle.populate_space(trial_id, space)
    # Save while the trial is still pending, then report its result.
    oracle.save(os.path.join(tmp_dir, 'temp_oracle'))
    oracle.result(trial_id, 0)
def test_sampling_arg():
    """`sampling` survives a config round-trip; invalid values raise.

    NOTE(review): a second `test_sampling_arg` is defined later in this
    file; pytest only collects the last definition, so this one is shadowed.
    """
    log_float = hp_module.Float.from_config(
        hp_module.Float('f', 1e-20, 1e10, sampling='log').get_config())
    assert log_float.sampling == 'log'

    linear_int = hp_module.Int.from_config(
        hp_module.Int('i', 0, 10, sampling='linear').get_config())
    assert linear_int.sampling == 'linear'

    with pytest.raises(ValueError, match='`sampling` must be one of'):
        hp_module.Int('j', 0, 10, sampling='invalid')
def test_float_proto():
    """Float serializes to proto and deserializes to an equal config."""
    hp = hp_module.Float('a', -10, 10, sampling='linear', default=3)
    proto = hp.to_proto()

    assert proto.name == 'a'
    assert proto.min_value == -10.
    assert proto.max_value == 10.
    assert proto.sampling == kerastuner_pb2.Sampling.LINEAR
    assert proto.default == 3.
    # Zero is the proto default for step; `from_proto` converts it to `None`.
    assert proto.step == 0.

    round_tripped = hp_module.Float.from_proto(proto)
    assert round_tripped.get_config() == hp.get_config()
def test_bayesian_dynamic_space(tmp_dir):
    """Hyperparameters added mid-search appear in newly populated values."""
    space = [hp_module.Choice('a', [1, 2], default=1)]
    oracle = bo_module.BayesianOptimizationOracle()
    # Seed the oracle with ten completed trials.
    for step in range(10):
        trial_id = str(step)
        oracle.populate_space(trial_id, space)
        oracle.result(trial_id, step)

    # Grow the space one hyperparameter at a time; each addition must show
    # up in the values proposed for the next trial.
    additions = [
        ('1_0', hp_module.Int('b', 3, 10, default=3)),
        ('1_1', hp_module.Float('c', 0, 1, 0.1, default=0)),
        ('1_2', hp_module.Fixed('d', 7)),
        ('1_3', hp_module.Choice('e', [9, 0], default=9)),
    ]
    for trial_id, new_hp in additions:
        space.append(new_hp)
        assert new_hp.name in oracle.populate_space(trial_id, space)['values']
def test_reverse_log_sampling_random_state():
    """Reverse-log cumulative probabilities round-trip through values."""
    f = hp_module.Float('f', 1e-3, 1e3, sampling='reverse_log')
    draw = f.random_sample()
    assert f.min_value <= draw <= f.max_value

    # The minimum maps to cumulative probability 0 and back again.
    lo_prob = hp_module.value_to_cumulative_prob(1e-3, f)
    assert lo_prob == 0
    assert np.isclose(1e-3, hp_module.cumulative_prob_to_value(lo_prob, f))

    # An interior value maps strictly inside (0, 1) and round-trips.
    mid_prob = hp_module.value_to_cumulative_prob(1, f)
    assert 0 < mid_prob < 1
    assert np.isclose(1, hp_module.cumulative_prob_to_value(mid_prob, f))
def test_log_sampling_random_state():
    """Log-scale cumulative probabilities round-trip through values."""
    f = hp_module.Float("f", 1e-3, 1e3, sampling="log")
    draw = f.random_sample()
    assert f.min_value <= draw <= f.max_value

    # (value, expected cumulative probability) at the ends and the log-midpoint.
    for value, expected_prob in ((1e-3, 0), (1, 0.5), (1e3, 1)):
        prob = hp_module.value_to_cumulative_prob(value, f)
        assert prob == expected_prob
        # Mapping back must recover the original value.
        assert np.isclose(value, hp_module.cumulative_prob_to_value(prob, f))
def test_sampling_arg():
    """`sampling` round-trips through config; invalid arguments raise.

    NOTE(review): this shadows an earlier `test_sampling_arg` in this file;
    pytest only collects the last definition, so the earlier one never runs.
    Consider renaming one of them.
    """
    f = hp_module.Float("f", 1e-20, 1e10, sampling="log")
    f = hp_module.Float.from_config(f.get_config())
    assert f.sampling == "log"

    i = hp_module.Int("i", 0, 10, sampling="linear")
    i = hp_module.Int.from_config(i.get_config())
    assert i.sampling == "linear"

    # An unknown sampling mode is rejected.
    with pytest.raises(ValueError, match="`sampling` must be one of"):
        hp_module.Int("j", 0, 10, sampling="invalid")

    # min_value > max_value is rejected when sampling is set.
    # (The original repeated this identical check twice; once suffices.)
    with pytest.raises(
        ValueError,
        match="`sampling` `min_value` 1 is greater than the `max_value` 0",
    ):
        hp_module.Int("k", 1, 0, sampling="linear")