def test_seeding(space, algo):
    """Verify that seeding after init has no effect for this algo."""
    optimizer = PrimaryAlgo(space, algo)
    optimizer.seed_rng(1)
    first = optimizer.suggest(1)[0]
    # Consecutive suggestions differ even immediately after seeding.
    assert not numpy.allclose(first, optimizer.suggest(1)[0])
    # Re-seeding with the same value does not replay the sequence.
    optimizer.seed_rng(1)
    assert not numpy.allclose(first, optimizer.suggest(1)[0])
def test_seeding(space):
    """Verify that seeding makes sampling deterministic"""
    optimizer = PrimaryAlgo(space, 'bayesianoptimizer')
    optimizer.seed_rng(1)
    first = optimizer.suggest(1)[0]
    # A second draw from the same stream must differ from the first.
    assert not numpy.allclose(first, optimizer.suggest(1)[0])
    # Re-seeding with the same value replays the exact same sequence.
    optimizer.seed_rng(1)
    assert numpy.allclose(first, optimizer.suggest(1)[0])
def test_seeding(space):
    """Verify that seeding makes sampling deterministic"""
    hyperband = PrimaryAlgo(space, 'hyperband')
    hyperband.seed_rng(1)
    first = hyperband.suggest(1)[0]
    # Without re-seeding, the next suggestion differs.
    assert not numpy.allclose(first, hyperband.suggest(1)[0])
    # Seeding again with the same value reproduces the first suggestion.
    hyperband.seed_rng(1)
    assert numpy.allclose(first, hyperband.suggest(1)[0])
def test_seed_rng(space):
    """Test that algo is seeded properly"""
    hyperband = PrimaryAlgo(space, 'hyperband')
    hyperband.seed_rng(1)
    rung = hyperband.suggest(1)
    # Hyperband will always return the full first rung, so asking again
    # under the same seed yields identical points.
    assert numpy.allclose(rung, hyperband.suggest(1))
    # A different seed produces a different first rung.
    hyperband.seed_rng(2)
    assert not numpy.allclose(rung, hyperband.suggest(1))
def test_seeding(space):
    """Verify that seeding makes sampling deterministic"""
    optimizer = PrimaryAlgo(space, 'tpeoptimizer')
    optimizer.seed_rng(1)
    first = optimizer.suggest(1)[0]
    # Consecutive draws from the same stream differ.
    assert not numpy.allclose(first, optimizer.suggest(1)[0])
    # Re-seeding with the same value replays the first draw exactly.
    optimizer.seed_rng(1)
    assert numpy.allclose(first, optimizer.suggest(1)[0])
def test_seeding(space):
    """Verify that seeding makes sampling deterministic"""
    mads = PrimaryAlgo(space, "meshadaptivedirectsearch")
    mads.seed_rng(1)
    first = mads.suggest(1)[0]
    second = mads.suggest(1)[0]
    # Without re-seeding, the second draw must NOT equal the first:
    # assert_equal raising AssertionError is the expected outcome here.
    with pytest.raises(AssertionError):
        numpy.testing.assert_equal(first, second)
    # Re-seeding with the same value reproduces the first draw exactly.
    mads.seed_rng(1)
    numpy.testing.assert_equal(first, mads.suggest(1)[0])
def test_set_state(space):
    """Verify that resetting state makes sampling deterministic"""
    optimizer = PrimaryAlgo(space, 'bayesianoptimizer')
    optimizer.seed_rng(1)
    # Snapshot the RNG state before the first suggestion.
    snapshot = optimizer.state_dict
    first = optimizer.suggest(1)[0]
    # The stream moves on: the next draw differs.
    assert not numpy.allclose(first, optimizer.suggest(1)[0])
    # Restoring the snapshot replays the first draw exactly.
    optimizer.set_state(snapshot)
    assert numpy.allclose(first, optimizer.suggest(1)[0])
def test_set_state(space):
    """Verify that resetting state makes sampling deterministic"""
    hyperband = PrimaryAlgo(space, 'hyperband')
    hyperband.seed_rng(1)
    # Snapshot the RNG state before the first suggestion.
    snapshot = hyperband.state_dict
    first = hyperband.suggest(1)[0]
    # The stream moves on: the next draw differs.
    assert not numpy.allclose(first, hyperband.suggest(1)[0])
    # Restoring the snapshot replays the first draw exactly.
    hyperband.set_state(snapshot)
    assert numpy.allclose(first, hyperband.suggest(1)[0])
def test_seeding_noisy_grid_search(space):
    """Verify that a seed passed at init takes effect."""
    optimizer = PrimaryAlgo(space, {'noisygridsearch': {'seed': 1}})
    first = optimizer.suggest(1)[0]
    # Consecutive draws from one instance differ.
    assert not numpy.allclose(first, optimizer.suggest(1)[0])
    # A fresh instance with the same seed reproduces the first draw.
    same_seed = PrimaryAlgo(space, {'noisygridsearch': {'seed': 1}})
    assert numpy.allclose(first, same_seed.suggest(1)[0])
    # A fresh instance with a different seed does not.
    other_seed = PrimaryAlgo(space, {'noisygridsearch': {'seed': 2}})
    assert not numpy.allclose(first, other_seed.suggest(1)[0])
def test_set_state(space):
    """Test that state is reset properly"""
    hyperband = PrimaryAlgo(space, 'hyperband')
    hyperband.seed_rng(1)
    # Snapshot the state right after seeding.
    snapshot = hyperband.state_dict
    rung = hyperband.suggest(1)
    # Hyperband will always return the full first rung, so asking again
    # under the same seed yields identical points.
    assert numpy.allclose(rung, hyperband.suggest(1))
    # Seeding differently changes the rung ...
    hyperband.seed_rng(2)
    assert not numpy.allclose(rung, hyperband.suggest(1))
    # ... and restoring the snapshot brings back the original one.
    hyperband.set_state(snapshot)
    assert numpy.allclose(rung, hyperband.suggest(1))
class BayesianOptimizer:
    """Adapter wrapping Orion's ``PrimaryAlgo`` around a Bayesian optimizer.

    Exposes a simple ``get_params`` / ``observe`` / ``is_completed`` interface
    on top of ``PrimaryAlgo(space, {'BayesianOptimizer': kwargs})``.
    """

    def __init__(self, space, max_trials, seed, **kwargs):
        # kwargs are forwarded verbatim as the BayesianOptimizer config.
        self.primary = PrimaryAlgo(space, {'BayesianOptimizer': kwargs})
        self.primary.algorithm.random_state = seed
        self.max_trials = max_trials
        # NOTE(review): trial_count is never incremented anywhere in this
        # class (observe() does not touch it), so is_completed() can only
        # become True if an external caller updates it — verify intent.
        self.trial_count = 0

    @property
    def space(self):
        """Search space of the wrapped algorithm."""
        return self.primary.space

    def is_completed(self):
        """Return True once the number of trials reaches ``max_trials``."""
        return self.trial_count >= self.max_trials

    def get_params(self, seed=None):
        """Sample one set of parameters, optionally with an explicit seed.

        When ``seed`` is None a random one is drawn so each call samples
        a different point.
        """
        if seed is None:
            seed = random.randint(0, 100000)
        # Rebuild the underlying skopt optimizer so the seeding below
        # applies to a fresh instance.
        self.primary.algorithm._init_optimizer()
        optimizer = self.primary.algorithm.optimizer
        optimizer.rng.seed(seed)
        # Giving the same seed could be problematic since optimizer.rng and
        # optimizer.base_estimator.rng would be synchronized and sample the same values.
        optimizer.base_estimator_.random_state = optimizer.rng.randint(
            0, 100000)
        params = unflatten(
            dict(zip(self.space.keys(), self.primary.suggest()[0])))
        logger.debug('Sampling:\n{}'.format(pprint.pformat(params)))
        return params

    def observe(self, params, objective):
        """Report one (params, objective) result back to the algorithm.

        ``params`` is a possibly-nested dict; it is flattened and reordered
        to match the space's key order before being passed to PrimaryAlgo.
        """
        params = flatten(params)
        params = [[params[param_name] for param_name in self.space.keys()]]
        results = [dict(objective=objective)]
        self.primary.observe(params, results)
def test_deltas_noisy_grid_search(monkeypatch, space):
    """Verify that deltas are applied properly"""
    deltas = {'yolo1': 3, 'yolo2': 1}

    class FakeRandomState():
        """RNG stand-in whose uniform() always draws ones, making the
        applied offset deterministic (half of each delta)."""

        def __init__(self, seed):
            pass

        def uniform(self, a, b, size):
            return numpy.ones(size)

    monkeypatch.setattr('numpy.random.RandomState', FakeRandomState)

    config = {'seed': 3, 'deltas': deltas, 'n_points': 2}
    optimizer = PrimaryAlgo(space, {'noisygridsearch': config})
    points = optimizer.suggest(4)

    shift1 = deltas['yolo1'] / 2
    shift2 = deltas['yolo2'] / 2
    # Grid is the cross-product of yolo1 in {-3, 3} and yolo2 in {1, 10}
    # (yolo2 shifted in log space), each offset by half its delta.
    expected = [
        (-3 + shift1, numpy.exp(numpy.log(1) + shift2)),
        (-3 + shift1, numpy.exp(numpy.log(10) + shift2)),
        (3 + shift1, numpy.exp(numpy.log(1) + shift2)),
        (3 + shift1, numpy.exp(numpy.log(10) + shift2)),
    ]
    for point, (yolo1_value, yolo2_value) in zip(points, expected):
        assert point[0] == yolo1_value
        assert point[1] == yolo2_value
class TPEOptimizer:
    """Adapter wrapping Orion's ``PrimaryAlgo`` around a TPE optimizer.

    Exposes a simple ``get_params`` / ``observe`` / ``is_completed`` interface
    on top of ``PrimaryAlgo(space, {'TPEOptimizer': kwargs})``.
    """

    def __init__(self, space, max_trials, seed, **kwargs):
        # kwargs are forwarded verbatim as the TPEOptimizer config.
        self.primary = PrimaryAlgo(space, {'TPEOptimizer': kwargs})
        self.primary.algorithm.random_state = seed
        self.max_trials = max_trials
        # NOTE(review): trial_count is never incremented anywhere in this
        # class (observe() does not touch it), so is_completed() can only
        # become True if an external caller updates it — verify intent.
        self.trial_count = 0

    @property
    def space(self):
        """Search space of the wrapped algorithm."""
        return self.primary.space

    def is_completed(self):
        """Return True once the number of trials reaches ``max_trials``."""
        return self.trial_count >= self.max_trials

    def get_params(self, seed=None):
        """Sample one set of parameters, optionally with an explicit seed.

        Fix: the body already handled ``seed is None`` but the parameter had
        no default, forcing callers to pass one explicitly; default to None
        for consistency with ``BayesianOptimizer.get_params`` (backward
        compatible — positional calls still work).
        """
        if seed is None:
            seed = random.randint(0, 100000)
        # Seed both the TPE sampler and its random fallback sampler so the
        # whole suggestion pipeline is deterministic for a given seed.
        self.primary.algorithm.study.sampler.rng.seed(seed)
        self.primary.algorithm.study.sampler.random_sampler.rng.seed(seed)
        params = unflatten(
            dict(zip(self.space.keys(), self.primary.suggest()[0])))
        logger.debug('Sampling:\n{}'.format(pprint.pformat(params)))
        return params

    def observe(self, params, objective):
        """Report one (params, objective) result back to the algorithm.

        ``params`` is a possibly-nested dict; it is flattened and reordered
        to match the space's key order before being passed to PrimaryAlgo.
        """
        params = flatten(params)
        params = [[params[param_name] for param_name in self.space.keys()]]
        results = [dict(objective=objective)]
        self.primary.observe(params, results)