class BayesianOptimizer:
    """Budget-limited wrapper driving PrimaryAlgo's Bayesian optimizer.

    Delegates suggestion/observation to a ``PrimaryAlgo`` instance configured
    with the ``'BayesianOptimizer'`` algorithm and the given keyword options.
    """

    def __init__(self, space, max_trials, seed, **kwargs):
        self.primary = PrimaryAlgo(space, {'BayesianOptimizer': kwargs})
        self.primary.algorithm.random_state = seed
        self.max_trials = max_trials
        self.trial_count = 0

    @property
    def space(self):
        """Search space owned by the underlying PrimaryAlgo."""
        return self.primary.space

    def is_completed(self):
        """Whether the trial budget has been exhausted."""
        # NOTE(review): trial_count is never incremented inside this class;
        # presumably the caller updates it — confirm against call sites.
        return self.trial_count >= self.max_trials

    def get_params(self, seed=None):
        """Sample one parameter set; a fresh random seed is drawn if None."""
        if seed is None:
            seed = random.randint(0, 100000)
        self.primary.algorithm._init_optimizer()
        opt = self.primary.algorithm.optimizer
        opt.rng.seed(seed)
        # Giving the same seed to both would be problematic since opt.rng and
        # opt.base_estimator_.rng would be synchronized and sample the same
        # values — so the estimator's seed is drawn from opt.rng instead.
        opt.base_estimator_.random_state = opt.rng.randint(0, 100000)
        suggestion = self.primary.suggest()[0]
        params = unflatten(dict(zip(self.space.keys(), suggestion)))
        logger.debug('Sampling:\n{}'.format(pprint.pformat(params)))
        return params

    def observe(self, params, objective):
        """Report an objective value for a previously suggested point."""
        flat = flatten(params)
        ordered = [[flat[name] for name in self.space.keys()]]
        self.primary.observe(ordered, [dict(objective=objective)])
class TPEOptimizer:
    """Budget-limited wrapper driving PrimaryAlgo's TPE optimizer.

    Delegates suggestion/observation to a ``PrimaryAlgo`` instance configured
    with the ``'TPEOptimizer'`` algorithm and the given keyword options.
    """

    def __init__(self, space, max_trials, seed, **kwargs):
        self.primary = PrimaryAlgo(space, {'TPEOptimizer': kwargs})
        self.primary.algorithm.random_state = seed
        self.max_trials = max_trials
        self.trial_count = 0

    @property
    def space(self):
        """Search space owned by the underlying PrimaryAlgo."""
        return self.primary.space

    def is_completed(self):
        """Whether the trial budget has been exhausted."""
        # NOTE(review): trial_count is never incremented inside this class;
        # presumably the caller updates it — confirm against call sites.
        return self.trial_count >= self.max_trials

    def get_params(self, seed=None):
        """Sample one parameter set; a fresh random seed is drawn if None.

        Fix: `seed` now defaults to None — the body already handled
        `seed is None`, but the parameter was required positionally, unlike
        the otherwise-identical BayesianOptimizer.get_params signature.
        """
        if seed is None:
            seed = random.randint(0, 100000)
        # Reseed both the TPE sampler and its fallback random sampler so a
        # given seed reproduces the same suggestion.
        sampler = self.primary.algorithm.study.sampler
        sampler.rng.seed(seed)
        sampler.random_sampler.rng.seed(seed)
        params = unflatten(
            dict(zip(self.space.keys(), self.primary.suggest()[0])))
        logger.debug('Sampling:\n{}'.format(pprint.pformat(params)))
        return params

    def observe(self, params, objective):
        """Report an objective value for a previously suggested point."""
        params = flatten(params)
        params = [[params[name] for name in self.space.keys()]]
        results = [dict(objective=objective)]
        self.primary.observe(params, results)