def __init__(
    self,
    study_name: str,
    storage: Union[str, storages.BaseStorage],
    sampler: Optional["samplers.BaseSampler"] = None,
    pruner: Optional[pruners.BasePruner] = None,
) -> None:

    self.study_name = study_name
    storage = storages.get_storage(storage)
    study_id = storage.get_study_id_from_name(study_name)
    super(Study, self).__init__(study_id, storage)

    self.sampler = sampler or samplers.TPESampler()
    self.pruner = pruner or pruners.MedianPruner()

    self._optimize_lock = threading.Lock()
    self._stop_flag = False
def __init__(
    self,
    study_name,  # type: str
    storage,  # type: Union[str, storages.BaseStorage]
    sampler=None,  # type: samplers.BaseSampler
    pruner=None,  # type: pruners.BasePruner
):
    # type: (...) -> None

    self.study_name = study_name
    storage = storages.get_storage(storage)
    study_id = storage.get_study_id_from_name(study_name)
    super(Study, self).__init__(study_id, storage)

    self.sampler = sampler or samplers.TPESampler()
    self.pruner = pruner or pruners.MedianPruner()

    self._optimize_lock = threading.Lock()
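# A minimal usage sketch, not part of the constructors above: as their defaults show,
# a Study falls back to TPESampler and MedianPruner when no sampler or pruner is given.
# This assumes the public optuna.create_study API; the objective function is hypothetical.
import optuna


def objective(trial):
    x = trial.suggest_float("x", -10.0, 10.0)
    return (x - 2.0) ** 2


study = optuna.create_study(
    sampler=optuna.samplers.TPESampler(), pruner=optuna.pruners.MedianPruner()
)
study.optimize(objective, n_trials=10)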
def test_suggest_low_equals_high(storage_init_func):
    # type: (typing.Callable[[], storages.BaseStorage]) -> None

    study = create_study(storage_init_func(), sampler=samplers.TPESampler(n_startup_trials=0))
    trial = Trial(study, study.storage.create_new_trial_id(study.study_id))

    # Parameter values are determined without suggestion when low == high.
    with patch.object(trial, '_suggest', wraps=trial._suggest) as mock_object:
        assert trial.suggest_uniform('a', 1., 1.) == 1.  # Suggesting a param.
        assert trial.suggest_uniform('a', 1., 1.) == 1.  # Suggesting the same param.
        assert mock_object.call_count == 0
        assert trial.suggest_loguniform('b', 1., 1.) == 1.  # Suggesting a param.
        assert trial.suggest_loguniform('b', 1., 1.) == 1.  # Suggesting the same param.
        assert mock_object.call_count == 0
        assert trial.suggest_discrete_uniform('c', 1., 1., 1.) == 1.  # Suggesting a param.
        assert trial.suggest_discrete_uniform('c', 1., 1., 1.) == 1.  # Suggesting the same param.
        assert mock_object.call_count == 0
        assert trial.suggest_int('d', 1, 1) == 1  # Suggesting a param.
        assert trial.suggest_int('d', 1, 1) == 1  # Suggesting the same param.
        assert mock_object.call_count == 0
def test_suggest_low_equals_high(storage_init_func):
    # type: (typing.Callable[[], storages.BaseStorage]) -> None

    study = create_study(storage_init_func(), sampler=samplers.TPESampler(n_startup_trials=0))
    trial = Trial(study, study._storage.create_new_trial(study._study_id))

    # Parameter values are determined without suggestion when low == high.
    with patch.object(trial, "_suggest", wraps=trial._suggest) as mock_object:
        assert trial.suggest_uniform("a", 1.0, 1.0) == 1.0  # Suggesting a param.
        assert trial.suggest_uniform("a", 1.0, 1.0) == 1.0  # Suggesting the same param.
        assert mock_object.call_count == 0
        assert trial.suggest_loguniform("b", 1.0, 1.0) == 1.0  # Suggesting a param.
        assert trial.suggest_loguniform("b", 1.0, 1.0) == 1.0  # Suggesting the same param.
        assert mock_object.call_count == 0
        assert trial.suggest_discrete_uniform("c", 1.0, 1.0, 1.0) == 1.0  # Suggesting a param.
        assert trial.suggest_discrete_uniform("c", 1.0, 1.0, 1.0) == 1.0  # Suggesting the same param.
        assert mock_object.call_count == 0
        assert trial.suggest_int("d", 1, 1) == 1  # Suggesting a param.
        assert trial.suggest_int("d", 1, 1) == 1  # Suggesting the same param.
        assert mock_object.call_count == 0
        assert trial.suggest_float("e", 1.0, 1.0) == 1.0  # Suggesting a param.
        assert trial.suggest_float("e", 1.0, 1.0) == 1.0  # Suggesting the same param.
        assert mock_object.call_count == 0
        assert trial.suggest_float("f", 0.5, 0.5, log=True) == 0.5  # Suggesting a param.
        assert trial.suggest_float("f", 0.5, 0.5, log=True) == 0.5  # Suggesting the same param.
        assert mock_object.call_count == 0
        assert trial.suggest_float("g", 0.5, 0.5, log=False) == 0.5  # Suggesting a param.
        assert trial.suggest_float("g", 0.5, 0.5, log=False) == 0.5  # Suggesting the same param.
        assert mock_object.call_count == 0
def __init__(
    self,
    study_name,  # type: str
    storage,  # type: Union[str, storages.BaseStorage]
    sampler=None,  # type: samplers.BaseSampler
    pruner=None,  # type: pruners.BasePruner
    force_garbage_collection=True,  # type: bool
):
    # type: (...) -> None

    self.study_name = study_name
    storage = storages.get_storage(storage)
    study_id = storage.get_study_id_from_name(study_name)
    super(Study, self).__init__(study_id, storage)

    self.sampler = sampler or samplers.TPESampler()
    self.pruner = pruner or pruners.MedianPruner()

    self.logger = logging.get_logger(__name__)

    self._optimize_lock = threading.Lock()
    self.force_garbage_collection = force_garbage_collection
def _make_study(self, is_higher_better: bool) -> study_module.Study:
    direction = "maximize" if is_higher_better else "minimize"

    if self.study is None:
        seed = self._get_random_state()
        sampler = samplers.TPESampler(seed=seed)
        return study_module.create_study(direction=direction, sampler=sampler)

    _direction = (
        study_module.StudyDirection.MAXIMIZE
        if is_higher_better
        else study_module.StudyDirection.MINIMIZE
    )
    if self.study.direction != _direction:
        raise ValueError("direction of study must be '{}'.".format(direction))

    return self.study
def fit(
    self,
    X,  # type: TwoDimArrayLikeType
    y=None,  # type: Optional[Union[OneDimArrayLikeType, TwoDimArrayLikeType]]
    groups=None,  # type: Optional[OneDimArrayLikeType]
    **fit_params  # type: Any
):
    # type: (...) -> 'OptunaSearchCV'
    """Run fit with all sets of parameters.

    Args:
        X:
            Training data.

        y:
            Target variable.

        groups:
            Group labels for the samples used while splitting the dataset
            into train/test set.

        **fit_params:
            Parameters passed to ``fit`` on the estimator.

    Returns:
        self:
            Return self.
    """

    self._check_params()

    random_state = check_random_state(self.random_state)
    max_samples = self.subsample
    n_samples = _num_samples(X)
    old_level = _logger.getEffectiveLevel()

    if self.verbose > 1:
        _logger.setLevel(DEBUG)
    elif self.verbose > 0:
        _logger.setLevel(INFO)
    else:
        _logger.setLevel(WARNING)

    self.sample_indices_ = np.arange(n_samples)

    if type(max_samples) is float:
        max_samples = int(max_samples * n_samples)

    if max_samples < n_samples:
        self.sample_indices_ = random_state.choice(
            self.sample_indices_, max_samples, replace=False
        )
        self.sample_indices_.sort()

    X_res = _safe_indexing(X, self.sample_indices_)
    y_res = _safe_indexing(y, self.sample_indices_)
    groups_res = _safe_indexing(groups, self.sample_indices_)
    fit_params_res = fit_params

    if fit_params_res is not None:
        fit_params_res = _check_fit_params(X, fit_params, self.sample_indices_)

    classifier = is_classifier(self.estimator)
    cv = check_cv(self.cv, y_res, classifier)

    self.n_splits_ = cv.get_n_splits(X_res, y_res, groups=groups_res)
    self.scorer_ = check_scoring(self.estimator, scoring=self.scoring)

    if self.study is None:
        seed = random_state.randint(0, np.iinfo("int32").max)
        sampler = samplers.TPESampler(seed=seed)
        self.study_ = study_module.create_study(direction="maximize", sampler=sampler)
    else:
        self.study_ = self.study

    objective = _Objective(
        self.estimator,
        self.param_distributions,
        X_res,
        y_res,
        cv,
        self.enable_pruning,
        self.error_score,
        fit_params_res,
        groups_res,
        self.max_iter,
        self.return_train_score,
        self.scorer_,
    )

    _logger.info(
        "Searching the best hyperparameters using {} "
        "samples...".format(_num_samples(self.sample_indices_))
    )
    self.study_.optimize(
        objective, n_jobs=self.n_jobs, n_trials=self.n_trials, timeout=self.timeout
    )
    _logger.info("Finished hyperparameter search!")

    if self.refit:
        self._refit(X, y, **fit_params)

    _logger.setLevel(old_level)

    return self
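# A minimal usage sketch for the fit() method above, assuming optuna.integration.OptunaSearchCV
# and scikit-learn are importable in this environment. The estimator and search space below are
# illustrative only; newer Optuna versions would express the range as FloatDistribution(log=True).
import optuna
from optuna.integration import OptunaSearchCV
from sklearn.datasets import load_iris
from sklearn.svm import SVC

X, y = load_iris(return_X_y=True)
param_distributions = {
    "C": optuna.distributions.LogUniformDistribution(1e-10, 1e10),
}
search = OptunaSearchCV(SVC(gamma="auto"), param_distributions, n_trials=20, cv=3)
search.fit(X, y)  # Runs the hyperparameter search implemented above.
print(search.best_params_)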
def test_suggest_low_equals_high(storage_mode: str) -> None:

    with patch.object(
        distributions, "_get_single_value", wraps=distributions._get_single_value
    ) as mock_object, StorageSupplier(storage_mode) as storage:
        study = create_study(storage=storage, sampler=samplers.TPESampler(n_startup_trials=0))
        trial = Trial(study, study._storage.create_new_trial(study._study_id))

        assert trial.suggest_uniform("a", 1.0, 1.0) == 1.0  # Suggesting a param.
        assert mock_object.call_count == 1
        assert trial.suggest_uniform("a", 1.0, 1.0) == 1.0  # Suggesting the same param.
        assert mock_object.call_count == 1
        assert trial.suggest_loguniform("b", 1.0, 1.0) == 1.0  # Suggesting a param.
        assert mock_object.call_count == 2
        assert trial.suggest_loguniform("b", 1.0, 1.0) == 1.0  # Suggesting the same param.
        assert mock_object.call_count == 2
        assert trial.suggest_discrete_uniform("c", 1.0, 1.0, 1.0) == 1.0  # Suggesting a param.
        assert mock_object.call_count == 3
        assert trial.suggest_discrete_uniform("c", 1.0, 1.0, 1.0) == 1.0  # Suggesting the same param.
        assert mock_object.call_count == 3
        assert trial.suggest_int("d", 1, 1) == 1  # Suggesting a param.
        assert mock_object.call_count == 4
        assert trial.suggest_int("d", 1, 1) == 1  # Suggesting the same param.
        assert mock_object.call_count == 4
        assert trial.suggest_float("e", 1.0, 1.0) == 1.0  # Suggesting a param.
        assert mock_object.call_count == 5
        assert trial.suggest_float("e", 1.0, 1.0) == 1.0  # Suggesting the same param.
        assert mock_object.call_count == 5
        assert trial.suggest_float("f", 0.5, 0.5, log=True) == 0.5  # Suggesting a param.
        assert mock_object.call_count == 6
        assert trial.suggest_float("f", 0.5, 0.5, log=True) == 0.5  # Suggesting the same param.
        assert mock_object.call_count == 6
        assert trial.suggest_float("g", 0.5, 0.5, log=False) == 0.5  # Suggesting a param.
        assert mock_object.call_count == 7
        assert trial.suggest_float("g", 0.5, 0.5, log=False) == 0.5  # Suggesting the same param.
        assert mock_object.call_count == 7
        assert trial.suggest_float("h", 0.5, 0.5, step=1.0) == 0.5  # Suggesting a param.
        assert mock_object.call_count == 8
        assert trial.suggest_float("h", 0.5, 0.5, step=1.0) == 0.5  # Suggesting the same param.
        assert mock_object.call_count == 8
        assert trial.suggest_int("i", 1, 1, log=True) == 1  # Suggesting a param.
        assert mock_object.call_count == 9
        assert trial.suggest_int("i", 1, 1, log=True) == 1  # Suggesting the same param.
        assert mock_object.call_count == 9
def test_suggest_low_equals_high(storage_init_func):
    # type: (Callable[[], storages.BaseStorage]) -> None

    study = create_study(storage_init_func(), sampler=samplers.TPESampler(n_startup_trials=0))
    trial = Trial(study, study._storage.create_new_trial(study._study_id))

    with patch.object(
        optuna.distributions, "_get_single_value", wraps=optuna.distributions._get_single_value
    ) as mock_object:
        assert trial.suggest_uniform("a", 1.0, 1.0) == 1.0  # Suggesting a param.
        assert mock_object.call_count == 1
        assert trial.suggest_uniform("a", 1.0, 1.0) == 1.0  # Suggesting the same param.
        assert mock_object.call_count == 1
        assert trial.suggest_loguniform("b", 1.0, 1.0) == 1.0  # Suggesting a param.
        assert mock_object.call_count == 2
        assert trial.suggest_loguniform("b", 1.0, 1.0) == 1.0  # Suggesting the same param.
        assert mock_object.call_count == 2
        assert trial.suggest_discrete_uniform("c", 1.0, 1.0, 1.0) == 1.0  # Suggesting a param.
        assert mock_object.call_count == 3
        assert trial.suggest_discrete_uniform("c", 1.0, 1.0, 1.0) == 1.0  # Suggesting the same param.
        assert mock_object.call_count == 3
        assert trial.suggest_int("d", 1, 1) == 1  # Suggesting a param.
        assert mock_object.call_count == 4
        assert trial.suggest_int("d", 1, 1) == 1  # Suggesting the same param.
        assert mock_object.call_count == 4
        assert trial.suggest_float("e", 1.0, 1.0) == 1.0  # Suggesting a param.
        assert mock_object.call_count == 5
        assert trial.suggest_float("e", 1.0, 1.0) == 1.0  # Suggesting the same param.
        assert mock_object.call_count == 5
        assert trial.suggest_float("f", 0.5, 0.5, log=True) == 0.5  # Suggesting a param.
        assert mock_object.call_count == 6
        assert trial.suggest_float("f", 0.5, 0.5, log=True) == 0.5  # Suggesting the same param.
        assert mock_object.call_count == 6
        assert trial.suggest_float("g", 0.5, 0.5, log=False) == 0.5  # Suggesting a param.
        assert mock_object.call_count == 7
        assert trial.suggest_float("g", 0.5, 0.5, log=False) == 0.5  # Suggesting the same param.
        assert mock_object.call_count == 7
        assert trial.suggest_float("h", 0.5, 0.5, step=1.0) == 0.5  # Suggesting a param.
        assert mock_object.call_count == 8
        assert trial.suggest_float("h", 0.5, 0.5, step=1.0) == 0.5  # Suggesting the same param.
        assert mock_object.call_count == 8
        assert trial.suggest_int("i", 1, 1, log=True) == 1  # Suggesting a param.
        assert mock_object.call_count == 9
        assert trial.suggest_int("i", 1, 1, log=True) == 1  # Suggesting the same param.
        assert mock_object.call_count == 9
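# A hedged user-facing sketch of the behavior exercised by the tests above: when low == high,
# each suggest_* call returns the only possible value without consulting the sampler.
# The objective below is hypothetical and assumes the public optuna API.
import optuna


def objective(trial):
    fixed = trial.suggest_float("fixed", 1.0, 1.0)  # low == high, so this is always 1.0.
    tunable = trial.suggest_float("tunable", 0.0, 1.0)
    return fixed + tunable


study = optuna.create_study()
study.optimize(objective, n_trials=3)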