Example #1
def _objective(trial: optuna.trial.Trial) -> float:

    p0 = trial.suggest_uniform("p0", -3.3, 5.2)
    p1 = trial.suggest_uniform("p1", 2.0, 2.0)
    p2 = trial.suggest_loguniform("p2", 0.0001, 0.3)
    p3 = trial.suggest_loguniform("p3", 1.1, 1.1)
    p4 = trial.suggest_int("p4", -100, 8)
    p5 = trial.suggest_int("p5", -20, -20)
    p6 = trial.suggest_discrete_uniform("p6", 10, 20, 2)
    p7 = trial.suggest_discrete_uniform("p7", 0.1, 1.0, 0.1)
    p8 = trial.suggest_discrete_uniform("p8", 2.2, 2.2, 0.5)
    p9 = trial.suggest_categorical("p9", ["9", "3", "0", "8"])
    assert isinstance(p9, str)

    return p0 + p1 + p2 + p3 + p4 + p5 + p6 + p7 + p8 + int(p9)
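
This snippet uses Optuna's legacy suggest_uniform / suggest_loguniform / suggest_discrete_uniform API, which later releases fold into suggest_float (with log=... or step=...); the single-point ranges such as p1 over [2.0, 2.0] always return that boundary value. A minimal sketch of running the objective, assuming only that optuna is installed:

import optuna

study = optuna.create_study(direction="minimize")
study.optimize(_objective, n_trials=20)
print(study.best_params)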
Example #2
    def modelExtraTreesClassifier(self, trial: optuna.trial.Trial):
        # Note: ExtraTreesClassifier has no learning_rate parameter, so the
        # learning_rate suggestion in the original would make set_params()
        # below raise ValueError; it is omitted here.
        opt_params = dict(n_estimators=trial.suggest_int("n_estimators",
                                                         2,
                                                         2**10,
                                                         log=True),
                          max_depth=trial.suggest_int("max_depth", 2, 2**4),
                          criterion=trial.suggest_categorical(
                              "criterion", ["gini", "entropy"]))
        clf = ExtraTreesClassifier(n_estimators=100,
                                   criterion="gini",
                                   max_depth=None,
                                   min_samples_split=2,
                                   min_samples_leaf=1,
                                   min_weight_fraction_leaf=0.,
                                   max_features="auto",
                                   max_leaf_nodes=None,
                                   min_impurity_decrease=0.,
                                   min_impurity_split=None,
                                   bootstrap=False,
                                   oob_score=False,
                                   n_jobs=None,
                                   random_state=None,
                                   verbose=0,
                                   warm_start=False,
                                   class_weight=None,
                                   ccp_alpha=0.0,
                                   max_samples=None)

        clf.set_params(**{**opt_params, **self.params})
        return clf
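
The method above only configures an estimator; a hypothetical way to score it inside an Optuna objective is cross-validation. In this sketch, builder, X, and y are assumptions standing in for an instance of the surrounding class and its training data:

from sklearn.model_selection import cross_val_score

def objective(trial):
    # builder is an instance of the class that defines
    # modelExtraTreesClassifier (an assumption, not shown in the original)
    clf = builder.modelExtraTreesClassifier(trial)
    return cross_val_score(clf, X, y, cv=5, scoring="accuracy").mean()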
Example #3
    def objective2(trial: optuna.trial.Trial) -> float:

        p1 = trial.suggest_loguniform("p1", 50,
                                      100)  # The range has been changed
        p3 = trial.suggest_discrete_uniform("p3", 0, 9, 3)
        p5 = trial.suggest_uniform("p5", 0, 1)

        return p1 + p3 + p5
Example #4
    def objective0(trial: optuna.trial.Trial) -> float:

        p0 = trial.suggest_uniform("p0", 0, 10)
        p1 = trial.suggest_loguniform("p1", 1, 10)
        p2 = trial.suggest_int("p2", 0, 10)
        p3 = trial.suggest_discrete_uniform("p3", 0, 9, 3)
        p4 = trial.suggest_categorical("p4", ["10", "20", "30"])
        assert isinstance(p4, str)
        return p0 + p1 + p2 + p3 + int(p4)
Example #5
    def objective1(trial: optuna.trial.Trial) -> float:

        # p0, p2 and p4 are deleted.
        p1 = trial.suggest_loguniform("p1", 1, 10)
        p3 = trial.suggest_discrete_uniform("p3", 0, 9, 3)

        # p5 is added.
        p5 = trial.suggest_uniform("p5", 0, 1)

        return p1 + p3 + p5
Example #6
 def objective(trial: optuna.trial.Trial) -> Tuple[float, float]:
     p0 = trial.suggest_float("p0", -10, 10)
     p1 = trial.suggest_uniform("p1", 3, 5)
     p2 = trial.suggest_loguniform("p2", 0.00001, 0.1)
     p3 = trial.suggest_discrete_uniform("p3", 100, 200, q=5)
     p4 = trial.suggest_int("p4", -20, -15)
     p5 = cast(int, trial.suggest_categorical("p5", [7, 1, 100]))
     p6 = trial.suggest_float("p6", -10, 10, step=1.0)
     p7 = trial.suggest_int("p7", 1, 7, log=True)
     return (
         p0 + p1 + p2,
         p3 + p4 + p5 + p6 + p7,
     )
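
Because this objective returns a tuple of two values, it has to be run as a multi-objective study; a minimal sketch using directions rather than direction:

import optuna

study = optuna.create_study(directions=["minimize", "minimize"])
study.optimize(objective, n_trials=20)
print(study.best_trials)  # the Pareto-optimal trials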
Example #7
    def modelAdaBoostClassifier(self, trial: optuna.trial.Trial):
        opt_params = dict(
            n_estimators=trial.suggest_int("n_estimators", 2, 2**10, log=True),
            learning_rate=trial.suggest_discrete_uniform(
                'learning_rate', 0.001, 1, 0.001),
        )
        clf = AdaBoostClassifier(base_estimator=None,
                                 n_estimators=50,
                                 learning_rate=1.,
                                 algorithm='SAMME.R',
                                 random_state=None)

        clf.set_params(**{**opt_params, **self.params})
        return clf
Example #8
 def modelCatBoostClassifier(self, trial: optuna.trial.Trial):
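     # Note: several of these parameter names (min_child_weight,
     # min_split_gain, subsample_freq, reg_alpha) are LightGBM-style rather
     # than native CatBoost options; CatBoost's set_params() stores them
     # without validation, so any names CatBoost does not recognize will
     # only raise an error once fit() is called.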
     opt_params = dict(
         num_leaves=trial.suggest_int("num_leaves", 2, 2**8),
         learning_rate=trial.suggest_discrete_uniform(
             'learning_rate', 0.001, 1, 0.001),
         n_estimators=trial.suggest_int("n_estimators", 2, 2**10, log=True),
         min_child_samples=trial.suggest_int('min_child_samples', 2, 2**8),
         min_child_weight=trial.suggest_loguniform('min_child_weight', 1e-8,
                                                   1),
         min_split_gain=trial.suggest_loguniform('min_split_gain', 1e-8, 1),
         subsample=trial.suggest_uniform('subsample', 0.4, 1),
         subsample_freq=trial.suggest_int("subsample_freq", 0, 2**4),
         colsample_bytree=trial.suggest_uniform('colsample_bytree', 0.4, 1),
         reg_alpha=trial.suggest_loguniform('reg_alpha', 1e-8, 10),
         reg_lambda=trial.suggest_loguniform('reg_lambda', 1e-8, 10),
     )
     clf = CatBoostClassifier()
     clf.set_params(**{**opt_params, **self.params})
     return clf
Example #9
 def modelBaggingClassifier(self, trial: optuna.trial.Trial):
     # Note: BaggingClassifier has no learning_rate parameter, so the
     # learning_rate suggestion in the original would make set_params()
     # below raise ValueError; it is omitted here.
     opt_params = dict(
         n_estimators=trial.suggest_int("n_estimators", 2, 2**10, log=True),
         max_samples=trial.suggest_uniform('max_samples', 0.1, 1),
     )
     clf = BaggingClassifier(base_estimator=None,
                             n_estimators=10,
                             max_samples=1.0,
                             max_features=1.0,
                             bootstrap=True,
                             bootstrap_features=False,
                             oob_score=False,
                             warm_start=False,
                             n_jobs=-1,
                             random_state=None,
                             verbose=0)
     clf.set_params(**{**opt_params, **self.params})
     return clf
Example #10
 def modelLGBMClassifier(self, trial: optuna.trial.Trial):
     opt_params = dict(
         num_leaves=trial.suggest_int("num_leaves", 2, 2**8),
         learning_rate=trial.suggest_discrete_uniform(
             'learning_rate', 0.001, 1, 0.001),
         n_estimators=trial.suggest_int("n_estimators", 2, 2**10, log=True),
         min_child_samples=trial.suggest_int('min_child_samples', 2, 2**8),
         min_child_weight=trial.suggest_loguniform('min_child_weight', 1e-8,
                                                   1),
         min_split_gain=trial.suggest_loguniform('min_split_gain', 1e-8, 1),
         subsample=trial.suggest_uniform('subsample', 0.4, 1),
         subsample_freq=trial.suggest_int("subsample_freq", 0, 2**4),
         colsample_bytree=trial.suggest_uniform('colsample_bytree', 0.4, 1),
         reg_alpha=trial.suggest_loguniform('reg_alpha', 1e-8, 10),
         reg_lambda=trial.suggest_loguniform('reg_lambda', 1e-8, 10),
     )
     clf = LGBMClassifier(boosting_type='gbdt',
                          num_leaves=31,
                          max_depth=-1,
                          learning_rate=0.1,
                          n_estimators=100,
                          subsample_for_bin=200000,
                          objective=None,
                          class_weight=None,
                          min_split_gain=0.,
                          min_child_weight=1e-3,
                          min_child_samples=20,
                          subsample=1.,
                          subsample_freq=0,
                          colsample_bytree=1.,
                          reg_alpha=0.,
                          reg_lambda=0.,
                          random_state=None,
                          n_jobs=-1,
                          silent=True,
                          importance_type='split')
     clf.set_params(**{**opt_params, **self.params})
     return clf
Example #11
 def modelXGBClassifier(self, trial: optuna.trial.Trial):
     opt_params = dict(
         max_depth=trial.suggest_int("max_depth", 2, 2**4),
         learning_rate=trial.suggest_discrete_uniform(
             'learning_rate', 0.001, 1, 0.001),
         n_estimators=trial.suggest_int("n_estimators", 2, 2**10, log=True),
         gamma=trial.suggest_loguniform('gamma', 1e-8, 1),
         min_child_weight=trial.suggest_loguniform('min_child_weight', 1e-8,
                                                   2**10),
         subsample=trial.suggest_uniform('subsample', 0.1, 1),
         colsample_bytree=trial.suggest_uniform('colsample_bytree', 0.1, 1),
         colsample_bylevel=trial.suggest_uniform('colsample_bylevel', 0.1,
                                                 1),
         reg_alpha=trial.suggest_loguniform('reg_alpha', 1e-8, 10),
         reg_lambda=trial.suggest_loguniform('reg_lambda', 1e-8, 10),
     )
     clf = XGBClassifier(max_depth=3,
                         learning_rate=0.1,
                         n_estimators=100,
                         silent=True,
                         objective="binary:logistic",
                         booster='gbtree',
                         n_jobs=1,
                         gamma=0,
                         min_child_weight=1,
                         max_delta_step=0,
                         subsample=1,
                         colsample_bytree=1,
                         colsample_bylevel=1,
                         reg_alpha=0,
                         reg_lambda=1,
                         scale_pos_weight=1,
                         base_score=0.5,
                         random_state=0,
                         missing=None)
     clf.set_params(**{**opt_params, **self.params})
     return clf
Example #12
    def _objective(self, trial: optuna.trial.Trial):
        cv = trial.suggest_int('cv', 2, 2**4)

        opt_params = dict(
            objective=trial.suggest_categorical("objective",
                                                ["Logloss", "CrossEntropy"]),
            boosting_type=trial.suggest_categorical("boosting_type",
                                                    ["Ordered", "Plain"]),
            bootstrap_type=trial.suggest_categorical(
                "bootstrap_type", ["Bayesian", "Bernoulli", "MVS"]),
            # used_ram_limit="3gb",
            max_depth=trial.suggest_int("max_depth", 2, 2**4),
            learning_rate=trial.suggest_discrete_uniform(
                'learning_rate', 0.001, 1, 0.001),
            # n_estimators=trial.suggest_int("n_estimators", 2, 2 ** 10, log=True),
            colsample_bylevel=trial.suggest_float("colsample_bylevel", 0.01,
                                                  0.1),
            reg_lambda=trial.suggest_float("reg_lambda", 1e-8, 100, log=True))

        if opt_params["bootstrap_type"] == "Bayesian":
            opt_params["bagging_temperature"] = trial.suggest_float(
                "bagging_temperature", 0, 10)
        elif opt_params["bootstrap_type"] == "Bernoulli":
            opt_params["subsample"] = trial.suggest_float("subsample", 0.1, 1)

        if self.params is not None:
            opt_params.update(self.params)

        clf_oof = CatBoostClassifierOOF(self.X,
                                        self.y,
                                        params=opt_params,
                                        cv=cv,
                                        feval=self.feval)
        clf_oof.run()

        return clf_oof.oof_score  # todo: f1
Example #13
 def _objective(self, trial: optuna.trial.Trial):
     threshold = trial.suggest_discrete_uniform('threshold', 0.001, 1,
                                                0.001)
     y_pred_ = np.where(self.y_pred > threshold, 1, 0)
     score = f1_score(self.y_true, y_pred_)
     return score
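
A sketch of driving this threshold search end to end; since f1_score is a score, maximize is the natural direction, and searcher stands in for an instance of the class above (an assumption):

import optuna

study = optuna.create_study(direction="maximize")
study.optimize(searcher._objective, n_trials=100)
best_threshold = study.best_params["threshold"]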