Example #1
    def _preprocess(self, trial: optuna.trial.Trial) -> None:
        if self.pbar is not None:
            self.pbar.set_description(self.pbar_fmt.format(self.step_name, self.best_score))

        if "lambda_l1" in self.target_param_names:
            self.lgbm_params["lambda_l1"] = trial.suggest_float("lambda_l1", 1e-8, 10.0, log=True)
        if "lambda_l2" in self.target_param_names:
            self.lgbm_params["lambda_l2"] = trial.suggest_float("lambda_l2", 1e-8, 10.0, log=True)
        if "num_leaves" in self.target_param_names:
            tree_depth = self.lgbm_params.get("max_depth", _DEFAULT_TUNER_TREE_DEPTH)
            max_num_leaves = 2 ** tree_depth if tree_depth > 0 else 2 ** _DEFAULT_TUNER_TREE_DEPTH
            self.lgbm_params["num_leaves"] = trial.suggest_int("num_leaves", 2, max_num_leaves)
        if "feature_fraction" in self.target_param_names:
            # `GridSampler` is used to sample the feature_fraction value;
            # the value 1.0 is always among the sampled candidates.
            param_value = min(trial.suggest_float("feature_fraction", 0.4, 1.0 + _EPS), 1.0)
            self.lgbm_params["feature_fraction"] = param_value
        if "bagging_fraction" in self.target_param_names:
            # `TPESampler` is used to sample the bagging_fraction value;
            # the value 1.0 might be sampled.
            param_value = min(trial.suggest_float("bagging_fraction", 0.4, 1.0 + _EPS), 1.0)
            self.lgbm_params["bagging_fraction"] = param_value
        if "bagging_freq" in self.target_param_names:
            self.lgbm_params["bagging_freq"] = trial.suggest_int("bagging_freq", 1, 7)
        if "min_child_samples" in self.target_param_names:
            # `GridSampler` is used to sample the min_child_samples value.
            param_value = trial.suggest_int("min_child_samples", 5, 100)
            self.lgbm_params["min_child_samples"] = param_value
Example #2
    def __call__(self, trial: optuna.trial.Trial) -> float:
        data = TreeDataModule(
            self._filename,
            batch_size=trial.suggest_int("batch_size", 32, 160, 32),
        )
        kwargs = {
            "lstm_size":
            trial.suggest_categorical("lstm_size", [512, 1024, 2048]),
            "dropout_prob":
            trial.suggest_float("dropout", 0.1, 0.5, step=0.1),
            "learning_rate":
            trial.suggest_float("lr", 1e-3, 1e-1, log=True),
            "weight_decay":
            trial.suggest_float("weight_decay", 1e-3, 1e-1, log=True),
        }
        model = RouteDistanceModel(**kwargs)

        gpus = int(torch.cuda.is_available())
        pruning_callback = PyTorchLightningPruningCallback(
            trial, monitor="val_monitor")
        trainer = Trainer(
            gpus=gpus,
            logger=True,  # uses the default TensorBoard logger
            checkpoint_callback=False,
            callbacks=[pruning_callback],  # type: ignore
            max_epochs=EPOCHS,
        )
        trainer.fit(model, datamodule=data)
        return trainer.callback_metrics["val_monitor"].item()
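As a usage sketch: a callable objective like this is handed to a study whose pruner the PyTorchLightningPruningCallback reports to. The class name `Objective` and the filename below are assumptions, not part of the snippet:

import optuna

# Hypothetical driver for the callable objective above.
study = optuna.create_study(direction="minimize", pruner=optuna.pruners.MedianPruner())
study.optimize(Objective("routes.csv"), n_trials=50)
print(study.best_params)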
Example #3
    def objective2(trial: optuna.trial.Trial) -> float:

        p1 = trial.suggest_float("p1", 50, 100,
                                 log=True)  # The range has been changed
        p3 = trial.suggest_float("p3", 0, 9, step=3)
        p5 = trial.suggest_float("p5", 0, 1)

        return p1 + p3 + p5
Example #4
def _objective_func(trial: optuna.trial.Trial) -> float:

    x = trial.suggest_float("x", -1.0, 1.0)
    y = trial.suggest_float("y", 20, 30, log=True)
    z = trial.suggest_categorical("z", (-1.0, 1.0))
    assert isinstance(z, float)
    trial.set_user_attr("my_user_attr", "my_user_attr_value")
    return (x - 2)**2 + (y - 25)**2 + z
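A minimal driver for a plain objective like this one, using only standard Optuna API:

import optuna

study = optuna.create_study(direction="minimize")
study.optimize(_objective_func, n_trials=20)

print(study.best_value)             # smallest (x - 2)**2 + (y - 25)**2 + z seen
print(study.best_params)            # e.g. {"x": ..., "y": ..., "z": ...}
print(study.best_trial.user_attrs)  # {"my_user_attr": "my_user_attr_value"}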
Example #5
def _multiobjective_func(trial: optuna.trial.Trial) -> Tuple[float, float]:

    x = trial.suggest_float("x", low=-10, high=10)
    y = trial.suggest_float("y", low=1, high=10, log=True)
    first_objective = (x - 2)**2 + (y - 25)**2
    second_objective = (x - 2)**3 + (y - 25)**3

    return first_objective, second_objective
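An objective returning a tuple needs a study created with `directions` (plural); there is then no single best trial, only a Pareto front. A minimal sketch:

import optuna

study = optuna.create_study(directions=["minimize", "minimize"])
study.optimize(_multiobjective_func, n_trials=50)

# `best_trials` holds the Pareto-optimal trials.
for t in study.best_trials:
    print(t.number, t.values, t.params)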
Example #6
    def objective0(trial: optuna.trial.Trial) -> float:

        p0 = trial.suggest_float("p0", 0, 10)
        p1 = trial.suggest_float("p1", 1, 10, log=True)
        p2 = trial.suggest_int("p2", 0, 10)
        p3 = trial.suggest_float("p3", 0, 9, step=3)
        p4 = trial.suggest_categorical("p4", ["10", "20", "30"])
        assert isinstance(p4, str)
        return p0 + p1 + p2 + p3 + int(p4)
Example #7
def objective_test_upgrade_distributions(trial: optuna.trial.Trial) -> float:
    x1 = trial.suggest_float("x1", -5, 5)
    x2 = trial.suggest_float("x2", 1e-5, 1e-3, log=True)
    x3 = trial.suggest_float("x3", -6, 6, step=2)
    y1 = trial.suggest_int("y1", 0, 10)
    y2 = trial.suggest_int("y2", 1, 20, log=True)
    y3 = trial.suggest_int("y3", 5, 15, step=3)
    z = cast(float, trial.suggest_categorical("z", [-5, 0, 5]))
    return x1**2 + x2**2 + x3**2 + y1**2 + y2**2 + y3**2 + z**2
Example #8
def _objective_func_long_user_attr(trial: optuna.trial.Trial) -> float:

    x = trial.suggest_float("x", -1.0, 1.0)
    y = trial.suggest_float("y", 20, 30, log=True)
    z = trial.suggest_categorical("z", (-1.0, 1.0))
    assert isinstance(z, float)
    long_str = str(list(range(5000)))
    trial.set_user_attr("my_user_attr", long_str)
    return (x - 2)**2 + (y - 25)**2 + z
Example #9
    def objective1(trial: optuna.trial.Trial) -> float:

        # p0, p2 and p4 are deleted.
        p1 = trial.suggest_float("p1", 1, 10, log=True)
        p3 = trial.suggest_float("p3", 0, 9, step=3)

        # p5 is added.
        p5 = trial.suggest_float("p5", 0, 1)

        return p1 + p3 + p5
Example #10
def _multiobjective_func(trial: optuna.trial.Trial) -> Tuple[float, float]:

    x = trial.suggest_float("x", low=-10, high=10)
    y = trial.suggest_float("y", low=1, high=10, log=True)
    z = trial.suggest_categorical("z", (-1.0, 1.0))
    assert isinstance(z, float)
    first_objective = (x - 2)**2 + (y - 25)**2 + z
    second_objective = (x - 2)**3 + (y - 25)**3 - z

    return first_objective, second_objective
Example #11
def _objective_func(trial: optuna.trial.Trial) -> float:
    u = trial.suggest_int("u", 0, 10, step=2)
    v = trial.suggest_int("v", 1, 10, log=True)
    w = trial.suggest_float("w", -1.0, 1.0, step=0.1)
    x = trial.suggest_float("x", -1.0, 1.0)
    y = trial.suggest_float("y", 20.0, 30.0, log=True)
    z = trial.suggest_categorical("z", (-1.0, 1.0))
    assert isinstance(z, float)
    trial.set_user_attr("my_user_attr", "my_user_attr_value")
    return u + v + w + (x - 2) ** 2 + (y - 25) ** 2 + z
Example #12
def objective(trial: optuna.trial.Trial) -> Tuple[float, float]:
    p0 = trial.suggest_float("p0", -10, 10)
    p1 = trial.suggest_uniform("p1", 3, 5)
    p2 = trial.suggest_loguniform("p2", 0.00001, 0.1)
    p3 = trial.suggest_discrete_uniform("p3", 100, 200, q=5)
    p4 = trial.suggest_int("p4", -20, -15)
    p5 = cast(int, trial.suggest_categorical("p5", [7, 1, 100]))
    p6 = trial.suggest_float("p6", -10, 10, step=1.0)
    p7 = trial.suggest_int("p7", 1, 7, log=True)
    return (
        p0 + p1 + p2,
        p3 + p4 + p5 + p6 + p7,
    )
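`suggest_uniform`, `suggest_loguniform`, and `suggest_discrete_uniform` are legacy spellings that Optuna has since folded into `suggest_float`; inside the same objective the calls above map one-to-one:

p1 = trial.suggest_float("p1", 3, 5)                    # was suggest_uniform
p2 = trial.suggest_float("p2", 0.00001, 0.1, log=True)  # was suggest_loguniform
p3 = trial.suggest_float("p3", 100, 200, step=5)        # was suggest_discrete_uniform(q=5)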
Example #13
def _convert(trial: optuna.trial.Trial,
             template: TuningParametersTemplate) -> Dict[str, Any]:
    result: Dict[str, Any] = {}
    for k, v in template.params_dict.items():
        if isinstance(v, RandInt):
            if v.log and v.q is not None:
                value = trial.suggest_float(name=k, low=0, high=1.0)
                result[k] = uniform_to_integers(
                    value,
                    low=v.low,
                    high=v.high,
                    q=v.q,  # type: ignore
                    log=True,
                    include_high=v.include_high,
                )
            else:
                _high: Any = v.high if v.include_high else v.high - 1
                result[k] = trial.suggest_int(name=k,
                                              low=v.low,
                                              high=_high,
                                              step=v.q,
                                              log=v.log)
        elif isinstance(v, Rand):
            if v.log and v.q is not None:
                value = trial.suggest_float(name=k, low=0, high=1.0)
                result[k] = uniform_to_discrete(
                    value,
                    low=v.low,
                    high=v.high,
                    q=v.q,
                    log=True,
                    include_high=v.include_high,
                )
            else:
                _high = v.high
                if v.q is not None and not v.include_high:
                    _high -= _IGNORABLE_ERROR
                result[k] = trial.suggest_float(name=k,
                                                low=v.low,
                                                high=_high,
                                                step=v.q,
                                                log=v.log)
        elif isinstance(v, TransitionChoice):
            result[k] = v.values[trial.suggest_int(name=k,
                                                   low=0,
                                                   high=len(v.values) - 1)]
        elif isinstance(v, Choice):
            result[k] = trial.suggest_categorical(name=k, choices=v.values)
        else:  # pragma: no cover
            raise NotImplementedError
    return result
Example #14
    def _objective(self, trial: optuna.trial.Trial):
        cv = trial.suggest_int('cv', 2, 2**4)
        opt_params = dict(
            max_depth=trial.suggest_int("max_depth", 2, 2**4),
            learning_rate=trial.suggest_float('learning_rate',
                                              0.001,
                                              1,
                                              step=0.001),
            # n_estimators=trial.suggest_int("n_estimators", 2, 2 ** 10, log=True),
            gamma=trial.suggest_float('gamma', 1e-8, 1, log=True),
            min_child_weight=trial.suggest_float('min_child_weight',
                                                 1e-8,
                                                 2**10,
                                                 log=True),
            subsample=trial.suggest_float('subsample', 0.1, 1),
            colsample_bytree=trial.suggest_float('colsample_bytree', 0.1, 1),
            colsample_bylevel=trial.suggest_float('colsample_bylevel', 0.1, 1),
            reg_alpha=trial.suggest_float('reg_alpha', 1e-8, 10, log=True),
            reg_lambda=trial.suggest_float('reg_lambda', 1e-8, 10, log=True),
        )

        if self.params is not None:
            opt_params.update(self.params)

        clf_oof = XGBClassifierOOF(self.X,
                                   self.y,
                                   params=opt_params,
                                   cv=cv,
                                   feval=self.feval)
        clf_oof.run()

        return clf_oof.oof_score  # todo: f1
Example #15
    def objective(trial: optuna.trial.Trial) -> Tuple[float, float]:
        x = trial.suggest_float("x", 0, 5)

        if trial._trial_id == n:
            assert n in sampler._split_cache
            assert n in sampler._weights_below
        else:
            assert n not in sampler._split_cache
            assert n not in sampler._weights_below

        y = trial.suggest_float("y", 0, 3)
        v0 = 4 * x ** 2 + 4 * y ** 2
        v1 = (x - 5) ** 2 + (y - 5) ** 2
        return v0, v1
Example #16
def objective(trial: optuna.trial.Trial) -> float:

    # We optimize the number of layers, hidden units in each layer and dropouts.
    n_layers = trial.suggest_int("n_layers", 1, 3)
    dropout = trial.suggest_float("dropout", 0.2, 0.5)
    output_dims = [
        trial.suggest_int("n_units_l{}".format(i), 4, 128, log=True)
        for i in range(n_layers)
    ]

    model = LightningNet(dropout, output_dims)
    datamodule = MNISTDataModule(data_dir=DIR, batch_size=BATCHSIZE)

    trainer = pl.Trainer(
        logger=True,
        limit_val_batches=PERCENT_VALID_EXAMPLES,
        checkpoint_callback=False,
        max_epochs=EPOCHS,
        gpus=-1 if torch.cuda.is_available() else None,
        callbacks=[PyTorchLightningPruningCallback(trial, monitor="val_acc")],
    )
    hyperparameters = dict(n_layers=n_layers,
                           dropout=dropout,
                           output_dims=output_dims)
    trainer.logger.log_hyperparams(hyperparameters)
    trainer.fit(model, datamodule=datamodule)

    return trainer.callback_metrics["val_acc"].item()
Example #17
    def _suggest(self, trial: optuna.trial.Trial) -> Suggestion:

        suggestions: Suggestion = dict()
        for name, config in self.api_config.items():
            low, high = config["range"]
            log = config["space"] == "log"

            if config["space"] == "logit":
                assert 0 < low <= high < 1
                low = np.log(low / (1 - low))
                high = np.log(high / (1 - high))

            if config["type"] == "real":
                param = trial.suggest_float(name, low, high, log=log)

            elif config["type"] == "int":
                param = trial.suggest_int(name, low, high, log=log)

            else:
                # TODO(xadrianzetx) Support `suggest_categorical` if benchmark is extended.
                raise RuntimeError("CategoricalDistribution is not supported in bayesmark.")

            suggestions[name] = param if config["space"] != "logit" else 1 / (1 + np.exp(-param))

        return suggestions
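The logit branch maps a (0, 1) range into log-odds space, samples there, and the last line maps the parameter back through the sigmoid. A quick numeric check of that round trip, using only numpy:

import numpy as np

low = 0.1
logit_low = np.log(low / (1 - low))                      # about -2.197
assert abs(1 / (1 + np.exp(-logit_low)) - low) < 1e-12   # sigmoid inverts the logit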
Example #18
def tcn(config: BaseConfig, trial: optuna.trial.Trial) -> pl.LightningModule:
    """Returns a tunable PyTorch lightning tcn module.

    Args:
        config (BaseConfig): the hard-coded configuration.
        trial (optuna.Trial): optuna trial.

    Returns:
        pl.LightningModule: a lightning module.
    """

    training_config = get_training_config(
        lr=trial.suggest_loguniform('lr', 1e-3, 1e-0),
        weight_decay=trial.suggest_loguniform('weight_decay', 1e-5, 1e-1),
        max_epochs=config.MAX_EPOCHS)

    tcn = TemporalConvNet(training_config=training_config,
                          num_inputs=config.NUM_INPUTS,
                          num_outputs=config.NUM_OUTPUTS,
                          num_hidden=trial.suggest_int('num_hidden', 1, 4),
                          kernel_size=trial.suggest_int('kernel_size', 2, 4),
                          num_layers=trial.suggest_int('num_layers', 1, 2),
                          dropout=trial.suggest_float('dropout', 0.1, 0.3))

    return tcn
Example #19
def mo_objective_test_upgrade(trial: optuna.trial.Trial) -> Tuple[float, float]:
    x = trial.suggest_float("x", -5, 5)
    y = trial.suggest_int("y", 0, 10)
    z = cast(float, trial.suggest_categorical("z", [-5, 0, 5]))
    trial.set_system_attr("a", 0)
    trial.set_user_attr("b", 1)
    return x, x ** 2 + y ** 2 + z ** 2
Example #20
        def objective(trial: optuna.trial.Trial, value: float) -> float:

            trial.set_system_attr(
                optuna.integration._lightgbm_tuner.optimize._STEP_NAME_KEY,
                "step{:.0f}".format(value),
            )
            return trial.suggest_float("x", value, value)
Example #21
    def objective(trial: optuna.trial.Trial) -> float:
        for i in range(N_REPORTS):
            trial.report(i, step=i)

        x = trial.suggest_float("x", -100, 100)
        y = trial.suggest_int("y", -100, 100)
        return x ** 2 + y ** 2
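Reported values only have an effect when a pruner acts on them; the usual idiom pairs `trial.report` with `trial.should_prune`. A minimal sketch of that pattern, reusing the N_REPORTS constant assumed by the example above:

def pruned_objective(trial: optuna.trial.Trial) -> float:
    x = trial.suggest_float("x", -100, 100)
    for step in range(N_REPORTS):
        intermediate = x ** 2 / (step + 1)  # any per-step score
        trial.report(intermediate, step=step)
        if trial.should_prune():
            raise optuna.TrialPruned()
    return x ** 2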
Example #22
def feedforward(config: BaseConfig,
                trial: optuna.trial.Trial) -> pl.LightningModule:
    """Returns a tunable PyTorch lightning feedforward module.

    Args:
        config (BaseConfig): the hard-coded configuration.
        trial (optuna.Trial): optuna trial.

    Returns:
        pl.LightningModule: a lightning module.
    """

    # Assumes FeedForward accepts a training_config, mirroring TemporalConvNet
    # in the tcn example above.
    training_config = get_training_config(
        lr=trial.suggest_loguniform('lr', 1e-3, 1e-0),
        weight_decay=trial.suggest_loguniform('weight_decay', 1e-5, 1e-1),
        max_epochs=config.MAX_EPOCHS)

    model = FeedForward(training_config=training_config,
                        num_inputs=config.NUM_INPUTS,
                        num_outputs=config.NUM_OUTPUTS,
                        num_hidden=trial.suggest_int('num_hidden', 1, 4),
                        num_layers=trial.suggest_int('num_layers', 1, 2),
                        dropout=trial.suggest_float('dropout', 0.0, 0.5),
                        activation=trial.suggest_categorical(
                            'activation', ['relu', 'none']))

    return model
Example #23
def objective(trial: optuna.trial.Trial) -> float:
    num_units = trial.suggest_int("NUM_UNITS", 16, 32)
    dropout_rate = trial.suggest_float("DROPOUT_RATE", 0.1, 0.2)
    optimizer = trial.suggest_categorical("OPTIMIZER", ["sgd", "adam"])

    accuracy = train_test_model(num_units, dropout_rate, optimizer)  # type: ignore
    return accuracy
Example #24
def objective_test_upgrade(trial: optuna.trial.Trial) -> float:
    x = trial.suggest_float("x", -5, 5)  # optuna==0.9.0 does not have suggest_float.
    y = trial.suggest_int("y", 0, 10)
    z = cast(float, trial.suggest_categorical("z", [-5, 0, 5]))
    trial.set_system_attr("a", 0)
    trial.set_user_attr("b", 1)
    trial.report(0.5, step=0)
    return x ** 2 + y ** 2 + z ** 2
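After optimization, the attributes and the reported intermediate value set by this objective can be read back from the stored trial; everything below is standard Optuna API:

study = optuna.create_study()
study.optimize(objective_test_upgrade, n_trials=1)

frozen = study.trials[0]
print(frozen.user_attrs)           # {"b": 1}
print(frozen.intermediate_values)  # {0: 0.5}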
Example #25
def _objective_func(trial: optuna.trial.Trial) -> float:
    x = trial.suggest_uniform("x", -1.0, 1.0)
    y = trial.suggest_loguniform("y", 20.0, 30.0)
    z = trial.suggest_categorical("z", (-1.0, 1.0))
    w = trial.suggest_float("w", -1.0, 1.0, step=0.1)
    assert isinstance(z, float)
    trial.set_user_attr("my_user_attr", "my_user_attr_value")
    return (x - 2)**2 + (y - 25)**2 + z + w
Example #26
    def __call__(self, trial: optuna.trial.Trial):

        classifier_name = trial.suggest_categorical("classifier", ["SVC", "RandomForest"])
        if classifier_name == "SVC":
            svc_c = trial.suggest_float("svc_c", 1e-10, 1e10, log=True)
            classifier_obj = sklearn.svm.SVC(C=svc_c, gamma="auto")
        else:
            rf_max_depth = trial.suggest_int("rf_max_depth", 2, 32, log=True)
            classifier_obj = ensemble.RandomForestClassifier(
                max_depth=rf_max_depth, n_estimators=10
            )

        score = cross_val_score(classifier_obj, self.X, self.y, n_jobs=1, cv=self.cv)
        accuracy = score.mean()
        return accuracy
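Assuming the surrounding class stores X, y, and cv in its constructor (the signature `Objective(X, y, cv)` below is a guess, not shown above), running it follows the usual pattern, with `direction="maximize"` because the objective returns accuracy:

study = optuna.create_study(direction="maximize")
study.optimize(Objective(X, y, cv=5), n_trials=100)
print(study.best_trial.params)  # e.g. {"classifier": "SVC", "svc_c": ...}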
Example #27
    def _objective(self, trial: optuna.trial.Trial):
        cv = trial.suggest_int('cv', 2, 2**4)

        opt_params = dict(
            objective=trial.suggest_categorical("objective",
                                                ["Logloss", "CrossEntropy"]),
            boosting_type=trial.suggest_categorical("boosting_type",
                                                    ["Ordered", "Plain"]),
            bootstrap_type=trial.suggest_categorical(
                "bootstrap_type", ["Bayesian", "Bernoulli", "MVS"]),
            # used_ram_limit="3gb",
            max_depth=trial.suggest_int("max_depth", 2, 2**4),
            learning_rate=trial.suggest_discrete_uniform(
                'learning_rate', 0.001, 1, 0.001),
            # n_estimators=trial.suggest_int("n_estimators", 2, 2 ** 10, log=True),
            colsample_bylevel=trial.suggest_float("colsample_bylevel", 0.01,
                                                  0.1),
            reg_lambda=trial.suggest_float("reg_lambda", 1e-8, 100, log=True))

        if opt_params["bootstrap_type"] == "Bayesian":
            opt_params["bagging_temperature"] = trial.suggest_float(
                "bagging_temperature", 0, 10)
        elif opt_params["bootstrap_type"] == "Bernoulli":
            opt_params["subsample"] = trial.suggest_float("subsample", 0.1, 1)

        if self.params is not None:
            opt_params.update(self.params)

        clf_oof = CatBoostClassifierOOF(self.X,
                                        self.y,
                                        params=opt_params,
                                        cv=cv,
                                        feval=self.feval)
        clf_oof.run()

        return clf_oof.oof_score  # todo: f1
Example #28
    def _objective(self, trial: optuna.trial.Trial):
        cv = trial.suggest_int('cv', 2, 2**4)
        opt_params = dict(
            num_leaves=trial.suggest_int("num_leaves", 2, 2**8),
            learning_rate=trial.suggest_float('learning_rate',
                                              0.001,
                                              1,
                                              step=0.001),
            min_child_samples=trial.suggest_int('min_child_samples', 2, 2**8),
            min_child_weight=trial.suggest_float('min_child_weight',
                                                 1e-8,
                                                 1,
                                                 log=True),
            min_split_gain=trial.suggest_float('min_split_gain',
                                               1e-8,
                                               1,
                                               log=True),
            bagging_fraction=trial.suggest_float('bagging_fraction', 0.4, 1),
            bagging_freq=trial.suggest_int("bagging_freq", 0, 2**4),
            feature_fraction=trial.suggest_float('feature_fraction', 0.4, 1),
            lambda_l1=trial.suggest_float('lambda_l1', 1e-8, 10, log=True),
            lambda_l2=trial.suggest_float('lambda_l2', 1e-8, 10, log=True),
        )

        if self.params is not None:
            opt_params.update(self.params)

        cv_result = lgb.cv(opt_params,
                           self.dtrain,
                           num_boost_round=10000,
                           nfold=cv,
                           stratified='reg' not in opt_params.get(
                               'application',
                               opt_params.get('objective', 'reg')),
                           feval=None,
                           early_stopping_rounds=100,
                           verbose_eval=100,
                           show_stdv=False,
                           seed=0,
                           eval_train_metric=False)

        score = -1
        self.best_num_boost_round = 0
        for key in cv_result:
            if 'mean' in key:
                _ = cv_result[key]
                score = _[-1]
                self.best_num_boost_round = len(_)

        print(
            f'CV Score: {score if score != -1 else "cv_result does not contain a mean metric"}'
        )

        return score
Example #29
def define_model(trial: optuna.trial.Trial) -> nn.Sequential:
    n_layers = trial.suggest_int("n_layers", 1, 3)
    dropout = trial.suggest_float("dropout", 0.2, 0.5)
    input_dim = 28 * 28
    layers = [nn.Flatten()]
    for i in range(n_layers):
        output_dim = trial.suggest_int("n_units_l{}".format(i),
                                       4,
                                       128,
                                       log=True)
        layers.append(nn.Linear(input_dim, output_dim))
        layers.append(nn.ReLU())
        layers.append(nn.Dropout(dropout))

        input_dim = output_dim
    layers.append(nn.Linear(input_dim, CLASSES))

    return nn.Sequential(*layers)
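Because the number of `n_units_l{i}` parameters depends on `n_layers`, this search space is dynamic; `optuna.trial.FixedTrial` is a convenient way to materialize one concrete model outside a study. A sketch with assumed parameter values:

params = {"n_layers": 2, "dropout": 0.3, "n_units_l0": 64, "n_units_l1": 32}
model = define_model(optuna.trial.FixedTrial(params))
print(model)  # Flatten -> Linear(784, 64) -> ReLU -> Dropout -> Linear(64, 32) -> ... -> Linear(32, CLASSES)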
Example #30
def _objective(trial: optuna.trial.Trial) -> float:

    p0 = trial.suggest_float("p0", -3.3, 5.2)
    p1 = trial.suggest_float("p1", 2.0, 2.0)
    p2 = trial.suggest_float("p2", 0.0001, 0.3, log=True)
    p3 = trial.suggest_float("p3", 1.1, 1.1, log=True)
    p4 = trial.suggest_int("p4", -100, 8)
    p5 = trial.suggest_int("p5", -20, -20)
    p6 = trial.suggest_float("p6", 10, 20, step=2)
    p7 = trial.suggest_float("p7", 0.1, 1.0, step=0.1)
    p8 = trial.suggest_float("p8", 2.2, 2.2, step=0.5)
    p9 = trial.suggest_categorical("p9", ["9", "3", "0", "8"])
    assert isinstance(p9, str)

    return p0 + p1 + p2 + p3 + p4 + p5 + p6 + p7 + p8 + int(p9)
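Several of the ranges above are deliberately degenerate (`p1` on [2.0, 2.0], `p5` on [-20, -20], `p8` where the step fits only one point): a single-point distribution always returns the same value, a common trick for pinning a parameter while still recording it in `trial.params`. A quick check:

study = optuna.create_study()
study.optimize(_objective, n_trials=3)
assert all(t.params["p1"] == 2.0 for t in study.trials)
assert all(t.params["p5"] == -20 for t in study.trials)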