Example #1

def sample_a2c_params(trial: optuna.Trial) -> Dict[str, Any]:
    """
    Sampler for A2C hyperparams.

    :param trial: Optuna trial object
    :return: dictionary of sampled A2C hyperparameters
    """
    gamma = trial.suggest_categorical("gamma", [0.9, 0.95, 0.98, 0.99, 0.995, 0.999, 0.9999])
    normalize_advantage = trial.suggest_categorical("normalize_advantage", [False, True])
    max_grad_norm = trial.suggest_categorical("max_grad_norm", [0.3, 0.5, 0.6, 0.7, 0.8, 0.9, 1, 2, 5])
    use_rms_prop = trial.suggest_categorical("use_rms_prop", [False, True])
    gae_lambda = trial.suggest_categorical("gae_lambda", [0.8, 0.9, 0.92, 0.95, 0.98, 0.99, 1.0])
    n_steps = trial.suggest_categorical("n_steps", [8, 16, 32, 64, 128, 256, 512, 1024, 2048])
    lr_schedule = trial.suggest_categorical("lr_schedule", ["linear", "constant"])
    learning_rate = trial.suggest_loguniform("lr", 1e-5, 1)
    ent_coef = trial.suggest_loguniform("ent_coef", 1e-8, 0.1)
    vf_coef = trial.suggest_uniform("vf_coef", 0, 1)
    log_std_init = trial.suggest_uniform("log_std_init", -4, 1)
    ortho_init = trial.suggest_categorical("ortho_init", [False, True])
    net_arch = trial.suggest_categorical("net_arch", ["small", "medium"])
    # sde_net_arch = trial.suggest_categorical("sde_net_arch", [None, "tiny", "small"])
    # full_std = trial.suggest_categorical("full_std", [False, True])
    # activation_fn = trial.suggest_categorical('activation_fn', ['tanh', 'relu', 'elu', 'leaky_relu'])
    activation_fn = trial.suggest_categorical("activation_fn", ["tanh", "relu"])

    if lr_schedule == "linear":
        learning_rate = linear_schedule(learning_rate)

    net_arch = {
        "small": [dict(pi=[64, 64], vf=[64, 64])],
        "medium": [dict(pi=[256, 256], vf=[256, 256])],
    }[net_arch]

    # sde_net_arch = {
    #     None: None,
    #     "tiny": [64],
    #     "small": [64, 64],
    # }[sde_net_arch]

    activation_fn = {"tanh": nn.Tanh, "relu": nn.ReLU, "elu": nn.ELU, "leaky_relu": nn.LeakyReLU}[activation_fn]

    return {
        "n_steps": n_steps,
        "gamma": gamma,
        "gae_lambda": gae_lambda,
        "learning_rate": learning_rate,
        "ent_coef": ent_coef,
        "normalize_advantage": normalize_advantage,
        "max_grad_norm": max_grad_norm,
        "use_rms_prop": use_rms_prop,
        "vf_coef": vf_coef,
        "policy_kwargs": dict(
            log_std_init=log_std_init,
            net_arch=net_arch,
            # full_std=full_std,
            activation_fn=activation_fn,
            # sde_net_arch=sde_net_arch,
            ortho_init=ortho_init,
        ),
    }
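
Both this sampler and the PPO one below call linear_schedule, which is not shown here. A minimal sketch of what it presumably looks like, following the Stable-Baselines3 convention that a schedule maps the remaining training progress (1.0 down to 0.0) to a value:

def linear_schedule(initial_value: float):
    """Return a schedule that decays linearly from initial_value to 0."""
    def schedule(progress_remaining: float) -> float:
        # progress_remaining goes from 1.0 (start of training) to 0.0 (end)
        return progress_remaining * initial_value
    return schedule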

Example #2

def optimal_params(trial: optuna.Trial):
    params = {}
    penalty = trial.suggest_categorical("penalty", ["l1", "l2"])
    params["penalty"] = penalty

    tol = trial.suggest_uniform("tol", 1e-5, 1e-3)
    params["tol"] = tol
    C = trial.suggest_uniform("C", 1e-4, 10)
    params["C"] = C
    params["random_state"] = 42
    if penalty == "l2":
        solver = trial.suggest_categorical(
            "solver", ["newton-cg", "lbfgs", "sag", "saga"])
        params["solver"] = solver
    else:
        params["solver"] = "saga"
    params["max_iter"] = 10000
    params["multi_class"] = "auto"

    oof_preds = np.zeros((X.shape[0], ))
    fold = StratifiedKFold(n_splits=5, shuffle=True, random_state=42)
    for (trn_index, val_index) in fold.split(X, y):
        X_train, X_val = X[trn_index], X[val_index]
        y_train = y[trn_index]
        model = LogisticRegression(**params)
        model.fit(X_train, y_train)
        y_pred = model.predict(X_val)
        oof_preds[val_index] = y_pred
    f1 = f1_score(y, oof_preds, average="macro")
    return 1.0 - f1
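
A usage sketch for the objective above, assuming optuna, numpy, scikit-learn, and the module-level globals X and y it references are already in scope:

study = optuna.create_study(direction="minimize")
study.optimize(optimal_params, n_trials=100)
print(study.best_params)  # best LogisticRegression settings found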

Example #3

def sample_ppo_params(trial: optuna.Trial) -> Dict[str, Any]:
    """
    Sampler for PPO hyperparams.

    :param trial: Optuna trial object
    :return: dictionary of sampled PPO hyperparameters
    """
    batch_size = trial.suggest_categorical("batch_size", [8, 16, 32, 64, 128, 256, 512])
    n_steps = trial.suggest_categorical("n_steps", [8, 16, 32, 64, 128, 256, 512, 1024, 2048])
    gamma = trial.suggest_categorical("gamma", [0.9, 0.95, 0.98, 0.99, 0.995, 0.999, 0.9999])
    learning_rate = trial.suggest_loguniform("lr", 1e-5, 1)
    lr_schedule = "constant"
    # lr_schedule = trial.suggest_categorical('lr_schedule', ['linear', 'constant'])
    ent_coef = trial.suggest_loguniform("ent_coef", 1e-8, 0.1)
    clip_range = trial.suggest_categorical("clip_range", [0.1, 0.2, 0.3, 0.4])
    n_epochs = trial.suggest_categorical("n_epochs", [1, 5, 10, 20])
    gae_lambda = trial.suggest_categorical("gae_lambda", [0.8, 0.9, 0.92, 0.95, 0.98, 0.99, 1.0])
    max_grad_norm = trial.suggest_categorical("max_grad_norm", [0.3, 0.5, 0.6, 0.7, 0.8, 0.9, 1, 2, 5])
    vf_coef = trial.suggest_uniform("vf_coef", 0, 1)
    net_arch = trial.suggest_categorical("net_arch", ["small", "medium"])
    log_std_init = trial.suggest_uniform("log_std_init", -4, 1)
    sde_sample_freq = trial.suggest_categorical("sde_sample_freq", [-1, 8, 16, 32, 64, 128, 256])
    ortho_init = False
    # ortho_init = trial.suggest_categorical('ortho_init', [False, True])
    # activation_fn = trial.suggest_categorical('activation_fn', ['tanh', 'relu', 'elu', 'leaky_relu'])
    activation_fn = trial.suggest_categorical("activation_fn", ["tanh", "relu"])

    # TODO: account for the number of parallel envs when capping batch_size
    if batch_size > n_steps:
        batch_size = n_steps

    if lr_schedule == "linear":
        learning_rate = linear_schedule(learning_rate)

    net_arch = {
        "small": [dict(pi=[64, 64], vf=[64, 64])],
        "medium": [dict(pi=[256, 256], vf=[256, 256])],
    }[net_arch]

    activation_fn = {"tanh": nn.Tanh, "relu": nn.ReLU, "elu": nn.ELU, "leaky_relu": nn.LeakyReLU}[activation_fn]

    return {
        "n_steps": n_steps,
        "batch_size": batch_size,
        "gamma": gamma,
        "learning_rate": learning_rate,
        "ent_coef": ent_coef,
        "clip_range": clip_range,
        "n_epochs": n_epochs,
        "gae_lambda": gae_lambda,
        "max_grad_norm": max_grad_norm,
        "vf_coef": vf_coef,
        "sde_sample_freq": sde_sample_freq,
        "policy_kwargs": dict(
            log_std_init=log_std_init,
            net_arch=net_arch,
            activation_fn=activation_fn,
            ortho_init=ortho_init,
        ),
    }
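
A hedged sketch of how such a sampler is typically wired into an Optuna objective, assuming a Gym environment env and PPO from stable_baselines3:

from stable_baselines3 import PPO
from stable_baselines3.common.evaluation import evaluate_policy

def objective(trial: optuna.Trial) -> float:
    model = PPO("MlpPolicy", env, **sample_ppo_params(trial))
    model.learn(total_timesteps=50_000)
    mean_reward, _ = evaluate_policy(model, env, n_eval_episodes=10)
    return mean_reward  # maximize with optuna.create_study(direction="maximize")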

Example #4

    def objective(self, trial: Trial) -> float:
        """
        Optuna optimization method.

        Parameters
        ----------
        trial : optuna.Trial
            An optuna trial object.

        Returns
        -------
        float
            The performance evaluation metric value for a single trial.
        """
        suggest: Dict[str, float] = {
            "vect__max_df":
            trial.suggest_uniform(
                name="vect__max_df",
                low=self.hparams["vectorizer_hparams"]["max_df"][0],
                high=self.hparams["vectorizer_hparams"]["max_df"][1],
            ),
            "decomp__doc_topic_prior":
            trial.suggest_loguniform(
                name="decomp__doc_topic_prior",
                low=self.hparams["lda_hparams"]["alpha"][0],
                high=self.hparams["lda_hparams"]["alpha"][1],
            ),
            "decomp__topic_word_prior":
            trial.suggest_loguniform(
                name="decomp__topic_word_prior",
                low=self.hparams["lda_hparams"]["beta"][0],
                high=self.hparams["lda_hparams"]["beta"][1],
            ),
            "decomp__n_components":
            trial.suggest_int(
                name="decomp__n_components",
                low=self.hparams["lda_hparams"]["num_topics"][0],
                high=self.hparams["lda_hparams"]["num_topics"][1],
            ),
            "decomp__max_iter":
            trial.suggest_int(
                name="decomp__max_iter",
                low=self.hparams["lda_hparams"]["iterations"][0],
                high=self.hparams["lda_hparams"]["iterations"][1],
            ),
            "decomp__learning_decay":
            trial.suggest_uniform(
                name="decomp__learning_decay",
                low=self.hparams["lda_hparams"]["decay"][0],
                high=self.hparams["lda_hparams"]["decay"][1],
            ),
            "decomp__learning_offset":
            trial.suggest_float(
                name="decomp__learning_offset",
                low=self.hparams["lda_hparams"]["offset"][0],
                high=self.hparams["lda_hparams"]["offset"][1],
            ),
        }
        est: Pipeline = self.pipeline.set_params(**suggest).fit(self.X)
        return coherence(pipeline=est, X=self.X)

Example #5

def rf_from_trial(trial: optuna.Trial):
    max_leaf_nodes = trial.suggest_categorical('max_leaf_nodes_type',
                                               ['unlimited', 'limited'])
    if max_leaf_nodes == 'unlimited':
        max_leaf_nodes = None
    else:
        max_leaf_nodes = trial.suggest_int('max_leaf_nodes', 2, 1000)  # scikit-learn requires max_leaf_nodes > 1
    params = {
        'n_estimators':
        trial.suggest_int('n_estimators', 10, 300),
        'criterion':
        trial.suggest_categorical('criterion', ['mse', 'mae']),
        'min_samples_split':
        trial.suggest_uniform('min_samples_split', 0., 1.),
        'min_samples_leaf':
        trial.suggest_uniform('min_samples_leaf', 0., .5),
        'max_features':
        trial.suggest_categorical('max_features',
                                  ['auto', 'log2', 'sqrt', None]),
        'max_leaf_nodes':
        max_leaf_nodes,
        'random_state':
        trial.suggest_int('random_state', 0, 999999),
        'n_jobs':
        -1,
        'verbose':
        1
    }
    return RandomForestRegressor(**params)
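
A usage sketch, assuming training data X, y are in scope:

from sklearn.model_selection import cross_val_score

def objective(trial: optuna.Trial) -> float:
    model = rf_from_trial(trial)
    score = cross_val_score(model, X, y, cv=3,
                            scoring="neg_mean_squared_error").mean()
    return -score  # Optuna minimizes by default

study = optuna.create_study()
study.optimize(objective, n_trials=50)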

Example #6

def objective(trial: optuna.Trial, hparams: Namespace):
    """Return the objective loss for an optuna trial.

    Args:
        trial (optuna.Trial): The optuna trial.
        hparams (Namespace): The argparse `Namespace` that will be passed to
        the `LightningModule`.
    """
    hparams.loss = trial.suggest_categorical(
        'loss', ['CrossEntropy', 'TripletMargin', 'ArcFace'])

    if hparams.loss == 'ArcFace':
        hparams.loss_margin = trial.suggest_uniform('loss_margin', 0.3, 0.9)
        hparams.sampler = None
    elif hparams.loss == 'TripletMargin':
        hparams.loss_margin = trial.suggest_uniform('loss_margin', 0, 0.2)
        hparams.sampler = 'MPerClass'
        hparams.m_per_class = 5
        hparams.miner = trial.suggest_categorical('miner', ['BatchHard', None])

    hparams.optim = 'SGD'
    hparams.lr = trial.suggest_loguniform('lr', 1e-8, 1e0)

    max_steps = 10000  # OneCycleLR expects an integer step count

    hparams.lr_sched = trial.suggest_categorical('lr_sched',
                                                 [None, 'OneCycleLR'])
    if hparams.lr_sched == 'OneCycleLR':
        hparams.lr_sched_total_steps = max_steps
        hparams.lr_sched_max_lr = 10 * hparams.lr

    hparams.use_sample_data = True

Example #7

def sample_dqn_params(trial: optuna.Trial) -> Dict[str, Any]:
    """
    Sampler for DQN hyperparams.

    :param trial: Optuna trial object
    :return: dictionary of sampled DQN hyperparameters
    """
    gamma = trial.suggest_categorical(
        "gamma", [0.9, 0.95, 0.98, 0.99, 0.995, 0.999, 0.9999])
    learning_rate = trial.suggest_loguniform("learning_rate", 1e-5, 1)
    batch_size = trial.suggest_categorical("batch_size",
                                           [16, 32, 64, 100, 128, 256, 512])
    buffer_size = trial.suggest_categorical(
        "buffer_size",
        [int(1e4), int(5e4), int(1e5), int(1e6)])
    exploration_final_eps = trial.suggest_uniform("exploration_final_eps", 0,
                                                  0.2)
    exploration_fraction = trial.suggest_uniform("exploration_fraction", 0,
                                                 0.5)
    target_update_interval = trial.suggest_categorical(
        "target_update_interval", [1, 1000, 5000, 10000, 15000, 20000])
    learning_starts = trial.suggest_categorical("learning_starts",
                                                [0, 1000, 5000, 10000, 20000])

    train_freq = trial.suggest_categorical("train_freq",
                                           [1, 4, 8, 16, 128, 256, 1000])
    subsample_steps = trial.suggest_categorical("subsample_steps",
                                                [1, 2, 4, 8])
    gradient_steps = max(train_freq // subsample_steps, 1)

    net_arch = trial.suggest_categorical("net_arch",
                                         ["tiny", "small", "medium"])

    net_arch = {
        "tiny": [64],
        "small": [64, 64],
        "medium": [256, 256]
    }[net_arch]

    hyperparams = {
        "gamma": gamma,
        "learning_rate": learning_rate,
        "batch_size": batch_size,
        "buffer_size": buffer_size,
        "train_freq": train_freq,
        "gradient_steps": gradient_steps,
        "exploration_fraction": exploration_fraction,
        "exploration_final_eps": exploration_final_eps,
        "target_update_interval": target_update_interval,
        "learning_starts": learning_starts,
        "policy_kwargs": dict(net_arch=net_arch),
    }

    if trial.using_her_replay_buffer:
        hyperparams = sample_her_params(trial, hyperparams)

    return hyperparams
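
sample_her_params and the trial.using_her_replay_buffer flag are defined elsewhere in the tuning script. A plausible sketch of the former, modeled on the HER replay-buffer options in Stable-Baselines3 (the exact keys are an assumption):

def sample_her_params(trial: optuna.Trial, hyperparams: Dict[str, Any]) -> Dict[str, Any]:
    """Augment hyperparams with sampled HER replay-buffer settings (assumed keys)."""
    hyperparams["replay_buffer_kwargs"] = dict(
        n_sampled_goal=trial.suggest_int("n_sampled_goal", 1, 5),
        goal_selection_strategy=trial.suggest_categorical(
            "goal_selection_strategy", ["final", "episode", "future"]),
        online_sampling=trial.suggest_categorical("online_sampling", [True, False]),
    )
    return hyperparams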

Example #8

    def obtem_metodo(self, trial: optuna.Trial):
        exp_cost = trial.suggest_uniform('exp_cost', 0, 7)
        gamma = trial.suggest_uniform('gamma', 0.0001, 1)

        C = 2**exp_cost

        scikit_method = svm.SVC(C=C, gamma=gamma, kernel='rbf', random_state=2)

        return self.metodo(scikit_method)

Example #9

    def objective(self, trial: Trial) -> float:
        suggest: Dict[str, Union[float, int, str]] = {
            "objective":
            "reg:squarederror",  # xgboost v0.90
            "tree_method":
            self.tree_method,
            "n_estimators":
            trial.suggest_int("n_estimators", self.params["n_estimators"][0],
                              self.params["n_estimators"][1]),
            "reg_alpha":
            trial.suggest_loguniform("reg_alpha", self.params["alpha"][0],
                                     self.params["alpha"][1]),
            "reg_lambda":
            trial.suggest_loguniform("reg_lambda", self.params["lambda"][0],
                                     self.params["lambda"][1]),
            "learning_rate":
            trial.suggest_loguniform("learning_rate",
                                     self.params["learning_rate"][0],
                                     self.params["learning_rate"][1]),
            "max_depth":
            trial.suggest_int("max_depth", self.params["max_depth"][0],
                              self.params["max_depth"][1]),
            "min_child_weight":
            trial.suggest_int(
                "min_child_weight",
                self.params["min_child_weight"][0],
                self.params["min_child_weight"][1],
            ),
            "gamma":
            trial.suggest_loguniform("gamma", self.params["gamma"][0],
                                     self.params["gamma"][1]),
            "subsample":
            trial.suggest_uniform("subsample", self.params["subsample"][0],
                                  self.params["subsample"][1]),
            "colsample_bytree":
            trial.suggest_uniform("colsample_bytree",
                                  self.params["colsample"][0],
                                  self.params["colsample"][1]),
            "colsample_bylevel":
            trial.suggest_uniform("colsample_bylevel",
                                  self.params["colsample"][0],
                                  self.params["colsample"][1]),
            "colsample_bynode":
            trial.suggest_uniform("colsample_bynode",
                                  self.params["colsample"][0],
                                  self.params["colsample"][1]),
            "grow_policy":
            trial.suggest_categorical("grow_policy",
                                      ["depthwise", "lossguide"]),
        }
        est: BaseEstimator = self.model.__class__(**suggest)
        return -cross_val_score(estimator=est,
                                X=self.X,
                                y=self.y,
                                cv=self.cv,
                                scoring="neg_mean_squared_error").mean()

Example #10

def objective(trial: optuna.Trial):
    s, p = [], []
    for i in range(8):
        s.append(trial.suggest_uniform("s" + str(i), -0.05, 0.2))
        p.append(trial.suggest_uniform("p" + str(i), -0.2, 0.05))
    a = trial.suggest_uniform("a", -5, 10)
    cd, cl = xfoil.cd_cl(vlab.nurbs(s, p), a)
    if cd is None or cl is None:
        cl, cd = 0, 1
    return -cl / cd  # (cd+1)*(abs(cl-0.6)+1)

Example #11

def objective(trial: optuna.Trial):
    m = trial.suggest_uniform("m", 0, 10)
    p = trial.suggest_uniform("p", 0, 10)
    t = trial.suggest_uniform("t", 0, 20)
    a = trial.suggest_uniform("a", -5, 10)
    cd, cl = xfoil.cd_cl(vlab.naca4(m, p, t), a)
    if cd is None or cl is None:
        cl, cd = 0, 1

    return -cl / cd  # (cd+1)*(abs(cl-0.6)+1)

Example #12

    def obtem_metodo(self, trial: optuna.Trial) -> MetodoAprendizadoDeMaquina:
        min_samples_split = trial.suggest_uniform('min_samples_split', 0, 0.5)
        max_features = trial.suggest_uniform('max_features', 0, 0.5)
        num_arvores = trial.suggest_int('num_arvores', 1, 5)
        clf_rf = RandomForestClassifier(n_estimators=num_arvores,
                                        min_samples_split=min_samples_split,
                                        max_features=max_features,
                                        random_state=2)

        return MetodoCompeticao(clf_rf)

Example #13

    def obtem_metodo(self, trial: optuna.Trial) -> MetodoAprendizadoDeMaquina:
        # Activity 4: complete this method
        # To pass the tests, the parameters must be named "min_samples_split",
        # "max_features" and "num_arvores". Do not change the assignment
        # order below.
        min_samples = trial.suggest_uniform('min_samples_split', 0, 0.5)
        max_features = trial.suggest_uniform('max_features', 0, 0.5)
        num_arvores = trial.suggest_int('num_arvores', 1, self.num_arvores_max)
        # Instantiate the RandomForestClassifier with random_state=2.
        clf_rf = RandomForestClassifier(random_state=2,
                                        n_estimators=num_arvores,
                                        min_samples_split=min_samples,
                                        max_features=max_features)

        return ScikitLearnAprendizadoDeMaquina(clf_rf)

Example #14

        def optuna_objective(trial: optuna.Trial):
            alphas, gammas = [], []
            for idx in range(self._depth):
                alpha = trial.suggest_uniform(f'alpha_{idx}', 0.0, 2 * math.pi)
                gamma = trial.suggest_uniform(f'gamma_{idx}', 0.0, math.pi)

                alphas.append(alpha)
                gammas.append(gamma)

            states = self._run_experiment(alphas, gammas)
            cost = self._cost_function(states)

            return cost

Example #15

    def _make_cate_predictions(self, trial: optuna.Trial,
                               i: int) -> np.ndarray:
        """Make predictions of CATE by a sampled set of hyperparameters."""
        # hyperparameters
        # for control model
        eta_con = trial.suggest_loguniform('eta_control', 1e-5, 1e-1)
        min_leaf_con = trial.suggest_int('min_samples_leaf_control', 1, 20)
        max_depth_con = trial.suggest_int('max_depth_control', 1, 20)
        subsample_con = trial.suggest_uniform('sub_sample_control', 0.1, 1.0)
        control_params = {
            'n_estimators': 100,
            'learning_rate': eta_con,
            'min_samples_leaf': min_leaf_con,
            'max_depth': max_depth_con,
            'subsample': subsample_con,
            'random_state': 12345
        }
        # for treated model
        eta_trt = trial.suggest_loguniform('eta_treat', 1e-5, 1e-1)
        min_leaf_trt = trial.suggest_int('min_samples_leaf_treat', 1, 20)
        max_depth_trt = trial.suggest_int('max_depth_treat', 1, 20)
        subsample_trt = trial.suggest_uniform('sub_sample_treat', 0.1, 1.0)
        treated_params = {
            'n_estimators': 100,
            'learning_rate': eta_trt,
            'min_samples_leaf': min_leaf_trt,
            'max_depth': max_depth_trt,
            'subsample': subsample_trt,
            'random_state': 12345
        }
        # for overall model
        eta_ova = trial.suggest_loguniform('eta_overall', 1e-5, 1e-1)
        min_leaf_ova = trial.suggest_int('min_samples_leaf_overall', 1, 20)
        max_depth_ova = trial.suggest_int('max_depth_overall', 1, 20)
        subsample_ova = trial.suggest_uniform('sub_sample_overall', 0.1, 1.0)
        overall_params = {
            'n_estimators': 100,
            'learning_rate': eta_ova,
            'min_samples_leaf': min_leaf_ova,
            'max_depth': max_depth_ova,
            'subsample': subsample_ova,
            'random_state': 12345
        }
        # define DAL model
        meta_learner = DAL(controls_model=GBR(**control_params),
                           treated_model=GBR(**treated_params),
                           overall_model=GBR(**overall_params))
        meta_learner.fit(X=self.Xtr[i], T=self.Ttr[i], Y=self.Ytr[i])
        return meta_learner.effect(X=self.Xval[i])

Example #16

def objective(trial: optuna.Trial,
              imgs,
              labels,
              skip_pr=True,
              iou_thresh=None) -> float:

    # perform detection
    _, boxes = locate_multiple_modules(
        imgs,
        return_bounding_boxes=True,
        padding=0.0,
        scale=trial.suggest_uniform("scale", 0.2, 1.0) if isinstance(
            trial, optuna.Trial) else trial.params["scale"],
        reject_size_thresh=trial.suggest_uniform(
            "reject_size_thresh", 0.0, 1.0) if isinstance(
                trial, optuna.Trial) else trial.params["reject_size_thresh"],
        reject_fill_thresh=trial.suggest_uniform(
            "reject_fill_thresh", 0.0, 1.0) if isinstance(trial, optuna.Trial)
        else trial.params["reject_fill_thresh"],
    )

    # perform per image evaluation
    iou_per_image = dict()
    precision_per_image = dict()
    recall_per_image = dict()
    for img in imgs:
        label = labels[img.path.name]
        if img.path in boxes.keys():
            pred = boxes[img.path]
            (
                iou_per_image[img.path.name],
                precision_per_image[img.path.name],
                recall_per_image[img.path.name],
            ) = objdetect_metrics([polygon2boundingbox(x[1]) for x in label],
                                  pred, iou_thresh)
        else:
            # we assume that every image should have at least one object
            iou_per_image[img.path.name] = 0.0
            precision_per_image[img.path.name] = 0.0
            recall_per_image[img.path.name] = 0.0

    if skip_pr:
        return np.mean(list(iou_per_image.values()))
    else:
        return (
            np.mean(list(iou_per_image.values())),
            np.mean(list(precision_per_image.values()), axis=0),
            np.mean(list(recall_per_image.values()), axis=0),
        )

Example #17

def get_boosting_parameter_suggestions(trial: Trial) -> dict:
    """
    Get parameter sample for Boosting (like XGBoost, LightGBM)

    Args:
        trial(trial.Trial): Optuna trial object used to sample the parameters

    Returns:
        dict: parameter sample generated by trial obj
    """
    return {
        # L2 regularization
        'reg_lambda': trial.suggest_loguniform('reg_lambda', 1e-3, 1e3),
        # L1 regularization
        'reg_alpha': trial.suggest_loguniform('reg_alpha', 1e-3, 1e3),
        # fraction of features used by each weak learner (tree)
        # with 0.5, half of the features are picked up front and the tree is grown within that subset
        'colsample_bytree': trial.suggest_loguniform('colsample_bytree', .5, 1.),
        # fraction of the training data to use
        # samples along the row axis, as opposed to colsample
        'subsample': trial.suggest_loguniform('subsample', .5, 1.),
        # maximum depth of each tree
        # e.g. with 5, the branching of each weak learner is limited to a depth of at most 5
        'max_depth': trial.suggest_int('max_depth', low=3, high=8),
        # minimum sample weight required in a leaf node
        # splits that would fall below it are not made, so large values only allow splits along broad overall trends
        # [NOTE]: this is a count, so note that it depends on the size of the dataset
        'min_child_weight': trial.suggest_uniform('min_child_weight', low=.5, high=40)
    }
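
The keys above are shared between XGBoost and LightGBM, so the dict can be splatted into either estimator. A hedged usage sketch with LightGBM, assuming X and y are in scope:

import lightgbm as lgb
from sklearn.model_selection import cross_val_score

def objective(trial: Trial) -> float:
    params = get_boosting_parameter_suggestions(trial)
    model = lgb.LGBMRegressor(n_estimators=500, **params)
    return -cross_val_score(model, X, y, cv=3,
                            scoring="neg_mean_squared_error").mean()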

Example #18

    def objective(trial: Trial) -> float:
        trial.set_user_attr('method', 'sdne')
        classifier = classifier_type
        if classifier is None:
            classifier = trial.suggest_categorical('classifier',
                                                   ['SVM', 'EN', 'RF', 'LR'])
        else:
            trial.set_user_attr('classifier', classifier)

        alpha = trial.suggest_uniform('alpha', 0, 0.4)
        beta = trial.suggest_int('beta', 0, 30)
        epochs = trial.suggest_categorical('epochs', [5, 10, 15, 20, 25, 30])

        # Set the inner trial seed
        _set_trial_seed(trial)

        model = embed_train.train_embed_sdne(
            train_graph_filename=train_graph_filename,
            alpha=alpha,
            beta=beta,
            epochs=epochs,
            weighted=weighted,
        )
        return predict_and_evaluate(
            prediction_task=prediction_task,
            model=model,
            graph=graph,
            graph_train=graph_train,
            testing_pos_edges=testing_pos_edges,
            seed=study_seed,
            trial=trial,
            labels=labels,
            node_list=node_list,
            classifier_type=classifier,
        )

Example #19

    def _suggest(self, trial: optuna.Trial, v: problem.Var) -> float:
        if v.name in trial.params:
            if isinstance(trial.params[v.name], str):
                assert isinstance(v.range, problem.CategoricalRange)
                return v.range.choices.index(trial.params[v.name])
            else:
                return trial.params[v.name]

        if isinstance(v.range, problem.ContinuousRange):
            if v.distribution == problem.Distribution.UNIFORM:
                return trial.suggest_uniform(v.name, v.range.low, v.range.high)
            elif v.distribution == problem.Distribution.LOG_UNIFORM:
                return trial.suggest_loguniform(v.name, v.range.low,
                                                v.range.high)
        elif isinstance(v.range, problem.DiscreteRange):
            if self._use_discrete_uniform:
                return trial.suggest_discrete_uniform(v.name,
                                                      v.range.low,
                                                      v.range.high - 1,
                                                      q=1)
            elif v.distribution == problem.Distribution.LOG_UNIFORM:
                return trial.suggest_int(v.name,
                                         v.range.low,
                                         v.range.high - 1,
                                         log=True)
            else:
                return trial.suggest_int(v.name, v.range.low, v.range.high - 1)
        elif isinstance(v.range, problem.CategoricalRange):
            category = trial.suggest_categorical(v.name, v.range.choices)
            return v.range.choices.index(category)

        raise ValueError("Unsupported parameter: {}".format(v))

Example #20

def func(trial: Trial, x_max: float = 1.0) -> float:

    x = trial.suggest_uniform("x", -x_max, x_max)
    y = trial.suggest_loguniform("y", 20, 30)
    z = trial.suggest_categorical("z", (-1.0, 1.0))
    assert isinstance(z, float)
    return (x - 2)**2 + (y - 25)**2 + z

Example #21

    def obtem_metodo(self, trial: optuna.Trial) -> MetodoAprendizadoDeMaquina:

        min_samples = trial.suggest_uniform('min_samples_split', 0, 0.5)
        clf_dtree = DecisionTreeClassifier(min_samples_split=min_samples,
                                           random_state=2)

        return ScikitLearnAprendizadoDeMaquina(clf_dtree)

Example #22

def sample_ddpg_params(trial: optuna.Trial) -> Dict[str, Any]:
    """
    Sampler for DDPG hyperparams.

    :param trial: Optuna trial object
    :return: dictionary of sampled DDPG hyperparameters
    """
    gamma = trial.suggest_categorical("gamma", [0.9, 0.95, 0.98, 0.99, 0.995, 0.999, 0.9999])
    learning_rate = trial.suggest_loguniform("lr", 1e-5, 1)
    batch_size = trial.suggest_categorical("batch_size", [16, 32, 64, 100, 128, 256, 512, 1024, 2048])
    buffer_size = trial.suggest_categorical("buffer_size", [int(1e4), int(1e5), int(1e6)])
    # Polyak coeff
    tau = trial.suggest_categorical("tau", [0.001, 0.005, 0.01, 0.02])

    episodic = trial.suggest_categorical("episodic", [True, False])

    if episodic:
        n_episodes_rollout = 1
        train_freq, gradient_steps = -1, -1
    else:
        train_freq = trial.suggest_categorical("train_freq", [1, 16, 128, 256, 1000, 2000])
        gradient_steps = train_freq
        n_episodes_rollout = -1

    noise_type = trial.suggest_categorical("noise_type", ["ornstein-uhlenbeck", "normal", None])
    noise_std = trial.suggest_uniform("noise_std", 0, 1)

    # NOTE: Add "verybig" to net_arch when tuning HER (see TD3)
    net_arch = trial.suggest_categorical("net_arch", ["small", "medium", "big"])
    # activation_fn = trial.suggest_categorical('activation_fn', [nn.Tanh, nn.ReLU, nn.ELU, nn.LeakyReLU])

    net_arch = {
        "small": [64, 64],
        "medium": [256, 256],
        "big": [400, 300],
    }[net_arch]

    hyperparams = {
        "gamma": gamma,
        "tau": tau,
        "learning_rate": learning_rate,
        "batch_size": batch_size,
        "buffer_size": buffer_size,
        "train_freq": train_freq,
        "gradient_steps": gradient_steps,
        "n_episodes_rollout": n_episodes_rollout,
        "policy_kwargs": dict(net_arch=net_arch),
    }

    if noise_type == "normal":
        hyperparams["action_noise"] = NormalActionNoise(
            mean=np.zeros(trial.n_actions), sigma=noise_std * np.ones(trial.n_actions)
        )
    elif noise_type == "ornstein-uhlenbeck":
        hyperparams["action_noise"] = OrnsteinUhlenbeckActionNoise(
            mean=np.zeros(trial.n_actions), sigma=noise_std * np.ones(trial.n_actions)
        )

    return hyperparams
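
trial.n_actions is not part of the Optuna API; the surrounding tuning script presumably attaches it before sampling. A minimal sketch of that wiring, assuming a Box action space, an env in scope, and the Stable-Baselines3 version these samplers target (which still accepted n_episodes_rollout):

from stable_baselines3 import DDPG
from stable_baselines3.common.evaluation import evaluate_policy

def objective(trial: optuna.Trial) -> float:
    trial.n_actions = env.action_space.shape[0]  # used by the action-noise sampling above
    model = DDPG("MlpPolicy", env, **sample_ddpg_params(trial))
    model.learn(total_timesteps=50_000)
    return evaluate_policy(model, env, n_eval_episodes=10)[0]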

Example #23

def train_and_eval(trial: optuna.Trial, study_dir: str, seed: int):
    """
    Objective function for the Optuna `Study` to maximize.

    .. note::
        Optuna expects only the `trial` argument, thus we use `functools.partial` to sneak in custom arguments.

    :param trial: Optuna Trial object for hyper-parameter optimization
    :param study_dir: the parent directory for all trials in this study
    :param seed: seed value for the random number generators, pass `None` for no seeding
    :return: objective function value
    """
    # Synchronize seeds between Optuna trials
    pyrado.set_seed(seed)

    # Environment
    env_hparams = dict(dt=1 / 250., max_steps=1500)
    env = QQubeSwingUpSim(**env_hparams)
    env = ActNormWrapper(env)

    # Policy
    policy_hparam = dict(feats=FeatureStack([
        identity_feat, sign_feat, abs_feat, squared_feat, cubic_feat,
        ATan2Feat(1, 2),
        MultFeat([4, 5])
    ]))
    policy = LinearPolicy(spec=env.spec, **policy_hparam)

    # Algorithm
    algo_hparam = dict(
        num_workers=1,  # parallelize via optuna n_jobs
        max_iter=50,
        pop_size=trial.suggest_int('pop_size', 50, 200),
        num_rollouts=trial.suggest_int('num_rollouts', 4, 10),
        num_is_samples=trial.suggest_int('num_is_samples', 5, 40),
        expl_std_init=trial.suggest_uniform('expl_std_init', 0.1, 0.5),
        symm_sampling=trial.suggest_categorical('symm_sampling',
                                                [True, False]),
    )
    csv_logger = create_csv_step_logger(
        osp.join(study_dir, f'trial_{trial.number}'))
    algo = PoWER(osp.join(study_dir, f'trial_{trial.number}'),
                 env,
                 policy,
                 **algo_hparam,
                 logger=csv_logger)

    # Train without saving the results
    algo.train(snapshot_mode='latest', seed=seed)

    # Evaluate
    min_rollouts = 1000
    sampler = ParallelRolloutSampler(
        env, policy, num_workers=1,
        min_rollouts=min_rollouts)  # parallelize via optuna n_jobs
    ros = sampler.sample()
    mean_ret = sum([r.undiscounted_return() for r in ros]) / min_rollouts

    return mean_ret
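
Per the note in the docstring, the extra arguments are bound with functools.partial before the objective is handed to Optuna; a usage sketch, assuming study_dir and seed are defined:

import functools

study = optuna.create_study(direction='maximize')
study.optimize(functools.partial(train_and_eval, study_dir=study_dir, seed=seed),
               n_trials=100, n_jobs=4)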

Example #24

def suggest_kwargs(
    trial: Trial,
    prefix: str,
    kwargs_ranges: Mapping[str, Any],
    kwargs: Optional[Mapping[str, Any]] = None,
):
    _kwargs = {}
    if kwargs:
        _kwargs.update(kwargs)

    for name, info in kwargs_ranges.items():
        if name in _kwargs:
            continue  # has been set by default, won't be suggested

        prefixed_name = f'{prefix}.{name}'
        dtype, low, high = info['type'], info.get('low'), info.get('high')
        if dtype in {int, 'int'}:
            q, scale = info.get('q'), info.get('scale')
            if scale == 'power_two':
                _kwargs[name] = suggest_discrete_power_two_int(
                    trial=trial,
                    name=prefixed_name,
                    low=low,
                    high=high,
                )
            elif q is not None:
                _kwargs[name] = suggest_discrete_uniform_int(
                    trial=trial,
                    name=prefixed_name,
                    low=low,
                    high=high,
                    q=q,
                )
            else:
                _kwargs[name] = trial.suggest_int(name=prefixed_name,
                                                  low=low,
                                                  high=high)

        elif dtype in {float, 'float'}:
            if info.get('scale') == 'log':
                _kwargs[name] = trial.suggest_loguniform(name=prefixed_name,
                                                         low=low,
                                                         high=high)
            else:
                _kwargs[name] = trial.suggest_uniform(name=prefixed_name,
                                                      low=low,
                                                      high=high)
        elif dtype == 'categorical':
            choices = info['choices']
            _kwargs[name] = trial.suggest_categorical(name=prefixed_name,
                                                      choices=choices)
        elif dtype in {bool, 'bool'}:
            _kwargs[name] = trial.suggest_categorical(name=prefixed_name,
                                                      choices=[True, False])
        else:
            logger.warning(
                f'Unhandled data type ({dtype}) for parameter {name}')

    return _kwargs
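
suggest_discrete_power_two_int and suggest_discrete_uniform_int are helpers defined elsewhere; plausible sketches consistent with how they are called above:

def suggest_discrete_uniform_int(trial: Trial, name: str, low: int, high: int, q: int) -> int:
    """Suggest an integer from [low, high] on a grid with step q."""
    return int(trial.suggest_discrete_uniform(name=name, low=low, high=high, q=q))

def suggest_discrete_power_two_int(trial: Trial, name: str, low: int, high: int) -> int:
    """Suggest a power of two with the exponent sampled from [low, high]."""
    return 2 ** trial.suggest_int(name=name, low=low, high=high)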

Example #25

def sample_sac_params(trial: optuna.Trial) -> Dict[str, Any]:
    """
    Sampler for SAC hyperparams.

    :param trial: Optuna trial object
    :return: dictionary of sampled SAC hyperparameters
    """
    gamma = trial.suggest_categorical("gamma", [0.9, 0.95, 0.98, 0.99, 0.995, 0.999, 0.9999])
    learning_rate = trial.suggest_loguniform("learning_rate", 1e-5, 1)
    batch_size = trial.suggest_categorical("batch_size", [16, 32, 64, 128, 256, 512, 1024, 2048])
    buffer_size = trial.suggest_categorical("buffer_size", [int(1e4), int(1e5), int(1e6)])
    learning_starts = trial.suggest_categorical("learning_starts", [0, 1000, 10000, 20000])
    # train_freq = trial.suggest_categorical('train_freq', [1, 10, 100, 300])
    train_freq = trial.suggest_categorical("train_freq", [1, 4, 8, 16, 32, 64, 128, 256, 512])
    # Polyak coeff
    tau = trial.suggest_categorical("tau", [0.001, 0.005, 0.01, 0.02, 0.05, 0.08])
    # gradient_steps takes too much time
    # gradient_steps = trial.suggest_categorical('gradient_steps', [1, 100, 300])
    gradient_steps = train_freq
    # ent_coef = trial.suggest_categorical('ent_coef', ['auto', 0.5, 0.1, 0.05, 0.01, 0.0001])
    ent_coef = "auto"
    # You can comment that out when not using gSDE
    log_std_init = trial.suggest_uniform("log_std_init", -4, 1)
    # NOTE: Add "verybig" to net_arch when tuning HER
    net_arch = trial.suggest_categorical("net_arch", ["small", "medium", "big"])
    # activation_fn = trial.suggest_categorical('activation_fn', [nn.Tanh, nn.ReLU, nn.ELU, nn.LeakyReLU])

    net_arch = {
        "small": [64, 64],
        "medium": [256, 256],
        "big": [400, 300],
        # Uncomment for tuning HER
        # "large": [256, 256, 256],
        # "verybig": [512, 512, 512],
    }[net_arch]

    target_entropy = "auto"
    # if ent_coef == 'auto':
    #     # target_entropy = trial.suggest_categorical('target_entropy', ['auto', 5, 1, 0, -1, -5, -10, -20, -50])
    #     target_entropy = trial.suggest_uniform('target_entropy', -10, 10)

    hyperparams = {
        "gamma": gamma,
        "learning_rate": learning_rate,
        "batch_size": batch_size,
        "buffer_size": buffer_size,
        "learning_starts": learning_starts,
        "train_freq": train_freq,
        "gradient_steps": gradient_steps,
        "ent_coef": ent_coef,
        "tau": tau,
        "target_entropy": target_entropy,
        "policy_kwargs": dict(log_std_init=log_std_init, net_arch=net_arch),
    }

    if trial.using_her_replay_buffer:
        hyperparams = sample_her_params(trial, hyperparams)

    return hyperparams

Example #26

    def get_trial_hyperparameters(self, trial: optuna.Trial) -> dict:
        if self.model_class == models.LinearColaborativeFilteringModel:
            learning_rate = trial.suggest_uniform('learning_rate', 1e-5, 1e-1)
            number_of_epochs = int(
                trial.suggest_int('number_of_epochs', 10, 15))
            batch_size = int(
                trial.suggest_categorical('batch_size',
                                          [2**power
                                           for power in range(6, 12)]))
            gradient_clip_val = trial.suggest_uniform('gradient_clip_val',
                                                      1.0, 1.0)
            embedding_size = int(trial.suggest_int('embedding_size', 100, 500))
            regularization_factor = trial.suggest_uniform(
                'regularization_factor', 1, 100)
            dropout_probability = trial.suggest_uniform(
                'dropout_probability', 0.0, 1.0)
            hyperparameters = {
                'learning_rate': learning_rate,
                'number_of_epochs': number_of_epochs,
                'batch_size': batch_size,
                'gradient_clip_val': gradient_clip_val,
                'embedding_size': embedding_size,
                'regularization_factor': regularization_factor,
                'dropout_probability': dropout_probability,
            }
        elif self.model_class == models.DeepConcatenationColaborativeFilteringModel:
            learning_rate = trial.suggest_uniform('learning_rate', 1e-4, 1e-3)
            number_of_epochs = int(
                trial.suggest_int('number_of_epochs', 10, 15))
            batch_size = int(
                trial.suggest_categorical('batch_size',
                                          [2**power
                                           for power in range(6, 12)]))
            gradient_clip_val = trial.suggest_uniform('gradient_clip_val',
                                                      1.0, 1.0)
            embedding_size = int(trial.suggest_int('embedding_size', 100, 500))
            dense_layer_count = int(
                trial.suggest_int('dense_layer_count', 1, 4))
            regularization_factor = trial.suggest_uniform(
                'regularization_factor', 1, 100)
            dropout_probability = trial.suggest_uniform(
                'dropout_probability', 0.0, 0.75)
            hyperparameters = {
                'learning_rate': learning_rate,
                'number_of_epochs': number_of_epochs,
                'batch_size': batch_size,
                'gradient_clip_val': gradient_clip_val,
                'embedding_size': embedding_size,
                'dense_layer_count': dense_layer_count,
                'regularization_factor': regularization_factor,
                'dropout_probability': dropout_probability,
            }
        else:
            raise ValueError(f'Unrecognized model {self.model_class}')
        return hyperparameters

Example #27

    def add_suggest(trial: optuna.Trial, user_attrs={}):
        """
        Add hyperparam ranges to an optuna trial and typical user attrs.
        
        Usage:
            trial = optuna.trial.FixedTrial(
                params={         
                    'hidden_size': 128,
                }
            )
            trial = add_suggest(trial)
            trainer = pl.Trainer()
            model = LSTM_PL(dict(**trial.params, **trial.user_attrs), dataset_train,
                            dataset_test, cache_base_path, norm)
            trainer.fit(model)
        """
        trial.suggest_loguniform("learning_rate", 1e-6, 1e-2)
        trial.suggest_uniform("attention_dropout", 0, 0.75)
        # we must have nhead <= hidden_size,
        # so nhead_power.max() <= hidden_size_power.min()
        trial.suggest_discrete_uniform("hidden_size_power", 4, 10, 1)
        trial.suggest_discrete_uniform("hidden_out_size_power", 4, 9, 1)
        trial.suggest_discrete_uniform("nhead_power", 1, 4, 1)
        trial.suggest_int("nlayers", 1, 12)
        trial.suggest_categorical("use_lstm", [False, True])
        trial.suggest_categorical("agg", ['last', 'max', 'mean', 'all'])   

        user_attrs_default = {
            "batch_size": 16,
            "grad_clip": 40,
            "max_nb_epochs": 200,
            "num_workers": 4,
            "num_extra_target": 24 * 4,
            "vis_i": "670",
            "num_context": 24 * 4,
            "input_size": 18,
            "input_size_decoder": 17,
            "context_in_target": False,
            "output_size": 1,
            "patience": 3,
            'min_std': 0.005,
        }
        for k, v in user_attrs_default.items():
            trial.set_user_attr(k, v)
        for k, v in user_attrs.items():
            trial.set_user_attr(k, v)
        return trial

Example #28

def get_model(trial: Trial, model_params: dict):
    n_lstm = trial.suggest_categorical('lstm', [100, 300, 600])
    beta = trial.suggest_uniform('beta', 0, 1.0)

    model_params['n_lstm'] = n_lstm
    model_params['beta'] = beta

    model = CbLossClassifier(**model_params)
    return model

Example #29

def sample_td3_params(trial: optuna.Trial) -> Dict[str, Any]:
    """
    Sampler for TD3 hyperparams.

    :param trial: Optuna trial object
    :return: dictionary of sampled TD3 hyperparameters
    """
    gamma = trial.suggest_categorical("gamma", [0.9, 0.95, 0.98, 0.99, 0.995, 0.999, 0.9999])
    learning_rate = trial.suggest_loguniform("learning_rate", 1e-5, 1)
    batch_size = trial.suggest_categorical("batch_size", [16, 32, 64, 100, 128, 256, 512, 1024, 2048])
    buffer_size = trial.suggest_categorical("buffer_size", [int(1e4), int(1e5), int(1e6)])
    # Polyak coeff
    tau = trial.suggest_categorical("tau", [0.001, 0.005, 0.01, 0.02, 0.05, 0.08])

    train_freq = trial.suggest_categorical("train_freq", [1, 4, 8, 16, 32, 64, 128, 256, 512])
    gradient_steps = train_freq

    noise_type = trial.suggest_categorical("noise_type", ["ornstein-uhlenbeck", "normal", None])
    noise_std = trial.suggest_uniform("noise_std", 0, 1)

    # NOTE: Add "verybig" to net_arch when tuning HER
    net_arch = trial.suggest_categorical("net_arch", ["small", "medium", "big"])
    # activation_fn = trial.suggest_categorical('activation_fn', [nn.Tanh, nn.ReLU, nn.ELU, nn.LeakyReLU])

    net_arch = {
        "small": [64, 64],
        "medium": [256, 256],
        "big": [400, 300],
        # Uncomment for tuning HER
        # "verybig": [256, 256, 256],
    }[net_arch]

    hyperparams = {
        "gamma": gamma,
        "learning_rate": learning_rate,
        "batch_size": batch_size,
        "buffer_size": buffer_size,
        "train_freq": train_freq,
        "gradient_steps": gradient_steps,
        "policy_kwargs": dict(net_arch=net_arch),
        "tau": tau,
    }

    if noise_type == "normal":
        hyperparams["action_noise"] = NormalActionNoise(
            mean=np.zeros(trial.n_actions), sigma=noise_std * np.ones(trial.n_actions)
        )
    elif noise_type == "ornstein-uhlenbeck":
        hyperparams["action_noise"] = OrnsteinUhlenbeckActionNoise(
            mean=np.zeros(trial.n_actions), sigma=noise_std * np.ones(trial.n_actions)
        )

    if trial.using_her_replay_buffer:
        hyperparams = sample_her_params(trial, hyperparams)

    return hyperparams

Example #30

def cnn_pipeline_factory(report_dir: Path, trial: Trial) -> ArmorDigitPipeline:
    return ArmorDigitKerasPipeline.from_custom_cnn(
        input_size=32,
        conv_blocks=((32, 32), (64, 64)),
        logs_dir=str(report_dir),
        dropout=trial.suggest_uniform("dropout", 0, 0.99),
        lr=trial.suggest_loguniform("lr", 1e-5, 1e-1),
        dense_size=2**round(
            trial.suggest_discrete_uniform("dense_size_log2", 3, 10, 1)),
    )