Example #1
def objective(trial: optuna.Trial):
    negative_rate = trial.suggest_categorical("negative_rate",
                                              [10, 20, 30, 40, 50, 60])
    in_dim = trial.suggest_categorical('input dim', [100, 200, 500, 1000])
    out_dim = in_dim
    alpha = trial.suggest_loguniform('alpha', 2e-6, 2e-1)
    return run_experiment(negative_rate, in_dim, out_dim, alpha)
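An objective like this is an ordinary callable; a minimal driver sketch (the direction and trial budget are assumptions, and run_experiment must be importable from the original project):

import optuna

study = optuna.create_study(direction="minimize")  # assumed direction
study.optimize(objective, n_trials=50)             # placeholder budget
print(study.best_params, study.best_value)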
Example #2
    def objective(trial: Trial) -> float:
        trial.set_user_attr('method', 'sdne')
        classifier = classifier_type
        if classifier is None:
            classifier = trial.suggest_categorical('classifier',
                                                   ['SVM', 'EN', 'RF', 'LR'])
        else:
            trial.set_user_attr('classifier', classifier)

        alpha = trial.suggest_uniform('alpha', 0, 0.4)
        beta = trial.suggest_int('beta', 0, 30)
        epochs = trial.suggest_categorical('epochs', [5, 10, 15, 20, 25, 30])

        # Set the inner trial seed
        _set_trial_seed(trial)

        model = embed_train.train_embed_sdne(
            train_graph_filename=train_graph_filename,
            alpha=alpha,
            beta=beta,
            epochs=epochs,
            weighted=weighted,
        )
        return predict_and_evaluate(
            prediction_task=prediction_task,
            model=model,
            graph=graph,
            graph_train=graph_train,
            testing_pos_edges=testing_pos_edges,
            seed=study_seed,
            trial=trial,
            labels=labels,
            node_list=node_list,
            classifier_type=classifier,
        )
Example #3
def sample_a2c_params(trial: optuna.Trial) -> Dict[str, Any]:
    """
    Sampler for A2C hyperparams.

    :param trial:
    :return:
    """
    gamma = trial.suggest_categorical("gamma", [0.9, 0.95, 0.98, 0.99, 0.995, 0.999, 0.9999])
    normalize_advantage = trial.suggest_categorical("normalize_advantage", [False, True])
    max_grad_norm = trial.suggest_categorical("max_grad_norm", [0.3, 0.5, 0.6, 0.7, 0.8, 0.9, 1, 2, 5])
    use_rms_prop = trial.suggest_categorical("use_rms_prop", [False, True])
    gae_lambda = trial.suggest_categorical("gae_lambda", [0.8, 0.9, 0.92, 0.95, 0.98, 0.99, 1.0])
    n_steps = trial.suggest_categorical("n_steps", [8, 16, 32, 64, 128, 256, 512, 1024, 2048])
    lr_schedule = trial.suggest_categorical("lr_schedule", ["linear", "constant"])
    learning_rate = trial.suggest_loguniform("lr", 1e-5, 1)
    ent_coef = trial.suggest_loguniform("ent_coef", 0.00000001, 0.1)
    vf_coef = trial.suggest_uniform("vf_coef", 0, 1)
    log_std_init = trial.suggest_uniform("log_std_init", -4, 1)
    ortho_init = trial.suggest_categorical("ortho_init", [False, True])
    net_arch = trial.suggest_categorical("net_arch", ["small", "medium"])
    # sde_net_arch = trial.suggest_categorical("sde_net_arch", [None, "tiny", "small"])
    # full_std = trial.suggest_categorical("full_std", [False, True])
    # activation_fn = trial.suggest_categorical('activation_fn', ['tanh', 'relu', 'elu', 'leaky_relu'])
    activation_fn = trial.suggest_categorical("activation_fn", ["tanh", "relu"])

    if lr_schedule == "linear":
        learning_rate = linear_schedule(learning_rate)

    net_arch = {
        "small": [dict(pi=[64, 64], vf=[64, 64])],
        "medium": [dict(pi=[256, 256], vf=[256, 256])],
    }[net_arch]

    # sde_net_arch = {
    #     None: None,
    #     "tiny": [64],
    #     "small": [64, 64],
    # }[sde_net_arch]

    activation_fn = {"tanh": nn.Tanh, "relu": nn.ReLU, "elu": nn.ELU, "leaky_relu": nn.LeakyReLU}[activation_fn]

    return {
        "n_steps": n_steps,
        "gamma": gamma,
        "gae_lambda": gae_lambda,
        "learning_rate": learning_rate,
        "ent_coef": ent_coef,
        "normalize_advantage": normalize_advantage,
        "max_grad_norm": max_grad_norm,
        "use_rms_prop": use_rms_prop,
        "vf_coef": vf_coef,
        "policy_kwargs": dict(
            log_std_init=log_std_init,
            net_arch=net_arch,
            # full_std=full_std,
            activation_fn=activation_fn,
            # sde_net_arch=sde_net_arch,
            ortho_init=ortho_init,
        ),
    }
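The sampler above only assembles keyword arguments; in rl-zoo-style tuning the returned dict is unpacked into the algorithm constructor inside an objective. A rough sketch of that pattern (the env id, timestep budget, and episode count are placeholders; it assumes stable-baselines3 plus the sampler's own helpers, linear_schedule and torch.nn as nn, are importable, and an SB3 version that accepts this net_arch format):

import optuna
from stable_baselines3 import A2C
from stable_baselines3.common.evaluation import evaluate_policy

def objective(trial: optuna.Trial) -> float:
    # unpack the sampled hyperparameters straight into the constructor
    model = A2C("MlpPolicy", "Pendulum-v1", **sample_a2c_params(trial))
    model.learn(total_timesteps=10_000)  # placeholder budget
    mean_reward, _ = evaluate_policy(model, model.get_env(), n_eval_episodes=5)
    return mean_reward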
Example #4
def sample_ppo_params(trial: optuna.Trial) -> Dict[str, Any]:
    """
    Sampler for PPO hyperparams.

    :param trial:
    :return:
    """
    batch_size = trial.suggest_categorical("batch_size", [8, 16, 32, 64, 128, 256, 512])
    n_steps = trial.suggest_categorical("n_steps", [8, 16, 32, 64, 128, 256, 512, 1024, 2048])
    gamma = trial.suggest_categorical("gamma", [0.9, 0.95, 0.98, 0.99, 0.995, 0.999, 0.9999])
    learning_rate = trial.suggest_loguniform("lr", 1e-5, 1)
    lr_schedule = "constant"
    # lr_schedule = trial.suggest_categorical('lr_schedule', ['linear', 'constant'])
    ent_coef = trial.suggest_loguniform("ent_coef", 0.00000001, 0.1)
    clip_range = trial.suggest_categorical("clip_range", [0.1, 0.2, 0.3, 0.4])
    n_epochs = trial.suggest_categorical("n_epochs", [1, 5, 10, 20])
    gae_lambda = trial.suggest_categorical("gae_lambda", [0.8, 0.9, 0.92, 0.95, 0.98, 0.99, 1.0])
    max_grad_norm = trial.suggest_categorical("max_grad_norm", [0.3, 0.5, 0.6, 0.7, 0.8, 0.9, 1, 2, 5])
    vf_coef = trial.suggest_uniform("vf_coef", 0, 1)
    net_arch = trial.suggest_categorical("net_arch", ["small", "medium"])
    log_std_init = trial.suggest_uniform("log_std_init", -4, 1)
    sde_sample_freq = trial.suggest_categorical("sde_sample_freq", [-1, 8, 16, 32, 64, 128, 256])
    ortho_init = False
    # ortho_init = trial.suggest_categorical('ortho_init', [False, True])
    # activation_fn = trial.suggest_categorical('activation_fn', ['tanh', 'relu', 'elu', 'leaky_relu'])
    activation_fn = trial.suggest_categorical("activation_fn", ["tanh", "relu"])

    # TODO: account when using multiple envs
    if batch_size > n_steps:
        batch_size = n_steps

    if lr_schedule == "linear":
        learning_rate = linear_schedule(learning_rate)

    net_arch = {
        "small": [dict(pi=[64, 64], vf=[64, 64])],
        "medium": [dict(pi=[256, 256], vf=[256, 256])],
    }[net_arch]

    activation_fn = {"tanh": nn.Tanh, "relu": nn.ReLU, "elu": nn.ELU, "leaky_relu": nn.LeakyReLU}[activation_fn]

    return {
        "n_steps": n_steps,
        "batch_size": batch_size,
        "gamma": gamma,
        "learning_rate": learning_rate,
        "ent_coef": ent_coef,
        "clip_range": clip_range,
        "n_epochs": n_epochs,
        "gae_lambda": gae_lambda,
        "max_grad_norm": max_grad_norm,
        "vf_coef": vf_coef,
        "sde_sample_freq": sde_sample_freq,
        "policy_kwargs": dict(
            log_std_init=log_std_init,
            net_arch=net_arch,
            activation_fn=activation_fn,
            ortho_init=ortho_init,
        ),
    }
Example #5
def rf_from_trial(trial: optuna.Trial):
    max_leaf_nodes = trial.suggest_categorical('max_leaf_nodes_type',
                                               ['unlimited', 'limited'])
    if max_leaf_nodes == 'unlimited':
        max_leaf_nodes = None
    else:
        max_leaf_nodes = trial.suggest_int('max_leaf_nodes', 1, 1000)
    params = {
        'n_estimators':
        trial.suggest_int('n_estimators', 10, 300),
        'criterion':
        trial.suggest_categorical('criterion', ['mse', 'mae']),
        'min_samples_split':
        trial.suggest_uniform('min_samples_split', 0., 1.),
        'min_samples_leaf':
        trial.suggest_uniform('min_samples_leaf', 0., .5),
        'max_features':
        trial.suggest_categorical('max_features',
                                  ['auto', 'log2', 'sqrt', None]),
        'max_leaf_nodes':
        max_leaf_nodes,
        'random_state':
        trial.suggest_int('random_state', 0, 999999),
        'n_jobs':
        -1,
        'verbose':
        1
    }
    return RandomForestRegressor(**params)
Example #6
 def objective(self, trial: Trial) -> float:
     suggest: Dict[str, Union[bool, float, int, str]] = {
         "n_estimators":
         trial.suggest_int("n_estimators", self.params["n_estimators"][0],
                           self.params["n_estimators"][1]),
         "max_depth":
         trial.suggest_int("max_depth", self.params["max_depth"][0],
                           self.params["max_depth"][1]),
         "max_features":
         trial.suggest_categorical("max_features",
                                   self.params["max_features"]),
         "min_samples_leaf":
         trial.suggest_loguniform(
             "min_samples_leaf",
             self.params["min_samples_leaf"][0],
             self.params["min_samples_leaf"][1],
         ),
         "min_samples_split":
         trial.suggest_loguniform(
             "min_samples_split",
             self.params["min_samples_split"][0],
             self.params["min_samples_split"][1],
         ),
         "bootstrap":
         trial.suggest_categorical("bootstrap", (True, False)),
     }
     est: BaseEstimator = self.model.__class__(**suggest)
     return -cross_val_score(estimator=est,
                             X=self.X,
                             y=self.y,
                             cv=self.cv,
                             scoring="neg_mean_squared_error").mean()
Example #7
def objective(trial: Trial, data, target):
    train_x, valid_x, train_y, valid_y = train_test_split(data, target, test_size=0.25)
    dtrain = xgb.DMatrix(train_x, label=train_y)
    dvalid = xgb.DMatrix(valid_x, label=valid_y)

    param = {
        "verbosity": 0,
        "objective": "binary:logistic",
        "booster": trial.suggest_categorical("booster", ["gbtree", "gblinear", "dart"]),
        "lambda": trial.suggest_float("lambda", 1e-8, 1.0, log=True),
        "alpha": trial.suggest_float("alpha", 1e-8, 1.0, log=True),
    }

    if param["booster"] == "gbtree" or param["booster"] == "dart":
        param["max_depth"] = trial.suggest_int("max_depth", 1, 9)
        param["eta"] = trial.suggest_float("eta", 1e-8, 1.0, log=True)
        param["gamma"] = trial.suggest_float("gamma", 1e-8, 1.0, log=True)
        param["grow_policy"] = trial.suggest_categorical("grow_policy", ["depthwise", "lossguide"])
    if param["booster"] == "dart":
        param["sample_type"] = trial.suggest_categorical("sample_type", ["uniform", "weighted"])
        param["normalize_type"] = trial.suggest_categorical("normalize_type", ["tree", "forest"])
        param["rate_drop"] = trial.suggest_float("rate_drop", 1e-8, 1.0, log=True)
        param["skip_drop"] = trial.suggest_float("skip_drop", 1e-8, 1.0, log=True)

    bst = xgb.train(param, dtrain)
    preds = bst.predict(dvalid)
    pred_labels = np.rint(preds)
    f1 = sklearn.metrics.f1_score(valid_y, pred_labels)
    return f1
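Because this objective takes extra arguments beyond the trial, it has to be wrapped before being handed to study.optimize. A minimal sketch with a placeholder binary-classification dataset:

import functools

import optuna
from sklearn.datasets import load_breast_cancer

bundle = load_breast_cancer()  # placeholder data; any binary target works
study = optuna.create_study(direction="maximize")
study.optimize(
    functools.partial(objective, data=bundle.data, target=bundle.target),
    n_trials=30,  # placeholder budget
)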
Example #8
def objective(trial: optuna.Trial, hparams: Namespace):
    """Return the objective loss for an optuna trial.

    Args:
        trial (optuna.Trial): The optuna trial.
        hparams (Namespace): The argparse `Namespace` that will be passed to
        the `LightningModule`.
    """
    hparams.loss = trial.suggest_categorical(
        'loss', ['CrossEntropy', 'TripletMargin', 'ArcFace'])

    if hparams.loss == 'ArcFace':
        hparams.loss_margin = trial.suggest_uniform('loss_margin', 0.3, 0.9)
        hparams.sampler = None
    elif hparams.loss == 'TripletMargin':
        hparams.loss_margin = trial.suggest_uniform('loss_margin', 0, 0.2)
        hparams.sampler = 'MPerClass'
        hparams.m_per_class = 5
        hparams.miner = trial.suggest_categorical('miner', ['BatchHard', None])

    hparams.optim = 'SGD'
    hparams.lr = trial.suggest_loguniform('lr', 1e-8, 1e0)

    max_steps = 1e4

    hparams.lr_sched = trial.suggest_categorical('lr_sched',
                                                 [None, 'OneCycleLR'])
    if hparams.lr_sched == 'OneCycleLR':
        hparams.lr_sched_total_steps = max_steps
        hparams.lr_sched_max_lr = 10 * hparams.lr

    hparams.use_sample_data = True
Example #9
    def get_parameters(self, trial: optuna.Trial):
        kwargs = {
            'stop_loss':
            trial.suggest_int("stop_loss", 1, 200, step=10),
            'slope_average':
            trial.suggest_categorical('slope_average', ['ema', 'sma']),
            'slope_ma_length':
            trial.suggest_int('slope_ma_length', 5, 610),
            'slope_fast_length':
            trial.suggest_int('slope_fast_length', 5, 610),
            'slope_slow_length':
            trial.suggest_int('slope_slow_length', 5, 610),
            'slope_trend_filter_enable':
            trial.suggest_categorical('slope_trend_filter_enable',
                                      [True, False]),
        }

        if kwargs['slope_fast_length'] >= kwargs['slope_slow_length']:
            raise optuna.exceptions.TrialPruned(
                "slope_fast_length >= slope_slow_length")

        if kwargs['slope_trend_filter_enable']:
            kwargs['slope_trend_filter_length'] = trial.suggest_int(
                'slope_trend_filter_length', 5, 610)

        return kwargs
Example #10
def optimal_params(trial: optuna.Trial):
    params = {}
    penalty = trial.suggest_categorical("penalty", ["l1", "l2"])
    params["penalty"] = penalty

    tol = trial.suggest_uniform("tol", 1e-5, 1e-3)
    params["tol"] = tol
    C = trial.suggest_uniform("C", 1e-4, 10)
    params["C"] = C
    params["random_state"] = 42
    if penalty == "l2":
        solver = trial.suggest_categorical(
            "solver", ["newton-cg", "lbfgs", "sag", "saga"])
        params["solver"] = solver
    else:
        params["solver"] = "saga"
    params["max_iter"] = 10000
    params["multi_class"] = "auto"

    oof_preds = np.zeros((X.shape[0], ))
    fold = StratifiedKFold(n_splits=5, shuffle=True, random_state=42)
    for (trn_index, val_index) in fold.split(X, y):
        X_train, X_val = X[trn_index], X[val_index]
        y_train = y[trn_index]
        model = LogisticRegression(**params)
        model.fit(X_train, y_train)
        y_pred = model.predict(X_val)
        oof_preds[val_index] = y_pred
    f1 = f1_score(y, oof_preds, average="macro")
    return 1.0 - f1
Example #11
def suggest_kwargs(
    trial: Trial,
    prefix: str,
    kwargs_ranges: Mapping[str, Any],
    kwargs: Optional[Mapping[str, Any]] = None,
):
    _kwargs = {}
    if kwargs:
        _kwargs.update(kwargs)

    for name, info in kwargs_ranges.items():
        if name in _kwargs:
            continue  # has been set by default, won't be suggested

        prefixed_name = f'{prefix}.{name}'
        dtype, low, high = info['type'], info.get('low'), info.get('high')
        if dtype in {int, 'int'}:
            q, scale = info.get('q'), info.get('scale')
            if scale == 'power_two':
                _kwargs[name] = suggest_discrete_power_two_int(
                    trial=trial,
                    name=prefixed_name,
                    low=low,
                    high=high,
                )
            elif q is not None:
                _kwargs[name] = suggest_discrete_uniform_int(
                    trial=trial,
                    name=prefixed_name,
                    low=low,
                    high=high,
                    q=q,
                )
            else:
                _kwargs[name] = trial.suggest_int(name=prefixed_name,
                                                  low=low,
                                                  high=high)

        elif dtype in {float, 'float'}:
            if info.get('scale') == 'log':
                _kwargs[name] = trial.suggest_loguniform(name=prefixed_name,
                                                         low=low,
                                                         high=high)
            else:
                _kwargs[name] = trial.suggest_uniform(name=prefixed_name,
                                                      low=low,
                                                      high=high)
        elif dtype == 'categorical':
            choices = info['choices']
            _kwargs[name] = trial.suggest_categorical(name=prefixed_name,
                                                      choices=choices)
        elif dtype in {bool, 'bool'}:
            _kwargs[name] = trial.suggest_categorical(name=prefixed_name,
                                                      choices=[True, False])
        else:
            logger.warning(
                f'Unhandled data type ({dtype}) for parameter {name}')

    return _kwargs
Example #12
 def get_trial_hyperparameters(self, trial: optuna.Trial) -> dict:
     if self.model_class == models.LinearColaborativeFilteringModel:
         learning_rate = trial.suggest_uniform('learning_rate', 1e-5, 1e-1)
         number_of_epochs = int(
             trial.suggest_int('number_of_epochs', 10, 15))
         batch_size = int(
             trial.suggest_categorical('batch_size',
                                       [2**power
                                        for power in range(6, 12)]))
         gradient_clip_val = trial.suggest_uniform('gradient_clip_val', 1.0,
                                                   1.0)
         embedding_size = int(trial.suggest_int('embedding_size', 100, 500))
         regularization_factor = trial.suggest_uniform(
             'regularization_factor', 1, 100)
         dropout_probability = trial.suggest_uniform(
             'dropout_probability', 0.0, 1.0)
         hyperparameters = {
             'learning_rate': learning_rate,
             'number_of_epochs': number_of_epochs,
             'batch_size': batch_size,
             'gradient_clip_val': gradient_clip_val,
             'embedding_size': embedding_size,
             'regularization_factor': regularization_factor,
             'dropout_probability': dropout_probability,
         }
     elif self.model_class == models.DeepConcatenationColaborativeFilteringModel:
         learning_rate = trial.suggest_uniform('learning_rate', 1e-4, 1e-3)
         number_of_epochs = int(
             trial.suggest_int('number_of_epochs', 10, 15))
         batch_size = int(
             trial.suggest_categorical('batch_size',
                                       [2**power
                                        for power in range(6, 12)]))
         gradient_clip_val = trial.suggest_uniform('gradient_clip_val', 1.0,
                                                   1.0)
         embedding_size = int(trial.suggest_int('embedding_size', 100, 500))
         dense_layer_count = int(
             trial.suggest_int('dense_layer_count', 1, 4))
         regularization_factor = trial.suggest_uniform(
             'regularization_factor', 1, 100)
         dropout_probability = trial.suggest_uniform(
             'dropout_probability', 0.0, 0.75)
         hyperparameters = {
             'learning_rate': learning_rate,
             'number_of_epochs': number_of_epochs,
             'batch_size': batch_size,
             'gradient_clip_val': gradient_clip_val,
             'embedding_size': embedding_size,
             'dense_layer_count': dense_layer_count,
             'regularization_factor': regularization_factor,
             'dropout_probability': dropout_probability,
         }
     else:
         raise ValueError(f'Unrecognized model {self.model_class}')
     return hyperparameters
Example #13
def knn(trial: optuna.Trial):
    n_neighbors = trial.suggest_int("n_neighbors", 1, 10)
    weights = trial.suggest_categorical("weights", ['uniform', 'distance'])
    algorithm = trial.suggest_categorical("algorithm",
                                          ['ball_tree', 'kd_tree'])
    leaf_size = trial.suggest_int('leaf_size', 1, 50)
    classifier = sklearn.neighbors.KNeighborsClassifier(
        n_neighbors=n_neighbors,
        weights=weights,
        algorithm=algorithm,
        leaf_size=leaf_size)
    return evaluate_classifier(classifier)
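evaluate_classifier is an external helper in this snippet; a plausible stand-in (an assumption, not the original implementation) scores the classifier with cross-validation:

from sklearn.datasets import load_iris
from sklearn.model_selection import cross_val_score

def evaluate_classifier(classifier) -> float:
    # hypothetical stand-in: mean 3-fold CV accuracy on a toy dataset
    X, y = load_iris(return_X_y=True)
    return cross_val_score(classifier, X, y, cv=3).mean()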
Example #14
    def objective(self, trial: optuna.Trial) -> float:
        trial.suggest_float('lr', 1e-6, 1e-4)
        trial.suggest_categorical('batch_size', [4, 8, 14])
        trial.suggest_float('weight_decay', 0.0, 0.1)
        trial.suggest_float('dropout', 0.0, 0.8)

        executor = optuna.integration.allennlp.AllenNLPExecutor(
            trial=trial,
            config_file=self.config_file,
            serialization_dir=self.MODEL_PATH + f'/trial_{trial.number}',
            metrics='best_validation_f1-measure-overall')
        return executor.run()
Example #15
def suggest_kwargs(
    trial: Trial,
    prefix: str,
    kwargs_ranges: Mapping[str, Any],
    kwargs: Optional[Mapping[str, Any]] = None,
) -> Mapping[str, Any]:
    _kwargs: Dict[str, Any] = {}
    if kwargs:
        _kwargs.update(kwargs)

    for name, info in kwargs_ranges.items():
        if name in _kwargs:
            continue  # has been set by default, won't be suggested

        prefixed_name = f'{prefix}.{name}'
        dtype, low, high = info['type'], info.get('low'), info.get('high')
        if dtype in {int, 'int'}:
            scale = info.get('scale')
            if scale in {'power_two', 'power'}:
                _kwargs[name] = suggest_discrete_power_int(
                    trial=trial,
                    name=prefixed_name,
                    low=low,
                    high=high,
                    base=info.get('q') or info.get('base') or 2,
                )
            elif scale is None or scale == 'linear':
                # get log from info - could either be a boolean or string
                log = info.get('log') in {True, 'TRUE', 'True', 'true', 't', 'YES', 'Yes', 'yes', 'y'}
                _kwargs[name] = trial.suggest_int(
                    name=prefixed_name,
                    low=low,
                    high=high,
                    step=info.get('q') or info.get('step') or 1,
                    log=log,
                )
            else:
                logger.warning(f'Unhandled scale {scale} for parameter {name} of data type {dtype}')

        elif dtype in {float, 'float'}:
            if info.get('scale') == 'log':
                _kwargs[name] = trial.suggest_loguniform(name=prefixed_name, low=low, high=high)
            else:
                _kwargs[name] = trial.suggest_uniform(name=prefixed_name, low=low, high=high)
        elif dtype == 'categorical':
            choices = info['choices']
            _kwargs[name] = trial.suggest_categorical(name=prefixed_name, choices=choices)
        elif dtype in {bool, 'bool'}:
            _kwargs[name] = trial.suggest_categorical(name=prefixed_name, choices=[True, False])
        else:
            logger.warning(f'Unhandled data type ({dtype}) for parameter {name}')

    return _kwargs
Example #16
def sample_clustering_config_with_optuna(trial: Trial) -> Dict:
    """
    Uses optuna to sample a config dictionary with clustering parameters.
    :param trial: optuna trial
    :return: config dictionary
    """
    cluster_criterion = trial.suggest_categorical("cluster_criterion", ['inconsistent', 'distance', 'maxclust'])
    cluster_depth = 0 if cluster_criterion != 'inconsistent' else trial.suggest_int("cluster_depth", low=1, high=10)
    clustering_config = {"threshold": trial.suggest_uniform("threshold", 0, 1),
                         "linkage_method": trial.suggest_categorical("linkage_method", ['single', 'complete', 'average', 'weighted', 'centroid', 'median', 'ward']),
                         "cluster_criterion": cluster_criterion,
                         "cluster_depth": cluster_depth}
    return clustering_config
Example #17
def mlp(trial: optuna.Trial):
    hidden_layer_size = trial.suggest_int("hidden_layer_size", 10, 1000, 10)
    hidden_layer_count = trial.suggest_int("hidden_layer_count", 1, 10)
    hidden_layer = (hidden_layer_size, ) * hidden_layer_count
    activation_fun = trial.suggest_categorical("activation_fun",
                                               ['identity', 'tanh', 'relu'])
    alpha = trial.suggest_float("alpha", 0.000001, 0.0001)
    learning_rate = trial.suggest_categorical(
        'learning_rate', ['constant', 'invscaling', 'adaptive'])
    classifier = MLPClassifier(hidden_layer_sizes=hidden_layer,
                               activation=activation_fun,
                               alpha=alpha,
                               learning_rate=learning_rate)
    return evaluate_classifier(classifier)
Example #18
    def evaluate_params(self, trial: optuna.Trial):

        params = dict()

        params['model_type'] = trial.suggest_categorical(
            'model_type', ['dnn', 'cnn'])

        if params['model_type'] == 'dnn':
            params['l1_units'] = trial.suggest_categorical(
                'l1_units', [8, 16, 32, 64])
            params['l1_activation'] = trial.suggest_categorical(
                'l1_activation', ['relu', 'sigmoid', 'linear'])

            params['l2_units'] = trial.suggest_categorical(
                'l2_units', [8, 16, 32, 64])
            params['l2_activation'] = trial.suggest_categorical(
                'l2_activation', ['relu', 'sigmoid', 'linear'])

        elif params['model_type'] == 'cnn':
            params['cnn_l1_filters'] = trial.suggest_categorical(
                'cnn_l1_filters', [8, 16, 32, 64])
            params['cnn_dense_l1_units'] = trial.suggest_categorical(
                'cnn_dense_l1_units', [8, 16, 32, 64])
            params['cnn_dense_l2_units'] = trial.suggest_categorical(
                'cnn_dense_l2_units', [8, 16, 32, 64])

        if trial.should_prune():
            raise optuna.TrialPruned()

        return self.run_model(params, trial.number)
Example #19
def sample_ddpg_params(trial: optuna.Trial) -> Dict[str, Any]:
    """
    Sampler for DDPG hyperparams.

    :param trial:
    :return:
    """
    gamma = trial.suggest_categorical("gamma", [0.9, 0.95, 0.98, 0.99, 0.995, 0.999, 0.9999])
    learning_rate = trial.suggest_loguniform("lr", 1e-5, 1)
    batch_size = trial.suggest_categorical("batch_size", [16, 32, 64, 100, 128, 256, 512, 1024, 2048])
    buffer_size = trial.suggest_categorical("buffer_size", [int(1e4), int(1e5), int(1e6)])
    # Polyak coeff
    tau = trial.suggest_categorical("tau", [0.001, 0.005, 0.01, 0.02])

    episodic = trial.suggest_categorical("episodic", [True, False])

    if episodic:
        n_episodes_rollout = 1
        train_freq, gradient_steps = -1, -1
    else:
        train_freq = trial.suggest_categorical("train_freq", [1, 16, 128, 256, 1000, 2000])
        gradient_steps = train_freq
        n_episodes_rollout = -1

    noise_type = trial.suggest_categorical("noise_type", ["ornstein-uhlenbeck", "normal", None])
    noise_std = trial.suggest_uniform("noise_std", 0, 1)

    # NOTE: Add "verybig" to net_arch when tuning HER (see TD3)
    net_arch = trial.suggest_categorical("net_arch", ["small", "medium", "big"])
    # activation_fn = trial.suggest_categorical('activation_fn', [nn.Tanh, nn.ReLU, nn.ELU, nn.LeakyReLU])

    net_arch = {
        "small": [64, 64],
        "medium": [256, 256],
        "big": [400, 300],
    }[net_arch]

    hyperparams = {
        "gamma": gamma,
        "tau": tau,
        "learning_rate": learning_rate,
        "batch_size": batch_size,
        "buffer_size": buffer_size,
        "train_freq": train_freq,
        "gradient_steps": gradient_steps,
        "n_episodes_rollout": n_episodes_rollout,
        "policy_kwargs": dict(net_arch=net_arch),
    }

    if noise_type == "normal":
        hyperparams["action_noise"] = NormalActionNoise(
            mean=np.zeros(trial.n_actions), sigma=noise_std * np.ones(trial.n_actions)
        )
    elif noise_type == "ornstein-uhlenbeck":
        hyperparams["action_noise"] = OrnsteinUhlenbeckActionNoise(
            mean=np.zeros(trial.n_actions), sigma=noise_std * np.ones(trial.n_actions)
        )

    return hyperparams
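trial.n_actions is not part of the Optuna API; zoo-style tuning code attaches it to the trial before sampling so the action-noise objects can be sized. A hedged sketch of that wiring (the env id is a placeholder, evaluate is a hypothetical helper, and gymnasium may be plain gym in older setups):

import gymnasium as gym
import optuna

env = gym.make("Pendulum-v1")

def objective(trial: optuna.Trial) -> float:
    trial.n_actions = env.action_space.shape[0]  # attach before sampling
    hyperparams = sample_ddpg_params(trial)
    return evaluate(hyperparams)  # hypothetical evaluation helper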
Example #20
    def objective(trial: optuna.Trial):
        model_cfg = {
            "input_size": [32, 32],
            "input_channel": 3,
            "depth_multiple": 1.0,
            "width_multiple": 1.0,
        }
        conv_type = trial.suggest_categorical("conv_type", ["Conv", "DWConv"])
        kernel_size = trial.suggest_int("kernel_size", 3, 7, step=2)
        n_channel_01 = trial.suggest_int("n_channel_01", 8, 64, step=8)
        n_channel_02 = trial.suggest_int("n_channel_02", 8, 128, step=8)

        linear_activation = trial.suggest_categorical("linear_activation",
                                                      ["ReLU", "SiLU"])
        n_channel_03 = trial.suggest_int("n_channel_03", 64, 256, step=8)
        n_channel_04 = trial.suggest_int("n_channel_04", 32, 128, step=8)
        n_repeat = trial.suggest_int("n_repeat", 1, 3)

        backbone = [
            [-1, n_repeat, conv_type, [n_channel_01, kernel_size, 1]],
            [-1, 1, "MaxPool", [2]],
            [-1, n_repeat, conv_type, [int(n_channel_02), kernel_size, 1]],
            [-1, 1, "MaxPool", [2]],
            [-1, 1, "Flatten", []],
            [-1, 1, "Linear", [n_channel_03, linear_activation]],
            [-1, 1, "Linear", [n_channel_04, linear_activation]],
            [-1, 1, "Linear", [10]],
        ]
        model_cfg.update({"backbone": backbone})

        model = Model(model_cfg, verbose=True).to(device)
        batch_size = trial.suggest_int("batch_size", 8, 256)
        epochs = trial.suggest_int("epochs", 5, 20)

        train_loader = DataLoader(train_dataset,
                                  batch_size=batch_size,
                                  sampler=subset_sampler)
        test_loader = DataLoader(test_dataset, batch_size=batch_size)

        criterion = nn.CrossEntropyLoss()
        optimizer = optim.Adam(model.parameters())

        trainer = TorchTrainer(model, criterion, optimizer, device=device)
        trainer.train(train_loader,
                      n_epoch=epochs,
                      test_dataloader=test_loader)
        test_loss, test_accuracy = trainer.test(test_loader)

        return test_loss
Example #21
def func(trial: Trial, x_max: float = 1.0) -> float:

    x = trial.suggest_uniform("x", -x_max, x_max)
    y = trial.suggest_loguniform("y", 20, 30)
    z = trial.suggest_categorical("z", (-1.0, 1.0))
    assert isinstance(z, float)
    return (x - 2)**2 + (y - 25)**2 + z
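Several snippets on this page use suggest_uniform and suggest_loguniform, which newer Optuna releases deprecate in favor of suggest_float; the same search space rewritten in the newer API:

from optuna.trial import Trial

def func(trial: Trial, x_max: float = 1.0) -> float:
    x = trial.suggest_float("x", -x_max, x_max)
    y = trial.suggest_float("y", 20, 30, log=True)  # log=True replaces suggest_loguniform
    z = trial.suggest_categorical("z", (-1.0, 1.0))
    return (x - 2) ** 2 + (y - 25) ** 2 + z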
Example #22
 def multi_objective_function(trial: Trial) -> Tuple[float, float]:
     x1: float = trial.suggest_float("x1", 0.1, 3)
     x2: float = trial.suggest_float("x2", 0.1, 3, log=True)
     x3: int = trial.suggest_int("x3", 2, 4, log=True)
     x4 = trial.suggest_categorical("x4", [0.1, 1.0, 10.0])
     assert isinstance(x4, float)
     return (x1 + x2 * x3 + x4, x1 * x4)
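Returning a tuple makes this a multi-objective problem, so the study must be created with one direction per objective; a minimal driver sketch (directions and budget are assumptions):

import optuna

study = optuna.create_study(directions=["minimize", "minimize"])
study.optimize(multi_objective_function, n_trials=50)  # placeholder budget
for t in study.best_trials:  # Pareto-optimal trials
    print(t.values, t.params)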
Example #23
def objective(trial: Trial) -> float:
    x1: float = trial.suggest_float("x1", 0.1, 3)
    x2: float = trial.suggest_float("x2", 0.1, 3, log=True)
    x3: int = trial.suggest_int("x3", 2, 4, log=True)
    x4 = trial.suggest_categorical("x4", [0.1, 1.0, 10.0])
    assert isinstance(x4, float)
    return x1 + x2 * x3 + x4
Example #24
def suggest_discrete_power_two_int(trial: Trial, name, low, high) -> int:
    """Suggest an integer in the given range [2^low, 2^high]."""
    if high <= low:
        raise Exception(
            f"Upper bound {high} is not greater than lower bound {low}.")
    choices = [2**i for i in range(low, high + 1)]
    return trial.suggest_categorical(name=name, choices=choices)
Example #25
def suggest_discrete_uniform_int(trial: Trial, name, low, high, q) -> int:
    """Suggest an integer in the given range [low, high] inclusive with step size q."""
    if (high - low) % q:
        logger.warning(
            f'bad range given: range({low}, {high}, {q}) - not divisible by q')
    choices = list(range(low, high + 1, q))
    return trial.suggest_categorical(name=name, choices=choices)
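Optuna's built-in suggest_int covers the same case through its step argument (Optuna warns and shrinks high when the range is not divisible by the step); a hedged one-line equivalent with a hypothetical name:

from optuna.trial import Trial

def suggest_step_int(trial: Trial, name, low, high, q) -> int:
    # hypothetical helper: same choices as the categorical version above
    return trial.suggest_int(name, low, high, step=q)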
Example #26
    def _suggest(self, trial: optuna.Trial, v: problem.Var) -> float:
        if v.name in trial.params:
            if isinstance(trial.params[v.name], str):
                assert isinstance(v.range, problem.CategoricalRange)
                return v.range.choices.index(trial.params[v.name])
            else:
                return trial.params[v.name]

        if isinstance(v.range, problem.ContinuousRange):
            if v.distribution == problem.Distribution.UNIFORM:
                return trial.suggest_uniform(v.name, v.range.low, v.range.high)
            elif v.distribution == problem.Distribution.LOG_UNIFORM:
                return trial.suggest_loguniform(v.name, v.range.low,
                                                v.range.high)
        elif isinstance(v.range, problem.DiscreteRange):
            if self._use_discrete_uniform:
                return trial.suggest_discrete_uniform(v.name,
                                                      v.range.low,
                                                      v.range.high - 1,
                                                      q=1)
            elif v.distribution == problem.Distribution.LOG_UNIFORM:
                return trial.suggest_int(v.name,
                                         v.range.low,
                                         v.range.high - 1,
                                         log=True)
            else:
                return trial.suggest_int(v.name, v.range.low, v.range.high - 1)
        elif isinstance(v.range, problem.CategoricalRange):
            category = trial.suggest_categorical(v.name, v.range.choices)
            return v.range.choices.index(category)

        raise ValueError("Unsupported parameter: {}".format(v))
Example #27
def run_model(trial: optuna.Trial) -> float:

    print(study, ' with ', len(study.trials))
    print(trial)

    l1Units = trial.suggest_categorical('l1Units', [2, 4, 8, 16, 32])
    l2Units = trial.suggest_categorical('l2Units', [2, 4, 8, 16, 32])

    print("Training model with: ")
    print("Layer 1 units: ", l1Units)
    print("Layer 2 units: ", l2Units)

    if trial.should_prune():
        raise optuna.TrialPruned()

    model = keras.Sequential([
        keras.layers.Dense(l1Units,
                           activation='relu',
                           input_shape=(WINDOW_SIZE, )),
        keras.layers.Dense(l2Units, activation='relu'),
        keras.layers.Dense(1)
    ])

    model.compile(optimizer='adam',
                  loss='mean_squared_error',
                  metrics=['mse', 'mae'])

    model.fit(x_train, y_train, epochs=20, verbose=0)

    _, mse, _ = model.evaluate(x_test, y_test)

    print("Test mse: ", mse)

    return mse
Example #28
def sample_her_params(trial: optuna.Trial, hyperparams: Dict[str, Any]) -> Dict[str, Any]:
    """
    Sampler for HerReplayBuffer hyperparams.

    :param trial:
    :param hyperparams:
    :return:
    """
    her_kwargs = trial.her_kwargs.copy()
    her_kwargs["n_sampled_goal"] = trial.suggest_int("n_sampled_goal", 1, 5)
    her_kwargs["goal_selection_strategy"] = trial.suggest_categorical(
        "goal_selection_strategy", ["final", "episode", "future"]
    )
    her_kwargs["online_sampling"] = trial.suggest_categorical("online_sampling", [True, False])
    hyperparams["replay_buffer_kwargs"] = her_kwargs
    return hyperparams
Example #29
    def objective(trial: Trial) -> float:
        trial.set_user_attr('method', 'hope')
        classifier = classifier_type
        if classifier is None:
            classifier = trial.suggest_categorical('classifier',
                                                   ['SVM', 'EN', 'RF', 'LR'])
        else:
            trial.set_user_attr('classifier', classifier)

        dimensions = trial.suggest_int('dimensions', dimensions_range[0],
                                       dimensions_range[1])

        # Set the inner trial seed
        _set_trial_seed(trial)

        model = embed_train.train_embed_hope(
            train_graph_filename=train_graph_filename,
            dimensions=dimensions,
            weighted=weighted,
        )
        return predict_and_evaluate(
            model=model,
            graph=graph,
            graph_train=graph_train,
            testing_pos_edges=testing_pos_edges,
            seed=seed,
            trial=trial,
            labels=labels,
            node_list=node_list,
            classifier_type=classifier,
            prediction_task=prediction_task,
        )
Example #30
    def f(trial: Trial) -> float:

        x = trial.suggest_int("x", 1, 1)
        y = trial.suggest_categorical("y", (2.5, ))
        trial.set_user_attr("train_loss", 3)
        raise ValueError()
        return x + y  # 3.5