Example #1
def hp_search(trial: optuna.Trial):
    if torch.cuda.is_available():
        logger.info("%s", torch.cuda.get_device_name(0))

    global gopt
    opt = gopt
    # set config
    config = load_config(opt)
    config['opt'] = opt
    logger.info("%s", config)

    # set path
    set_path(config)

    # set search spaces
    lr = trial.suggest_float('lr', 1e-6, 1e-3, log=True)  # suggest_loguniform is deprecated
    bsz = trial.suggest_categorical('batch_size', [32, 64, 128])
    seed = trial.suggest_int('seed', 17, 42)
    epochs = trial.suggest_int('epochs', 1, opt.epoch)

    # prepare train, valid dataset
    train_loader, valid_loader = prepare_datasets(config, hp_search_bsz=bsz)

    with temp_seed(seed):
        # prepare model
        model = prepare_model(config)
        # create optimizer, scheduler, summary writer, scaler
        optimizer, scheduler, writer, scaler = prepare_osws(config, model, train_loader, hp_search_lr=lr)
        config['optimizer'] = optimizer
        config['scheduler'] = scheduler
        config['writer'] = writer
        config['scaler'] = scaler

        early_stopping = EarlyStopping(logger, patience=opt.patience, measure=opt.measure, verbose=1)
        best_eval_measure = float('inf') if opt.measure == 'loss' else -float('inf')
        for epoch in range(epochs):
            eval_loss, eval_acc = train_epoch(model, config, train_loader, valid_loader, epoch)

            eval_measure = eval_loss if opt.measure == 'loss' else eval_acc
            # early stopping
            if early_stopping.validate(eval_measure, measure=opt.measure):
                break
            if opt.measure == 'loss':
                is_best = eval_measure < best_eval_measure
            else:
                is_best = eval_measure > best_eval_measure
            if is_best:
                best_eval_measure = eval_measure
                early_stopping.reset(best_eval_measure)
            early_stopping.status()

            trial.report(eval_acc, epoch)
            if trial.should_prune():
                raise optuna.TrialPruned()
        return eval_acc
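A driver for this objective is not shown in the snippet; the following is one plausible sketch, assuming `hp_search` and the `gopt` global above are in scope (`parse_args` is a hypothetical stand-in for whatever fills `gopt`). Since the function returns `eval_acc`, the study maximizes.

import optuna

gopt = parse_args()  # hypothetical: populate the global the objective reads

study = optuna.create_study(
    direction='maximize',                  # hp_search returns eval_acc
    pruner=optuna.pruners.MedianPruner(),  # illustrative pruner choice
)
study.optimize(hp_search, n_trials=24)
print(study.best_params)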
Example #2
def hp_search(trial: optuna.Trial,
              model_name: str,
              dataset,
              label_nbr,
              metric_name,
              reference_class,
              device):
    """
    objective function for optuna.study optimizes for epoch number, lr and batch_size

    :param trial: optuna.Trial, trial of optuna, which will optimize for hyperparameters
    :param model_name: name of the pretrained model
    :param dataset: huggingface/nlp dataset object
    :param label_nbr: number of label for the model to output
    :param metric_name: name of the metric to maximize
    :param reference_class: reference class to calculate metrics for
    :param device: device where the training will occur, cuda recommended
    :return: metric after training

    """
    lr = trial.suggest_float("lr", 1e-7, 1e-4, log=True)
    batch_size = trial.suggest_categorical("batch_size", [2, 4, 6])
    epochs = trial.suggest_int("epochs", 1, 5)

    model = MultilabeledSequenceModel(pretrained_model_name=model_name,
                                      label_nbr=label_nbr).to(device)
    optimizer = AdamW(params=model.parameters(), lr=lr)
    for epoch in range(epochs):
        train_epoch(model,
                    optimizer,
                    dataset,
                    batch_size,
                    device)

        labels, preds = evaluate(model,
                                  dataset,
                                  batch_size,
                                  device)

        metric = calculate_metric(metric_name,
                                  labels,
                                  preds,
                                  reference_class)

        trial.report(metric, epoch)

        if trial.should_prune():
            raise optuna.TrialPruned()

    return metric
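Because this objective takes arguments beyond the trial, a study would typically bind them first. A sketch under that assumption; the concrete values below are placeholders, and `dataset` is assumed to be prepared elsewhere.

import functools

import optuna

objective = functools.partial(
    hp_search,
    model_name="bert-base-uncased",  # placeholder pretrained model name
    dataset=dataset,                 # huggingface/nlp dataset built elsewhere
    label_nbr=5,                     # placeholder label count
    metric_name="f1",                # metric passed to calculate_metric
    reference_class=1,
    device="cuda",
)

study = optuna.create_study(direction="maximize")
study.optimize(objective, n_trials=20)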
Example #3
def fn_opt(trial: optuna.Trial) -> float:
    try:
        net_arch = trial.suggest_categorical('net_arch',
                                             ['CnnPolicy', 'LnCnnPolicy'])
        gamma = trial.suggest_categorical(
            'gamma', [0.9, 0.95, 0.98, 0.99, 0.995, 0.999, 0.9999])
        learning_rate = trial.suggest_float('lr', 1e-5, 1, log=True)
        batch_size = trial.suggest_categorical('batch_size',
                                               [16, 32, 64, 128, 256, 512])
        buffer_size = trial.suggest_categorical(
            'buffer_size', [int(1e4), int(1e5), int(1e6)])
        learning_starts = trial.suggest_categorical('learning_starts',
                                                    [0, 25, 50, 75, 100])
        gradient_steps = trial.suggest_categorical('gradient_steps',
                                                   [5, 15, 20, 50, 100, 300])
        ent_coef = trial.suggest_categorical(
            'ent_coef', ['auto', 0.5, 0.1, 0.05, 0.01, 0.0001])

        target_entropy = 'auto'
        if ent_coef == 'auto':
            target_entropy = trial.suggest_categorical(
                'target_entropy', ['auto', -1, -10, -20, -50, -100])

        env = fn_gym.FNGym(0.2)
        model = fn_sac.FNSAC(net_arch,
                             env,
                             gamma=gamma,
                             learning_rate=learning_rate,
                             batch_size=batch_size,
                             buffer_size=buffer_size,
                             learning_starts=learning_starts,
                             gradient_steps=gradient_steps,
                             ent_coef=ent_coef,
                             target_entropy=target_entropy)

        for train_count in range(10):
            model.learn(total_timesteps=200)
            trial.report(env.get_running_reward(), (train_count + 1) * 200)

            if trial.should_prune():
                raise optuna.TrialPruned()
    except KeyboardInterrupt:
        input('Keyboard interrupt. Press Enter to continue.')
        raise ValueError("Exit trial: keyboard interrupt")

    return env.get_running_reward()
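`fn_opt` reports the running reward every 200 timesteps and returns it at the end, so the surrounding study maximizes and can prune early. A sketch, assuming `fn_opt` and its `fn_gym`/`fn_sac` dependencies are importable; the pruner settings are illustrative.

import optuna

study = optuna.create_study(
    direction='maximize',
    pruner=optuna.pruners.MedianPruner(n_warmup_steps=2),  # illustrative
)
study.optimize(fn_opt, n_trials=50)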
Example #4
    def objective(self, trial: optuna.Trial) -> float:
        # set up the search space of the hyperparameters
        k = trial.suggest_int('num_topics', 1, self.max_num_topics)
        a = trial.suggest_categorical('alpha', list(np.arange(0.01, 1, 0.3)) + ['symmetric','asymmetric'])
        b = trial.suggest_categorical('eta', list(np.arange(0.01, 1, 0.3)) + ['symmetric'])
        chunksize = trial.suggest_int('chunksize', 100, 2000, step=100)
        passes = trial.suggest_int('passes', 1, 10, step=2)
        iterations = trial.suggest_int('iterations', 50, 500, step=50)

        # train the model using the hyperparameters suggested by Optuna
        finder = TopicsFinder(self.data, self.num_ngrams, self.addl_stop_words)
        model, cv = finder.fit_model(
            random_state=100,
            eval_every=None,
            chunksize=chunksize,
            passes=passes,
            iterations=iterations,
            num_topics = k,
            alpha = a,
            eta = b
        )
        score = cv.get_coherence()
        # emit progress signal to the slot functions
        if self.progress_signal is not None:
            self.progress_signal.emit({'study':self.studyname,'num_trial':trial.number})

        # report an objective function value for a given step. The reported values are used by the pruners to determine whether this trial should be pruned. 
        trial.report(score, 0)
        # handle pruning based on the intermediate value.
        if trial.should_prune():
            raise optuna.TrialPruned()

        # save a trial info object to a file.
        trial_info = {'trial': trial, 'model': model, 'score': score}
        with open(f'{trial.number}.pickle', 'wb') as fout:
            pickle.dump(trial_info, fout)
        
        return score
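Each trial above pickles its model and coherence score to `<trial.number>.pickle`, so the best artifacts can be recovered once the study finishes. A sketch, assuming `tuner` is an instance of the (unnamed) class that defines this `objective` method; the coherence score is maximized.

import pickle

import optuna

study = optuna.create_study(direction='maximize')
study.optimize(tuner.objective, n_trials=30)  # `tuner` owns the method above

with open(f'{study.best_trial.number}.pickle', 'rb') as fin:
    best = pickle.load(fin)
print(best['score'])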
Example #5
class AllenNLPPruningCallback(EpochCallback):
    """AllenNLP callback to prune unpromising trials.

    See `the example <https://github.com/optuna/optuna/blob/master/
    examples/allennlp/allennlp_simple.py>`__
    if you want to add a pruning callback which observes a metric.

    You can also see the tutorial of our AllenNLP integration on
    `AllenNLP Guide <https://guide.allennlp.org/hyperparameter-optimization>`_.

    .. note::
        When :class:`~optuna.integration.AllenNLPPruningCallback` is instantiated in Python script,
        trial and monitor are mandatory.

        On the other hand, when :class:`~optuna.integration.AllenNLPPruningCallback` is used with
        :class:`~optuna.integration.AllenNLPExecutor`, ``trial`` and ``monitor``
        would be ``None``. :class:`~optuna.integration.AllenNLPExecutor` sets
        environment variables for a study name, trial id, monitor, and storage.
        Then :class:`~optuna.integration.AllenNLPPruningCallback`
        loads them to restore ``trial`` and ``monitor``.

    Args:
        trial:
            A :class:`~optuna.trial.Trial` corresponding to the current evaluation of the
            objective function.
        monitor:
            An evaluation metric for pruning, e.g. ``validation_loss`` or
            ``validation_accuracy``.

    """
    def __init__(
        self,
        trial: Optional[optuna.trial.Trial] = None,
        monitor: Optional[str] = None,
    ):
        _imports.check()

        # compare parsed versions; a plain string comparison is unreliable
        if version.parse(allennlp.__version__) < version.parse("1.0.0"):
            raise ImportError(
                "AllenNLPPruningCallback requires `allennlp`>=1.0.0.")

        # When `AllenNLPPruningCallback` is instantiated in Python script,
        # trial and monitor should not be `None`.
        if trial is not None and monitor is not None:
            self._trial = trial
            self._monitor = monitor

        # When `AllenNLPPruningCallback` is used with `AllenNLPExecutor`,
        # `trial` and `monitor` would be None. `AllenNLPExecutor` sets information
        # for a study name, trial id, monitor, and storage in environment variables.
        else:
            environment_variables = _get_environment_variables_for_trial()
            study_name = environment_variables["study_name"]
            trial_id = environment_variables["trial_id"]
            monitor = environment_variables["monitor"]
            storage = environment_variables["storage"]

            if study_name is None or trial_id is None or monitor is None or storage is None:
                message = (
                    "Failed to load study. Perhaps you attempted to use `AllenNLPPruningCallback`"
                    " without `AllenNLPExecutor`. If you want to use a callback"
                    " without an executor, you have to instantiate a callback with"
                    " `trial` and `monitor`. Please see the Optuna example: https://github.com/"
                    "optuna/optuna/blob/master/examples/allennlp/allennlp_simple.py."
                )
                raise RuntimeError(message)

            else:
                # If `storage` is empty even though `study_name`, `trial_id`,
                # and `monitor` are not `None`, the user is attempting to use
                # `AllenNLPPruningCallback` with `AllenNLPExecutor` and in-memory storage.
                # `AllenNLPPruningCallback` needs an RDB or Redis storage to work.
                if storage == "":
                    message = (
                        "If you want to use AllenNLPExecutor and AllenNLPPruningCallback,"
                        " you have to use RDB or Redis storage.")
                    raise RuntimeError(message)

                study = load_study(study_name,
                                   storage,
                                   pruner=_create_pruner())
                self._trial = Trial(study, int(trial_id))
                self._monitor = monitor

    def __call__(
        self,
        trainer: "allennlp.training.GradientDescentTrainer",
        metrics: Dict[str, Any],
        epoch: int,
        is_master: bool,
    ) -> None:
        value = metrics.get(self._monitor)
        if value is None:
            return

        self._trial.report(float(value), epoch)
        if self._trial.should_prune():
            raise optuna.TrialPruned()
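In the script-based mode the callback is built with the trial and a metric name and handed to the trainer. A schematic sketch, assuming an AllenNLP 1.x `GradientDescentTrainer` that accepts `epoch_callbacks`; `build_components` is a hypothetical helper for the model, optimizer, and data loaders.

import optuna
from allennlp.training import GradientDescentTrainer
from optuna.integration import AllenNLPPruningCallback

def objective(trial: optuna.Trial) -> float:
    lr = trial.suggest_float("lr", 1e-5, 1e-2, log=True)
    model, optimizer, train_loader, val_loader = build_components(lr)  # hypothetical
    trainer = GradientDescentTrainer(
        model=model,
        optimizer=optimizer,
        data_loader=train_loader,
        validation_data_loader=val_loader,
        num_epochs=10,
        epoch_callbacks=[AllenNLPPruningCallback(trial, "validation_accuracy")],
    )
    metrics = trainer.train()
    return metrics["best_validation_accuracy"]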
Example #6
def objective(trial: opt.Trial):
    # only test dropping socio-economic factors
    drop_sozioeco = trial.suggest_categorical("drop_eco", [True, False])
    # the rest of the preprocessing keeps default values

    # categorical encoding; try identical encoders for all columns (for now)
    enc_name = trial.suggest_categorical("encoder",
                                         ["one-hot", "woe", "binary"])
    enc = encoders[enc_name]

    clf_name = trial.suggest_categorical("classifier", ["rf", "xt", "gb"])

    if clf_name == "rf":
        clf = RandomForestClassifier(
            n_estimators=trial.suggest_categorical(
                "rf_nest", [50, 100, 150, 200, 300, 500]),
            max_depth=trial.suggest_int("rf_max_depth", 3, 15),
            min_samples_split=trial.suggest_int("rf_min_split", 2, 5),
            min_samples_leaf=trial.suggest_int("rf_min_leaf", 1, 8),
            bootstrap=trial.suggest_categorical("rf_bootstrap", [True, False]),
            class_weight=trial.suggest_categorical(
                "rf_cl_weight", ["balanced", "balanced_subsample"]),
        )
    elif clf_name == "xt":
        clf = ExtraTreesClassifier(
            n_estimators=trial.suggest_categorical(
                "xt_nest", [50, 100, 150, 200, 300, 500]),
            max_depth=trial.suggest_int("xt_max_depth", 3, 15),
            min_samples_split=trial.suggest_int("xt_min_split", 2, 5),
            min_samples_leaf=trial.suggest_int("xt_min_leaf", 1, 8),
            bootstrap=trial.suggest_categorical("xt_bootstrap", [True, False]),
            class_weight=trial.suggest_categorical(
                "xt_cl_weight", ["balanced", "balanced_subsample"]),
        )
    elif clf_name == "gb":
        clf = GradientBoostingClassifier(
            learning_rate=trial.suggest_float("gb_lr", 0.01, 0.3),
            subsample=trial.suggest_float("gb_subsample", 0.5, 1.0),
            n_estimators=trial.suggest_categorical(
                "gb_nest", [50, 100, 150, 200, 300, 500]),
            max_depth=trial.suggest_int("gb_max_depth", 2, 7),
            min_samples_split=trial.suggest_int("gb_min_split", 2, 5),
            min_samples_leaf=trial.suggest_int("gb_min_leaf", 1, 8),
            n_iter_no_change=20,
        )

    # stratified 5-fold cross-validation; on every fold the average
    # precision is calculated, and Optuna may decide to prune this trial
    scores = []  # average precision per fold
    kf = StratifiedKFold(n_splits=5, random_state=42, shuffle=True)
    for i, (train_idx, val_idx) in enumerate(kf.split(x, y)):
        x_train = x.loc[train_idx]
        y_train = y.loc[train_idx]

        x_train = enc.fit_transform(x_train, y_train)
        clf.fit(x_train, y_train)

        x_val = x.loc[val_idx]
        x_val = enc.transform(x_val)
        y_val_prob = clf.predict_proba(x_val)[:, 1]
        y_val_true = y.loc[val_idx]

        avg_pre = me.average_precision_score(y_val_true, y_val_prob)
        scores.append(avg_pre)

        # after three folds, allow Optuna to prune
        if i >= 3:
            trial.report(np.mean(scores), i)
            if trial.should_prune():
                raise opt.exceptions.TrialPruned()

    return np.mean(scores)
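A driver for this objective; a sketch, assuming `x`, `y`, and the `encoders` mapping are defined at module level as the code implies. The pruner only matters from the fourth fold onward, since that is when the objective starts reporting.

import optuna as opt

study = opt.create_study(
    direction="maximize",                               # mean average precision
    pruner=opt.pruners.MedianPruner(n_warmup_steps=3),  # illustrative
)
study.optimize(objective, n_trials=100)
print(study.best_trial.params)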
Example #7
class AllenNLPPruningCallback(TrainerCallback):
    """AllenNLP callback to prune unpromising trials.

    See `the example <https://github.com/optuna/optuna-examples/tree/main/
    allennlp/allennlp_simple.py>`__
    if you want to add a pruning callback which observes a metric.

    You can also see the tutorial of our AllenNLP integration on
    `AllenNLP Guide <https://guide.allennlp.org/hyperparameter-optimization>`_.

    .. note::
        When :class:`~optuna.integration.AllenNLPPruningCallback` is instantiated in Python script,
        trial and monitor are mandatory.

        On the other hand, when :class:`~optuna.integration.AllenNLPPruningCallback` is used with
        :class:`~optuna.integration.AllenNLPExecutor`, ``trial`` and ``monitor``
        would be :obj:`None`. :class:`~optuna.integration.AllenNLPExecutor` sets
        environment variables for a study name, trial id, monitor, and storage.
        Then :class:`~optuna.integration.AllenNLPPruningCallback`
        loads them to restore ``trial`` and ``monitor``.

    .. note::
        Currently, all built-in pruners are supported except for
        :class:`~optuna.pruners.PatientPruner`.

    Args:
        trial:
            A :class:`~optuna.trial.Trial` corresponding to the current evaluation of the
            objective function.
        monitor:
            An evaluation metric for pruning, e.g. ``validation_loss`` or
            ``validation_accuracy``.

    """

    def __init__(
        self,
        trial: Optional[optuna.trial.Trial] = None,
        monitor: Optional[str] = None,
    ):
        _imports.check()

        if version.parse(allennlp.__version__) < version.parse("2.0.0"):
            raise ImportError(
                "`AllenNLPPruningCallback` requires AllenNLP>=v2.0.0."
                "If you want to use a callback with an old version of AllenNLP, "
                "please install Optuna v2.5.0 by executing `pip install 'optuna==2.5.0'`."
            )

        # When `AllenNLPPruningCallback` is instantiated in Python script,
        # trial and monitor should not be `None`.
        if trial is not None and monitor is not None:
            self._trial = trial
            self._monitor = monitor

        # When `AllenNLPPruningCallback` is used with `AllenNLPExecutor`,
        # `trial` and `monitor` would be None. `AllenNLPExecutor` sets information
        # for a study name, trial id, monitor, and storage in environment variables.
        else:
            environment_variables = _get_environment_variables_for_trial()
            study_name = environment_variables["study_name"]
            trial_id = environment_variables["trial_id"]
            monitor = environment_variables["monitor"]
            storage = environment_variables["storage"]

            if study_name is None or trial_id is None or monitor is None or storage is None:
                message = (
                    "Failed to load study. Perhaps you attempted to use `AllenNLPPruningCallback`"
                    " without `AllenNLPExecutor`. If you want to use a callback"
                    " without an executor, you have to instantiate a callback with"
                    " `trial` and `monitor`. Please see the Optuna example: https://github.com/"
                    "optuna/optuna-examples/tree/main/allennlp/allennlp_simple.py."
                )
                raise RuntimeError(message)

            else:
                # If `storage` is empty even though `study_name`, `trial_id`,
                # and `monitor` are not `None`, the user is attempting to use
                # `AllenNLPPruningCallback` with `AllenNLPExecutor` and in-memory storage.
                # `AllenNLPPruningCallback` needs an RDB or Redis storage to work.
                if storage == "":
                    message = (
                        "If you want to use AllenNLPExecutor and AllenNLPPruningCallback,"
                        " you have to use RDB or Redis storage."
                    )
                    raise RuntimeError(message)

                study = load_study(study_name, storage, pruner=_create_pruner())
                self._trial = Trial(study, int(trial_id))
                self._monitor = monitor

    def on_epoch(
        self,
        trainer: "GradientDescentTrainer",
        metrics: Dict[str, Any],
        epoch: int,
        is_primary: bool = True,
        **kwargs: Any,
    ) -> None:
        """Check if a training reaches saturation.

        Args:
            trainer:
                AllenNLP's trainer
            metrics:
                Dictionary of metrics.
            epoch:
                Number of current epoch.
            is_primary:
                A flag for AllenNLP internal.

        """
        if not is_primary:
            return None

        value = metrics.get(self._monitor)
        if value is None:
            return

        self._trial.report(float(value), epoch)
        if self._trial.should_prune():
            raise optuna.TrialPruned()
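The executor mode described in the docstring sets the environment variables the callback reads back. A schematic sketch, assuming a Jsonnet training config at `config.jsonnet` whose trainer registers this callback and reads the suggested values via `std.extVar`; note the RDB storage, which this mode requires.

import optuna
from optuna.integration import AllenNLPExecutor

def objective(trial: optuna.Trial) -> float:
    trial.suggest_float("lr", 1e-5, 1e-2, log=True)  # consumed inside config.jsonnet
    executor = AllenNLPExecutor(
        trial,
        config_file="config.jsonnet",  # assumed training config
        serialization_dir=f"result/{trial.number}",
        metrics="best_validation_accuracy",
    )
    return executor.run()

study = optuna.create_study(
    direction="maximize",
    study_name="allennlp_example",
    storage="sqlite:///allennlp.db",  # RDB storage, as required above
)
study.optimize(objective, n_trials=10)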
Example #8
def hp_search_optuna(trial: optuna.Trial):

    global gargs
    args = gargs
    # set config
    config = load_config(args)
    config['args'] = args
    logger.info("%s", config)

    # set path
    set_path(config)

    # create accelerator
    accelerator = Accelerator()
    config['accelerator'] = accelerator
    args.device = accelerator.device

    # set search spaces
    lr = trial.suggest_float('lr', 1e-5, 1e-3, log=True)
    bsz = trial.suggest_categorical('batch_size', [8, 16, 32, 64])
    seed = trial.suggest_int('seed', 17, 42)
    epochs = trial.suggest_int('epochs', 1, args.epoch)

    # prepare train, valid dataset
    train_loader, valid_loader = prepare_datasets(config, hp_search_bsz=bsz)

    with temp_seed(seed):
        # prepare model
        model = prepare_model(config)

        # create optimizer, scheduler, summary writer
        model, optimizer, scheduler, writer = prepare_others(config,
                                                             model,
                                                             train_loader,
                                                             lr=lr)
        # create secondary optimizer, scheduler
        _, optimizer_2nd, scheduler_2nd, _ = prepare_others(
            config, model, train_loader, lr=args.bert_lr_during_freezing)
        train_loader = accelerator.prepare(train_loader)
        valid_loader = accelerator.prepare(valid_loader)

        config['optimizer'] = optimizer
        config['scheduler'] = scheduler
        config['optimizer_2nd'] = optimizer_2nd
        config['scheduler_2nd'] = scheduler_2nd
        config['writer'] = writer

        total_batch_size = bsz * accelerator.num_processes * args.gradient_accumulation_steps
        logger.info("***** Running training *****")
        logger.info(f"  Num batches = {len(train_loader)}")
        logger.info(f"  Num Epochs = {epochs}")
        logger.info(
            f"  Instantaneous batch size per device = {bsz}")
        logger.info(
            f"  Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}"
        )
        logger.info(
            f"  Gradient Accumulation steps = {args.gradient_accumulation_steps}"
        )
        logger.info(f"  Total optimization steps = {args.max_train_steps}")

        early_stopping = EarlyStopping(logger,
                                       patience=args.patience,
                                       measure='f1',
                                       verbose=1)
        best_eval_f1 = -float('inf')
        for epoch in range(epochs):
            eval_loss, eval_f1, best_eval_f1 = train_epoch(
                model, config, train_loader, valid_loader, epoch, best_eval_f1)

            # early stopping
            if early_stopping.validate(eval_f1, measure='f1'): break
            if eval_f1 == best_eval_f1:
                early_stopping.reset(best_eval_f1)
            early_stopping.status()

            trial.report(eval_f1, epoch)
            if trial.should_prune():
                raise optuna.TrialPruned()
        return eval_f1
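As in Example #1, the fixed arguments arrive through a global. A minimal driver sketch, assuming `gargs` is populated before the study starts (`parse_args` is a hypothetical stand-in); the objective returns `eval_f1`, so the study maximizes.

import optuna

gargs = parse_args()  # hypothetical: populate the global the objective reads

study = optuna.create_study(
    direction='maximize',
    pruner=optuna.pruners.MedianPruner(),  # illustrative
)
study.optimize(hp_search_optuna, n_trials=20)
print(study.best_params)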
Example #9
    def __call__(self, trial: optuna.Trial):
        torch.manual_seed(self.seed)
        random.seed(self.seed)

        train_data_loader, val_data_loader = datasets2data_loaders(
            self.train_set, self.val_set, test_set=None, num_workers=1)

        epochs = self.hydra_cfg['parameters']['epochs']

        # Calculate an objective value by using the extra arguments.
        model = SupervisedFastText(V=self.dictionary.size_total_vocab,
                                   num_classes=len(
                                       self.dictionary.label_vocab),
                                   embedding_dim=self.dim,
                                   pretrained_emb=self.pretrained_word_vectors,
                                   freeze=self.is_freeze,
                                   pooling=self.pooling).to(self.device)

        initial_lr = trial.suggest_float(
            'lr', self.hydra_cfg['optuna']['lr_min'],
            self.hydra_cfg['optuna']['lr_max'], log=True)

        optimizer = optim.SGD(model.parameters(), lr=initial_lr)

        # parameters for update learning rate
        num_tokens = self.dictionary.num_words

        learning_rate_schedule = self.hydra_cfg['parameters']['lr_update_rate']
        total_num_processed_tokens_in_training = epochs * num_tokens
        num_processed_tokens = 0
        local_processed_tokens = 0
        N = len(train_data_loader.dataset)

        best_val_loss = np.finfo(0.).max
        best_val_acc = np.finfo(0.).min
        save_fname = os.getcwd() + '/' + '{}.pt'.format(
            trial.number)  # file name to store best model's weights

        for epoch in range(epochs):
            # begin training phase
            sum_loss = 0.
            correct = 0
            model.train()

            for sentence, label, n_tokens in train_data_loader:
                sentence, label = sentence.to(self.device), label.to(
                    self.device)
                optimizer.zero_grad()
                output = model(sentence)
                loss = F.nll_loss(output, label)
                loss.backward()
                optimizer.step()
                pred = output.argmax(1, keepdim=False)
                correct += pred.eq(label).sum().item()
                sum_loss += loss.item()

                # update learning rate
                # ref: https://github.com/facebookresearch/fastText/blob/6d7c77cd33b23eec26198fdfe10419476b5364c7/src/fasttext.cc#L656
                local_processed_tokens += n_tokens.item()
                if local_processed_tokens > learning_rate_schedule:
                    num_processed_tokens += local_processed_tokens
                    local_processed_tokens = 0
                    progress = num_processed_tokens / total_num_processed_tokens_in_training
                    optimizer.param_groups[0]['lr'] = initial_lr * (1. -
                                                                    progress)

            train_loss = sum_loss / N
            train_acc = correct / N
            # end training phase

            val_loss, val_acc = evaluation(model, self.device, val_data_loader)

            progress = num_processed_tokens / total_num_processed_tokens_in_training  # approximated progress
            self.logger.info(
                '\rProgress: {:.1f}% Avg. train loss: {:.4f}, train acc: {:.1f}%, '
                'Avg. val loss: {:.4f}, val acc: {:.1f}%'.format(
                    progress * 100., train_loss, train_acc * 100, val_loss,
                    val_acc * 100))

            if self.metric == 'loss':
                trial.report(val_loss, epoch)
            else:
                trial.report(val_acc, epoch)

            if trial.should_prune():
                raise optuna.exceptions.TrialPruned()

            # validation
            is_saved_model = False
            if self.metric == 'loss':
                if best_val_loss > val_loss:
                    best_val_loss = val_loss
                    best_val_acc = val_acc
                    is_saved_model = True
            else:
                if best_val_acc < val_acc:
                    best_val_loss = val_loss
                    best_val_acc = val_acc
                    is_saved_model = True

            if is_saved_model:
                torch.save(model.state_dict(), save_fname)

        trial.set_user_attr('val_loss', best_val_loss)
        trial.set_user_attr('val_acc', best_val_acc)
        trial.set_user_attr('model_path', save_fname)

        if self.metric == 'loss':
            return best_val_loss
        else:
            return best_val_acc
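Because the callable stores the best loss, accuracy, and checkpoint path as user attributes, they can be read back from the finished study. A sketch, assuming the class above is named `Objective` (the snippet does not show the class header) and that `objective_kwargs` holds its constructor arguments.

import optuna

objective = Objective(**objective_kwargs)  # hypothetical construction
study = optuna.create_study(direction='minimize')  # for metric == 'loss'
study.optimize(objective, n_trials=30)

best = study.best_trial
print(best.user_attrs['val_loss'],
      best.user_attrs['val_acc'],
      best.user_attrs['model_path'])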