def test_should_prune() -> None:
    """A reported value plus an always-prune pruner must yield should_prune() == True."""
    study = create_study(pruner=DeterministicPruner(True))
    trial = Trial(study, study._storage.create_new_trial(study._study_id))
    trial.report(1, 1)
    assert trial.should_prune()
def test_keras_pruning_callback_monitor_is_invalid() -> None:
    """An unknown monitor key should only emit a UserWarning, not fail the epoch hook."""
    study = optuna.create_study(pruner=DeterministicPruner(True))
    callback = KerasPruningCallback(study.ask(), "InvalidMonitor")
    with pytest.warns(UserWarning):
        callback.on_epoch_end(0, {"loss": 1.0})
def test_report_and_should_prune(storage_mode: str, comm: CommunicatorBase, is_pruning: bool) -> None:
    """A value reported through a ChainerMN trial reaches the shared study's pruner."""
    with MultiNodeStorageSupplier(storage_mode, comm) as storage:
        shared_study = TestChainerMNStudy._create_shared_study(
            storage, comm, DeterministicPruner(is_pruning)
        )
        mn_trial = _create_new_chainermn_trial(shared_study, comm)
        mn_trial.report(1.0, 0)
        # The pruning verdict must match the deterministic pruner's setting.
        assert mn_trial.should_prune() == is_pruning
def test_catboost_pruning_callback_init_param(metric: str, eval_set_index: "int | None") -> None:
    """Exercise CatBoostPruningCallback both with and without an explicit eval set index.

    Fix: the parameter was annotated ``int`` but the body branches on
    ``eval_set_index is None`` (the parametrization passes ``None``), so the
    honest type is ``Optional[int]``; written as a string annotation to avoid
    new imports.
    """

    def objective(trial: optuna.trial.Trial) -> float:
        train_x = np.asarray([[1.0], [2.0]])
        train_y = np.asarray([[1.0], [0.0]])
        valid_x = np.asarray([[1.0], [2.0]])
        valid_y = np.asarray([[1.0], [0.0]])

        if eval_set_index is None:
            # Single eval set: let the callback use its default index.
            eval_set = [(valid_x, valid_y)]
            pruning_callback = CatBoostPruningCallback(trial, metric)
        else:
            # Two eval sets: target one explicitly via eval_set_index.
            eval_set = [(valid_x, valid_y), (valid_x, valid_y)]
            pruning_callback = CatBoostPruningCallback(trial, metric, eval_set_index)

        param = {
            "objective": "Logloss",
            "eval_metric": metric,
        }
        gbm = cb.CatBoostClassifier(**param)
        gbm.fit(
            train_x,
            train_y,
            eval_set=eval_set,
            verbose=0,
            callbacks=[pruning_callback],
        )
        # Invoke pruning manually.
        pruning_callback.check_pruned()
        return 1.0

    study = optuna.create_study(pruner=DeterministicPruner(True))
    study.optimize(objective, n_trials=1)
    assert study.trials[0].state == optuna.trial.TrialState.PRUNED

    study = optuna.create_study(pruner=DeterministicPruner(False))
    study.optimize(objective, n_trials=1)
    assert study.trials[0].state == optuna.trial.TrialState.COMPLETE
    assert study.trials[0].value == 1.0
def test_should_prune(storage_mode: str, is_pruning: bool) -> None:
    """Only rank 0 creates the study; every rank must observe the same verdict."""
    with StorageSupplier(storage_mode) as storage:
        if dist.get_rank() == 0:  # type: ignore
            study = optuna.create_study(
                storage=storage, pruner=DeterministicPruner(is_pruning)
            )
            trial = TorchDistributedTrial(study.ask())
        else:
            # Non-zero ranks participate without owning a real trial.
            trial = TorchDistributedTrial(None)
        trial.report(1, 0)
        assert trial.should_prune() == is_pruning
def test_xgboost_pruning_callback_call() -> None:
    """after_iteration raises TrialPruned exactly when the pruner is active."""
    # Deactivated pruner: the callback must pass through silently.
    study = optuna.create_study(pruner=DeterministicPruner(False))
    callback = XGBoostPruningCallback(study.ask(), "validation-logloss")
    callback.after_iteration(
        model=None, epoch=1, evals_log={"validation": OrderedDict({"logloss": [1.0]})}
    )

    # Activated pruner: the same call must raise.
    study = optuna.create_study(pruner=DeterministicPruner(True))
    callback = XGBoostPruningCallback(study.ask(), "validation-logloss")
    with pytest.raises(optuna.TrialPruned):
        callback.after_iteration(
            model=None, epoch=1, evals_log={"validation": OrderedDict({"logloss": [1.0]})}
        )
def test_keras_pruning_callback_observation_isnan() -> None:
    """With an always-prune pruner, both a finite and a NaN loss lead to pruning."""
    study = optuna.create_study(pruner=DeterministicPruner(True))
    callback = KerasPruningCallback(study.ask(), "loss")
    for reported in (1.0, float("nan")):
        with pytest.raises(optuna.TrialPruned):
            callback.on_epoch_end(0, {"loss": reported})
def test_xgboost_pruning_callback_cv() -> None:
    """xgb.cv with the pruning callback prunes or completes per the pruner."""

    def objective(trial: optuna.trial.Trial) -> float:
        dtrain = xgb.DMatrix(np.ones((2, 1)), label=[1.0, 1.0])
        params = {
            "objective": "binary:logistic",
        }
        callback = optuna.integration.XGBoostPruningCallback(trial, "test-logloss")
        xgb.cv(params, dtrain, callbacks=[callback], nfold=2)
        return 1.0

    pruned = optuna.create_study(pruner=DeterministicPruner(True))
    pruned.optimize(objective, n_trials=1)
    assert pruned.trials[0].state == optuna.trial.TrialState.PRUNED

    completed = optuna.create_study(pruner=DeterministicPruner(False))
    completed.optimize(objective, n_trials=1)
    assert completed.trials[0].state == optuna.trial.TrialState.COMPLETE
    assert completed.trials[0].value == 1.0
def test_pytorch_lightning_pruning_callback() -> None:
    """End-to-end check that the Lightning callback prunes or completes trials."""

    def objective(trial: optuna.trial.Trial) -> float:
        trainer = pl.Trainer(
            max_epochs=2,
            enable_checkpointing=False,
            callbacks=[PyTorchLightningPruningCallback(trial, monitor="accuracy")],
        )
        trainer.fit(Model())
        return 1.0

    study = optuna.create_study(pruner=DeterministicPruner(True))
    study.optimize(objective, n_trials=1)
    assert study.trials[0].state == optuna.trial.TrialState.PRUNED

    study = optuna.create_study(pruner=DeterministicPruner(False))
    study.optimize(objective, n_trials=1)
    assert study.trials[0].state == optuna.trial.TrialState.COMPLETE
    assert study.trials[0].value == 1.0
def test_chainer_pruning_extension_observation_nan() -> None:
    """A NaN observation must trigger pruning once the observation is deemed present.

    Fix: ``namedtuple("MockUpdater", ("epoch"))`` passed the bare string
    ``"epoch"`` — the parentheses do not make a tuple — and only worked because
    namedtuple also accepts a field-name string. Use a real one-element tuple.
    """
    study = optuna.create_study(pruner=DeterministicPruner(True))
    trial = study.ask()
    extension = ChainerPruningExtension(trial, "main/loss", (1, "epoch"))

    MockTrainer = namedtuple("MockTrainer", ("observation", "updater"))
    MockUpdater = namedtuple("MockUpdater", ("epoch",))
    trainer = MockTrainer(observation={"main/loss": float("nan")}, updater=MockUpdater(1))

    # Force the extension to treat the observation as present so the NaN value
    # alone drives the pruning decision.
    with patch.object(extension, "_observation_exists", Mock(return_value=True)) as mock:
        with pytest.raises(optuna.TrialPruned):
            extension(trainer)  # type: ignore
        assert mock.call_count == 1
def test_tensorflow_pruning_hook() -> None:
    """TensorFlowPruningHook prunes, completes, and maps a missing metric to NaN."""

    def objective(trial: optuna.trial.Trial) -> float:
        estimator = tf.estimator.DNNClassifier(
            hidden_units=[],
            feature_columns=[tf.feature_column.numeric_column(key="x", shape=[20])],
            model_dir=None,
            n_classes=2,
            config=tf.estimator.RunConfig(save_summary_steps=10, save_checkpoints_steps=10),
        )
        hook = TensorFlowPruningHook(
            trial=trial, estimator=estimator, metric="accuracy", run_every_steps=5
        )
        train_spec = tf.estimator.TrainSpec(
            input_fn=fixed_value_input_fn, max_steps=100, hooks=[hook]
        )
        eval_spec = tf.estimator.EvalSpec(input_fn=fixed_value_input_fn, steps=1, hooks=[])
        tf.estimator.train_and_evaluate(
            estimator=estimator, train_spec=train_spec, eval_spec=eval_spec
        )
        return 1.0

    study = optuna.create_study(pruner=DeterministicPruner(True), direction="maximize")
    study.optimize(objective, n_trials=1)
    assert study.trials[0].state == optuna.trial.TrialState.PRUNED

    study = optuna.create_study(pruner=DeterministicPruner(False), direction="maximize")
    study.optimize(objective, n_trials=1)
    assert study.trials[0].state == optuna.trial.TrialState.COMPLETE
    assert study.trials[0].value == 1.0

    # Check if eval_metrics returns the None value.
    value = OrderedDict([(10, {"accuracy": None})])
    with patch("optuna.integration.tensorflow.read_eval_metrics", return_value=value) as mock_obj:
        study = optuna.create_study(pruner=DeterministicPruner(True), direction="maximize")
        study.optimize(objective, n_trials=1)
        assert mock_obj.call_count == 1
        # A None metric is recorded as NaN and the trial is still pruned.
        assert math.isnan(study.trials[0].intermediate_values[10])
        assert study.trials[0].state == optuna.trial.TrialState.PRUNED
def test_pytorch_lightning_pruning_callback_monitor_is_invalid() -> None:
    """A bogus monitor key should raise a UserWarning rather than an error."""
    study = optuna.create_study(pruner=DeterministicPruner(True))
    callback = PyTorchLightningPruningCallback(study.ask(), "InvalidMonitor")
    trainer = pl.Trainer(
        max_epochs=1,
        enable_checkpointing=False,
        callbacks=[callback],
    )
    with pytest.warns(UserWarning):
        callback.on_validation_end(trainer, Model())
def test_skorch_pruning_callback() -> None:
    """SkorchPruningCallback prunes or completes a trial as the pruner dictates."""
    X, y = torch.zeros(5, 4), torch.zeros(5, dtype=torch.long)

    def objective(trial: optuna.trial.Trial) -> float:
        net = skorch.NeuralNetClassifier(
            ClassifierModule,
            max_epochs=10,
            lr=0.02,
            callbacks=[SkorchPruningCallback(trial, "valid_acc")],
        )
        net.fit(X, y)
        return 1.0

    # Run once with pruning active, once without.
    for is_pruning, expected_state in (
        (True, optuna.trial.TrialState.PRUNED),
        (False, optuna.trial.TrialState.COMPLETE),
    ):
        study = optuna.create_study(pruner=DeterministicPruner(is_pruning))
        study.optimize(objective, n_trials=1)
        assert study.trials[0].state == expected_state
    # The last (completed) study must carry the objective's return value.
    assert study.trials[0].value == 1.0
def test_pytorch_lightning_pruning_callback_ddp_unsupported_storage(
    storage_mode: str,
) -> None:
    """DDP training on an unsupported storage backend must raise ValueError."""

    def objective(trial: optuna.trial.Trial) -> float:
        trainer = pl.Trainer(
            max_epochs=1,
            accelerator="ddp_cpu",
            num_processes=2,
            enable_checkpointing=False,
            callbacks=[PyTorchLightningPruningCallback(trial, monitor="accuracy")],
        )
        trainer.fit(ModelDDP())
        return 1.0

    with StorageSupplier(storage_mode) as storage:
        study = optuna.create_study(storage=storage, pruner=DeterministicPruner(True))
        with pytest.raises(ValueError):
            study.optimize(objective, n_trials=1)