def test_cast_float() -> None:
    """Integer bounds to ``suggest_float`` must still yield floats the callback accepts."""

    def objective(trial: optuna.trial.Trial) -> float:
        # Bounds are given as ints on purpose: suggest_float must cast to float.
        x = trial.suggest_float("x", 1, 2)
        y = trial.suggest_float("y", 1, 2, log=True)
        assert isinstance(x, float)
        assert isinstance(y, float)
        return x + y

    dirname = tempfile.mkdtemp()
    metric_name = "target"
    study_name = "test_tensorboard_integration"
    tbcallback = TensorBoardCallback(dirname, metric_name)
    study = optuna.create_study(study_name=study_name)
    try:
        study.optimize(objective, n_trials=1, callbacks=[tbcallback])
    finally:
        # Remove the temporary log directory so the test leaves no residue
        # (consistent with the cleanup done in test_study_name).
        shutil.rmtree(dirname)
def test_study_name() -> None:
    """The callback must write the metric under the configured name, one event per trial."""
    dirname = tempfile.mkdtemp()
    metric_name = "target"
    study_name = "test_tensorboard_integration"
    tbcallback = TensorBoardCallback(dirname, metric_name)
    study = optuna.create_study(study_name=study_name)
    study.optimize(_objective_func, n_trials=1, callbacks=[tbcallback])

    # Events for trial i are written under "<dirname>/trial-i".
    event_acc = EventAccumulator(os.path.join(dirname, "trial-0"))
    event_acc.Reload()
    try:
        # Exactly one tensor event is expected for the single completed trial.
        assert len(event_acc.Tensors("target")) == 1
    finally:
        # Always remove the temporary log directory, even on assertion failure.
        shutil.rmtree(dirname)
def test_experimental_warning() -> None:
    """Instantiating TensorBoardCallback should raise optuna's ExperimentalWarning."""
    with pytest.warns(optuna.exceptions.ExperimentalWarning):
        # The constructor arguments are irrelevant here; only the warning matters.
        TensorBoardCallback(dirname="", metric_name="")
    # NOTE(review): the opening of this tf.keras.Sequential([...]) model (and the
    # enclosing train_test_model definition) lies outside the visible chunk.
    tf.keras.layers.Dense(10, activation=tf.nn.softmax),
])
model.compile(optimizer=optimizer, loss="sparse_categorical_crossentropy", metrics=["accuracy"])
model.fit(
    x_train, y_train, epochs=1
)  # Run with 1 epoch to speed things up for demo purposes
_, accuracy = model.evaluate(x_test, y_test)
return accuracy


def objective(trial: optuna.trial.Trial) -> float:
    """Sample hyperparameters, train once, and return the test accuracy to maximize."""
    num_units = trial.suggest_int("NUM_UNITS", 16, 32)
    dropout_rate = trial.suggest_float("DROPOUT_RATE", 0.1, 0.2)
    optimizer = trial.suggest_categorical("OPTIMIZER", ["sgd", "adam"])
    accuracy = train_test_model(num_units, dropout_rate, optimizer)  # type: ignore
    return accuracy


# Log each trial's hyperparameters and accuracy to TensorBoard under "logs/".
tensorboard_callback = TensorBoardCallback("logs/", metric_name="accuracy")
study = optuna.create_study(direction="maximize")
study.optimize(objective, n_trials=10, timeout=600, callbacks=[tensorboard_callback])