def testHyperopt(self):
    from ray.tune.search.hyperopt import HyperOptSearch

    searcher = HyperOptSearch(space=self.config, metric=self.metric_name, mode="max")
    self._save(searcher)

    searcher = HyperOptSearch(space=self.config, metric=self.metric_name, mode="max")
    self._restore(searcher)
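# A minimal standalone sketch of what the _save/_restore helpers above exercise:
# the Searcher API can persist the searcher's internal hyperopt state to a file
# and reload it later. The search space and checkpoint path here are
# illustrative only, not taken from the test suite.
from hyperopt import hp
from ray.tune.search.hyperopt import HyperOptSearch

sketch_searcher = HyperOptSearch(
    {"x": hp.uniform("x", 0, 1)}, metric="loss", mode="min"
)
sketch_searcher.save("/tmp/hyperopt-searcher.pkl")

restored_searcher = HyperOptSearch(
    {"x": hp.uniform("x", 0, 1)}, metric="loss", mode="min"
)
restored_searcher.restore("/tmp/hyperopt-searcher.pkl")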
def testConvergenceHyperopt(self):
    from ray.tune.search.hyperopt import HyperOptSearch

    np.random.seed(0)
    searcher = HyperOptSearch(random_state_seed=1234)
    analysis = self._testConvergence(searcher, patience=50, top=5)

    assert math.isclose(analysis.best_config["x"], 0, abs_tol=1e-2)
def run_hyperopt_tune(config_dict=config_space, smoke_test=False):
    algo = HyperOptSearch(space=config_dict, metric="mean_loss", mode="min")
    algo = ConcurrencyLimiter(algo, max_concurrent=4)
    scheduler = AsyncHyperBandScheduler()
    analysis = tune.run(
        easy_objective,
        metric="mean_loss",
        mode="min",
        search_alg=algo,
        scheduler=scheduler,
        num_samples=10 if smoke_test else 100,
    )

    print("Best hyperparameters found were: ", analysis.best_config)
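# Example invocation (a sketch: it assumes Ray is available locally and that
# easy_objective and config_space are defined as in the full example script):
import ray

if __name__ == "__main__":
    ray.init(num_cpus=4)
    run_hyperopt_tune(smoke_test=True)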
def testRemoteRunWithSearcher(self):
    def train(config, reporter):
        for i in range(100):
            reporter(timesteps_total=i)

    analysis = run(
        train,
        search_alg=HyperOptSearch(),
        config={"a": choice(["a", "b"])},
        metric="timesteps_total",
        mode="max",
        _remote=True,
    )
    [trial] = analysis.trials
    self.assertEqual(trial.status, Trial.TERMINATED)
    self.assertEqual(trial.last_result[TIMESTEPS_TOTAL], 99)
def testHyperopt(self):
    from ray.tune.search.hyperopt import HyperOptSearch

    out = tune.run(
        _invalid_objective,  # At least one nan, inf, -inf and float
        search_alg=HyperOptSearch(random_state_seed=1234),
        config=self.config,
        metric="_metric",
        mode="max",
        num_samples=8,
        reuse_actors=False,
    )
    best_trial = out.best_trial
    self.assertLessEqual(best_trial.config["report"], 2.0)
def set_basic_conf(self):
    space = {
        "x": hp.uniform("x", 0, 10),
        "y": hp.uniform("y", -10, 10),
        "z": hp.uniform("z", -10, 0),
    }

    def cost(space, reporter):
        loss = space["x"] ** 2 + space["y"] ** 2 + space["z"] ** 2
        reporter(loss=loss)

    search_alg = HyperOptSearch(
        space,
        metric="loss",
        mode="min",
        random_state_seed=5,
        n_initial_points=1,
    )
    return search_alg, cost
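# Related sketch (the values are illustrative): HyperOptSearch can also be
# seeded with known configurations via points_to_evaluate, which are tried
# before hyperopt starts suggesting its own points.
from hyperopt import hp
from ray.tune.search.hyperopt import HyperOptSearch

seeded_searcher = HyperOptSearch(
    {
        "x": hp.uniform("x", 0, 10),
        "y": hp.uniform("y", -10, 10),
        "z": hp.uniform("z", -10, 0),
    },
    metric="loss",
    mode="min",
    points_to_evaluate=[{"x": 1.0, "y": 0.0, "z": -1.0}],
)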
from ray import tune
from ray.tune.search.hyperopt import HyperOptSearch
import keras


# 1. Wrap a Keras model in an objective function.
def objective(config):
    model = keras.models.Sequential()
    model.add(keras.layers.Dense(784, activation=config["activation"]))
    model.add(keras.layers.Dense(10, activation="softmax"))

    model.compile(loss="binary_crossentropy", optimizer="adam", metrics=["accuracy"])
    # model.fit(...)
    # loss, accuracy = model.evaluate(...)
    return {"accuracy": accuracy}


# 2. Define a search space and initialize the search algorithm.
search_space = {"activation": tune.choice(["relu", "tanh"])}
algo = HyperOptSearch()

# 3. Start a Tune run that maximizes accuracy.
analysis = tune.run(
    objective, search_alg=algo, config=search_space, metric="accuracy", mode="max"
)
# __keras_hyperopt_end__
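# Optional follow-up (a sketch, not part of the snippet above): wrapping the
# searcher in a ConcurrencyLimiter caps parallel trials, so hyperopt receives
# results from earlier trials before proposing too many new points.
from ray.tune.search import ConcurrencyLimiter

limited_algo = ConcurrencyLimiter(HyperOptSearch(), max_concurrent=4)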
ax = None  # This plots everything on the same plot
for d in dfs.values():
    ax = d.mean_accuracy.plot(ax=ax, legend=False)
# __plot_scheduler_end__
# fmt: on

# __run_searchalg_begin__
from hyperopt import hp
from ray.tune.search.hyperopt import HyperOptSearch

space = {
    "lr": hp.loguniform("lr", 1e-10, 0.1),
    "momentum": hp.uniform("momentum", 0.1, 0.9),
}

hyperopt_search = HyperOptSearch(space, metric="mean_accuracy", mode="max")

analysis = tune.run(train_mnist, num_samples=10, search_alg=hyperopt_search)

# To enable GPUs, use this instead:
# analysis = tune.run(
#     train_mnist, config=search_space, resources_per_trial={'gpu': 1})
# __run_searchalg_end__

# __run_analysis_begin__
import os

import torch

df = analysis.results_df
logdir = analysis.get_best_logdir("mean_accuracy", mode="max")
state_dict = torch.load(os.path.join(logdir, "model.pth"))
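# A sketch of evaluating the restored weights, assuming the ConvNet model class
# defined earlier in this tutorial:
model = ConvNet()
model.load_state_dict(state_dict)
model.eval()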