"""Hyperparameter search with Ray Tune using the ASHA scheduler.

NOTE(review): the original snippet called a fabricated API
(`tune.TrialRunner()` / `runner.add(...)` / `runner.add_reporter(...)`).
`TrialRunner` is an internal Ray Tune class, not a public entry point;
the public API for launching trials is `tune.run(...)`, which accepts the
trainable, the search space, the scheduler, and a progress reporter
directly. Rewritten to the real API below.
"""
import ray
from ray import tune
from ray.tune import CLIReporter
from ray.tune.schedulers import ASHAScheduler

# Define a search space of hyperparameters.
config = {
    "lr": tune.loguniform(1e-4, 1e-2),        # learning rate, log scale
    "momentum": tune.uniform(0.1, 0.9),
    "hidden_layers": tune.choice([16, 32, 64]),
}

# Ray must be initialized before launching the experiment.
ray.init()

# Schedule trials with different hyperparameter configurations using the
# ASHA early-stopping algorithm, and monitor progress with the CLIReporter.
# `my_trainable_class` and `num_samples` are user-supplied — define a
# Trainable subclass (or a trainable function) and a sample count before
# running this script.
analysis = tune.run(
    my_trainable_class,
    config=config,
    num_samples=num_samples,
    scheduler=ASHAScheduler(),
    progress_reporter=CLIReporter(),
)
"""Population-Based Training (PBT) example with Ray Tune.

Fix: the original used `tune.loguniform` / `tune.uniform` without ever
importing `tune` (only `Trainable` and `run_experiments` were imported),
which raises NameError at module load — `from ray import tune` added.
"""
from ray import tune
from ray.tune import Trainable, run_experiments
from ray.tune.schedulers import PopulationBasedTraining


class MyTrainableClass(Trainable):
    """Trains a model for one configuration; legacy Trainable API.

    NOTE(review): `_setup`/`_train` are the legacy hook names, consistent
    with the legacy `run_experiments` entry point used below. In modern
    Ray versions these are `setup`/`step` with `tune.Tuner`.
    """

    def _setup(self, config):
        # One-time initialization with this trial's hyperparameters.
        self.iter = 0
        self.lr = config["lr"]

    def _train(self):
        # One training iteration; returns metrics for the scheduler.
        self.iter += 1
        # `train_my_model` is a user-supplied function that runs one
        # training step and returns an accuracy — define it before running.
        accuracy = train_my_model(self.lr)
        # Trial ends after 100 iterations (or earlier via the `stop` spec).
        return {"accuracy": accuracy, "done": self.iter == 100}


# Hyperparameter search space sampled for the initial population.
search_space = {
    "lr": tune.loguniform(1e-4, 1e-1),
    "momentum": tune.uniform(0.1, 0.9),
}

# Experiment specification for the legacy `run_experiments` API.
config = {
    "my_experiment": {
        "run": MyTrainableClass,
        "num_samples": 10,
        "config": search_space,
        "stop": {"accuracy": 0.95},           # stop once target accuracy reached
        "local_dir": "/path/to/log/directory",
        "resources_per_trial": {"cpu": 4, "gpu": 0.5},
    }
}

# PBT periodically clones well-performing trials and perturbs their
# hyperparameters to keep exploring the search space.
scheduler = PopulationBasedTraining(
    time_attr="training_iteration",
    metric="accuracy",
    mode="max",
    perturbation_interval=5,
    hyperparam_mutations={
        "lr": [1e-5, 1e-4, 1e-3],
        "momentum": [0.1, 0.5, 0.9, 0.99],
    },
)

run_experiments(config, scheduler=scheduler)

# In this example, we use a `PopulationBasedTraining` scheduler to perform a
# population-based search of hyperparameter configurations. The scheduler
# generates new configurations from the best-performing ones and perturbs them
# to explore the search space. We define a custom `Trainable` subclass,
# `MyTrainableClass`, to train a model with each configuration, and stop the
# experiment when the target accuracy is reached. We also specify the
# resources required by each trial and the directory in which to save the
# experiment logs. Finally, we run the experiment with `run_experiments`,
# passing the scheduler as an argument. Overall, Ray Tune's trial-running
# machinery enables users to perform hyperparameter tuning experiments with
# ease and flexibility.