from ray import tune

# Hyperparameter search space: exhaustive grid over learning rates,
# random sampling over batch sizes.
config = {
    "lr": tune.grid_search([0.001, 0.01, 0.1]),
    "batch_size": tune.choice([8, 16, 32]),
    # other hyperparameters
}


def train_fn(config):
    """Trainable executed once per trial with one sampled `config`.

    Report metrics from inside the training loop (e.g.
    `tune.report(accuracy=...)`) so the `stop` criterion below can
    observe them.
    """
    # logic for training the model
    pass


# NOTE(review): the original used `tune.TrialRunner()` with
# `add_config`, `call_on_trial_start`, and `run(until=...)` — none of
# these exist; `TrialRunner` is Ray-internal. The public API is
# `tune.run`, which takes the trainable and search space directly.
# `stop` maps a reported metric name to a threshold that ends a trial
# once reached — replace "accuracy"/0.95 with your real metric goal.
analysis = tune.run(
    train_fn,
    config=config,
    stop={"accuracy": 0.95},
)
from ray import tune

# Hyperparameter search space: exhaustive grid over learning rates,
# random sampling over batch sizes.
config = {
    "lr": tune.grid_search([0.001, 0.01, 0.1]),
    "batch_size": tune.choice([8, 16, 32]),
    # other hyperparameters
}


def train_fn(config):
    """Trainable executed once per trial with one sampled `config`.

    Report metrics from inside the training loop (e.g.
    `tune.report(accuracy=...)`) so Tune can track trial progress.
    """
    # logic for training the model
    pass


# NOTE(review): the original constructed `tune.TrialRunner(local_mode=True)`
# and called `run_parallel(num_parallel=4)` — neither exists. Moreover,
# `local_mode` is a `ray.init()` debugging option that SERIALIZES
# execution; it does not parallelize anything. In the public API,
# parallelism is controlled by `max_concurrent_trials` (and per-trial
# resource requests) on `tune.run`.
analysis = tune.run(
    train_fn,
    config=config,
    max_concurrent_trials=4,  # run up to 4 trials concurrently
)
# In both examples, the Ray Tune library is used.