def testHyperopt(self):
    from ray.tune.suggest.hyperopt import HyperOptSearch

    searcher = HyperOptSearch(
        space=self.config, metric=self.metric_name, mode="max")
    self._save(searcher)

    searcher = HyperOptSearch(
        space=self.config, metric=self.metric_name, mode="max")
    self._restore(searcher)

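# The `_save`/`_restore` helpers above live elsewhere in the test class. A
# minimal sketch of what they might do, assuming `os` and `tempfile` are
# imported and that they go through the public `Searcher.save`/
# `Searcher.restore` checkpoint API (the path handling here is hypothetical):
def _save(self, searcher):
    # Write the searcher's internal state to a checkpoint file.
    self._checkpoint_path = os.path.join(tempfile.gettempdir(),
                                         "searcher-state.pkl")
    searcher.save(self._checkpoint_path)

def _restore(self, searcher):
    # Rehydrate a freshly constructed searcher from that checkpoint.
    searcher.restore(self._checkpoint_path)
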
def __init__(self,
             space,
             max_concurrent=10,
             reward_attr=None,
             metric="episode_reward_mean",
             mode="max",
             points_to_evaluate=None,
             n_initial_points=20,
             random_state_seed=None,
             gamma=0.25,
             _wandb=None,
             **kwargs):
    from hyperopt import hp
    from ray.tune.suggest.hyperopt import HyperOptSearch

    _wandb = _wandb or {}
    space = engine.translate(space)
    # save useful parameters
    self._metric = metric
    # load previous results
    self._wandb_results = _wandb.get("results", [])
    if points_to_evaluate:
        sweepwarn("HyperOptSearch points_to_evaluate not supported, ignoring.")
        points_to_evaluate = []
    # FIXME(jhr): max_concurrent needs to be super high? not sure why
    # it seems that sending on_complete is still concurrent
    max_concurrent = 1000
    wandb_seed = _wandb.get("seed")
    if random_state_seed is None:
        random_state_seed = wandb_seed
    self._search = HyperOptSearch(
        space,
        max_concurrent=max_concurrent,
        reward_attr=reward_attr,
        metric=metric,
        mode=mode,
        points_to_evaluate=points_to_evaluate,
        n_initial_points=n_initial_points,
        random_state_seed=random_state_seed,
        gamma=gamma,
        **kwargs)

def main(cls, config, use_tune, num_samples, local_mode, env, name, **kwargs):
    config = getattr(configs, config)
    config.update(env_id=env)
    for k, v in kwargs.items():
        if k not in config:
            config[k] = v
    if use_tune:
        ray.init(webui_host="127.0.0.1", local_mode=local_mode)
        metric = "reward"
        config.update(use_tune=True)

        def run(kwargs):
            return cls.run(kwargs)

        if local_mode:
            tune.run(
                run,
                name=name,
                config=config,
                resources_per_trial={"gpu": 1, "cpu": 6},
            )
        else:
            tune.run(
                run,
                config=config,
                name=name,
                resources_per_trial={"gpu": 1, "cpu": 6},
                # scheduler=ASHAScheduler(metric=metric, mode="max"),
                search_alg=HyperOptSearch(config, metric=metric, mode="max"),
                num_samples=num_samples,
            )
    else:
        config.update(use_tune=False)
        cls.run(config)

def tune_hyperopt():
    from hyperopt import hp
    from ray.tune.suggest.hyperopt import HyperOptSearch

    search_alg = HyperOptSearch(
        space={
            "n_estimators": hp.loguniform("n_estimators", 2, 6),
            "max_depth": hp.uniform("max_depth", 2, 20),
            "min_impurity_decrease": hp.loguniform("min_impurity_decrease",
                                                   -5, -1)
        },
        metric="mean_accuracy",
        mode="max",
        n_initial_points=5,
        points_to_evaluate=[{
            "n_estimators": 100,
            "max_depth": 16,
            "min_impurity_decrease": 0
        }])
    analysis = tune.run(
        CUMLTrainable,
        resources_per_trial={"gpu": 1},
        num_samples=100,
        # reuse_actors=True,
        stop={"training_iteration": 1},
        verbose=1,
        max_failures=0,
        search_alg=search_alg)

def run_tune_experiment(tune_config: Dict[str, Any], num_samples: int) -> None:
    """
    Entry point for running a tune experiment.

    :param tune_config: trial configuration; must contain "objective_metric"
    :param num_samples: number of trials to sample
    """
    hyperopt = HyperOptSearch(metric=tune_config["objective_metric"],
                              mode="max")
    analysis = tune.run(
        Trainable,
        config=tune_config,
        search_alg=hyperopt,
        num_samples=num_samples,
        stop={"training_iteration": 1},
        local_dir=system_config.get_tune_results_dir(),
        resources_per_trial={
            "cpu": 1,
            "gpu": 1
        },
    )
    results = analysis.get_best_config(metric=tune_config["objective_metric"],
                                       mode="max")
    logger.info(f"best config: {results}")

def parallel_optim(program_path,
                   opt_space,
                   n_searches=1,
                   n_repetitions=10,
                   num_cpus=1,
                   num_gpus=1):
    algo = HyperOptSearch(opt_space, metric="loss", mode="min")
    name = os.path.basename(program_path)
    if name.endswith('.py'):
        name = name[:-3]
    analysis = tune.run(
        get_computation_wrapper(program_path, repeat=n_repetitions),
        name=name,
        search_alg=algo,
        num_samples=n_searches,
        resources_per_trial={
            'cpu': num_cpus,
            'gpu': num_gpus
        })
    best_trial = analysis.get_best_trial(metric="loss", mode="min")
    measurements = best_trial.last_result['measurements']
    best_hyperparams = best_trial.config
    return best_hyperparams, measurements, best_trial

def run(search_type, algo, exp_name):
    if search_type == "gridsearch":
        run_experiments({exp_name: get_params(algo)})
    elif search_type == "hyperopt":
        space = {
            "double_q": hp.choice('double_q', [True, False]),
            "hiddens": [hp.choice('hiddens', [64, 256, 512, 1024])],
            "buffer_size": hp.choice(
                'buffer_size', np.arange(20000, 100000, 1000, dtype=int)),
            "lr": hp.loguniform("lr", np.log(0.0005), np.log(0.1)),
            "n_step": hp.choice('n_step', [1, 3, 6, 12]),
            "target_network_update_freq": hp.choice(
                'target_network_update_freq', [10, 30, 50])
        }
        search_alg = HyperOptSearch(
            space, max_concurrent=4, reward_attr="episode_reward_mean")
        scheduler = AsyncHyperBandScheduler(reward_attr="episode_reward_mean")
        run_experiments({exp_name: get_params(algo)},
                        search_alg=search_alg,
                        scheduler=scheduler,
                        verbose=True)

def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('model', choices=('gcn', 'fishergcn'),
                        help="model to tune")
    parser.add_argument('--tune', action='store_true', help="using ray.tune")
    args = parser.parse_args()

    start_t = time.time()
    if args.tune:
        import ray
        from ray.tune import run
        from ray.tune.suggest.hyperopt import HyperOptSearch

        ray.init()
        algo = HyperOptSearch(SPACE[args.model], max_concurrent=NCPUS,
                              reward_attr="accuracy")
        run(benchmark_tune, search_alg=algo, num_samples=NEVALS)
    else:
        trials = Trials()
        best = fmin(benchmark, space=SPACE[args.model], algo=tpe.suggest,
                    max_evals=NEVALS, trials=trials)
        print('best:', best)
        with open("{}.pkl".format(args.model), "wb") as f:
            pickle.dump(trials, f)
    print("finished in {:.1f} hours".format((time.time() - start_t) / 3600))

def optimize_hyperparameters(
    self,
) -> ray.tune.analysis.experiment_analysis.ExperimentAnalysis:
    """
    Optimize hyperparameters using Ray Tune's wrapper around the hyperopt
    library.

    :return: <ray.tune.analysis.experiment_analysis.ExperimentAnalysis>
        Result analysis after hyperparameter optimization.
    """
    if self.args.params_subspace == "nn":
        # Load data to the memory
        self._get_data()
        base_function = self.optimize_nn
        metric = "loss"
    else:
        base_function = self.optimize_rl
        metric = "mean_last_reward"
    hyperopt_search = HyperOptSearch(
        self.space,
        metric=metric,
        mode=self.args.extreme_type,
    )
    analysis = tune.run(
        base_function,
        num_samples=self.args.num_samples,
        search_alg=hyperopt_search,
        name=self.args.results_name[:-4],
        local_dir=os.path.join(self.args.save_dir, "hyperopt_results"),
    )
    return analysis

def set_algorithm(experiment_name, config):
    '''
    Configure the search algorithm.
    '''
    if args.algorithm == 'hyperopt':
        algorithm = HyperOptSearch(points_to_evaluate=best_params)
    elif args.algorithm == 'ax':
        ax_client = AxClient(enforce_sequential_optimization=False)
        ax_client.create_experiment(name=experiment_name,
                                    parameters=config,
                                    objective_name="minimum",
                                    minimize=True)
        algorithm = AxSearch(ax_client=ax_client,
                             points_to_evaluate=best_params)
    elif args.algorithm == 'nevergrad':
        algorithm = NevergradSearch(
            points_to_evaluate=best_params,
            optimizer=ng.optimizers.registry["PortfolioDiscreteOnePlusOne"])
    elif args.algorithm == 'optuna':
        algorithm = OptunaSearch(points_to_evaluate=best_params,
                                 seed=args.seed)
    elif args.algorithm == 'pbt':
        algorithm = PopulationBasedTraining(
            time_attr="training_iteration",
            perturbation_interval=args.perturbation,
            hyperparam_mutations=config,
            synch=True)
    elif args.algorithm == 'random':
        algorithm = BasicVariantGenerator(max_concurrent=args.jobs)
    if args.algorithm not in ['random', 'pbt']:
        algorithm = ConcurrencyLimiter(algorithm, max_concurrent=args.jobs)
    return algorithm

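# The 'pbt' branch above returns a *scheduler* (PopulationBasedTraining),
# not a searcher, so the call site has to dispatch on the algorithm name. A
# hedged sketch of such a call site (`objective` and the module-level `args`
# mirror the surrounding script and are assumptions, not the original code):
def launch(experiment_name, config, objective):
    algorithm = set_algorithm(experiment_name, config)
    if args.algorithm == 'pbt':
        # Population-based training plugs in as a scheduler.
        return tune.run(objective, config=config, scheduler=algorithm,
                        num_samples=args.jobs)
    # Everything else is a search algorithm (possibly concurrency-limited).
    return tune.run(objective, config=config, search_alg=algorithm,
                    num_samples=args.jobs)
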
def testMetricCheckingEndToEnd(self):
    from ray import tune

    def train(config):
        tune.report(val=4, second=8)

    def train2(config):
        return

    os.environ["TUNE_DISABLE_STRICT_METRIC_CHECKING"] = "0"

    # `acc` is not reported, should raise
    with self.assertRaises(TuneError):
        # The trial runner raises a ValueError, but the experiment fails
        # with a TuneError
        tune.run(train, metric="acc")

    # `val` is reported, should not raise
    tune.run(train, metric="val")

    # Run does not report anything, should not raise
    tune.run(train2, metric="val")

    # Only the scheduler requires a metric
    with self.assertRaises(TuneError):
        tune.run(
            train,
            scheduler=AsyncHyperBandScheduler(metric="acc", mode="max"))

    tune.run(
        train,
        scheduler=AsyncHyperBandScheduler(metric="val", mode="max"))

    # Only the search alg requires a metric
    with self.assertRaises(TuneError):
        tune.run(
            train,
            config={"a": tune.choice([1, 2])},
            search_alg=HyperOptSearch(metric="acc", mode="max"))

    # Metric is passed
    tune.run(
        train,
        config={"a": tune.choice([1, 2])},
        search_alg=HyperOptSearch(metric="val", mode="max"))

    os.environ["TUNE_DISABLE_STRICT_METRIC_CHECKING"] = "1"
    # With strict metric checking disabled, this should not raise
    tune.run(train, metric="acc")

def main(
    config: Optional[dict],
    cpus_per_trial: int,
    data: Path,
    gpus_per_trial: int,
    local_mode: bool,
    n_samples: int,
    name: str,
    seeds: List[int],
    **kwargs,
):
    for k, v in kwargs.items():
        if v is not None or k not in config:
            config[k] = v
    seed = config.get("seed")
    if not seed:
        if not seeds:
            raise RuntimeError(
                "Either seed must be set or seeds must be non-empty")
        elif len(seeds) == 1:
            seed = seeds[0]
        else:
            seed = tune.grid_search(seeds)
    config.update(seed=seed)
    config.update(data=data.absolute())
    if n_samples or local_mode:
        config.update(report=tune.report)
        ray.init(dashboard_host="127.0.0.1", local_mode=local_mode)
        kwargs = dict()
        if any(isinstance(v, Apply) for v in config.values()):
            kwargs = dict(
                # The test loss is to be minimized.
                search_alg=HyperOptSearch(config, metric="test_loss",
                                          mode="min"),
                num_samples=n_samples,
            )

        def _run(c):
            run(**c)

        tune.run(
            _run,
            name=name,
            config=config,
            resources_per_trial=dict(gpu=gpus_per_trial, cpu=cpus_per_trial),
            **kwargs,
        )
    else:

        def report(**kwargs):
            print(
                tabulate(
                    {k: [v] for k, v in kwargs.items()},
                    headers="keys",
                    tablefmt="pretty",
                ))

        config.update(report=report)
        run(**config)

def testConvergenceHyperopt(self):
    from ray.tune.suggest.hyperopt import HyperOptSearch

    np.random.seed(0)
    searcher = HyperOptSearch(random_state_seed=1234)
    analysis = self._testConvergence(searcher, patience=50, top=5)
    assert math.isclose(analysis.best_config["x"], 0, abs_tol=1e-2)

def backtest_tune(ohlc: np.ndarray, backtest_config: dict):
    config = create_config(backtest_config)
    if not os.path.isdir(os.path.join('reports', backtest_config['symbol'])):
        os.makedirs(os.path.join('reports', backtest_config['symbol']),
                    exist_ok=True)
    report_path = os.path.join('reports', backtest_config['symbol'])
    iters = 10
    if 'iters' in backtest_config:
        iters = backtest_config['iters']
    else:
        print('Parameter iters should be defined in the configuration. '
              'Defaulting to 10.')
    num_cpus = 2
    if 'num_cpus' in backtest_config:
        num_cpus = backtest_config['num_cpus']
    else:
        print('Parameter num_cpus should be defined in the configuration. '
              'Defaulting to 2.')
    initial_points = max(1, min(int(iters / 10), 20))

    ray.init(num_cpus=num_cpus)  # , logging_level=logging.FATAL, log_to_driver=False

    algo = HyperOptSearch(n_initial_points=initial_points)
    algo = ConcurrencyLimiter(algo, max_concurrent=num_cpus)
    scheduler = AsyncHyperBandScheduler()

    analysis = tune.run(tune.with_parameters(backtest, ohlc=ohlc),
                        metric='objective',
                        mode='max',
                        name='search',
                        search_alg=algo,
                        scheduler=scheduler,
                        num_samples=iters,
                        config=config,
                        verbose=1,
                        reuse_actors=True,
                        local_dir=report_path)

    ray.shutdown()

    session_path = os.path.join(
        os.path.join('sessions', backtest_config['symbol']),
        backtest_config['session_name'])
    if not os.path.isdir(session_path):
        os.makedirs(session_path, exist_ok=True)

    print('Best candidate found is: ', analysis.best_config)
    json.dump(analysis.best_config,
              open(os.path.join(session_path, 'best_config.json'), 'w'),
              indent=4)
    result = backtest(analysis.best_config, ohlc, True)
    result.to_csv(os.path.join(session_path, 'best_trades.csv'), index=False)
    return analysis

def network_debug(self):
    logger = logging.getLogger("detectron2.trainer")
    # inference
    SearchTrainer.test_policies(self.cfg, self.model, None, self.k_th,
                                self.K_fold)
    # search by explore and exploit
    logger.info("Step2: search best policies")
    name = "search_fold%d" % (self.k_th)
    register_trainable(name, lambda augs, rpt: search_debug(augs, rpt))
    # search algorithm
    algo = HyperOptSearch(self.space,
                          max_concurrent=4 * 20,
                          metric=self.metric,
                          mode=self.mode)  # top1_valid or minus_loss
    # experiments configuration
    exp_config = {
        name: {
            'run': name,
            'num_samples': 4,
            'resources_per_trial': self.resources_per_trial,
            'stop': {
                'training_iteration': self.num_policy
            },
            'config': {
                "cfg": self.cfg,
                "k_th": self.k_th,
                "K_fold": self.K_fold
            }
        }
    }
    # bayes optimization search
    # results = run_experiments(exp_config, search_alg=algo, scheduler=None,
    #                           verbose=0, queue_trials=True,
    #                           raise_on_failed_trial=False)
    results = run(
        convert_to_experiment_list(exp_config),
        name=name,
        search_alg=algo,
        resources_per_trial=self.resources_per_trial,
        return_trials=True,
        verbose=0,
        queue_trials=True,
        raise_on_failed_trial=False,
    )
    # sort by metric, best trial first
    results = [x for x in results if x.last_result is not None]
    results = sorted(results,
                     key=lambda x: x.last_result[self.metric],
                     reverse=True)
    return []

def main():
    ray.init(local_mode=False)
    hyperband = tune.schedulers.AsyncHyperBandScheduler(
        time_attr="training_iteration",
        metric="elo",
        mode="max",
        max_t=20,
        reduction_factor=3,
        brackets=3)
    experiment = tune.Experiment(
        name="ppo_hyperparameter_search_6",
        run=HyperParameterSearch,
        stop={},
        num_samples=128,
        loggers=[JsonLogger, CSVLogger, TFEagerLogger],
        resources_per_trial={"cpu": 8})
    algo = HyperOptSearch(
        {
            "base_program": os.path.join(os.getcwd(), sys.argv[1]),
            "actor_loss_coef": hp.uniform("actor_loss_coef", 0.25, 1),
            "algorithm": 1,
            "batch_size": hp.choice("batch_size",
                                    [128, 512, 1024, 2048, 4096, 8162]),
            "clip_param": hp.uniform("clip_param", 0.05, 0.3),
            "discount_factor": hp.choice("discount_factor",
                                         [0.9, 0.97, 0.98, 0.99]),
            "entropy_coef": hp.choice("entropy_coef",
                                      [0.01, 0.001, 0.0001, 0.00001]),
            "learning_rate": hp.choice("learning_rate",
                                       [0.001, 0.0007, 0.0003]),
            "num_env": 8,
            "num_epoch": hp.choice("num_epoch", range(2, 9)),
            "num_minibatch": 8,
            "value_loss_coef": hp.uniform("value_loss_coef", 0.25, 1)
        },
        max_concurrent=8,
        metric="elo",
        mode="max",
        # Note: hp.choice entries are given as indices into the choice list.
        points_to_evaluate=[{
            "actor_loss_coef": 0.4760768147894592,
            "algorithm": 1,
            "batch_size": 0,
            "clip_param": 0.20104703089284237,
            "discount_factor": 3,
            "entropy_coef": 2,
            "learning_rate": 1,
            "num_env": 8,
            "num_epoch": 4,
            "num_minibatch": 8,
            "value_loss_coef": 0.2968052161165151
        }])
    tune.run(experiment, search_alg=algo, scheduler=hyperband, resume="prompt")

def optimize_hyperparameters(
    train_model,
    create_model,
    data_train,
    data_test,
    search_space,
    model_kwargs_str,
    callbacks,
    hyperparams_file_name,
    random_seed,
    model_path,
    epochs,
    n_steps,
    num_samples_optim,
):
    tmp_dir = tempfile.TemporaryDirectory(dir=os.getcwd())
    ray.shutdown()
    ray.init(log_to_driver=False, local_mode=True)
    search_alg = HyperOptSearch(random_state_seed=random_seed)
    search_alg = ConcurrencyLimiter(search_alg, max_concurrent=1)
    scheduler = AsyncHyperBandScheduler(time_attr="training_iteration",
                                        grace_period=10)
    analysis = tune.run(
        tune.with_parameters(
            train_model,
            data_train=data_train,
            data_test=data_test,
            create_model=create_model,
            model_kwargs_str=model_kwargs_str,
            callbacks=callbacks,
            epochs=epochs,
            n_steps=n_steps,
        ),
        verbose=1,
        config=search_space,
        search_alg=search_alg,
        scheduler=scheduler,
        resources_per_trial={
            "cpu": os.cpu_count(),
            "gpu": 0
        },
        metric="val_loss",
        mode="min",
        name="ray_tune_keras_hyperopt_gru",
        local_dir=tmp_dir.name,
        num_samples=num_samples_optim,
    )
    # rmtree needs the path, not the TemporaryDirectory object
    shutil.rmtree(tmp_dir.name)
    best_params = analysis.get_best_config(metric="val_loss", mode="min")
    with open(os.path.join(model_path, hyperparams_file_name), "w") as f:
        json.dump(best_params, f)

def main(distributed: bool,
         num_samples: int = 5,
         batch_size: int = 512,
         num_epochs: int = 10) -> None:
    init_logging("main.log")
    logger.info("Running main ...")
    if distributed:
        ray.init(address="localhost:6379",
                 _redis_password=os.getenv("RAY_REDIS_PWD"),
                 ignore_reinit_error=True)
    else:
        ray.init(ignore_reinit_error=True)

    X, y = make_data(NUM_ROW)
    X_tr, X_val, y_tr, y_val = train_test_split(X, y, test_size=0.2)

    # NOTE: Hyperopt config
    metric = "loss"
    mode = "min"
    hp_search = HyperOptSearch(metric=metric, mode=mode)

    # NOTE: Like functools.partial, but stores data in object store
    objective = tune.with_parameters(fit,
                                     X_tr=X_tr,
                                     X_val=X_val,
                                     y_tr=y_tr,
                                     y_val=y_val,
                                     batch_size=batch_size,
                                     num_epochs=num_epochs)

    # NOTE: Define the support of the parameters we're optimizing over
    param_space = {
        "width": tune.choice((2**np.arange(5, 11)).astype(int)),
        "depth": tune.choice(range(1, 5)),
        "lr": tune.loguniform(1e-4, 5e-2)
    }

    logger.info("Starting hyperparameter search ...")
    analysis = tune.run(objective,
                        num_samples=num_samples,
                        config=param_space,
                        search_alg=hp_search,
                        resources_per_trial={
                            "cpu": 2,
                            "gpu": 0.5
                        },
                        metric=metric,
                        mode=mode)
    best_config = analysis.get_best_config(metric=metric, mode=mode)
    logger.info("Best config:\n%s", best_config)
    with open("/tmp/analysis.p", "wb") as f:
        pickle.dump(analysis, f)
    logger.info("Best results %s", pformat(analysis.results))
    analysis.results_df.to_parquet(RESULTS_PATH)

def ray_tune_interface(data_settings, cross_validation_settings,
                       parameter_settings, model_settings):
    cat_features = data_settings['cat_features']
    weights = data_settings['weights']
    data = data_settings['data']
    target = data_settings['target']
    model = model_settings.pop('model', None)
    n_folds = cross_validation_settings['n_folds']
    metric_to_use = cross_validation_settings['metric_to_use']
    mode = cross_validation_settings['mode']
    n_samples = parameter_settings['tune_parameters']['n_samples']
    num_threads = parameter_settings['tune_parameters']['num_threads']
    max_concurrent = parameter_settings['tune_parameters']['max_concurrent']
    space = ({
        k: var_type_sample(k, v, parameter_settings['parameters_types'])
        for k, v in parameter_settings['parameter_range'].items()
    })
    algo = HyperOptSearch(
        space,
        max_concurrent=max_concurrent,
        metric="metric_ave",
        mode=mode,
    )
    scheduler = ASHAScheduler(metric="metric_ave", mode=mode)
    config = {
        'num_samples': n_samples,
        'config': {
            'non_hp': {
                'data': {
                    'X': data,
                    'y': target,
                    'cat_features': cat_features,
                    'weights': weights,
                },
                'modelIns': model,
                'n_folds': n_folds,
                'metric_to_use': metric_to_use,
            }
        }
    }
    config['config'].update(parameter_settings)
    config['config']['model_settings'] = model_settings
    ray_experiment = tune.run(cross_validation_catboost_ray,
                              resources_per_trial={"gpu": 1},
                              search_alg=algo,
                              scheduler=scheduler,
                              keep_checkpoints_num=0,
                              verbose=1,
                              **config)
    results = ray_experiment.dataframe(metric="metric_ave", mode="min")
    results.to_csv('results_experiment.csv')

def optimize(cfg):
    search_space = {
        "ae_lr": tune.loguniform(1e-6, 1e-2),
    }
    # Points to evaluate
    best_params = [{"ae_lr": 3e-4}]
    search_alg = HyperOptSearch(metric="val_return",
                                mode="max",
                                points_to_evaluate=best_params)
    analysis = tune.run(tune.with_parameters(train, params=cfg.worker),
                        num_samples=1,
                        config=search_space,
                        search_alg=search_alg,
                        resources_per_trial={
                            'cpu': 4,
                            'gpu': 1
                        })
    search_alg.save("./opt_checkpoint.pkl")
    print("best config: ",
          analysis.get_best_config(metric="return", mode="max"))

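# To resume the search later, a fresh searcher can be rehydrated from the
# checkpoint written above. A minimal sketch, assuming the same metric/mode
# (`Searcher.save`/`Searcher.restore` is the public checkpoint API used by
# the `search_alg.save` call in `optimize`):
restored_alg = HyperOptSearch(metric="val_return", mode="max")
restored_alg.restore("./opt_checkpoint.pkl")
# `restored_alg` can now be passed as `search_alg=` to a new tune.run call
# to continue suggesting from the saved state.
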
def testCreateSearcher(self):
    kwargs = {"metric": "metric_foo", "mode": "min"}

    searcher_ax = "ax"
    shim_searcher_ax = tune.create_searcher(searcher_ax, **kwargs)
    real_searcher_ax = AxSearch(space=[], **kwargs)
    assert type(shim_searcher_ax) is type(real_searcher_ax)

    searcher_hyperopt = "hyperopt"
    shim_searcher_hyperopt = tune.create_searcher(searcher_hyperopt, **kwargs)
    real_searcher_hyperopt = HyperOptSearch({}, **kwargs)
    assert type(shim_searcher_hyperopt) is type(real_searcher_hyperopt)

def main(cmdl):
    base_cfg = namespace_to_dict(read_config(Path(cmdl.cfg) / "default.yaml"))
    search_cfg = namespace_to_dict(read_config(Path(cmdl.cfg) / "search.yaml"))

    print(config_to_string(cmdl))
    print(config_to_string(dict_to_namespace(search_cfg)))

    # the search space
    good_init, search_space = get_search_space(search_cfg)

    search_name = "{timestep}_tune_{experiment_name}{dev}".format(
        timestep="{:%Y%b%d-%H%M%S}".format(datetime.now()),
        experiment_name=base_cfg["experiment"],
        dev="_dev" if cmdl.dev else "",
    )

    # search algorithm
    hyperopt_search = HyperOptSearch(
        search_space,
        metric="criterion",
        mode="max",
        max_concurrent=cmdl.workers,
        points_to_evaluate=good_init,
    )

    # early stopping
    scheduler = ASHAScheduler(
        time_attr="train_step",
        metric="criterion",
        mode="max",
        max_t=base_cfg["training_steps"],  # max length of the experiment
        grace_period=cmdl.grace_steps,  # minimum steps before stopping
        brackets=3,  # number of ASHA brackets
    )

    analysis = tune.run(
        lambda x: tune_trial(x, base_cfg=base_cfg, get_objective=None),
        name=search_name,
        # config=search_space,
        search_alg=hyperopt_search,
        scheduler=scheduler,
        local_dir="./results",
        num_samples=cmdl.trials,
        trial_name_creator=trial2string,
        resources_per_trial={"cpu": 3},
    )

    dfs = analysis.trial_dataframes
    for i, (key, df) in enumerate(dfs.items()):
        print("saving: ", key)
        df.to_pickle(f"{key}/trial_df.pkl")

def testHyperopt(self):
    from ray.tune.suggest.hyperopt import HyperOptSearch

    out = tune.run(
        _invalid_objective,  # At least one nan, inf, -inf and float
        search_alg=HyperOptSearch(random_state_seed=1234),
        config=self.config,
        mode="max",
        num_samples=8,
        reuse_actors=False)
    best_trial = out.best_trial
    self.assertLessEqual(best_trial.config["report"], 2.0)

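# `_invalid_objective` lives elsewhere in the test module. A sketch of the
# idea it implements (assumed, not the verbatim source): depending on the
# sampled "report" value it reports inf, -inf, nan, or a finite float, so
# the searcher must tolerate invalid results; only finite reports <= 2.0
# can win, which is what the assertion above checks.
def _invalid_objective(config):
    if config["report"] > 4:
        tune.report(float("inf"))
    elif config["report"] > 3:
        tune.report(float("-inf"))
    elif config["report"] > 2:
        tune.report(float("nan"))
    else:
        tune.report(config["report"])
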
def testRemoteRunWithSearcher(self):
    def train(config, reporter):
        for i in range(100):
            reporter(timesteps_total=i)

    analysis = run(train,
                   search_alg=HyperOptSearch(),
                   config={"a": choice(["a", "b"])},
                   metric="timesteps_total",
                   mode="max",
                   _remote=True)
    [trial] = analysis.trials
    self.assertEqual(trial.status, Trial.TERMINATED)
    self.assertEqual(trial.last_result[TIMESTEPS_TOTAL], 99)

def run_hyperopt_tune(config_dict=config_space, smoke_test=False):
    algo = HyperOptSearch(space=config_dict, metric="mean_loss", mode="min")
    algo = ConcurrencyLimiter(algo, max_concurrent=4)
    scheduler = AsyncHyperBandScheduler()
    analysis = tune.run(
        easy_objective,
        metric="mean_loss",
        mode="min",
        search_alg=algo,
        scheduler=scheduler,
        num_samples=10 if smoke_test else 100,
    )

    print("Best hyperparameters found were: ", analysis.best_config)

def testConvertHyperOptNested(self):
    from ray.tune.suggest.hyperopt import HyperOptSearch

    config = {
        "a": 1,
        "dict_nested": tune.sample.Categorical([{
            "a": tune.sample.Categorical(["M", "N"]),
            "b": tune.sample.Categorical(["O", "P"])
        }]).uniform(),
        "list_nested": tune.sample.Categorical([
            [
                tune.sample.Categorical(["M", "N"]),
                tune.sample.Categorical(["O", "P"])
            ],
            [
                tune.sample.Categorical(["Q", "R"]),
                tune.sample.Categorical(["S", "T"])
            ],
        ]).uniform(),
        "domain_nested": tune.sample.Categorical([
            tune.sample.Categorical(["M", "N"]),
            tune.sample.Categorical(["O", "P"])
        ]).uniform(),
    }

    searcher = HyperOptSearch(metric="a", mode="max")
    analysis = tune.run(
        _mock_objective, config=config, search_alg=searcher, num_samples=10)

    for trial in analysis.trials:
        config = trial.config

        self.assertIn(config["dict_nested"]["a"], ["M", "N"])
        self.assertIn(config["dict_nested"]["b"], ["O", "P"])

        if config["list_nested"][0] in ["M", "N"]:
            self.assertIn(config["list_nested"][1], ["O", "P"])
        else:
            self.assertIn(config["list_nested"][0], ["Q", "R"])
            self.assertIn(config["list_nested"][1], ["S", "T"])

        self.assertIn(config["domain_nested"], ["M", "N", "O", "P"])

def main():
    config = Config(config_file="lunar/config/config_lunar.yaml").config
    ray.init(address="auto")
    space = {
        "agent_learn_every_x_steps": hp.choice("agent_learn_every_x_steps",
                                               [10, 20]),
        "replay_buffer_batch_size": hp.choice("replay_buffer_batch_size",
                                              [128, 512, 1024]),
        # "memory_learning_start": hp.choice("memory_learning_start", [50000]),
        # shadowed by the hp.choice entry below; a dict literal keeps only
        # the last "agent_gamma" key:
        # "agent_gamma": hp.uniform("agent_gamma", 0.95, 0.999),
        "agent_gamma": hp.choice("agent_gamma", [0.95, 0.995])
    }
    reporter = CLIReporter()
    reporter.add_metric_column("mean_rewards")
    reporter.add_metric_column("reward")
    ahb = AsyncHyperBandScheduler(time_attr="training_iteration",
                                  metric="mean_rewards",
                                  mode="max",
                                  grace_period=500,
                                  max_t=3600)
    tune.run(
        LunarTrainer,
        name="asynHyber-lunar-ddpg",
        scheduler=ahb,
        config=config,
        queue_trials=True,
        num_samples=10,
        progress_reporter=reporter,
        resources_per_trial={
            "cpu": 3,
            "gpu": 0.2
        },
        search_alg=HyperOptSearch(space=space,
                                  max_concurrent=4,
                                  metric="mean_rewards",
                                  mode="max"),
        checkpoint_freq=20,
        checkpoint_at_end=True,
        verbose=1,
    )

def testConvertHyperOpt(self):
    from ray.tune.suggest.hyperopt import HyperOptSearch
    from hyperopt import hp

    config = {
        "a": tune.sample.Categorical([2, 3, 4]).uniform(),
        "b": {
            "x": tune.sample.Integer(0, 5).quantized(2),
            "y": 4,
            "z": tune.sample.Float(1e-4, 1e-2).loguniform()
        }
    }
    converted_config = HyperOptSearch.convert_search_space(config)
    hyperopt_config = {
        "a": hp.choice("a", [2, 3, 4]),
        "b": {
            "x": hp.randint("x", 5),
            "y": 4,
            "z": hp.loguniform("z", np.log(1e-4), np.log(1e-2))
        }
    }

    searcher1 = HyperOptSearch(space=converted_config,
                               random_state_seed=1234)
    searcher2 = HyperOptSearch(space=hyperopt_config,
                               random_state_seed=1234)

    config1 = searcher1.suggest("0")
    config2 = searcher2.suggest("0")

    self.assertEqual(config1, config2)
    self.assertIn(config1["a"], [2, 3, 4])
    self.assertIn(config1["b"]["x"], list(range(5)))
    self.assertEqual(config1["b"]["y"], 4)
    self.assertLess(1e-4, config1["b"]["z"])
    self.assertLess(config1["b"]["z"], 1e-2)

    searcher = HyperOptSearch(metric="a", mode="max")
    analysis = tune.run(
        _mock_objective, config=config, search_alg=searcher, num_samples=1)
    trial = analysis.trials[0]
    assert trial.config["a"] in [2, 3, 4]

def set_basic_conf(self):
    space = {
        "x": hp.uniform("x", 0, 10),
        "y": hp.uniform("y", -10, 10),
        "z": hp.uniform("z", -10, 0)
    }

    def cost(space, reporter):
        loss = space["x"]**2 + space["y"]**2 + space["z"]**2
        reporter(loss=loss)

    search_alg = HyperOptSearch(space,
                                max_concurrent=1,
                                metric="loss",
                                mode="min",
                                random_state_seed=5)
    return search_alg, cost

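# A minimal harness sketch for the fixture above (assumed usage; the name
# `run_basic_conf` is hypothetical, the original test suite presumably has
# a shared harness that does the equivalent):
def run_basic_conf(fixture):
    search_alg, cost = fixture.set_basic_conf()
    analysis = tune.run(cost, search_alg=search_alg, num_samples=10)
    return analysis.get_best_config(metric="loss", mode="min")
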
def main():
    space = {
        "batch_size": hp.choice("batch_size", [32, 64, 128]),
        "learning_rate": hp.choice("learning_rate", [0.01, 0.001, 0.0005]),
        "target_update": hp.choice("target_update", [4, 10, 100]),
    }
    hyperopt_search = HyperOptSearch(space, metric="mean_reward", mode="max")
    analysis = tune.run(
        Trainable,
        stop={'training_iteration': MAX_TRAINING_ITERATION},
        num_samples=10,
        scheduler=MedianStoppingRule(metric="mean_reward", mode="max"),
        search_alg=hyperopt_search,
        local_dir=TUNE_RESULTS_FOLDER,
        progress_reporter=reporter,
        checkpoint_freq=1,
        verbose=1)