Example #1
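A test fixture that hand-builds a two-parameter numeric search space and pairs it with a seeded HEBOSearch instance and a simple cost function. Here HEBODesignSpace is assumed to be an alias for HEBO's DesignSpace class (e.g. from hebo.design_space.design_space import DesignSpace as HEBODesignSpace).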
    def set_basic_conf(self):
        space_config = [
            {
                "name": "width",
                "type": "num",
                "lb": 0,
                "ub": 20
            },
            {
                "name": "height",
                "type": "num",
                "lb": -100,
                "ub": 100
            },
        ]
        space = HEBODesignSpace().parse(space_config)

        def cost(param, reporter):
            reporter(loss=(param["height"] - 14)**2 - abs(param["width"] - 3))

        search_alg = HEBOSearch(
            space=space, metric="loss", mode="min", random_state_seed=5)

        return search_alg, cost
Example #2
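Warm-starting HEBOSearch from previously evaluated configurations (points_to_evaluate) and their rewards (evaluated_rewards), limiting concurrency to four trials and running the objective under an AsyncHyperBandScheduler.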
    previously_run_params = [
        {
            "width": 10,
            "height": 0,
            "activation": "relu"  # Activation will be relu
        },
        {
            "width": 15,
            "height": -20,
            "activation": "tanh"  # Activation will be tanh
        }
    ]
    known_rewards = [-189, -1144]

    algo = HEBOSearch(
        # space = space, # If you want to set the space
        points_to_evaluate=previously_run_params,
        evaluated_rewards=known_rewards,
        random_state_seed=123,  # for reproducibility
    )
    algo = ConcurrencyLimiter(algo, max_concurrent=4)

    scheduler = AsyncHyperBandScheduler()

    analysis = tune.run(easy_objective,
                        metric="mean_loss",
                        mode="min",
                        name="hebo_exp_with_warmstart",
                        search_alg=algo,
                        scheduler=scheduler,
                        num_samples=10 if args.smoke_test else 50,
                        config={
                            "steps": 100,
                            # search space for the remaining parameters; assumed to
                            # mirror the warm-start points given above
                            "width": tune.uniform(0, 20),
                            "height": tune.uniform(-100, 100),
                            "activation": tune.choice(["relu", "tanh"]),
                        })
Example #3
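A unit test verifying that HEBOSearch.convert_search_space turns a Tune config into the equivalent HEBO DesignSpace: two searchers built from the converted and the hand-written space, seeded identically, must return the same first suggestion.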
    def testConvertHEBO(self):
        from ray.tune.suggest.hebo import HEBOSearch
        from hebo.design_space.design_space import DesignSpace
        import torch

        # Grid search not supported, should raise ValueError
        with self.assertRaises(ValueError):
            HEBOSearch.convert_search_space({"grid": tune.grid_search([0, 1])})

        config = {
            "a": tune.sample.Categorical([2, 3, 4]).uniform(),
            "b": {
                "x": tune.sample.Integer(0, 5),
                "y": 4,
                "z": tune.sample.Float(1e-4, 1e-2).loguniform()
            }
        }
        converted_config = HEBOSearch.convert_search_space(config)
        hebo_space_config = [
            {
                "name": "a",
                "type": "cat",
                "categories": [2, 3, 4]
            },
            {
                "name": "b/x",
                "type": "int",
                "lb": 0,
                "ub": 5
            },
            {
                "name": "b/z",
                "type": "pow",
                "lb": 1e-4,
                "ub": 1e-2
            },
        ]
        hebo_space = DesignSpace().parse(hebo_space_config)

        searcher1 = HEBOSearch(space=converted_config,
                               metric="a",
                               mode="max",
                               random_state_seed=123)
        searcher2 = HEBOSearch(space=hebo_space,
                               metric="a",
                               mode="max",
                               random_state_seed=123)

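        # Seed NumPy and Torch identically before each suggest() call so that
        # both searchers should propose the same first configuration.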
        np.random.seed(1234)
        torch.manual_seed(1234)
        config1 = searcher1.suggest("0")
        np.random.seed(1234)
        torch.manual_seed(1234)
        config2 = searcher2.suggest("0")

        self.assertEqual(config1, config2)
        self.assertIn(config1["a"], [2, 3, 4])
        self.assertIn(config1["b"]["x"], list(range(5)))
        self.assertLess(1e-4, config1["b"]["z"])
        self.assertLess(config1["b"]["z"], 1e-2)

        searcher = HEBOSearch(metric="a", mode="max", random_state_seed=123)
        analysis = tune.run(_mock_objective,
                            config=config,
                            search_alg=searcher,
                            num_samples=1)
        trial = analysis.trials[0]
        self.assertIn(trial.config["a"], [2, 3, 4])
        self.assertEqual(trial.config["b"]["y"], 4)
Example #4
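A driver function that assembles the scheduler, search algorithm (AxSearch, HyperOptSearch, or HEBOSearch) and stopper from a job configuration object and launches tune.run.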
def ray_tune(model_wrapper, job_config: ht.config, resume: bool = False):
    """Performs automatic hyper-parameters tuning with Ray"""
    # initialize
    tuner = job_config.tune.clone().tuner
    log_dir = pathlib.Path(job_config.run.save_sub_dir) / "tmp_log"
    log_dir.mkdir(parents=True, exist_ok=True)

    # set up config
    config = get_hypers_tune(job_config)
    # set up scheduler
    sched_class = getattr(schedulers, tuner.scheduler_class)
    logger.info(f"Setting up scheduler: {tuner.scheduler_class}")
    sched_config = tuner.scheduler.get_config_dict()
    sched = sched_class(**sched_config)
    # set up algorithm
    algo_class = tuner.algo_class
    logger.info(f"Setting up search algorithm: {tuner.algo_class}")
    algo_config = tuner.algo.get_config_dict()
    algo = None
    if algo_class == "AxSearch":
        from ray.tune.suggest.ax import AxSearch

        algo = AxSearch(**algo_config)
    elif algo_class == "HyperOptSearch":
        from ray.tune.suggest.hyperopt import HyperOptSearch

        algo = HyperOptSearch(**algo_config)
    elif algo_class == "HEBOSearch":
        from ray.tune.suggest.hebo import HEBOSearch

        algo = HEBOSearch(**algo_config)
    elif algo_class is not None:
        logger.error(f"Unsupported search algorithm: {algo_class}")
        logger.info("Using default value None for search algorithm")
    # set stopper
    if tuner.stopper_class is None:
        stop = None
    else:
        logger.info(f"Setting up stopper: {tuner.stopper_class}")
        stop_class = getattr(ray.tune.stopper, tuner.stopper_class)
        stop_config = tuner.stopper.get_config_dict()
        stop = stop_class(**stop_config)
    # set up extra run configs
    # important: convert Hepy_Config class to dict
    run_config = tuner.run.get_config_dict()
    if "raise_on_failed_trial" not in run_config:
        run_config["raise_on_failed_trial"] = False
    tune_func = getattr(hep_model, model_wrapper._tune_fun_name)

    # start tuning jobs
    if os.name == "posix":
        logger.info(f"Ignoring tune.tmp.tmp_dir setting on Unix OS")
        ray.init(**(tuner.init.get_config_dict()))
    else:
        ray.init(
            _temp_dir=str(job_config.tune.tmp_dir),
            **(tuner.init.get_config_dict()),
        )
    analysis = tune.run(
        tune_func,
        name="ray_tunes",
        stop=stop,
        search_alg=algo,
        scheduler=sched,
        config=config,
        local_dir=job_config.run.save_sub_dir,
        resume=resume,
        **run_config,
    )
    print("#### Best hyperparameters found were:")
    print(analysis.best_config)
    print(yaml.dump(analysis.best_config))

    return analysis
Example #5
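Builds the keyword arguments for tune.run from a tuning configuration: the trainable, per-trial resources, optional Weights & Biases logging, and the search algorithm (random search, SkOpt, or HEBO).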
def get_run_params(tuning_config, param_config):
    params = {}

    if "benchmarking_params" in tuning_config:
        from benchmarking_trainable import BenchmarkingTrainable
        params["run_or_experiment"] = BenchmarkingTrainable
    else:
        from tuning_trainable import TuningTrainable
        from tuning_callbacks import TuningCallbacks
        params["run_or_experiment"] = TuningTrainable
        params["callbacks"] = [TuningCallbacks(tuning_config, param_config)]

    params["name"] = tuning_config["name"]

    params["num_samples"] = tuning_config["samples"]
    if "tuning_timeout" in tuning_config:
        params["time_budget_s"] = tuning_config["tuning_timeout"]
    params["verbose"] = tuning_config["logging_level"]
    #params["stop"] = { "training_iteration": tuning_config["stages"] }
    params["resources_per_trial"] = {
        "cpu":
        multiprocessing.cpu_count() / tuning_config["max_concurrent_trials"],
        "gpu": 0
    }
    params["mode"] = "max"
    params["metric"] = "score"

    finalize_param_config(tuning_config, param_config)
    params["config"] = param_config

    if tuning_config["use_wandb"]:
        from ray.tune.integration.wandb import WandbLoggerCallback
        callbacks = params["callbacks"] = params.get("callbacks", [])

        # read the W&B API key from ~/.netrc: last line, last word, newline stripped
        with open(os.path.expanduser("~") + "/.netrc", "r") as wandb_cfg_f:
            wandb_api_key = wandb_cfg_f.readlines()[-1].split(' ')[-1].rstrip('\n')

        callbacks.append(
            WandbLoggerCallback(project="ralph-tuning" if "benchmarking_params"
                                not in tuning_config else "ralph-benchmarking",
                                api_key=wandb_api_key,
                                group=tuning_config["name"],
                                log_config=False))
        os.environ["WANDB_START_METHOD"] = "thread"
        # silent=true is very important, do not delete;
        # otherwise, the whole tuning gets mysteriously stuck
        os.environ["WANDB_SILENT"] = "true"

    params["reuse_actors"] = True

    points_to_evaluate = tuning_config.get("initial_param_configs", None)
    max_concurrent = tuning_config["max_concurrent_trials"]

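    # Pick the search algorithm requested by the tuning config.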
    if tuning_config["optimizer"] == "RANDOM":
        from ray.tune.suggest import BasicVariantGenerator
        params["search_alg"] = BasicVariantGenerator(
            points_to_evaluate=points_to_evaluate,
            max_concurrent=max_concurrent)
    elif tuning_config["optimizer"] == "SKOPT":
        from ray.tune.suggest.skopt import SkOptSearch
        params["search_alg"] = SkOptSearch(
            metric="score",
            mode="max",
            points_to_evaluate=points_to_evaluate,
            convert_to_python=True,
            max_concurrent=max_concurrent)
    elif tuning_config["optimizer"] == "HEBO":
        from ray.tune.suggest.hebo import HEBOSearch
        params["search_alg"] = HEBOSearch(
            metric="score",
            mode="max",
            points_to_evaluate=points_to_evaluate,
            max_concurrent=max_concurrent)

    return params