Example #1
    def __init__(self,
                 num_workers: int,
                 space,
                 learner,
                 acq_func,
                 liar_strategy,
                 KAPPA=1.96,
                 SEED=12345,
                 **kwargs):
        assert learner in [
            "RF", "ET", "GBRT", "GP", "DUMMY"
        ], f"Unknown scikit-optimize base_estimator: {learner}"
        assert liar_strategy in "cl_min cl_mean cl_max".split()

        self.space = space
        self.learner = learner
        self.acq_func = acq_func
        self.liar_strategy = liar_strategy
        self.KAPPA = KAPPA
        self.SEED = SEED

        n_init = inf if learner == 'DUMMY' else num_workers

        if isinstance(self.space, CS.ConfigurationSpace):
            # Pass the seed on for reproducible RNG
            self.space.seed(self.SEED)
            self._optimizer = SkOptimizer(
                dimensions=self.space,
                base_estimator=self.learner,
                acq_optimizer='sampling',
                acq_func=self.acq_func,
                acq_func_kwargs={'kappa': self.KAPPA},
                random_state=self.SEED,
                n_initial_points=n_init)
        else:
            self._optimizer = SkOptimizer(
                dimensions=self.space.dimensions,
                base_estimator=self.learner,
                acq_optimizer='sampling',
                acq_func=self.acq_func,
                acq_func_kwargs={'kappa': self.KAPPA},
                random_state=self.SEED,
                n_initial_points=n_init)

        self.evals = {}
        self.counter = 0
        logger.info("Using skopt.Optimizer with %s base_estimator" %
                    self.learner)
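The wrappers above only build the optimizer; the actual search is driven through scikit-optimize's ask/tell interface. Below is a minimal, self-contained sketch of that loop with a toy objective (stock scikit-optimize; note that passing a ConfigSpace.ConfigurationSpace as dimensions, as Example #1 does, appears to require a fork with ConfigSpace support such as DeepHyper's dh-scikit-optimize, since stock skopt only accepts a list of dimensions):

from skopt import Optimizer

f = lambda x: (x[0] - 0.3) ** 2  # toy 1-D objective to minimize

opt = Optimizer(
    dimensions=[(0.0, 1.0)],          # one continuous dimension
    base_estimator="RF",              # same choices as above: RF/ET/GBRT/GP/DUMMY
    acq_optimizer="sampling",
    acq_func="LCB",
    acq_func_kwargs={"kappa": 1.96},
    random_state=12345,
    n_initial_points=4,               # random draws before the surrogate takes over
)

for _ in range(10):
    x = opt.ask()        # next point to evaluate
    opt.tell(x, f(x))    # feed the observation back

print(min(opt.yi))       # best objective value seen so far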
Example #2
    def __init__(self, num_workers: int, space, learner, acq_func,
                 liar_strategy, KAPPA=1.96, SEED=12345, **kwargs):
        assert learner in [
            "RF", "ET", "GBRT", "GP", "DUMMY"
        ], f"Unknown scikit-optimize base_estimator: {learner}"
        assert liar_strategy in "cl_min cl_mean cl_max".split()

        self.space = space
        self.learner = learner
        self.acq_func = acq_func
        self.liar_strategy = liar_strategy
        self.KAPPA = KAPPA
        self.SEED = SEED

        n_init = inf if learner == 'DUMMY' else num_workers

        self._optimizer = SkOptimizer(dimensions=self.space.dimensions,
                                      base_estimator=self.learner,
                                      acq_optimizer='sampling',
                                      acq_func=self.acq_func,
                                      acq_func_kwargs={'kappa': self.KAPPA},
                                      random_state=self.SEED,
                                      n_initial_points=n_init)

        self.evals = {}
        self.counter = 0
        logger.info("Using skopt.Optimizer with %s base_estimator" %
                    self.learner)
Example #3
    def __init__(
        self,
        problem,
        num_workers,
        surrogate_model="RF",
        acq_func="gp_hedge",
        acq_kappa=1.96,
        acq_xi=None,
        liar_strategy="cl_max",
        n_jobs=1,
        **kwargs,
    ):

        assert surrogate_model in [
            "RF",
            "ET",
            "GBRT",
            "GP",
            "DUMMY",
        ], f"Unknown scikit-optimize base_estimator: {surrogate_model}"

        if surrogate_model == "RF":
            base_estimator = RandomForestRegressor(n_jobs=n_jobs)
        elif surrogate_model == "ET":
            base_estimator = ExtraTreesRegressor(n_jobs=n_jobs)
        elif surrogate_model == "GBRT":
            base_estimator = GradientBoostingQuantileRegressor(n_jobs=n_jobs)
        else:
            base_estimator = surrogate_model

        self.space = problem.space
        # queue of remaining starting points
        self.starting_points = problem.starting_point

        n_init = (inf if surrogate_model == "DUMMY" else max(
            num_workers, len(self.starting_points)))

        # Set acq_func_kwargs parameters
        acq_func_kwargs = {}
        if isinstance(acq_kappa, float):
            acq_func_kwargs["kappa"] = acq_kappa
        if isinstance(acq_xi, float):
            acq_func_kwargs["xi"] = acq_xi

        self._optimizer = SkOptimizer(
            dimensions=self.space,
            base_estimator=base_estimator,
            acq_optimizer="sampling",
            acq_func=acq_func,
            acq_func_kwargs=acq_func_kwargs,
            random_state=self.SEED,  # class-level SEED constant assumed
            n_initial_points=n_init,
        )

        assert liar_strategy in "cl_min cl_mean cl_max".split()
        self.strategy = liar_strategy
        self.evals = {}
        self.counter = 0
        logger.info(
            f"Using skopt.Optimizer with {surrogate_model} base_estimator")
Example #4
    def __init__(
        self,
        problem,
        run,
        evaluator,
        population_size=100,
        sample_size=10,
        plot="true",
        n_jobs=1,
        **kwargs,
    ):
        super().__init__(
            problem=problem,
            run=run,
            evaluator=evaluator,
            population_size=population_size,
            sample_size=sample_size,
            **kwargs,
        )

        self.do_plot = plot == "true"
        self.n_jobs = int(n_jobs)

        # Initialize hyperparameter space

        # self.hp_space = cs.ConfigurationSpace(seed=42)
        # self.hp_space.add_hyperparameter(
        #     check_hyperparameter(
        #         self.problem.space["hyperparameters"]["learning_rate"], "learning_rate"
        #     )
        # )
        # self.hp_space.add_hyperparameter(
        #     check_hyperparameter(
        #         self.problem.space["hyperparameters"]["batch_size"], "batch_size"
        #     )
        # )

        self.hp_space = []
        self.hp_space.append(
            self.problem.space["hyperparameters"]["learning_rate"])

        # plotting
        lr_range = self.problem.space["hyperparameters"]["learning_rate"][:2]
        self.domain_x = np.linspace(*lr_range, 400).reshape(-1, 1)

        # Initialize optimizer of the hyperparameter space
        acq_func_kwargs = {"xi": 0.000001, "kappa": 0.001}  # tiny exploration
        self.n_initial_points = self.free_workers

        self.hp_opt = SkOptimizer(
            dimensions=self.hp_space,
            base_estimator=RandomForestRegressor(n_jobs=self.n_jobs),
            acq_func="LCB",
            acq_optimizer="sampling",
            acq_func_kwargs=acq_func_kwargs,
            n_initial_points=self.n_initial_points,
            # model_queue_size=100,
        )
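A note on the acquisition parameters used in these examples: with acq_func="LCB" the next point minimizes mu(x) - kappa * sigma(x), so kappa weights exploration. kappa = 0.001 (as here) is nearly pure exploitation, which is what the "tiny exploration" comment refers to, while the conventional kappa = 1.96 corresponds to a 95% confidence bound. xi plays the analogous margin role for the EI and PI acquisition functions and is ignored by LCB.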
Example #5
    def __init__(
        self,
        problem,
        run,
        evaluator,
        surrogate_model="RF",
        acq_func="LCB",
        kappa=1.96,
        xi=0.001,
        liar_strategy="cl_min",
        n_jobs=1,
        **kwargs,
    ):
        super().__init__(
            problem=problem,
            run=run,
            evaluator=evaluator,
            **kwargs,
        )

        self.n_jobs = int(n_jobs)  # parallelism of BO surrogate model estimator
        self.kappa = float(kappa)
        self.xi = float(xi)
        self.n_initial_points = self.evaluator.num_workers
        self.liar_strategy = liar_strategy

        # Setup
        na_search_space = self.problem.build_search_space()

        self.hp_space = self.problem._hp_space  #! hyperparameters
        self.hp_size = len(self.hp_space.space.get_hyperparameter_names())
        self.na_space = HpProblem(self.problem.seed)
        for i, vnode in enumerate(na_search_space.variable_nodes):
            self.na_space.add_hyperparameter((0, vnode.num_ops - 1),
                                             name=f"vnode_{i:05d}")

        self.space = CS.ConfigurationSpace(seed=self.problem.seed)
        self.space.add_configuration_space(
            prefix="1", configuration_space=self.hp_space.space)
        self.space.add_configuration_space(
            prefix="2", configuration_space=self.na_space.space)

        # Initialize optimizer of the hyperparameter space
        self.opt = SkOptimizer(
            dimensions=self.space,
            base_estimator=self.get_surrogate_model(surrogate_model,
                                                    self.n_jobs),
            acq_func=acq_func,
            acq_optimizer="sampling",
            acq_func_kwargs={
                "xi": self.xi,
                "kappa": self.kappa
            },
            n_initial_points=self.n_initial_points,
        )
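Example #5 joins the hyperparameter space and the architecture space into a single ConfigurationSpace via add_configuration_space, which prefixes every name with "1:" or "2:" (":" is ConfigSpace's default delimiter). A minimal sketch of that mechanism, with invented hyperparameter names for illustration:

import ConfigSpace as CS
import ConfigSpace.hyperparameters as CSH

hp_space = CS.ConfigurationSpace(seed=42)
hp_space.add_hyperparameter(
    CSH.UniformFloatHyperparameter("lr", lower=1e-4, upper=1e-1, log=True))

na_space = CS.ConfigurationSpace(seed=42)
na_space.add_hyperparameter(
    CSH.UniformIntegerHyperparameter("vnode_00000", lower=0, upper=7))

space = CS.ConfigurationSpace(seed=42)
space.add_configuration_space(prefix="1", configuration_space=hp_space)
space.add_configuration_space(prefix="2", configuration_space=na_space)

print(space.get_hyperparameter_names())  # ['1:lr', '2:vnode_00000']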
Example #6
    def __init__(
        self,
        problem,
        run,
        evaluator,
        population_size=100,
        sample_size=10,
        n_jobs=1,
        kappa=0.001,
        xi=0.000001,
        **kwargs,
    ):
        super().__init__(
            problem=problem,
            run=run,
            evaluator=evaluator,
            population_size=population_size,
            sample_size=sample_size,
            **kwargs,
        )

        self.n_jobs = int(n_jobs)

        # Initialize hyperparameter space

        self.hp_space = []
        # add the 'learning_rate' space to the HPO search space
        self.hp_space.append(
            self.problem.space["hyperparameters"]["learning_rate"])
        # add the 'batch_size' space to the HPO search space
        self.hp_space.append(
            self.problem.space["hyperparameters"]["batch_size"])
        # add the 'ranks_per_node' space to the HPO search space
        self.hp_space.append(
            self.problem.space["hyperparameters"]["ranks_per_node"])

        # Initialize optimizer of the hyperparameter space
        acq_func_kwargs = {
            "xi": float(xi),
            "kappa": float(kappa)
        }  # tiny exploration
        # self.free_workers = 128  #! TODO: test
        self.n_initial_points = self.free_workers

        self.hp_opt = SkOptimizer(
            dimensions=self.hp_space,
            base_estimator=RandomForestRegressor(n_jobs=self.n_jobs),
            acq_func="LCB",
            acq_optimizer="sampling",
            acq_func_kwargs=acq_func_kwargs,
            n_initial_points=self.n_initial_points,
            # model_queue_size=100,
        )
Example #7
    def __init__(self,
                 problem,
                 num_workers,
                 surrogate_model='RF',
                 acq_func='gp_hedge',
                 acq_kappa=1.96,
                 liar_strategy='cl_max',
                 n_jobs=-1,
                 **kwargs):
        assert surrogate_model in [
            "RF", "ET", "GBRT", "GP", "DUMMY"
        ], f"Unknown scikit-optimize base_estimator: {surrogate_model}"
        if surrogate_model == "RF":
            base_estimator = RandomForestRegressor(n_jobs=n_jobs)
        elif surrogate_model == "ET":
            base_estimator = ExtraTreesRegressor(n_jobs=n_jobs)
        elif surrogate_model == "GBRT":
            base_estimator = GradientBoostingQuantileRegressor(n_jobs=n_jobs)
        else:
            base_estimator = surrogate_model

        self.space = problem.space
        cs_kwargs = self.space['create_search_space'].get('kwargs')
        if cs_kwargs is None:
            search_space = self.space['create_search_space']['func']()
        else:
            search_space = self.space['create_search_space']['func'](
                **cs_kwargs)

        # queue of remaining starting points
        # self.starting_points = problem.starting_point
        n_init = np.inf if surrogate_model == 'DUMMY' else num_workers

        self.starting_points = []  # ! EMPTY for now TODO

        # Building search space for SkOptimizer
        skopt_space = [(0, vnode.num_ops - 1)
                       for vnode in search_space.variable_nodes]

        self._optimizer = SkOptimizer(skopt_space,
                                      base_estimator=base_estimator,
                                      acq_optimizer='sampling',
                                      acq_func=acq_func,
                                      acq_func_kwargs={'kappa': acq_kappa},
                                      random_state=self.SEED,  # class-level SEED assumed
                                      n_initial_points=n_init)

        assert liar_strategy in "cl_min cl_mean cl_max".split()
        self.strategy = liar_strategy
        self.evals = {}
        self.counter = 0
        logger.info("Using skopt.Optimizer with %s base_estimator" %
                    surrogate_model)
Example #8
    def __init__(self, problem, run, evaluator, **kwargs):
        super().__init__(problem=problem,
                         run=run,
                         evaluator=evaluator,
                         **kwargs)

        self.free_workers = self.evaluator.num_workers

        dhlogger.info(
            jm(
                type="start_infos",
                alg="bayesian-optimization-for-hpo-nas",
                nworkers=self.evaluator.num_workers,
                encoded_space=json.dumps(self.problem.space, cls=Encoder),
            ))

        # Setup
        self.pb_dict = self.problem.space
        cs_kwargs = self.pb_dict["create_search_space"].get("kwargs")
        if cs_kwargs is None:
            search_space = self.pb_dict["create_search_space"]["func"]()
        else:
            search_space = self.pb_dict["create_search_space"]["func"](
                **cs_kwargs)

        self.space_list = [(0, vnode.num_ops - 1)
                           for vnode in search_space.variable_nodes]

        # Initialize hyperparameter space
        self.dimensions = []
        self.size_ha = None  # Number of algorithm hyperparameters in the dimension list
        self.add_ha_dimensions()
        self.add_hm_dimensions()

        # Initialize optimizer of the hyperparameter space
        # acq_func_kwargs = {"xi": 0.000001, "kappa": 0.001}  # tiny exploration
        acq_func_kwargs = {"xi": 0.000001, "kappa": 1.96}  # tiny xi, standard kappa
        self.n_initial_points = self.free_workers

        self.opt = SkOptimizer(
            dimensions=self.dimensions,
            base_estimator=RandomForestRegressor(n_jobs=32),
            # base_estimator=RandomForestRegressor(n_jobs=4),
            acq_func="LCB",
            acq_optimizer="sampling",
            acq_func_kwargs=acq_func_kwargs,
            n_initial_points=self.n_initial_points,
            # model_queue_size=100,
        )
Example #9
    def __init__(self,
                 problem,
                 num_workers,
                 surrogate_model='RF',
                 acq_func='gp_hedge',
                 acq_kappa=1.96,
                 liar_strategy='cl_max',
                 n_jobs=1,
                 **kwargs):

        assert surrogate_model in [
            "RF", "ET", "GBRT", "GP", "DUMMY"
        ], f"Unknown scikit-optimize base_estimator: {surrogate_model}"

        if surrogate_model == "RF":
            base_estimator = RandomForestRegressor(n_jobs=n_jobs)
        elif surrogate_model == "ET":
            base_estimator = ExtraTreesRegressor(n_jobs=n_jobs)
        elif surrogate_model == "GBRT":
            base_estimator = GradientBoostingQuantileRegressor(n_jobs=n_jobs)
        else:
            base_estimator = surrogate_model

        self.space = problem.space
        # queue of remaining starting points
        self.starting_points = problem.starting_point

        n_init = inf if surrogate_model == 'DUMMY' else max(
            num_workers, len(self.starting_points))

        self._optimizer = SkOptimizer(self.space.values(),
                                      base_estimator=base_estimator,
                                      acq_optimizer='sampling',
                                      acq_func=acq_func,
                                      acq_func_kwargs={'kappa': acq_kappa},
                                      random_state=self.SEED,  # class-level SEED assumed
                                      n_initial_points=n_init)

        assert liar_strategy in "cl_min cl_mean cl_max".split()
        self.strategy = liar_strategy
        self.evals = {}
        self.counter = 0
        logger.info(
            f"Using skopt.Optimizer with {surrogate_model} base_estimator")
Example #10
    def __init__(
        self,
        problem,
        run,
        evaluator,
        population_size=100,
        sample_size=10,
        n_jobs=1,
        kappa=0.001,
        xi=0.000001,
        acq_func="LCB",
        **kwargs,
    ):
        super().__init__(
            problem=problem,
            run=run,
            evaluator=evaluator,
            population_size=population_size,
            sample_size=sample_size,
            **kwargs,
        )

        self.n_jobs = int(n_jobs)  # parallelism of BO surrogate model estimator

        # Initialize hyperparameter space
        self.hp_space = self.problem._hp_space

        # Initialize optimizer of the hyperparameter space
        acq_func_kwargs = {
            "xi": float(xi),
            "kappa": float(kappa)
        }  # tiny exploration
        self.n_initial_points = self.free_workers

        self.hp_opt = SkOptimizer(
            dimensions=self.hp_space._space,
            base_estimator=RandomForestRegressor(n_jobs=self.n_jobs),
            acq_func=acq_func,
            acq_optimizer="sampling",
            acq_func_kwargs=acq_func_kwargs,
            n_initial_points=self.n_initial_points,
        )
Example #11
    def __init__(self, problem, num_workers, args):
        assert args.learner in [
            "RF", "ET", "GBRT", "GP", "DUMMY"
        ], f"Unknown scikit-optimize base_estimator: {args.learner}"

        self.space = problem.space
        n_init = inf if args.learner == 'DUMMY' else num_workers
        self._optimizer = SkOptimizer(
            self.space.values(),
            base_estimator=args.learner,
            acq_optimizer='sampling',
            acq_func=args.acq_func,
            acq_func_kwargs={'kappa': self.KAPPA},  # class-level KAPPA assumed
            random_state=self.SEED,  # class-level SEED assumed
            n_initial_points=n_init
        )

        assert args.liar_strategy in "cl_min cl_mean cl_max".split()
        self.strategy = args.liar_strategy
        self.evals = {}
        self.counter = 0
        logger.info("Using skopt.Optimizer with %s base_estimator" % args.learner)
Example #12
    def __init__(
        self,
        problem,
        num_workers,
        surrogate_model="RF",
        acq_func="gp_hedge",
        acq_kappa=1.96,
        liar_strategy="cl_max",
        n_jobs=1,
        **kwargs,
    ):
        assert surrogate_model in [
            "RF",
            "ET",
            "GBRT",
            "GP",
            "DUMMY",
        ], f"Unknown scikit-optimize base_estimator: {surrogate_model}"
        if surrogate_model == "RF":
            base_estimator = RandomForestRegressor(n_jobs=n_jobs)
        elif surrogate_model == "ET":
            base_estimator = ExtraTreesRegressor(n_jobs=n_jobs)
        elif surrogate_model == "GBRT":
            base_estimator = GradientBoostingQuantileRegressor(n_jobs=n_jobs)
        else:
            base_estimator = surrogate_model

        self.problem = problem
        cs_kwargs = self.problem.space["create_search_space"].get("kwargs")
        if cs_kwargs is None:
            search_space = self.problem.space["create_search_space"]["func"]()
        else:
            search_space = self.problem.space["create_search_space"]["func"](
                **cs_kwargs)

        n_init = np.inf if surrogate_model == "DUMMY" else num_workers

        self.starting_points = []  # ! EMPTY for now TODO

        # Building search space for SkOptimizer using ConfigSpace
        skopt_space = cs.ConfigurationSpace(seed=self.problem.seed)
        for i, vnode in enumerate(search_space.variable_nodes):
            hp = csh.UniformIntegerHyperparameter(name=f"vnode_{i}",
                                                  lower=0,
                                                  upper=(vnode.num_ops - 1))
            skopt_space.add_hyperparameter(hp)

        self._optimizer = SkOptimizer(
            skopt_space,
            base_estimator=base_estimator,
            acq_optimizer="sampling",
            acq_func=acq_func,
            acq_func_kwargs={"kappa": acq_kappa},
            random_state=self.SEED,  # class-level SEED assumed
            n_initial_points=n_init,
        )

        assert liar_strategy in "cl_min cl_mean cl_max".split()
        self.strategy = liar_strategy
        self.evals = {}
        self.counter = 0
        logger.info("Using skopt.Optimizer with %s base_estimator" %
                    surrogate_model)