Example no. 1
    def __init__(self, api_config, **kwargs):
        """Build wrapper class to use an optimizer in benchmark.

        Parameters
        ----------
        api_config : dict-like of dict-like
            Configuration of the optimization variables. See API description.
        """
        AbstractOptimizer.__init__(self, api_config)

        self.space_x = JointSpace(api_config)
        self.bounds = self.space_x.get_bounds()
        self.lb, self.ub = self.bounds[:, 0], self.bounds[:, 1]
        self.dim = len(self.bounds)
        self.max_evals = np.iinfo(np.int32).max  # NOTE: Largest possible int
        self.batch_size = None
        self.history = []

        self.turbo = Turbo1(
            f=None,
            lb=self.bounds[:, 0],
            ub=self.bounds[:, 1],
            n_init=2 * self.dim + 1,
            max_evals=self.max_evals,
            batch_size=1,  # We need to update this later
            verbose=False,
        )
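
Every wrapper in these examples receives the same api_config argument. As a point of reference, here is a hedged sketch of roughly what such a configuration looks like in this benchmark; the parameter names and ranges below are illustrative, not taken from any of the examples.

# Illustrative api_config: a dict of dicts, one entry per tunable variable.
# "type" is one of real/int/bool/cat; numeric variables carry a "space"
# (linear/log/logit/bilog) and a "range", categorical ones a list of "values".
api_config = {
    "max_depth":     {"type": "int",  "space": "linear", "range": (1, 15)},
    "learning_rate": {"type": "real", "space": "log",    "range": (1e-4, 1e-1)},
    "fit_intercept": {"type": "bool"},
    "criterion":     {"type": "cat",  "values": ["gini", "entropy"]},
}
# JointSpace(api_config) then warps this mixed space into the continuous box
# whose bounds are handed to Turbo1 above as lb/ub.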
Example no. 2
    def __init__(self, api_config, base_estimator="GBRT", acq_func="EI", n_initial_points=5):
        """Build wrapper class to use an optimizer in benchmark.

        Parameters
        ----------
        api_config : dict-like of dict-like
            Configuration of the optimization variables. See API description.
        base_estimator : {'GP', 'RF', 'ET', 'GBRT'}
            How to estimate the objective function.
        acq_func : {'LCB', 'EI', 'PI', 'gp_hedge', 'EIps', 'PIps'}
            Acquisition objective to decide next suggestion.
        n_initial_points : int
            Number of points to sample randomly before actual Bayes opt.
        """
        AbstractOptimizer.__init__(self, api_config)

        dimensions, self.round_to_values = ScikitOptimizer.get_sk_dimensions(api_config)

        # Older versions of skopt don't copy over the dimensions names during
        # normalization and hence the names are missing in
        # self.skopt.space.dimensions. Therefore, we save our own copy of
        # dimensions list to be safe. If we can commit to using the newer
        # versions of skopt we can delete self.dimensions.
        self.dimensions_list = tuple(dd.name for dd in dimensions)

        self.skopt = SkOpt(
            dimensions,
            n_initial_points=n_initial_points,
            base_estimator=base_estimator,
            acq_func=acq_func,
            acq_optimizer="sampling",
            acq_func_kwargs={},
            acq_optimizer_kwargs={},
        )
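
The excerpt above only constructs the SkOpt instance. For context, here is a minimal sketch of how such an instance is driven through scikit-optimize's ask/tell interface; the dimensions and objective value are made up, and the suggest/observe pairing is an assumption about the rest of the wrapper rather than code taken from it.

from skopt import Optimizer as SkOpt
from skopt.space import Integer, Real

# Stand-in dimensions; the real wrapper derives them from api_config via
# ScikitOptimizer.get_sk_dimensions (not shown in the excerpt).
dimensions = [
    Real(1e-4, 1e-1, prior="log-uniform", name="learning_rate"),
    Integer(1, 15, name="max_depth"),
]

opt = SkOpt(
    dimensions,
    n_initial_points=5,
    base_estimator="GBRT",
    acq_func="EI",
    acq_optimizer="sampling",
)

x = opt.ask()   # roughly what a suggest() call would hand out
y = 0.42        # made-up objective value observed for x
opt.tell(x, y)  # roughly what observe() would feed back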
Example no. 3
    def __init__(self, api_config):
        """Build wrapper class to use optimizer in benchmark.

        Parameters
        ----------
        api_config : dict-like of dict-like
            Configuration of the optimization variables. See API description.
        """
        AbstractOptimizer.__init__(self, api_config)
        
        api_space = BoEI.api_manipulator(api_config)  # used for GPyOpt initialization

        self.space_x = JointSpace(api_config)  # used for warping & unwarping of new suggestions & observations

        self.hasCat, self.cat_vec = BoEI.is_cat(api_config)

        self.dim = len(self.space_x.get_bounds())

        self.objective = GPyOpt.core.task.SingleObjective(None)

        self.space = GPyOpt.Design_space(api_space)

        self.model = GPyOpt.models.GPModel(optimize_restarts=5, verbose=False)

        self.acquisition_optimizer = GPyOpt.optimization.AcquisitionOptimizer(self.space)

        self.acquisition = AcquisitionEI(
            self.model, self.space, optimizer=self.acquisition_optimizer, cost_withGradients=None
        )

        self.batch_size = None
Example no. 4
    def __init__(self, api_config, **kwargs):
        AbstractOptimizer.__init__(self, api_config)

        print('api_config:', api_config)
        self.api_config = api_config

        self.space_x = JointSpace(api_config)
        self.bounds = self.space_x.get_bounds()
        self.lb, self.ub = self.bounds[:, 0], self.bounds[:, 1]
        self.dim = len(self.bounds)

        self.X = np.zeros((0, self.dim))
        self.y = np.zeros((0, 1))

        self.X_init = None
        self.batch_size = None
        self.turbo = None
        self.split_used = 0
        self.node = None
        self.best_values = []

        self.config = self._read_config()
        print('config:', self.config)
        optimizer_seed = self.config.get('optimizer_seed')
        fix_optimizer_seed(optimizer_seed)
        self.sampler_seed = self.config.get('sampler_seed')
        sampler.fix_sampler_seed(self.sampler_seed)

        self.is_init_batch = False
        self.init_batches = []
Example no. 5
    def __init__(self, api_config):
        """Build wrapper class to use an optimizer in benchmark.
        Parameters
        ----------
        api_config : dict-like of dict-like
            Configuration of the optimization variables. See API description.
        """
        AbstractOptimizer.__init__(self, api_config)

        self.search_space = JointSpace(api_config)
        self.bounds = self.search_space.get_bounds()
        self.iter = 0
        # Sets up the optimization problem (needs self.bounds)
        self.create_opt_prob()
        self.max_evals = np.iinfo(np.int32).max  # NOTE: Largest possible int
        self.batch_size = None
        self.history = []
        self.proposals = []
        # Population-based parameters in DE
        self.population = []
        self.fitness = []
        self.F = 0.7
        self.Cr = 0.7
        # For bayes opt
        self.dim = len(self.search_space.param_list)
        self.torch_bounds = torch.from_numpy(self.search_space.get_bounds().T)
        self.min_max_bounds = torch.from_numpy(
            np.stack([np.zeros(self.dim),
                      np.ones(self.dim)]))
        self.archive = []
        self.arc_fitness = []
Example no. 6
 def __init__(self, api_config, random=np_util.random):
     AbstractOptimizer.__init__(self, api_config)
     self.random = random
     self.param_list = sorted([
         kk for kk in api_config.keys()
         if api_config[kk]["type"] in ("real", "int")
     ])
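
As a quick illustration of the filter above, with a made-up api_config only the real and int variables end up in param_list:

api_config = {
    "lr":         {"type": "real", "space": "log",    "range": (1e-5, 1e-1)},
    "n_layers":   {"type": "int",  "space": "linear", "range": (1, 4)},
    "activation": {"type": "cat",  "values": ["relu", "tanh"]},
}

param_list = sorted(
    kk for kk in api_config.keys() if api_config[kk]["type"] in ("real", "int")
)
print(param_list)  # ['lr', 'n_layers']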
Example no. 7
 def __init__(self, api_config):
     AbstractOptimizer.__init__(self, api_config)
     self.api_config = api_config
     self.space = self.parse_space(api_config)
     self.X = pd.DataFrame(columns=self.space.para_names)
     self.y = np.zeros((0, 1))
     for k in api_config:
         print(k, api_config[k])
     self.sobol = SobolEngine(self.space.num_paras, scramble=True)
Example no. 8
 def __init__(self, api_config, random=np_util.random):
     """Build wrapper class to use random search function in benchmark.
     Settings for `suggest_dict` can be passed using kwargs.
     Parameters
     ----------
     api_config : dict-like of dict-like
         Configuration of the optimization variables. See API description.
     """
     AbstractOptimizer.__init__(self, api_config)
     self.cs = self.get_cs_dimensions(api_config)
Example no. 9
    def __init__(self, api_config, **kwargs):
        """Build wrapper class to use an optimizer in benchmark.

        Parameters
        ----------
        api_config : dict-like of dict-like
            Configuration of the optimization variables. See API description.
        """
        AbstractOptimizer.__init__(self, api_config)
        self.opt1 = PySOTOptimizer(api_config, **kwargs)
        self.opt2 = OpentunerOptimizer(api_config)
Example no. 10
    def __init__(self, api_config, random, cost_function, step=0):
        """Build wrapper class to use random search function in benchmark.

        Parameters
        ----------
        api_config : dict-like of dict-like
            Configuration of the optimization variables. See API description.
        cost_function : callable
            Callback taking a suggestion `X` and returning its cost.
        """
        AbstractOptimizer.__init__(self, api_config)
        self.random = random
        self.cost_function = cost_function
Example no. 11
    def __init__(self, api_config, random=np_util.random):
        """Build wrapper class to use random search function in benchmark.

        Settings for `suggest_dict` can be passed using kwargs.

        Parameters
        ----------
        api_config : dict-like of dict-like
            Configuration of the optimization variables. See API description.
        """
        AbstractOptimizer.__init__(self, api_config)
        self.random = random
        self.mode = self.random.choice(["normal", "crash", "delay"])
Example no. 12
    def __init__(self, api_config):
        """Build wrapper class to use an optimizer in benchmark.

        Parameters
        ----------
        api_config : dict-like of dict-like
            Configuration of the optimization variables. See API description.
        """
        AbstractOptimizer.__init__(self, api_config)

        self.space_x = JointSpace(api_config)
        self.bounds = self.space_x.get_bounds()
        self.create_opt_prob()  # Sets up the optimization problem (needs self.bounds)
        self.max_evals = np.iinfo(np.int32).max  # NOTE: Largest possible int
        self.batch_size = None
        self.history = []
        self.proposals = []
Example no. 13
    def __init__(self, api_config):
        """Build wrapper class to use an optimizer in benchmark.

        Parameters
        ----------
        api_config : dict-like of dict-like
            Configuration of the optimization variables. See API description.
        """
        AbstractOptimizer.__init__(self, api_config)

        self.space_x = JointSpace(api_config)
        self.bounds = self.space_x.get_bounds()
        self.create_opt_prob()  # Sets up the optimization problem (needs self.bounds)
        self.max_evals = np.iinfo(np.int32).max  # NOTE: Largest possible int
        self.turbo_batch_size = None
        self.pysot_batch_size = None
        self.history = []
        self.proposals = []
        self.lb, self.ub = self.bounds[:, 0], self.bounds[:, 1]
        self.dim = len(self.bounds)

        self.turbo = Turbo1(
            f=None,
            lb=self.bounds[:, 0],
            ub=self.bounds[:, 1],
            n_init=2 * self.dim + 1,
            max_evals=self.max_evals,
            batch_size=4,  # We need to update this later
            verbose=False,
        )

        # hyperopt
        self.random = np_random

        space, self.round_to_values = tuSOTOptimizer.get_hyperopt_dimensions(
            api_config)
        self.domain = Domain(dummy_f, space, pass_expr_memo_ctrl=None)
        self.trials = Trials()

        # Some book keeping like opentuner wrapper
        self.trial_id_lookup = {}

        # Store just for data validation
        self.param_set_chk = frozenset(api_config.keys())
Example no. 14
    def __init__(self, api_config, tool="OnePlusOne", budget=300):
        """Build wrapper class to use nevergrad optimizer in benchmark.

        Parameters
        ----------
        api_config : dict-like of dict-like
            Configuration of the optimization variables. See API description.
        budget : int
            Expected number of max function evals
        """
        AbstractOptimizer.__init__(self, api_config)

        self.instrum, self.space = NevergradOptimizer.get_nvg_dimensions(
            api_config)

        dimension = self.instrum.dimension
        opt_class = optimization.registry[tool]
        self.optim = opt_class(dimension=dimension, budget=budget)
Example no. 15
    def __init__(self, api_config, random=np_random):
        """Build wrapper class to use hyperopt optimizer in benchmark.

        Parameters
        ----------
        api_config : dict-like of dict-like
            Configuration of the optimization variables. See API description.
        """
        AbstractOptimizer.__init__(self, api_config)
        self.random = random

        space, self.round_to_values = HyperoptOptimizer.get_hyperopt_dimensions(api_config)
        self.domain = Domain(dummy_f, space, pass_expr_memo_ctrl=None)
        self.trials = Trials()

        # Some book keeping like opentuner wrapper
        self.trial_id_lookup = {}

        # Store just for data validation
        self.param_set_chk = frozenset(api_config.keys())
Example no. 16
    def __init__(self, api_config, n_initial_points=0):
        """Build wrapper class to use an optimizer in benchmark

        Parameters
        ----------
        api_config : dict-like of dict-like
            Configuration of the optimization variables. See API description.
        n_initial_points : int
            Number of points to sample randomly before actual Nomad optimization.
        """
        AbstractOptimizer.__init__(self, api_config)

        self.api_config = api_config

        main_params = PyNomadOptimizer.get_nomad_dimensions(api_config)

        # NB these params will have to be tuned according to the number of variables,
        # and their types
        params = [
            main_params[0],  #main_params[1], main_params[2],
            'BB_OUTPUT_TYPE OBJ',
            'MAX_BB_EVAL ' + str(8 * 16),
            'BB_MAX_BLOCK_SIZE 8',
            'MODEL_SEARCH SGTELIB',
            'SGTELIB_MODEL_CANDIDATES_NB 8',
            'SGTELIB_MODEL_TRIALS 5',
            'MODEL_EVAL_SORT no',
            'DIRECTION_TYPE ORTHO 2N',
            'SPECULATIVE_SEARCH no',
            'LH_SEARCH 8 0',
            'OPPORTUNISTIC_EVAL false',
            'NM_SEARCH false'
        ]  #,'PERIODIC_VARIABLE 0']

        # deal with categorical variables
        if len(main_params[3]) > 0:
            instruction = 'PERIODIC_VARIABLE '
            for var_index in main_params[3]:
                instruction += str(var_index) + ' '
            params.append(instruction)

        self.round_to_values = main_params[4]

        # lower and upper bounds are given
        lb = main_params[1]
        ub = main_params[2]

        x0 = []  # TODO choose x0 or LHS with n_initial_points

        # TODO analyze the types of the inputs to fill at maximum nomad bb blocks

        # queues to communicate between threads
        #  self.inputs_queue = queue.Queue()
        #  self.outputs_queue = queue.Queue()
        self.inputs_queue = multiprocessing.JoinableQueue()
        self.outputs_queue = multiprocessing.JoinableQueue()

        # iteration counter: needed to properly shut down the background solver process
        self.n_iters = 0

        # list to keep candidates for an evaluation
        self.stored_candidates = list()

        # start background solver process (earlier thread-based variant kept commented out)
        #  self.nomad_thread = threading.Thread(target=nomad_solve, args=(self.bb_fct, x0, lb, ub, params,), daemon=True)
        #  self.nomad_thread.start()
        self.nomad_process = multiprocessing.Process(target=nomad_solve,
                                                     args=(
                                                         self.bb_fct,
                                                         x0,
                                                         lb,
                                                         ub,
                                                         params,
                                                     ))
        self.nomad_process.start()
Example no. 17
    def __init__(self, api_config, random=np_util.random, n_initial_points=0):
        """Build wrapper class to use an optimizer in benchmark

        Parameters
        ----------
        api_config : dict-like of dict-like
            Configuration of the optimization variables. See API description.
        n_initial_points : int
            Number of points to sample randomly before actual Nomad optimization.
        """
        AbstractOptimizer.__init__(self, api_config)

        self.api_config = api_config

        print(api_config)

        main_params = PyNomadOptimizer.get_nomad_dimensions(api_config)

        # NB these params will have to be tuned according to the number of variables,
        # and their types
        #  params = [main_params[0], #main_params[1], main_params[2],
        #            'BB_OUTPUT_TYPE OBJ',
        #            'MAX_BB_EVAL ' + str(8 * 16),
        #            'BB_MAX_BLOCK_SIZE 8',
        #            'MODEL_SEARCH SGTELIB',
        #            'SGTELIB_MODEL_CANDIDATES_NB 8',
        #            'SGTELIB_MODEL_TRIALS 5',
        #            'MODEL_EVAL_SORT no',
        #            'DIRECTION_TYPE ORTHO 2N',
        #            'SPECULATIVE_SEARCH no',
        #            'LH_SEARCH 8 0',
        #            'OPPORTUNISTIC_EVAL false',
        #            'NM_SEARCH false'] #,'PERIODIC_VARIABLE 0']

        params = [
            main_params[0],  #main_params[1], main_params[2],
            'BB_OUTPUT_TYPE OBJ',
            'MAX_BB_EVAL ' + str(8 * 16),
            'BB_MAX_BLOCK_SIZE 8',
            'MODEL_SEARCH SGTELIB',
            'SGTELIB_MODEL_CANDIDATES_NB 8',
            'SGTELIB_MODEL_TRIALS 5',
            'MODEL_EVAL_SORT no',
            'SPECULATIVE_SEARCH no',
            'LH_SEARCH 8 0',
            'OPPORTUNISTIC_EVAL true',
            'OPPORTUNISTIC_LH false',
            'NM_SEARCH false',
            'INITIAL_MESH_SIZE r0.001'
        ]  #,'PERIODIC_VARIABLE 0']

        # fine tune parameters according to the number of variables
        dimensions_pb = len(self.api_config.keys())

        #  Direction type and intensification for dimension 3 to 9
        #   dim       2n         n+1       strategy
        #     3        6           4       2n + intens. 4 => 8
        #     4        8           5       2n => 8
        #     5       10           6       n+1 + intens. 2 => 8
        #     6       12           7       n+1 + intens. 1 => 8
        #     7       14           8       n+1 => 8
        #     8       16           9       2n => 2*8
        #     9       18          10       n+1 + intens. 6 => 2*8

        # choose direction type according to the type of the dimensions of the problem
        if dimensions_pb == 3 or dimensions_pb == 4 or dimensions_pb == 8:
            params.append('DIRECTION_TYPE ORTHO 2N')
        else:
            params.append('DIRECTION_TYPE ORTHO N+1 UNI')

        # intensification for some direction type and dimensions
        if dimensions_pb == 3:
            params.append('MAX_EVAL_INTENSIFICATION 4')
        if dimensions_pb == 5:
            params.append('MAX_EVAL_INTENSIFICATION 2')
        if dimensions_pb == 6:
            params.append('MAX_EVAL_INTENSIFICATION 1')
        if dimensions_pb == 9:
            params.append('MAX_EVAL_INTENSIFICATION 6')

        # deal with categorical variables
        if len(main_params[3]) > 0:
            instruction = 'PERIODIC_VARIABLE '
            for var_index in main_params[3]:
                instruction += str(var_index) + ' '
            params.append(instruction)

        self.round_to_values = main_params[4]

        # lower and upper bounds are given
        lb = main_params[1]
        ub = main_params[2]

        x0 = []  # TODO choose x0 or LHS with n_initial_points

        # TODO analyze the types of the inputs to fill at maximum nomad bb blocks

        # queues to communicate between threads
        #  self.inputs_queue = queue.Queue()
        #  self.outputs_queue = queue.Queue()
        self.inputs_queue = multiprocessing.JoinableQueue()
        self.outputs_queue = multiprocessing.JoinableQueue()

        # random stuff
        self.random = random

        # iteration counter: needed to properly shut down the background solver process
        self.n_iters = 0

        # list to keep candidates for an evaluation
        self.stored_candidates = list()

        # start background solver process (earlier thread-based variant kept commented out)
        #  self.nomad_thread = threading.Thread(target=nomad_solve, args=(self.bb_fct, x0, lb, ub, params,), daemon=True)
        #  self.nomad_thread.start()
        self.nomad_process = multiprocessing.Process(target=nomad_solve,
                                                     args=(
                                                         self.bb_fct,
                                                         x0,
                                                         lb,
                                                         ub,
                                                         params,
                                                     ))
        self.nomad_process.start()
Example no. 18
    def __init__(self, api_config, **kwargs):
        """Build wrapper class to use an optimizer in benchmark.

        Parameters
        ----------
        api_config : dict-like of dict-like
            Configuration of the optimization variables. See API description.
        """
        AbstractOptimizer.__init__(self, api_config)

        self.dimensions, self.vars_types, self.param_list = TurboOptimizer.get_sk_dimensions(
            api_config)
        print("dimensions: {}".format(self.dimensions))
        print("vars_types: {}".format(self.vars_types))
        # names of variables
        print("param_list: {}".format(self.param_list))

        self.space_x = JointSpace(api_config)
        self.bounds = self.space_x.get_bounds()
        self.lb, self.ub = self.bounds[:, 0], self.bounds[:, 1]
        self.dim = len(self.bounds)
        print("lb: {}".format(self.lb))
        print("ub: {}".format(self.ub))
        print("dim: {}".format(self.dim))

        if "max_depth" in self.param_list:
            print("DT or RF")
            # max_depth
            att = "max_depth"
            print("att: {}".format(att))
            att_idx = self.param_list.index(att)
            print("old lb: {}, ub: {}".format(self.lb[att_idx],
                                              self.ub[att_idx]))
            self.lb[att_idx] = 10
            self.ub[att_idx] = 15
            print("new lb: {}, ub: {}".format(self.lb[att_idx],
                                              self.ub[att_idx]))

            # max_features
            att = "max_features"
            print("att: {}".format(att))
            att_idx = self.param_list.index(att)
            print("old lb: {}, ub: {}".format(self.lb[att_idx],
                                              self.ub[att_idx]))
            self.lb[att_idx] = logit(0.9)
            self.ub[att_idx] = logit(0.99)
            print("new lb: {}, ub: {}".format(self.lb[att_idx],
                                              self.ub[att_idx]))

            # min_impurity_decrease
            att = "min_impurity_decrease"
            print("att: {}".format(att))
            att_idx = self.param_list.index(att)
            print("old lb: {}, ub: {}".format(self.lb[att_idx],
                                              self.ub[att_idx]))
            self.lb[att_idx] = 1e-5
            self.ub[att_idx] = 1e-4
            print("new lb: {}, ub: {}".format(self.lb[att_idx],
                                              self.ub[att_idx]))
        if "beta_1" in self.param_list and "hidden_layer_sizes" in self.param_list:
            print("MLP-adam")
            # batch_size
            att = "batch_size"
            print("att: {}".format(att))
            att_idx = self.param_list.index(att)
            print("old lb: {}, ub: {}".format(self.lb[att_idx],
                                              self.ub[att_idx]))
            self.lb[att_idx] = 16
            self.ub[att_idx] = 128
            print("new lb: {}, ub: {}".format(self.lb[att_idx],
                                              self.ub[att_idx]))
            # hidden_layer_sizes
            att = "hidden_layer_sizes"
            print("att: {}".format(att))
            att_idx = self.param_list.index(att)
            print("old lb: {}, ub: {}".format(self.lb[att_idx],
                                              self.ub[att_idx]))
            self.lb[att_idx] = 64
            self.ub[att_idx] = 200
            print("new lb: {}, ub: {}".format(self.lb[att_idx],
                                              self.ub[att_idx]))
            # validation_fraction
            att = "validation_fraction"
            print("att: {}".format(att))
            att_idx = self.param_list.index(att)
            print("old lb: {}, ub: {}".format(self.lb[att_idx],
                                              self.ub[att_idx]))
            self.lb[att_idx] = logit(0.1)
            self.ub[att_idx] = logit(0.2)
            print("new lb: {}, ub: {}".format(self.lb[att_idx],
                                              self.ub[att_idx]))
        if "momentum" in self.param_list and "hidden_layer_sizes" in self.param_list:
            print("MLP-sgd")
            # batch_size
            att = "batch_size"
            print("att: {}".format(att))
            att_idx = self.param_list.index(att)
            print("old lb: {}, ub: {}".format(self.lb[att_idx],
                                              self.ub[att_idx]))
            self.lb[att_idx] = 16
            self.ub[att_idx] = 128
            print("new lb: {}, ub: {}".format(self.lb[att_idx],
                                              self.ub[att_idx]))
            # hidden_layer_sizes
            att = "hidden_layer_sizes"
            print("att: {}".format(att))
            att_idx = self.param_list.index(att)
            print("old lb: {}, ub: {}".format(self.lb[att_idx],
                                              self.ub[att_idx]))
            self.lb[att_idx] = 64
            self.ub[att_idx] = 200
            print("new lb: {}, ub: {}".format(self.lb[att_idx],
                                              self.ub[att_idx]))
            # validation_fraction
            att = "validation_fraction"
            print("att: {}".format(att))
            att_idx = self.param_list.index(att)
            print("old lb: {}, ub: {}".format(self.lb[att_idx],
                                              self.ub[att_idx]))
            self.lb[att_idx] = logit(0.1)
            self.ub[att_idx] = logit(0.2)
            print("new lb: {}, ub: {}".format(self.lb[att_idx],
                                              self.ub[att_idx]))
        if "C" in self.param_list and "gamma" in self.param_list:
            print("SVM")
            # C
            att = "C"
            print("att: {}".format(att))
            att_idx = self.param_list.index(att)
            print("old lb: {}, ub: {}".format(self.lb[att_idx],
                                              self.ub[att_idx]))
            self.lb[att_idx] = np.log(1e0)
            self.ub[att_idx] = np.log(1e3)
            print("new lb: {}, ub: {}".format(self.lb[att_idx],
                                              self.ub[att_idx]))
            # tol
            att = "tol"
            print("att: {}".format(att))
            att_idx = self.param_list.index(att)
            print("old lb: {}, ub: {}".format(self.lb[att_idx],
                                              self.ub[att_idx]))
            self.lb[att_idx] = np.log(1e-3)
            self.ub[att_idx] = np.log(1e-1)
            print("new lb: {}, ub: {}".format(self.lb[att_idx],
                                              self.ub[att_idx]))
        if "learning_rate" in self.param_list and "n_estimators" in self.param_list:
            print("ada")
            # n_estimators
            att = "n_estimators"
            print("att: {}".format(att))
            att_idx = self.param_list.index(att)
            print("old lb: {}, ub: {}".format(self.lb[att_idx],
                                              self.ub[att_idx]))
            self.lb[att_idx] = 30
            self.ub[att_idx] = 100
            print("new lb: {}, ub: {}".format(self.lb[att_idx],
                                              self.ub[att_idx]))
        if "n_neighbors" in self.param_list:
            print("kNN")
            # n_neighbors
            att = "n_neighbors"
            print("att: {}".format(att))
            att_idx = self.param_list.index(att)
            print("old lb: {}, ub: {}".format(self.lb[att_idx],
                                              self.ub[att_idx]))
            self.lb[att_idx] = 1
            self.ub[att_idx] = 15
            print("new lb: {}, ub: {}".format(self.lb[att_idx],
                                              self.ub[att_idx]))
            # p
            att = "p"
            print("att: {}".format(att))
            att_idx = self.param_list.index(att)
            print("old lb: {}, ub: {}".format(self.lb[att_idx],
                                              self.ub[att_idx]))
            self.lb[att_idx] = 1
            self.ub[att_idx] = 2
            print("new lb: {}, ub: {}".format(self.lb[att_idx],
                                              self.ub[att_idx]))

        print("new_lb: {}".format(self.lb))
        print("new_ub: {}".format(self.ub))

        self.max_evals = np.iinfo(np.int32).max  # NOTE: Largest possible int
        self.batch_size = None
        self.history = []

        self.turbo = Turbo1(
            f=None,
            lb=self.lb,
            ub=self.ub,
            n_init=2 * self.dim + 1,
            max_evals=self.max_evals,
            batch_size=1,  # We need to update this later
            verbose=False,
        )

        # count restart
        self.cnt_restart = 0
        # use smaller length_min
        self.turbo.length_min = 0.5**4
        # use distance between batch elements
        self.turbo.ele_distance = 1e-2
Example no. 19
    def __init__(self,
                 api_config,
                 techniques=DEFAULT_TECHNIQUES,
                 n_suggestions=1):
        """Build wrapper class to use opentuner optimizer in benchmark.

        Parameters
        ----------
        api_config : dict-like of dict-like
            Configuration of the optimization variables. See API description.

        techniques : iterable of strings
            A list or tuple of techniques to use in opentuner. If the list
            has only one technique, then that technique will be used. If the
            list has multiple techniques a bandit over those techniques
            will be used.

        n_suggestions : int
            Default number of suggestions to be made in parallel.
        """
        AbstractOptimizer.__init__(self, api_config)

        # Opentuner requires DesiredResult to reference suggestion when making
        # its observation. x_to_dr maps the dict suggestion to DesiredResult.
        self.x_to_dr = {}
        # Keep last suggested x and repeat it whenever opentuner gives up.
        self.dummy_suggest = None
        """Setting up the arguments for opentuner. You can see all possible
        arguments using:
        ```
        >>> import opentuner
        >>> opentuner.default_argparser().parse_args(['-h'])
        ```
        We only change a few arguments (other arguments are set to defaults):
        * database = MEMORY_ONLY_DB: to use an in-memory sqlite database
        * parallelism = n_suggestions: num of suggestions to give in parallel
        * technique = techniques: a list of techniques to be used by opentuner
        * print_params = False: to keep opentuner from exiting after printing
            param spaces
        """
        args = Namespace(
            bail_threshold=500,
            database=MEMORY_ONLY_DB,
            display_frequency=10,
            generate_bandit_technique=False,
            label=None,
            list_techniques=False,
            machine_class=None,
            no_dups=False,
            parallel_compile=False,
            parallelism=n_suggestions,
            pipelining=0,
            print_params=False,
            print_search_space_size=False,
            quiet=False,
            results_log=None,
            results_log_details=None,
            seed_configuration=[],
            stop_after=None,
            technique=techniques,
            test_limit=5000,
        )

        # Setup some dummy classes required by opentuner to actually run.
        manipulator = OpentunerOptimizer.build_manipulator(api_config)
        interface = DMI(args=args, manipulator=manipulator)
        self.api = TuningRunManager(interface, args)
Example no. 20
    def __init__(self, api_config, random=np_util.random):
        """Build wrapper class to use random search function in benchmark.
        Settings for `suggest_dict` can be passed using kwargs.
        Parameters
        ----------
        api_config : dict-like of dict-like
            Configuration of the optimization variables. See API description.
        """
        AbstractOptimizer.__init__(self, api_config)

        self.random = random
        self._api_config = copy.deepcopy(
            api_config)  # keep the original values for later usage
        self.api_config, self._par = self._parse_api_config(api_config)
        self.cs = self.get_cs_dimensions(self.api_config)
        self.original_cs = self.get_cs_dimensions(self._api_config)
        print("ConfigSpace: ")
        print(self.cs)
        print("Original ConfigSpace: ")
        print(self.original_cs)
        self.optimizer_dict = {
            # TODO: Think about whether we can/want to rely on '8' here
            # For debugging, you can replace any of the following with this opt
            # [RandomOpt, {"api_config": self.api_config}]
            # NOTE: all the optimizers should take `self.cs` and `self.api_config` to initialize
            "Warmstart": [
                INIT, {
                    "api_config": self._api_config,
                    "pop_size": 24,
                    "warmstart": True
                }
            ],
            "SOBOL": [
                SMACInit, {
                    "api_config": self.api_config,
                    "config_space": self.cs,
                    'lifetime': 3
                }
            ],
            "PointsMD":
            [PointsMinDisc, {
                "api_config": self.api_config,
                "popsize": 24
            }],
            "SMAC": [
                SMAC, {
                    "api_config": self.api_config,
                    "config_space": self.cs,
                    "parallel_setting": 'KB'
                }
            ],
            "DE": [
                DE, {
                    "api_config": self.api_config,
                    "pop_size": 8,
                    "max_age": None,
                    "mutation_factor": 0.5,
                    "crossover_prob": 0.5,
                    "budget": None,
                    "strategy": 'best2_bin',
                    "f_adaptation": "SinDE",
                    "warmstart": False,
                    "sin_de_configuration": 2
                }
            ],
            "cma": [CMA, {
                "cs": self.cs,
                "popsize": 8,
                "lb": 0,
                "ub": 1
            }],
        }
        self.default_opt = "DE"
        self.last = None
        self.cur_opt = None
        self.rh = []
        self.Q_table = None
        self._num_iters = 0
        self.cur_opt_str = ""
        self._max_allowed_switches = 3
        # The schedule probably has to be hard-coded here, since we are not sure whether it can be loaded from elsewhere.
        # Hand-designed schedule for testing:
        self.Q_table, self._max_allowed_switches = self._fixed_policy_warm_smac8_de5_or_de4_smac8_de5()

        num_iter_smac_init = 0

        for i in self.Q_table:
            self.Q_table[i] = np.array(self.Q_table[i])
            num_iter_smac_init += 1 if self.Q_table[i][1] != 0 else 0

        self.optimizer_dict["SOBOL"][1]['lifetime'] = num_iter_smac_init
Example no. 21
 def __init__(self, api_config, random=np_util.random, flaky=False):
     AbstractOptimizer.__init__(self, api_config)
     self.random = random
     self.flaky = flaky