    @classmethod
    def init_random_generator(cls, rdm_object=None):
        """ Return a RandomGenerator. Accepts several input types:
        None (returns a fresh generator) / int (used as a seed) /
        RandomGenerator (returned as-is).
        """
        if (rdm_object is None):
            return RandomGenerator()

        elif (ut.is_int(rdm_object)):
            return RandomGenerator(seed=rdm_object)

        elif (isinstance(rdm_object, RandomGenerator)):
            return rdm_object

        else:
            raise NotImplementedError()
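
# Hedged usage sketch: a self-contained analogue of the dispatch above, using
# numpy's RandomState in place of the project-specific RandomGenerator.
# The name `as_rng` is illustrative, not part of the original API.
import numpy as np

def as_rng(rdm_object=None):
    """None -> fresh RNG, int -> seeded RNG, RNG instance -> passed through."""
    if rdm_object is None:
        return np.random.RandomState()
    elif isinstance(rdm_object, (int, np.integer)):
        return np.random.RandomState(seed=rdm_object)
    elif isinstance(rdm_object, np.random.RandomState):
        return rdm_object
    else:
        raise NotImplementedError(type(rdm_object))

assert as_rng(42).uniform() == as_rng(42).uniform()  # same seed, same first draw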
    def _init_params_BO(self, **args_optim):
        """ Provides different ways to initialize the BO depending on the input type:
        <None> / <int>: returns an integer (the number of points to be evaluated)
            e.g. None >> 2 * nb_params
        <str>: random init based on a string; returns a <P x N np.array>
            where P is the population size and N the number of parameters
            e.g. '40_lhs' >> 40 points drawn by latin hypercube sampling
                 '50_uniform' >> 50 points drawn uniformly
            the range of each parameter is inferred from the bounds
        <P x N array>: passed through unchanged
        """
        init_obj = args_optim['init_obj']
        nb_params = args_optim['nb_params']
        bounds = args_optim['bounds_params']

        if (init_obj is None):
            init_args = nb_params * 2
            
        elif ut.is_int(init_obj):
            init_args = init_obj
            
        elif ut.is_string(init_obj):
            bits = init_obj.split("_",1)
            nb_points_init = int(bits[0])
            if(bits[1] == 'lhs'):
                size_pop = [nb_points_init, nb_params]
                limits = np.array(bounds).T
                init_args = self.rdm_gen.init_population_lhs(size_pop, limits)
            else:
                distrib_one_param = [bits[1] + '_' + str(bounds[i][0]) + '_' + str(bounds[i][1]) for i in range(nb_params)]
                # draw nb_points_init values per parameter, then stack to (P, N)
                init_matrix = [self.rdm_gen.gen_rdmfunc_from_string(d, dim=nb_points_init)() for d in distrib_one_param]
                init_args = np.array(init_matrix).T

        else:
            init_args = init_obj
        return init_args
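
# Hedged sketch of what the '<P>_uniform' branch above produces, using plain
# numpy instead of self.rdm_gen (all names below are illustrative):
import numpy as np

nb_params = 3
bounds = [(0.0, 1.0), (-1.0, 1.0), (0.0, 5.0)]
nb_points_init = 50                     # as encoded in a '50_uniform' string
rng = np.random.RandomState(0)

# one row of draws per parameter, each within its own bounds, then
# transpose to the documented <P x N> layout
init_matrix = [rng.uniform(low, high, size=nb_points_init) for (low, high) in bounds]
init_args = np.array(init_matrix).T
assert init_args.shape == (nb_points_init, nb_params)   # (P, N) = (50, 3)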
    def gen_rdmfunc_from_string(self, method_rdm, dim=1):
        """ Return a function that, when called, returns random variables drawn
        according to the distribution described by the string, with a specific
        dim. Convention: dim[-1] corresponds to the dim of the RV while dim[:-1]
        corresponds to the size of the population.
        TODO: works only for 1D dim
        """
        if ut.is_list(method_rdm):
            # return a list of functions
            # should it be a function returning a list??
            func = [
                self.gen_rdmfunc_from_string(meth, dim) for meth in method_rdm
            ]
        else:

            args = ut.splitString(method_rdm)
            if (ut.is_int(dim)):
                dimRV = dim
            else:
                dimRV = dim[-1]
            if (args[0] in ['uniform', 'normal']):
                if (len(args) == 1):
                    first_args = np.repeat(0, dimRV)
                    second_args = np.repeat(1, dimRV)

                elif (len(args) == 3):
                    first_args = np.repeat(float(args[1]), dimRV)
                    second_args = np.repeat(float(args[2]), dimRV)

                elif (len(args) == (1 + 2 * dimRV)):
                    first_args = np.array(
                        [float(args[1 + 2 * d]) for d in range(dimRV)])
                    second_args = np.array(
                        [float(args[2 + 2 * d]) for d in range(dimRV)])
                else:
                    raise NotImplementedError()

                if (dim == 1):
                    # so that the function returns a scalar instead of an array
                    # may change
                    first_args, second_args = first_args[0], second_args[0]
                    dim = None
                if args[0] == 'normal':

                    def func():
                        return self.normal(first_args, second_args, size=dim)
                else:

                    def func():
                        return self.uniform(first_args, second_args, size=dim)

            elif (args[0] == 'determ'):
                if (len(args) == 1):
                    constant = np.repeat(0, dim)
                elif (len(args) == 2):
                    constant = np.repeat(float(args[1]), dim)
                elif (len(args) == (1 + dimRV)):
                    constant = np.array(args[1:], dtype=float)
                    if (ut.is_list(dim)):
                        dim_pop = np.prod(dim[:-1])
                        constant = np.tile(constant, dim_pop).reshape(dim)
                else:
                    raise NotImplementedError()

                def func():
                    return constant
            else:
                raise NotImplementedError()
        return func
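
# Hedged mini-version of the closure pattern above for the 'uniform'/'normal'
# branches, assuming ut.splitString splits on '_' (names are illustrative):
import numpy as np

rng = np.random.RandomState(1)

def make_sampler(spec, dim=1):
    """'uniform_a_b' or 'normal_mu_sigma' -> zero-argument sampling function."""
    name, a, b = spec.split('_')
    draw = rng.normal if name == 'normal' else rng.uniform
    size = None if dim == 1 else dim      # dim == 1 -> scalar, as in the original
    return lambda: draw(float(a), float(b), size=size)

sample_u = make_sampler('uniform_0_1')         # scalar draw from U(0, 1)
sample_n = make_sampler('normal_0_2', dim=5)   # 5 draws from N(mu=0, sigma=2)
print(sample_u(), sample_n())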
    def _run_BO2(self, options, **args_call):
        """ Bayesian optimization using GPyOpt.

        Relevant GPyOpt signatures:
        def __init__(self, f, domain = None, constraints = None, cost_withGradients = None,
        model_type = 'GP', X = None, Y = None, initial_design_numdata = 5,
        initial_design_type='random', acquisition_type ='EI', normalize_Y = True,
        exact_feval = False, acquisition_optimizer_type = 'lbfgs', model_update_interval=1,
        evaluator_type = 'sequential', batch_size = 1, num_cores = 1, verbosity=False,
        verbosity_model = False, maximize=False, de_duplication=False, **kwargs)

        def run_optimization(self, max_iter = 0, max_time = np.inf, eps = 1e-8,
        context = None, verbosity=False, save_models_parameters= True,
        report_file = None, evaluations_file = None, models_file=None)

        Parameters
        ----------
        options.constraints: <str> or None
            Encodes the constraints to be used. The string is converted into the
            structure expected by the BayesianOptimization constructor:
            [{'name':'name1', 'constraint':obj_constraint1}, .., {'name':'nameN', 'constraint':obj_constraintN}]
            where obj_constraintX can be a function or a string which can be exec'd
            to generate a function. A constraint has the form c(x) <= 0 (i.e. a
            parameter is accepted if c(x) <= 0), e.g.:
            'step_a': the difference between two consecutive parameters should be less than a
            'step_a_a0_aN': same as above, plus the first (last) parameter is also compared to a0 (aN)
            'smooth_s': the computed smoothness should be <= s
        """
        #Init BO
        model = options['model']

        nb_params = options['nb_params']
        name_params = [str(i) for i in range(nb_params)]
        options['name_params'] = name_params        
        bounds_bo = [{'name': name_params[i], 'type': 'continuous', 
                   'domain': options['bounds_params'][i]} for i in range(nb_params)]
        constraints = options.get('constraints')
        cst = self._build_constraints(constraints, **options)        
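        # Illustrative expansion (hypothetical values): for nb_params = 3, a
        # 'step_0.2' string could yield GPyOpt-style constraint dicts such as
        #   [{'name': 'step_0', 'constraint': 'abs(x[:,1] - x[:,0]) - 0.2'},
        #    {'name': 'step_1', 'constraint': 'abs(x[:,2] - x[:,1]) - 0.2'}]
        # where each expression is accepted when it evaluates to <= 0.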

        args_BO = {'acquisition_type':options['acq'], 
                   'acquisition_optimizer_type':options['acq_opt_type'], 
                   'num_cores':options['num_cores'], 
                   'domain': bounds_bo,
                   'optim_num_anchor':options['optim_num_anchor'], 
                   'optim_num_samples':options['optim_num_samples'],
                   'acquisition_jitter':options['acquisition_jitter'], 
                   'acquisition_weight':options['acquisition_weight'],
                   'batch_size':options['batch_size'], 
                   'evaluator_type':options['batch_method'],
                   'num_inducing':options['num_inducing'], 
                   'model_type':options['model_type'], 
                   'ARD':options['ARD'],
                   'acquisition_weight_lindec':options['acquisition_weight_lindec'], 
                   'constraints':cst}    

        # definition of the cost function
        def cost(params):
            return model(np.squeeze(params), **args_call)

        #V0.1 NON DEFAULT LIKELIHOOD
        if(args_BO['model_type'] == 'GP_CUSTOM_LIK'):
            logger.info('Use of GP_CUSTOM_LIK: enforce **normalize_Y** as False ')
            logger.info('Use of GP_CUSTOM_LIK: enforce **inf_method** as Laplace')
            args_BO['normalize_Y'] = False
            args_BO['inf_method'] = 'Laplace'
            args_BO['likelihood'] = options.get('likelihood', 'Binomial_10') 



        #V0.1 transfer learning
        to_transfer = options['to_transfer']
        if((to_transfer is not None) and (args_BO['model_type'] == 'GP_STACKED')):
            args_BO_transfer = copy.copy(args_BO)
            args_BO_transfer['model_type'] = 'GP'
            args_BO_transfer['X'] = to_transfer['X']
            args_BO_transfer['Y'] = to_transfer['Y']
            # fit the transfer regressor on the transferred data (args_BO_transfer)
            regressor_to_transfer = GPyOpt.methods.BayesianOptimization(cost, **args_BO_transfer)
            regressor_to_transfer.model._create_model(args_BO_transfer['X'], args_BO_transfer['Y'])
            args_BO['prev'] = regressor_to_transfer.model

        init = options['init_params']
        if(init is None):
            args_BO['initial_design_numdata'] = int(3 * nb_params)
            args_BO['initial_design_type'] = options['initial_design_type']
        elif(ut.is_int(init)):
            args_BO['initial_design_numdata'] = init
            args_BO['initial_design_type'] = options['initial_design_type']
        else:
            args_BO['X'] = init
            logger.info('Init of the GP: acquisition of {} points'.format(init.shape[0]))
            args_BO['Y'] = np.array([cost(x) for x in init])
        
        ker = options['kernel'] # matern52 / matern32
        if(ker is not None):
            ard = options['ARD']
            # build the GPy kernel object directly and pass it via the 'kernel' kwarg
            if(ker == 'matern52'):
                args_BO['kernel'] = GPy.kern.Matern52(input_dim=nb_params, ARD=ard)
            elif(ker == 'matern32'):
                args_BO['kernel'] = GPy.kern.Matern32(input_dim=nb_params, ARD=ard)
            else:
                logger.warning("{} is not a valid kernel".format(ker))
        
        # Initialization phase
        bo = GPyOpt.methods.BayesianOptimization(cost, **args_BO)

        # Exploration-exploitation phase
        max_time = options['max_time']
        bo.run_optimization(max_iter=options['maxiter'], max_time=max_time)
        time_left = max_time - bo.cum_time
        max_reached = (time_left < 0)
        
        # Exploitation phase
        # should it be forced even if max_time is reached? No so far
        exploit_steps = options['exploit_steps']
        if(exploit_steps > 0): 
            if(not(max_reached)):
                bo.acquisition_type = 'LCB'
                bo.acquisition_weight = 0.000001
                bo.kwargs['acquisition_weight'] = 0.000001
                bo.acquisition = bo._acquisition_chooser()
                #maybe to add new logic when Batch/Sparse
                if(bo.batch_size > 1):
                    bo.batch_size = 1
                bo.evaluator = bo._evaluator_chooser()
                                     
                logger.info('Exploitation (i.e. LCB with weight ~0) for {} steps'.format(exploit_steps))
                logger.info('Still {} s left'.format(time_left))
                bo.run_optimization(exploit_steps, max_time = time_left)
                time_left -= bo.cum_time
                max_reached = (time_left < 0)

        if((_still_potentially_better(bo)) and (exploit_steps <= 50) and not(max_reached)):
            logger.info('2nd round of exploitation for {} steps'.format(exploit_steps))
            logger.info('Still {} s left'.format(time_left))
            bo.run_optimization(exploit_steps, max_time = time_left)

        # generate results
        optim_params = bo.x_opt
        optim_value = bo.fx_opt
        optim_params_exp = _get_best_exp_from_BO(bo)
        
        optim_value_computed = model(optim_params)

        nfev = len(bo.X)
        resultTest = {'x': optim_params, 'x_exp': optim_params_exp, 'fun': optim_value,
                      'fun_ver': optim_value_computed, 'nfev': nfev, 'nit': nfev, 'success': True}
        resultTest['gp_kernel_optim_names'] = bo.model.model.parameter_names()
        resultTest['gp_kernel_optim_vals'] = bo.model.model.param_array
        resultTest['nb_processes'] = bo.num_cores
        resultTest['nb_cpus'] = self.mp.n_cpus
        resultTest['X_evol'] = bo.X
        resultTest['Y_evol'] = bo.Y
        resultTest['Y_best'] = bo.Y_best
        resultTest['still_potentially_better'] = _still_potentially_better(bo)
        resultTest['maxtime'] = max_reached
        
        # Close pool of processors used (if needed)
        self.mp.close_mp()
        return resultTest
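
# Hedged usage sketch (hypothetical option values; the complete set of
# required keys is defined by the surrounding optimizer class):
#   options = {'model': my_model, 'nb_params': 2,
#              'bounds_params': [(0., 1.), (0., 1.)],
#              'init_params': '40_lhs', 'kernel': 'matern52',
#              'maxiter': 50, 'max_time': 600, 'exploit_steps': 15, ...}
#   res = optim._run_BO2(options)
#   best_params, best_val = res['x'], res['fun']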