Example #1
def run_optimization():
    print('Calculating...')
    t2['N'] = [len(data_calc['t'][n_d]) - 1 for n_d in range(info['n_data'])]
    t2['n_p'] = t1['n_part'].get() + 1
    t2['n_d'] = info['n_data']
    t2['dt'] = [(info['t_end'][i] - info['t_start'][i]) / t2['N'][i] for i in range(info['n_data'])]
    p = calc_p_num()
    #for n_p in range(t2['n_p']):   # add scaling factor x_t = p[-1]*x_1 + p[-2]*x_2
    #    p.append(1/t2['n_p'])      # could set it so that x1 = p[-1],x2=1-p[-1] and they sum to one
    
    t3['nonlinear'] = 0 #nonlinear-equation solving time
    
    if t2['method_var'].get() == 'CMA-ES':
        xopt, res = cma.fmin2(of, p, 0.5, options={'tolfun': t2['maxiter'].get()})
        class result:
            x = xopt
            fun = res.result[1]
        t2['res'] = result
  
    else:
        t2['res'] = minimize(of, p, method=t2['method_var'].get(), options={'maxiter': int(t2['maxiter'].get())})

    calc_from_res(t2['res'].x)
    t2['p_letters'] = calc_p()
    write_result(t2['p_letters'], t2['res'])
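A side note on the pattern above: the ad-hoc result class exists only to give the CMA-ES branch the same .x/.fun interface that scipy.optimize.minimize returns. A minimal sketch of the same adapter using scipy's own OptimizeResult type (a hypothetical helper, assuming scipy is available as the else branch implies):

from scipy.optimize import OptimizeResult

def cma_as_scipy_result(xopt, es):
    # es.result[1] is the best objective value found by pycma
    return OptimizeResult(x=xopt, fun=es.result[1])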
Example #2
 def solve_for_unitary(self, circuit, options, x0=None):
     try:
         import cma
     except ImportError:
         print(
             "ERROR: Could not find cma, try running pip install quantum_synthesis[cma]",
             file=sys.stderr)
         sys.exit(1)
     eval_func = lambda v: options.error_func(options.target,
                                              circuit.matrix(v))
     jac_func = lambda v: options.error_jac(options.target,
                                            circuit.mat_jac(v))
     initial_guess = 'np.random.rand({})*2*np.pi'.format(
         circuit.num_inputs) if x0 is None else x0
     xopt, es = cma.fmin2(eval_func,
                          initial_guess,
                          0.25, {
                              'verb_disp': 0,
                              'verb_log': 0,
                              'bounds': [0, 2 * np.pi]
                          },
                          restarts=2,
                          gradf=jac_func)
     if circuit.num_inputs > 18:
         raise Warning("Finished with {} evaluations".format(es.result[3]))
     return (circuit.matrix(xopt), xopt)
Example #3
    def optimize(self, maxiter=20, max_steps=200000):
        if self.dim > 1:
            self.max_steps = max_steps
            init_guess = [0.5] * self.dim
            #init_guess = np.random.random(self.dim)
            init_std = 0.25

            bound = [0.0, 1.0]

            print(bound)

            xopt, es = cma.fmin2(self.fitness,
                                 init_guess,
                                 init_std,
                                 options={
                                     'bounds':
                                     bound,
                                     'maxiter':
                                     maxiter,
                                     'ftarget':
                                     self.terminate_threshold,
                                     'termination_callback':
                                     self.termination_callback
                                 },
                                 callback=self.cames_callback)

            print('optimized: ', repr(xopt))
        else:
            # 1d case, not used
            candidates = np.arange(-1, 1, 0.05)
            fitnesses = [self.fitness([candidate]) for candidate in candidates]
            xopt = [candidates[np.argmin(fitnesses)]]

        return xopt
Example #4
    def CMA(self, method='chi2', fitlow=None, fithigh=None, **kwargs):
        """
        Perform maximum-likelihood optimization of the model using the CMA-ES algorithm from the pycma library.

        :param method: Likelihood function to be optimized. Available likelihoods are 'chi2' (chi-squared) and 'cstat' (C statistic). Defaults to 'chi2'.
        :type method: str
        :param fitlow: Lower boundary of the active fitting radial range. If fitlow=None the entire range is used. Defaults to None
        :type fitlow: float
        :param fithigh: Upper boundary of the active fitting radial range. If fithigh=None the entire range is used. Defaults to None
        :type fithigh: float
        :param kwargs: Additional keyword arguments: initial parameter values keyed by the model's
            parameter names, plus optional 'low_bounds' and 'high_bounds' arrays constraining the fit.
        """
        prof = self.profile
        if prof.profile is None:
            print('Error: No valid profile exists in provided object')
            return
        model = self.mod
        if prof.psfmat is not None:
            psfmat = np.transpose(prof.psfmat)
        else:
            psfmat = None
        if method == 'chi2':
            # Define the fitting algorithm
            self.cost = ChiSquared(model, prof.bins, prof.ebins, prof.profile, prof.eprof, psfmat=psfmat, fitlow=fitlow,
                                   fithigh=fithigh)

        elif method == 'cstat':
            if prof.counts is None:
                print('Error: No count profile exists')
                return
            # Define the fitting algorithm
            self.cost = Cstat(model, prof.bins, prof.ebins, prof.counts, prof.area, prof.effexp, prof.bkgcounts,
                              psfmat=psfmat, fitlow=fitlow, fithigh=fithigh)
        else:
            print('Unknown method ', method)
            return

        # Setting bounds for the variable to constrain optimization process
        low_bounds = kwargs.get('low_bounds')
        high_bounds = kwargs.get('high_bounds')
        bounds = BoundTransform([low_bounds, high_bounds])
        # Continuous bound handling using CMA-ES package
        fitness = ComposedFunction([lambda x: self.cost(*x), bounds.transform])
        # Inversion of the initial guess
        x0 = bounds.inverse(np.array([kwargs[param] for param in self.mod.parnames]))

        # Computing best parameters using CMA-ES
        res, es = cma.fmin2(fitness, x0, 1.5, restarts=3)

        xopt = bounds.transform(res)

        self.mod.SetParameters(xopt)
        self.mod.SetErrors(np.ones_like(xopt))
        self.errors = np.ones_like(xopt)
        self.mlike = es.result[1]
        self.minuit = None
        self.out = xopt
Example #5
def find_cmaes_settings(start_sigma) -> Tuple[Any, Any]:
    """ Find best settings for baseline image segmentation
        using CMA-ES
    """
    photos, annots = load_train_data()
    photos = [p.astype(np.float32) for p in photos]
    objective = partial(cmaes_metric_from_pool, photos=photos, annots=annots)
    objective_wrapped = cmaes_utils.get_cmaes_params_warp(objective)
    start_values = get_start_params()
    return cma.fmin2(objective_wrapped, start_values, start_sigma)  # xopt, es
Example #6
def minimise_with_CMAES(f, lb, ub, maxeval=5000, cf=None, ftol_abs=1e-15):
    # set the options
    cma_options = {'bounds': [list(lb), list(ub)],
                   'tolfun': ftol_abs,
                   'maxfevals': maxeval,
                   'verb_disp': 0,
                   'verb_log': 0,
                   'verbose': -1,
                   'CMA_stds': np.abs(ub - lb),
                   }

    if cf is None:
        x0 = lambda: np.random.uniform(lb, ub)

    else:
        def initial_point_generator(cf, lb, ub):
            def wrapper():
                while True:
                    x = np.random.uniform(lb, ub)
                    if np.all(x >= lb) and np.all(x <= ub) and cf(x):
                        return x
            return wrapper

        class feas_func:
            def __init__(self, cf):
                self.cf = cf
                self.c = 0

            def __call__(self, x, f):
                if self.c > 10000:
                    return True

                is_feas = self.cf(x)

                if not is_feas:
                    self.c += 1

                return is_feas

        is_feasible = feas_func(cf)
        cma_options['is_feasible'] = is_feasible
        x0 = initial_point_generator(cf, lb, ub)

    # ignore warnings about flat fitness (i.e starting in a flat EI location)
    with warnings.catch_warnings():
        warnings.simplefilter('ignore')

        # run CMA-ES with bipop (small and large population sizes) for up to
        # 9 restarts (or until it runs out of budget)
        xopt, _ = cma.fmin2(f, x0=x0, sigma0=0.25, options=cma_options,
                            bipop=True, restarts=9)
        warnings.resetwarnings()

    return xopt
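A hypothetical call to minimise_with_CMAES on a simple sphere objective (the function, dimensions and bounds below are illustrative only; numpy, warnings and cma are assumed to be imported as in the surrounding code):

import numpy as np

def sphere(x):
    # convex test objective with its minimum at the origin
    return float(np.sum(np.asarray(x) ** 2))

lb = np.full(5, -2.0)
ub = np.full(5, 2.0)
xbest = minimise_with_CMAES(sphere, lb, ub, maxeval=2000)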
Example #7
 def optimize(self):
     try:
         xopt, es = cma.fmin2(objective_function=self.objective,
                              x0=self.x0,
                              sigma0=self.sigma,
                              options=self.options)
         best_arm = convertToArray(xopt)
         success = True
     except Exception:
         logger.warning('Optimization for ' + str(self.algorithm_name) +
                        ' failed with cost function: ' +
                        str(self.objective.GetCostFunctionName()))
         best_arm = NAN
         success = False
     return best_arm, success, self.objective
Example #8
def cma(loss, initial_parameters, options=None):
    """Genetic optimizer based on `pycma <https://github.com/CMA-ES/pycma>`_.

    Args:
        loss (callable): Loss as a function of variational parameters to be
            optimized.
        initial_parameters (np.ndarray): Initial guess for the variational
            parameters.
        options (dict): Dictionary with options accepted by the ``cma``
            optimizer. The user can use ``cma.CMAOptions()`` to view the
            available options.
    """
    import cma
    r = cma.fmin2(loss, initial_parameters, 1.7, options=options)
    return r[1].result.fbest, r[1].result.xbest
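A hypothetical call to the wrapper above on a toy quadratic loss; 'maxfevals' and 'verb_disp' are standard pycma options (cma.CMAOptions() lists them all):

import numpy as np

# toy loss with its minimum at the all-ones vector
best_loss, best_params = cma(lambda p: float(np.sum((p - 1.0) ** 2)),
                             np.zeros(4),
                             options={'maxfevals': 2000, 'verb_disp': 0})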
Example #9
 def optimization_method(self, objective: Callable,
                         x0: Dict) -> OrderedDict:
     fitted_params, _ = cma.fmin2(objective_function=objective,
                                  x0=list(x0.values()),
                                  restarts=self.restarts,
                                  restart_from_best=True,
                                  sigma0=self.sigma_cma,
                                  options={
                                      'ftarget':
                                      -np.Inf,
                                      'popsize':
                                      self.popsize,
                                      'maxfevals':
                                      self.popsize * self.iterations_cma
                                  })
     return OrderedDict([(k, v) for k, v in zip(x0.keys(), fitted_params)])
Example #10
    def optimize(self, maxiter=20, max_steps=200000, custom_bound=None):
        if self.dim > 1 or self.bayesian_opt:
            self.max_steps = max_steps

            if custom_bound is None:
                init_guess = [0.0] * self.dim
                init_std = 0.5
                bound = [0.0, 1.0]
            else:
                init_guess = [0.5 *
                              (custom_bound[0] + custom_bound[1])] * self.dim
                init_std = abs(0.5 * (custom_bound[0] - custom_bound[1]))
                bound = [custom_bound[0], custom_bound[1]]

            if self.bayesian_opt:
                xs, ys, _, _, _ = bayesian_optimisation(
                    maxiter,
                    self.fitness,
                    bounds=np.array([bound] * self.dim),
                    max_steps=max_steps,
                    random_search=1000,
                    callback=self.cames_callback)
                xopt = xs[np.argmin(ys)]
            else:
                xopt, es = cma.fmin2(self.fitness,
                                     init_guess,
                                     init_std,
                                     options={
                                         'bounds':
                                         bound,
                                         'maxiter':
                                         maxiter,
                                         'ftarget':
                                         self.terminate_threshold,
                                         'termination_callback':
                                         self.termination_callback
                                     },
                                     callback=self.cames_callback)

            print('optimized: ', repr(xopt))
        else:
            # 1d case, not used
            candidates = np.arange(-1, 1, 0.05)
            fitnesses = [self.fitness([candidate]) for candidate in candidates]
            xopt = [candidates[np.argmin(fitnesses)]]

        return xopt
Example #11
    def train(self) -> np.ndarray:

        # Initialization of Thetas

        print("INITIALIZING THETAS")
        init_thetas = []

        for k in range(self._num_rand_inits):
            init_thetas.append(np.random.rand(self._dim_theta * 2))

        init_thetas.append(self._behv_params)

        for k in range(self._num_behv_inits - 1):
            init_thetas.append(
                np.random.multivariate_normal(self._behv_params, self._Sigma))

        # Running CMA-ES

        print("RUNNING CMA-ES NOW")
        x_opt = []
        c = 0
        # opts = cma.CMAOptions()
        # opts.set('maxiter', 5)
        top_returns = []
        for tht in init_thetas:
            print(c)
            a, b = cma.fmin2(self._evaluationFunction,
                             tht,
                             1,
                             options={'maxiter': 4})
            x_opt.append(a)
            top_returns.append(b.result[1])
            c = c + 1

        # Running GA

        print("RUNNING GA NOW")
        ga = GA(populationSize=len(x_opt),
                evaluationFunction=self._evalGA,
                initPopulation=x_opt,
                numElite=(self._num_rand_inits + self._num_behv_inits),
                min_req_pop=self._min_req_pop,
                top_returns=top_returns)
        ga.train()
        x_opt = ga._population
        return x_opt
Example #12
    def infer_parameters(self,
                         y_0: float,
                         initial_parameters: List[float],
                         step_size=0.5) -> np.ndarray:
        """Infers set of parameters that minimises an objective function (so far least squares).

        Arguments:
            y_0 {float} -- Starting point of inderence in parameter space.
            initial_parameters {[type]} -- Starting point of inference in parameter space.
        
        Keyword Arguments:
            step_size {float} -- Step-size of optimizer in parameter space. (default: {0.5})
        
        Returns:
            optimal_parameters -- Set of parameters that minimise the objective function.
        """
        print('Parameters are being infered...\n')
        initial_parameters.append(y_0)
        self.optimal_parameters, _ = cma.fmin2(self._objective_function,
                                               initial_parameters, step_size)
        return self.optimal_parameters
Example #13
 def solve_for_unitary(self, circuit, options, x0=None):
     try:
         import cma
     except ImportError:
         print(
             "ERROR: Could not find cma, try running pip install quantum_synthesis[cma]",
             file=sys.stderr)
         sys.exit(1)
     eval_func = lambda v: options.error_func(options.target,
                                              circuit.matrix(v))
     initial_guess = 'np.random.rand({})*2*np.pi'.format(
         circuit.num_inputs) if x0 is None else x0
     xopt, _ = cma.fmin2(eval_func,
                         initial_guess,
                         0.25, {
                             'verb_disp': 0,
                             'verb_log': 0,
                             'bounds': [0, 2 * np.pi]
                         },
                         restarts=2)
     return (circuit.matrix(xopt), xopt)
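Note that pycma accepts more than a plain array for x0: the string form used above is evaluated by cma itself, and a callable (as in minimise_with_CMAES and batch_qEI elsewhere on this page) is invoked anew at each restart, so every restart gets a fresh starting point. A minimal sketch of the callable form on an assumed 4-dimensional problem:

import numpy as np
import cma

# callable x0: re-sampled at every restart
x0 = lambda: np.random.uniform(0.0, 2.0 * np.pi, 4)
xopt, es = cma.fmin2(lambda v: float(np.sum(np.sin(v))), x0, 0.25,
                     options={'verb_disp': 0}, restarts=2)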
Example #14
def run(params):

    params.optim.save_fig = False
    mp.set_start_method('spawn')
    params.dataio.prefix = datetime.now().strftime("%m-%d-%Y-%H-%M-%S")

    # initialize cost function
    if (params.dataio.dataset_type == "nav2d"):
        cost_fn = lambda theta: cost_fn_nav2d(theta, params)

    # initialize theta params to be optimized
    theta_vals_init = init_theta_vals(params)

    # call black-box optimizer
    print("Running optimizer {0}".format(params.baselines.method))

    def callback_fn(x):
        return callback_scipyopt(x, params)

    if (params.baselines.method == "CMAES"):
        xopt, es = cma.fmin2(
            cost_fn, theta_vals_init, 2., {
                'maxfevals': params.baselines.max_fval_calls,
                'verb_disp': 1,
                'bounds': [0.1, 1e6]
            })
        callback_fn(xopt)
        theta_vals_final = xopt
    else:
        result_optimizer = minimize(cost_fn,
                                    x0=theta_vals_init,
                                    method=params.baselines.method,
                                    callback=callback_fn)
        theta_vals_final = result_optimizer.x

    # print final errors
    err_trans, err_rot = traj_error_final(theta_vals_final, params)
    print("theta_final: {0}, err_trans: {1}, err_rot: {2}".format(
        theta_vals_final, err_trans, err_rot))
Example #15
def optimize_fir_HDAWG(
    y,
    baseline_start=100,
    baseline_stop=None,
    start_sample=0,
    stop_sample=None,
    cma_options=None,
    max_taps=40,
    hdawg_taps=40,
):
    if cma_options is None:
        cma_options = {}
    step_response = np.concatenate((np.array([0]), y))
    baseline = np.mean(y[baseline_start:baseline_stop])
    x0 = [1] + (max_taps - 1) * [0]

    def objective_function_fir(x):
        y = step_response
        zeros = np.zeros(hdawg_taps - max_taps)
        x = np.concatenate((x, zeros))
        yc = signal.lfilter(convert_FIR_from_HDAWG(x), 1, y)
        return np.mean(np.abs(yc[1 + start_sample:stop_sample] -
                              baseline)) / np.abs(baseline)

    return cma.fmin2(objective_function_fir, x0, 0.1, options=cma_options)
Example #16
def tune_hyperparameters(KrigInfo,
                         xhyp_ii,
                         trainvar,
                         ubhyp=None,
                         lbhyp=None,
                         sigmacmaes=None,
                         scaling=None,
                         optimbound=None):
    """Estimate the best hyperparameters.

    Extracted hyperparameter tuning code into a function for
    parallelisation.

    Args:
        KrigInfo (dict): Dictionary that contains Kriging information.
        xhyp_ii (nparray): starting point number ii.
        trainvar: Forwarded to the likelihood function via ``args``.
        ubhyp (nparray): upper bounds of hyperparams.
        lbhyp (nparray): lower bounds of hyperparams.
        sigmacmaes (float): initial sigma for cma-es.
        scaling (list): scaling for cma-es.
        optimbound: bounds for optimizer.

    Returns:
        bestxcand (np.array(float)): Best x candidate array
        neglnlikecand (float): Negative ln-likelihood candidate

    Raises:
        ValueError: If a required parameter for the chosen optimizer is
            missing.
    """
    if KrigInfo["optimizer"] == "cmaes":
        for p in (ubhyp, lbhyp, sigmacmaes, scaling):
            if p is None:
                raise ValueError(f'{p} must be set if optimizer is cmaes.')
        bestxcand, es = cma.fmin2(
            likelihood,
            xhyp_ii,
            sigmacmaes, {
                'bounds': [lbhyp.tolist(), ubhyp.tolist()],
                'scaling_of_variables': scaling,
                'verb_disp': 0,
                'verbose': -9
            },
            args=(KrigInfo, 'default', trainvar))
        neglnlikecand = es.result[1]

    elif KrigInfo["optimizer"] == "lbfgsb":
        if optimbound is None:
            raise ValueError('optimbound must be set if optimizer is lbfgsb.')
        res = minimize(likelihood,
                       xhyp_ii,
                       method='L-BFGS-B',
                       options={'eps': 1e-03},
                       bounds=optimbound,
                       args=(KrigInfo, 'default', trainvar))
        bestxcand = res.x
        neglnlikecand = res.fun

    elif KrigInfo["optimizer"] == "slsqp":
        if optimbound is None:
            raise ValueError('optimbound must be set if optimizer is slsqp.')
        res = minimize(likelihood,
                       xhyp_ii,
                       method='SLSQP',
                       bounds=optimbound,
                       args=(KrigInfo, 'default', trainvar))
        bestxcand = res.x
        neglnlikecand = res.fun

    elif KrigInfo["optimizer"] == "cobyla":
        if optimbound is None:
            raise ValueError('optimbound must be set if optimizer is cobyla.')
        res = fmin_cobyla(likelihood,
                          xhyp_ii,
                          optimbound,
                          rhobeg=0.5,
                          rhoend=1e-4,
                          args=(KrigInfo, 'default', trainvar))
        bestxcand = res
        neglnlikecand = likelihood(res, KrigInfo, trainvar=trainvar)

    else:
        msg = (f"{KrigInfo['optimizer']} in KrigInfo['Optimizer'] is not "
               f"recognised.")
        raise KeyError(msg)
    return bestxcand, neglnlikecand
Example #17
import benchmarks
# https://github.com/CMA-ES/pycma
import cma
import numpy as np

x0 = np.ones((benchmarks.common_dim)) * -1
# dim 1000 can be solved with popsize 500 and sigma 0.5
options = cma.evolution_strategy.CMAOptions()
#print(options)
opt, es = cma.fmin2(benchmarks.f,
                    x0,
                    0.02,
                    options={
                        'maxfevals': 1e6,
                        'popsize': 48
                    },
                    bipop=False,
                    restarts=0)
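The commented-out print(options) above dumps every available option; CMAOptions also accepts a search string, which is usually more practical. A short sketch:

import cma

# show only the options whose name or description mentions 'tol'
print(cma.CMAOptions('tol'))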
Example #18
File: qlassifier.py  Project: tuliplan/qibo
    def minimize(self, method='BFGS', options=None, compile=True):
        loss = self.cost_function_fidelity

        if method == 'cma':
            # Genetic optimizer
            import cma
            r = cma.fmin2(lambda p: loss(p).numpy(), self.params, 2)
            result = r[1].result.fbest
            parameters = r[1].result.xbest

        elif method == 'sgd':
            from qibo.tensorflow.gates import TensorflowGate
            circuit = self.circuit(self.training_set[0])
            for gate in circuit.queue:
                if not isinstance(gate, TensorflowGate):
                    raise RuntimeError('SGD VQE requires native Tensorflow '
                                       'gates because gradients are not '
                                       'supported in the custom kernels.')

            sgd_options = {
                "nepochs": 5001,
                "nmessage": 1000,
                "optimizer": "Adamax",
                "learning_rate": 0.5
            }
            if options is not None:
                sgd_options.update(options)

            # proceed with the training
            from qibo.config import K
            vparams = K.Variable(self.params)
            optimizer = getattr(K.optimizers, sgd_options["optimizer"])(
                learning_rate=sgd_options["learning_rate"])

            def opt_step():
                with K.GradientTape() as tape:
                    l = loss(vparams)
                grads = tape.gradient(l, [vparams])
                optimizer.apply_gradients(zip(grads, [vparams]))
                return l, vparams

            if compile:
                opt_step = K.function(opt_step)

            l_optimal, params_optimal = 10, self.params
            for e in range(sgd_options["nepochs"]):
                l, vparams = opt_step()
                if l < l_optimal:
                    l_optimal, params_optimal = l, vparams
                if e % sgd_options["nmessage"] == 0:
                    print('ite %d : loss %f' % (e, l.numpy()))

            result = self.cost_function(params_optimal).numpy()
            parameters = params_optimal.numpy()

        else:
            import numpy as np
            from scipy.optimize import minimize
            m = minimize(lambda p: loss(p).numpy(),
                         self.params,
                         method=method,
                         options=options)
            result = m.fun
            parameters = m.x

        return result, parameters
Example #19
import cma
import numpy as np

objective_function = lambda x: ((x[0] - 2.0)**2 + (x[1] - 5.0)**2 +
                                (x[2] + 4.0)**2)

xopt, es = cma.fmin2(objective_function, [10.0, 5.0, 20.0], 0.5)

print('this is the solution: ', xopt)
print('so what is this ', es)

print(np.array([[1, 2, 3], [1, 2, 3]]).ndim)
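To answer the 'so what is this' print above: the second value returned by fmin2 is the CMAEvolutionStrategy instance, and its result attribute is a namedtuple (field names as documented by pycma). A short sketch of the fields most examples on this page index into:

# es.result[0] is xbest and es.result[1] is fbest, which is why several
# examples on this page index the result positionally
print(es.result.xbest)        # best solution evaluated
print(es.result.fbest)        # objective value at xbest
print(es.result.evaluations)  # number of function evaluations used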
Example #20
        def objective(x, v):
            controller = NN(x).controller
            trackers = []
            for i in range(N_TRAIN_DIRECTIONS):
                for traj in paths:
                    trackers.append(
                        [x, v, i * 2 * pi / N_TRAIN_DIRECTIONS, traj])
            costs = pool.map(function_x, trackers)
            return max(costs)

        x = 2 * rand(1, 4 * N_NEURONS + 1)[0] - 1
        res = fmin2(objective,
                    x,
                    .5,
                    args=(VELOCITY, ),
                    options={
                        'popsize': 256,
                        'bounds': [-1, 1],
                        'maxiter': 256
                    })  # 5th is mean of final sample distribution
        res = res[1].result[0]
        controller = NN(res).controller
        trackers = []
        for i in range(N_TRAIN_DIRECTIONS):
            for t in paths:
                traj = Trajectory(t)
                orientation = i * 2 * pi / N_TRAIN_DIRECTIONS
                trackers.append(
                    Tracker(VELOCITY, orientation, traj, controller))
        traces = map(lambda x: x.run(), trackers)
        for i, trace in enumerate(traces):
Example #21
File: qEI.py  Project: georgedeath/eshotgun
def batch_qEI(model, lb, ub, maxeval, q, cf):
    # n_samples only used if method is 'qEIMCMC'
    n_dim = lb.size

    # problem bounds
    lbq = np.tile(lb, q)
    ubq = np.tile(ub, q)

    # use the (more) analytical version for a batch size of two (as it is fast enough)
    if q == 2:
        func = cy_qEI

    # else it is impractically slow so switch to the MCMC qEI version
    else:

        def func(m, K, incumbent, obj_sense=-1):
            return qEIMCMC(m, K, incumbent, 10**4, obj_sense)

    # best seen solution
    incumbent = model.Y.min()

    # cma-es options setup
    cma_options = {
        'bounds': [list(lbq), list(ubq)],
        'tolfun': 1e-15,
        'maxfevals': maxeval,
        'verb_log': 0,
        'verbose': 1,
        'verb_disp': 0,
        'CMA_stds': np.abs(ubq - lbq)
    }

    if cf is None:
        x0 = lambda: np.random.uniform(lbq, ubq)

    else:

        def initial_point_generator(cf, lb, ub, n_dim, q):
            def wrapper():
                x = np.zeros(n_dim * q, dtype='float')
                for i in range(q):
                    while True:
                        v = np.random.uniform(lb, ub)
                        if np.all(v >= lb) and np.all(v <= ub) and cf(v):
                            x[i * n_dim:(i + 1) * n_dim] = v
                            break
                return x

            return wrapper

        x0 = initial_point_generator(cf, lb, ub, n_dim, q)

    # run CMA-ES with bipop (small and large population sizes) for up
    # to 9 restarts (or until it runs out of budget)
    xopt, es = cma.fmin2(qei_feval,
                         x0=x0,
                         sigma0=0.25,
                         options=cma_options,
                         args=(model, incumbent, n_dim, func, cf),
                         bipop=True,
                         restarts=9)

    return np.reshape(xopt, (q, n_dim))
Example #22
def run_multi_opt(kriglist,
                  moboInfo,
                  ypar,
                  krigconstlist=None,
                  cheapconstlist=None):
    """
    Run the optimization of multi-objective acquisition function to find the next sampling point.

    Args:
      kriglist (list): A list containing Kriging instances.
      moboInfo (dict): A structure containing necessary information for Bayesian optimization.
      ypar (nparray): Array contains the current non-dominated solutions.
      krigconstlist (list): List of Kriging object for constraints. Defaults to None.
      cheapconstlist (list): List of constraints function. Defaults to None.
            Expected output of the constraint functions is 1 if the constraint is satisfied and 0 if not.
            The constraint functions MUST have an input of x (the decision variable to be evaluated)

    Returns:
      xnext (nparray): Suggested next sampling point as discovered by the optimization of the acquisition function
      fnext (nparray): Optimized acquisition function

    The available optimizers for the acquisition function are 'cmaes', 'ga', 'lbfgsb', 'diff_evo' and 'cobyla'.
    Note that this function runs for both unconstrained and constrained multi-objective Bayesian optimization.
    """
    acquifuncopt = moboInfo["acquifuncopt"]
    acquifunc = moboInfo["acquifunc"]

    if acquifunc.lower() == 'ehvi':
        acqufunhandle = ehvicalc
    else:
        raise ValueError("Acquisition function handle is not available")

    if acquifuncopt.lower() == 'cmaes':
        Xrand = realval(
            kriglist[0].KrigInfo["lb"], kriglist[0].KrigInfo["ub"],
            np.random.rand(moboInfo["nrestart"], kriglist[0].KrigInfo["nvar"]))
        xnextcand = np.zeros(
            shape=[moboInfo["nrestart"], kriglist[0].KrigInfo["nvar"]])
        fnextcand = np.zeros(shape=[moboInfo["nrestart"]])
        sigmacmaes = 1  # np.mean((KrigNewMultiInfo["ub"] - KrigNewMultiInfo["lb"]) / 6)
        for im in range(0, moboInfo["nrestart"]):
            if krigconstlist is None and cheapconstlist is None:  # For unconstrained problem
                xnextcand[im, :], es = cma.fmin2(acqufunhandle,
                                                 Xrand[im, :],
                                                 sigmacmaes, {
                                                     'verb_disp': 0,
                                                     'verbose': -9
                                                 },
                                                 args=(ypar, moboInfo,
                                                       kriglist))
                fnextcand[im] = es.result[1]
            else:  # For constrained problem
                xnextcand[im, :], es = cma.fmin2(multiconstfun,
                                                 Xrand[im, :],
                                                 sigmacmaes, {
                                                     'verb_disp': 0,
                                                     'verbose': -9
                                                 },
                                                 args=(ypar, kriglist,
                                                       moboInfo, krigconstlist,
                                                       cheapconstlist))
                fnextcand[im] = es.result[1]
        I = np.argmin(fnextcand)
        xnext = xnextcand[I, :]
        fnext = fnextcand[I]

    elif acquifuncopt.lower() == 'ga':
        if krigconstlist is None and cheapconstlist is None:
            templst = []
            if moboInfo['ehvisampling'] == 'efficient':
                for ij in range(np.size(ypar, 0)):
                    idx = np.where(
                        (kriglist[0].KrigInfo["y"] == ypar[ij, 0])
                        & (kriglist[1].KrigInfo["y"] == ypar[ij, 1]))[0][0]
                    templst.append(idx)

                init_seed = kriglist[0].KrigInfo["X_norm"][
                    templst, :] / 2 + 0.5
            else:
                init_seed = None

            xnext, fnext, _ = uncGA(acqufunhandle,
                                    lb=kriglist[0].KrigInfo["lb"],
                                    ub=kriglist[0].KrigInfo["ub"],
                                    args=(ypar, moboInfo, kriglist),
                                    initialization=init_seed)

        else:
            templst = []
            if moboInfo['ehvisampling'] == 'efficient':
                for ij in range(np.size(ypar, 0)):
                    idx = np.where(
                        (kriglist[0].KrigInfo["y"] == ypar[ij, 0])
                        & (kriglist[1].KrigInfo["y"] == ypar[ij, 1]))[0][0]
                    templst.append(idx)

                init_seed = kriglist[0].KrigInfo["X_norm"][
                    templst, :] / 2 + 0.5
            else:
                init_seed = None

            xnext, fnext, _ = uncGA(multiconstfun,
                                    lb=kriglist[0].KrigInfo["lb"],
                                    ub=kriglist[0].KrigInfo["ub"],
                                    args=(ypar, kriglist, moboInfo,
                                          krigconstlist, cheapconstlist),
                                    initialization=init_seed)

    elif acquifuncopt.lower() == 'lbfgsb':
        Xrand = realval(
            kriglist[0].KrigInfo["lb"], kriglist[0].KrigInfo["ub"],
            np.random.rand(moboInfo["nrestart"], kriglist[0].KrigInfo["nvar"]))
        xnextcand = np.zeros(
            shape=[moboInfo["nrestart"], kriglist[0].KrigInfo["nvar"]])
        fnextcand = np.zeros(shape=[moboInfo["nrestart"]])
        lbfgsbbound = np.hstack((kriglist[0].KrigInfo["lb"].reshape(-1, 1),
                                 kriglist[0].KrigInfo["ub"].reshape(-1, 1)))
        for im in range(0, moboInfo["nrestart"]):
            if krigconstlist is None and cheapconstlist is None:  # For unconstrained problem
                res = minimize(acqufunhandle,
                               Xrand[im, :],
                               method='L-BFGS-B',
                               bounds=lbfgsbbound,
                               args=(ypar, moboInfo, kriglist))
                xnextcand[im, :] = res.x
                fnextcand[im] = res.fun
            else:  # For constrained problem
                res = minimize(multiconstfun,
                               Xrand[im, :],
                               method='L-BFGS-B',
                               bounds=lbfgsbbound,
                               args=(ypar, kriglist, moboInfo, krigconstlist,
                                     cheapconstlist))
                xnextcand[im, :] = res.x
                fnextcand[im] = res.fun
        I = np.argmin(fnextcand)
        xnext = xnextcand[I, :]
        fnext = fnextcand[I]

    elif acquifuncopt.lower() == 'diff_evo':
        if moboInfo['ehvisampling'] == 'efficient':
            init_seed = efficientsamp(kriglist, ypar, npop=300)
        else:
            init_seed = 'latinhypercube'

        optimbound = np.hstack((kriglist[0].KrigInfo["lb"].reshape(-1, 1),
                                kriglist[0].KrigInfo["ub"].reshape(-1, 1)))
        if krigconstlist is None and cheapconstlist is None:  # For unconstrained problem
            res = differential_evolution(acqufunhandle,
                                         optimbound,
                                         init=init_seed,
                                         args=(ypar, moboInfo, kriglist))
            xnext = res.x
            fnext = res.fun
        else:
            res = differential_evolution(multiconstfun,
                                         optimbound,
                                         init=init_seed,
                                         args=(ypar, kriglist, moboInfo,
                                               krigconstlist, cheapconstlist))
            xnext = res.x
            fnext = res.fun

    elif acquifuncopt.lower() == 'cobyla':
        Xrand = realval(
            kriglist[0].KrigInfo["lb"], kriglist[0].KrigInfo["ub"],
            np.random.rand(moboInfo["nrestart"], kriglist[0].KrigInfo["nvar"]))
        xnextcand = np.zeros(
            shape=[moboInfo["nrestart"], kriglist[0].KrigInfo["nvar"]])
        fnextcand = np.zeros(shape=[moboInfo["nrestart"]])
        optimbound = []
        for i in range(len(kriglist[0].KrigInfo["ub"])):
            optimbound.append(lambda x, cc, kriglist, dd, aa, bb, itemp=i: x[
                itemp] - kriglist[0].KrigInfo["lb"][itemp])
            optimbound.append(lambda x, cc, kriglist, dd, aa, bb, itemp=i:
                              kriglist[0].KrigInfo["ub"][itemp] - x[itemp])
        for im in range(0, moboInfo["nrestart"]):
            if krigconstlist is None and cheapconstlist is None:  # For unconstrained problem
                res = fmin_cobyla(acqufunhandle,
                                  Xrand[im, :],
                                  optimbound,
                                  rhobeg=0.5,
                                  rhoend=1e-4,
                                  args=(ypar, moboInfo, kriglist))
                xnextcand[im, :] = res
                fnextcand[im] = acqufunhandle(res, ypar, moboInfo, kriglist)
            else:
                res = fmin_cobyla(multiconstfun,
                                  Xrand[im, :],
                                  optimbound,
                                  rhobeg=0.5,
                                  rhoend=1e-4,
                                  args=(ypar, kriglist, moboInfo,
                                        krigconstlist, cheapconstlist))
                xnextcand[im, :] = res
                fnextcand[im] = multiconstfun(res, ypar, kriglist, moboInfo,
                                              krigconstlist, cheapconstlist)
        I = np.argmin(fnextcand)
        xnext = xnextcand[I, :]
        fnext = fnextcand[I]

    return xnext, fnext
Example #23
def run_single_opt(krigobj, soboInfo, krigconstlist=None, cheapconstlist=None):
    """
   Run the optimization of multi-objective acquisition function to find the next sampling point.

   Args:
     krigobj (object): Kriging object.
     soboInfo (dict): A structure containing necessary information for Bayesian optimization.
     krigconstlist (list): List of Kriging object for constraints. Defaults to None.
     cheapconstlist (list): List of constraints function. Defaults to None.
            Expected output of the constraint functions is 1 if the constraint is satisfied and 0 if not.
            The constraint functions MUST have an input of x (the decision variable to be evaluated)

   Returns:
     xnext (nparray): Suggested next sampling point as discovered by the optimization of the acquisition function
     fnext (nparray): Optimized acquisition function

   The available optimizers for the acquisition function are 'cmaes', 'lbfgsb', 'cobyla'.
   Note that this function runs for both unconstrained and constrained single-objective Bayesian optimization.
   """
    acquifuncopt = soboInfo["acquifuncopt"]
    acquifunc = soboInfo["acquifunc"]

    if acquifunc.lower() == 'parego':
        acquifunc = soboInfo['paregoacquifunc']
    else:
        pass

    if acquifuncopt.lower() == 'cmaes':
        Xrand = realval(
            krigobj.KrigInfo["lb"], krigobj.KrigInfo["ub"],
            np.random.rand(soboInfo["nrestart"], krigobj.KrigInfo["nvar"]))
        xnextcand = np.zeros(
            shape=[soboInfo["nrestart"], krigobj.KrigInfo["nvar"]])
        fnextcand = np.zeros(shape=[soboInfo["nrestart"]])
        sigmacmaes = 1  # np.mean((KrigNewMultiInfo["ub"] - KrigNewMultiInfo["lb"]) / 6)
        for im in range(0, soboInfo["nrestart"]):
            if krigconstlist is None and cheapconstlist is None:  # For unconstrained problem
                xnextcand[im, :], es = cma.fmin2(krigobj.predict,
                                                 Xrand[im, :],
                                                 sigmacmaes, {
                                                     'verb_disp': 0,
                                                     'verbose': -9
                                                 },
                                                 args=(acquifunc,))
                fnextcand[im] = es.result[1]
            else:  # For constrained problem
                xnextcand[im, :], es = cma.fmin2(singleconstfun,
                                                 Xrand[im, :],
                                                 sigmacmaes, {
                                                     'verb_disp': 0,
                                                     'verbose': -9
                                                 },
                                                 args=(krigobj, acquifunc,
                                                       krigconstlist,
                                                       cheapconstlist))
                fnextcand[im] = es.result[1]
        I = np.argmin(fnextcand)
        xnext = xnextcand[I, :]
        fnext = fnextcand[I]

    elif acquifuncopt.lower() == 'lbfgsb':
        Xrand = realval(
            krigobj.KrigInfo["lb"], krigobj.KrigInfo["ub"],
            np.random.rand(soboInfo["nrestart"], krigobj.KrigInfo["nvar"]))
        xnextcand = np.zeros(
            shape=[soboInfo["nrestart"], krigobj.KrigInfo["nvar"]])
        fnextcand = np.zeros(shape=[soboInfo["nrestart"]])
        lbfgsbbound = np.hstack((krigobj.KrigInfo["lb"].reshape(-1, 1),
                                 krigobj.KrigInfo["ub"].reshape(-1, 1)))
        for im in range(0, soboInfo["nrestart"]):
            if krigconstlist is None and cheapconstlist is None:  # For unconstrained problem
                res = minimize(krigobj.predict,
                               Xrand[im, :],
                               method='L-BFGS-B',
                               bounds=lbfgsbbound,
                               args=(acquifunc,))
                xnextcand[im, :] = res.x
                fnextcand[im] = res.fun
            else:  # For constrained problem (on progress)
                res = minimize(singleconstfun,
                               Xrand[im, :],
                               method='L-BFGS-B',
                               bounds=lbfgsbbound,
                               args=(krigobj, acquifunc, krigconstlist,
                                     cheapconstlist))
                xnextcand[im, :] = res.x
                fnextcand[im] = res.fun
        I = np.argmin(fnextcand)
        xnext = xnextcand[I, :]
        fnext = fnextcand[I]

    elif acquifuncopt.lower() == 'diff_evo':
        xnextcand = np.zeros(
            shape=[soboInfo["nrestart"], krigobj.KrigInfo["nvar"]])
        fnextcand = np.zeros(shape=[soboInfo["nrestart"]])
        optimbound = np.hstack((krigobj.KrigInfo["lb"].reshape(-1, 1),
                                krigobj.KrigInfo["ub"].reshape(-1, 1)))
        for im in range(0, soboInfo["nrestart"]):
            if krigconstlist is None and cheapconstlist is None:  # For unconstrained problem
                res = differential_evolution(krigobj.predict,
                                             optimbound,
                                             args=(acquifunc, ))
                xnextcand[im, :] = res.x
                fnextcand[im] = res.fun
            else:
                res = differential_evolution(singleconstfun,
                                             optimbound,
                                             args=(krigobj, acquifunc,
                                                   krigconstlist,
                                                   cheapconstlist))
                xnextcand[im, :] = res.x
                fnextcand[im] = res.fun
        I = np.argmin(fnextcand)
        xnext = xnextcand[I, :]
        fnext = fnextcand[I]

    elif acquifuncopt.lower() == 'cobyla':
        Xrand = realval(
            krigobj.KrigInfo["lb"], krigobj.KrigInfo["ub"],
            np.random.rand(soboInfo["nrestart"], krigobj.KrigInfo["nvar"]))
        xnextcand = np.zeros(
            shape=[soboInfo["nrestart"], krigobj.KrigInfo["nvar"]])
        fnextcand = np.zeros(shape=[soboInfo["nrestart"]])
        optimbound = []
        for i in range(len(krigobj.KrigInfo["ub"])):
            optimbound.append(lambda x, krigobj, aa, bb, cc, itemp=i: x[itemp]
                              - krigobj.KrigInfo["lb"][itemp])
            optimbound.append(lambda x, krigobj, aa, bb, cc, itemp=i: krigobj.
                              KrigInfo["ub"][itemp] - x[itemp])
        for im in range(0, soboInfo["nrestart"]):
            if krigconstlist is None and cheapconstlist is None:  # For unconstrained problem
                res = fmin_cobyla(krigobj.predict,
                                  Xrand[im, :],
                                  optimbound,
                                  rhobeg=0.5,
                                  rhoend=1e-4,
                                  args=(acquifunc,))
                xnextcand[im, :] = res
                fnextcand[im] = krigobj.predict(res, acquifunc)
            else:
                res = fmin_cobyla(singleconstfun,
                                  Xrand[im, :],
                                  optimbound,
                                  rhobeg=0.5,
                                  rhoend=1e-4,
                                  args=(krigobj, acquifunc, krigconstlist,
                                        cheapconstlist))
                xnextcand[im, :] = res
                fnextcand[im] = singleconstfun(res, krigobj, acquifunc,
                                               krigconstlist, cheapconstlist)
        I = np.argmin(fnextcand)
        xnext = xnextcand[I, :]
        fnext = fnextcand[I]

    return (xnext, fnext)
Example #24
TRAJ_LEN = 10
MIN_DIST = 2
DIM = GOAL_VEL.shape[1]

num_opt_vars = AGENTS * TRAJ_LEN * DIM  # the amount of variables to be optimized via cma
error_calc = ErrorCalculator(TIMESTEP, TRAJ_LEN, MIN_DIST, START_VEL, START_POS, GOAL_VEL, GOAL_POS)


options = cma.CMAOptions()
options.set('ftarget', 1e-1)
options.set('bounds', [-MAX_JERK, MAX_JERK])
# es = cma.CMAEvolutionStrategy(num_opt_vars * [0], 0.5, options)
# es.opts.set('opt', value) # use this for chaning options while running
# es.optimize(error_calc.get_error)
# xbest = es.result[0]
xbest = cma.fmin2(error_calc.getError, num_opt_vars * [0], 0.5, options)[0]

print("\n### FINAL ERROR #############################################")
print(error_calc.getError(xbest))
print("\n### FINAL JERK TRAJECTORY #############################################")
print(error_calc.jerk_traj)
print("\n### FINAL ACC TRAJECTORY #############################################")
print(error_calc.acc_traj)
print("\n### FINAL VEL TRAJECTORY #############################################")
print(error_calc.vel_traj)
print("\n### FINAL POS TRAJECTORY #############################################")
print(error_calc.pos_traj)


path = os.path.join(sys.path[0], "trajectories")
np.save(path + "/jerk_traj.npy", error_calc.jerk_traj)
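Since fmin2 returns the pair (xbest, es), indexing [0] above keeps only the solution and discards the strategy object. If the final error is wanted without re-evaluating, a sketch capturing both return values:

xbest, es = cma.fmin2(error_calc.getError, num_opt_vars * [0], 0.5, options)
print(es.result.fbest)  # best error found, no extra evaluation needed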
Example #25
    def train(self):

        """
        Learn your (final) policy.

        Use the evolution strategy algorithm CMA-ES: https://pypi.org/project/cma/

        Possible action: [0, 1, 2]
        Range observation (tuple):
            - position: [-1.2, 0.6]
            - velocity: [-0.07, 0.07]
        """
        def policy_action(position, velocity, policy):
                '''Function that returns the action given a state and a policy'''
                i_position, i_velocity = get_discretized_env(position, velocity)
                # print(i_position, i_velocity)
                action = policy[i_position][i_velocity]
                return action

        def get_discretized_env(position, velocity, velocities=self.velocities, positions=self.positions):
                '''Function that gives the indices to look up in the discretized position and velocity space'''
                i = 0
                while velocity > velocities[i]:
                    i += 1
                velocity_index = i
                j = 0
                while position > positions[j]:
                    j += 1
                position_index = j
                return position_index, velocity_index

        env = Environment()

        # For debug purposes
        self.min_value = 999999999

        def obj_function(policy):
            ''' Function that takes a policy and runs it for 200 steps of the environment. It returns the fitness of the policy.
            '''
            env.reset()
            iter_counter = 0
            x = policy.reshape(self.positions.shape[0]*self.velocities.shape[0], -1)
            x = np.floor(x)
            d_policy = x.reshape(self.positions.shape[0], self.velocities.shape[0])

            distances = []
            distance_mid = []
            energy = []
            malus = 200

            for i in range(200):
                # env.render()
                # We take an action according to the given policy
                position, velocity = env.state
                distances.append(np.absolute(0.6 - position))
                distance_mid.append(np.absolute(-0.56 - position))
                energy.append(0.5*(velocity**2))
                if position == 0.6:
                    # If we enter here we won the game
                    malus = (i / 200) * 200
                    value = -sum(distance_mid) -max(energy) + malus + min(distances)*50 - (np.absolute(min(distances) - max(distances))*100)

                    # For debug purposes :
                    if value < self.min_value:
                        self.min_value = value
                        print('New best value = '+str(self.min_value))
                    return value
                action = policy_action(position, velocity, d_policy)
                _, _ = env.act(int(np.floor(action)))

            value = -sum(distance_mid)-max(energy) + malus + min(distances)*50 -(np.absolute(min(distances) - max(distances))*100)
            # For debug purposes :
            if value < self.min_value:
                self.min_value = value
                print('New best value = '+str(self.min_value))
            return value

        # We launch a CMA-ES run to find a policy that minimizes the objective function value.
        # We decided to fix the ftarget value so it doesn't take too long to run, but we could
        # remove it to optimize the function even more.
        best_policy, _ = cma.fmin2(obj_function, self.init, 2, {
            # 'BoundaryHandler': 'BoundPenalty',
            'BoundaryHandler': 'BoundTransform',
            'bounds': [0, 3],
            'verbose': 1,
            'ftarget': -100,
            'seed': 237591
        })
        print("Optimization FINISHED")
        #self.policy = best_policy
        self.b_policy = best_policy
        self.policy = np.floor(best_policy).reshape(self.positions.shape[0], self.velocities.shape[0])
        print("Best Policy updated"+str(self.policy))
Example #26
def run_single_opt(krigobj,
                   soboInfo,
                   krigconstlist=None,
                   cheapconstlist=None,
                   pool=None):
    """
    Optimize the single-objective acquisition function to find the next
    sampling point.

    The available optimizers for the acquisition functions are:
        'cmaes', 'lbfgsb', 'diff_evo', 'cobyla'

    Note that this function runs for both unconstrained and constrained
    single-objective Bayesian optimization.

    Args:
        krigobj (kriging_model.Kriging): Objective Kriging instance.
        soboInfo (dict): A structure containing necessary information
            for Bayesian optimization.
        krigconstlist ([kriging_model.Kriging], optional): Kriging
            instances for constraints. Defaults to None.
        cheapconstlist ([func], optional): Constraint functions.
            Defaults to None. Expected output of the constraint
            functions is 1 if satisfied and 0 if not.
            The constraint functions MUST have an input of x (the
            decision variable to be evaluated).
        pool (mp.Pool, optional): An existing mp.Pool instance can be
            specified and passed to solvers/acquisition functions for
            multiprocessing, if supported. Default is None.
    Returns:
        xnext (np.ndarray): n_dv-len array of suggested next sampling
            point as discovered by the optimization of the acquisition
            function.
        fnext (np.ndarray): n_obj-len array of optimized acquisition
            function fitness metrics.
    """
    acquifuncopt = soboInfo["acquifuncopt"]
    acquifunc = soboInfo["acquifunc"]

    if acquifunc.lower() == 'parego':
        acquifunc = soboInfo['paregoacquifunc']
    else:
        # Seems like the string is passed straight to prediction.prediction
        pass

    n_restart = soboInfo["nrestart"]
    n_var = krigobj.KrigInfo["nvar"]
    low_bound = krigobj.KrigInfo["lb"]
    up_bound = krigobj.KrigInfo["ub"]

    xnextcand = np.zeros(shape=[n_restart, n_var])
    fnextcand = np.zeros(shape=[n_restart])

    if acquifuncopt.lower() == 'cmaes':
        Xrand = realval(low_bound, up_bound, np.random.rand(n_restart, n_var))
        sigmacmaes = 1  # np.mean((KrigNewMultiInfo["ub"] - KrigNewMultiInfo["lb"]) / 6)
        for im in range(0, n_restart):
            if krigconstlist is None and cheapconstlist is None:  # For unconstrained problem
                xnextcand[im, :], es = cma.fmin2(krigobj.predict,
                                                 Xrand[im, :],
                                                 sigmacmaes, {
                                                     'verb_disp': 0,
                                                     'verbose': -9
                                                 },
                                                 args=(acquifunc,))
                fnextcand[im] = es.result[1]
            else:  # For constrained problem
                xnextcand[im, :], es = cma.fmin2(singleconstfun,
                                                 Xrand[im, :],
                                                 sigmacmaes, {
                                                     'verb_disp': 0,
                                                     'verbose': -9
                                                 },
                                                 args=(krigobj, acquifunc,
                                                       krigconstlist,
                                                       cheapconstlist))
                fnextcand[im] = es.result[1]

    elif acquifuncopt.lower() == 'lbfgsb':
        Xrand = realval(low_bound, up_bound, np.random.rand(n_restart, n_var))

        lbfgsbbound = np.hstack(
            (low_bound.reshape(-1, 1), up_bound.reshape(-1, 1)))
        for im in range(0, n_restart):
            if krigconstlist is None and cheapconstlist is None:  # For unconstrained problem
                res = minimize(krigobj.predict,
                               Xrand[im, :],
                               method='L-BFGS-B',
                               bounds=lbfgsbbound,
                               args=(acquifunc,))
                xnextcand[im, :] = res.x
                fnextcand[im] = res.fun
            else:  # For constrained problem (on progress)
                res = minimize(singleconstfun,
                               Xrand[im, :],
                               method='L-BFGS-B',
                               bounds=lbfgsbbound,
                               args=(krigobj, acquifunc, krigconstlist,
                                     cheapconstlist))
                xnextcand[im, :] = res.x
                fnextcand[im] = res.fun

    elif acquifuncopt.lower() == 'diff_evo':
        if 'de_kwargs' in soboInfo:
            de_kwargs = soboInfo['de_kwargs']
        else:
            de_kwargs = {}

        de_kwargs['init'] = de_kwargs.get('init', 'latinhypercube')

        optimbound = list(zip(low_bound, up_bound))
        de_args = (singleconstfun, optimbound)
        if 'constraints' in de_kwargs:
            cheapconstlist = None  # DE handles cheap constraint functions directly
        args = (krigobj, acquifunc, krigconstlist, cheapconstlist, None, 'inf')
        de_kwargs['args'] = args

        if pool is not None:
            workers = pool.map
            # TODO: Check - we shouldn't need this fix anymore
            # # If MP, set n_cpu to 1 to stop pool in EHVI - pass in existing pool
            # soboInfo['n_cpu'] = 1
        else:
            workers = 1  # Default DE flag

        for im in range(n_restart):
            r_t = time.time()

            res = differential_evolution(*de_args,
                                         **de_kwargs,
                                         workers=workers)

            xnextcand[im, :] = res.x
            fnextcand[im] = res.fun
            print_res(r_t,
                      res.fun,
                      res.x,
                      success=res.success,
                      msg=res.message,
                      n_eval=res.nfev,
                      n_gen=res.nit,
                      i_restart=im,
                      n_restart=n_restart)

    elif acquifuncopt.lower() == 'cobyla':
        Xrand = realval(low_bound, up_bound, np.random.rand(n_restart, n_var))

        optimbound = []
        for i in range(len(up_bound)):
            # *args absorbs whatever extra arguments fmin_cobyla forwards to
            # the constraints (consargs defaults to the objective's args)
            optimbound.append(lambda x, *args, itemp=i:
                              x[itemp] - krigobj.KrigInfo["lb"][itemp])
            optimbound.append(lambda x, *args, itemp=i:
                              krigobj.KrigInfo["ub"][itemp] - x[itemp])
        for im in range(0, n_restart):
            if krigconstlist is None and cheapconstlist is None:  # For unconstrained problem
                res = fmin_cobyla(krigobj.predict,
                                  Xrand[im, :],
                                  optimbound,
                                  rhobeg=0.5,
                                  rhoend=1e-4,
                                  args=(acquifunc,))  # args must be a tuple
                xnextcand[im, :] = res
                fnextcand[im] = krigobj.predict(res, acquifunc)
            else:
                res = fmin_cobyla(singleconstfun,
                                  Xrand[im, :],
                                  optimbound,
                                  rhobeg=0.5,
                                  rhoend=1e-4,
                                  args=(krigobj, acquifunc, krigconstlist,
                                        cheapconstlist))
                xnextcand[im, :] = res
                fnextcand[im] = singleconstfun(res, krigobj, acquifunc,
                                               krigconstlist, cheapconstlist)

    idx = np.argmin(fnextcand)
    xnext = xnextcand[idx, :]
    fnext = fnextcand[idx]

    return (xnext, fnext)
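Every branch above follows the same multi-restart pattern: start the optimizer from several random points inside the bounds and keep the best candidate. A minimal standalone sketch of that pattern, with a plain quadratic objective standing in for the kriging predictor (all names here are illustrative, not from the source):

import numpy as np
import cma

def sphere(x):  # stand-in for krigobj.predict
    return float(np.sum(np.asarray(x) ** 2))

n_restart, n_var = 5, 3
low_bound, up_bound = np.zeros(n_var), np.ones(n_var)
xnextcand = np.zeros((n_restart, n_var))
fnextcand = np.zeros(n_restart)

for im in range(n_restart):
    x0 = low_bound + (up_bound - low_bound) * np.random.rand(n_var)
    _, es = cma.fmin2(sphere, x0, 0.25,
                      {'verb_disp': 0, 'verbose': -9,
                       'bounds': [list(low_bound), list(up_bound)]})
    xnextcand[im, :] = es.result[0]   # best point of this restart
    fnextcand[im] = es.result[1]      # its objective value

idx = np.argmin(fnextcand)            # keep the best restart overall
print(xnextcand[idx, :], fnextcand[idx])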
Example #27
    def minimize(self, method='BFGS', options=None, compile=True):
        loss = self.cost_function_fidelity

        if method == 'cma':
            # CMA-ES evolutionary optimizer
            import cma
            r = cma.fmin2(lambda p: K.to_numpy(loss(p)), self.params, 2)
            result = r[1].result.fbest
            parameters = r[1].result.xbest

        elif method == 'sgd':
            # the gradient-support check does not depend on individual gates,
            # so it is performed once rather than in a loop over circuit.queue
            if not K.supports_gradients:
                from qibo.config import raise_error
                raise_error(
                    RuntimeError,
                    'Use the tensorflow backend in order to compute gradients.'
                )

            sgd_options = {
                "nepochs": 5001,
                "nmessage": 1000,
                "optimizer": "Adamax",
                "learning_rate": 0.5
            }
            if options is not None:
                sgd_options.update(options)

            # proceed with the training
            vparams = K.Variable(self.params)
            optimizer = getattr(K.optimizers, sgd_options["optimizer"])(
                learning_rate=sgd_options["learning_rate"])

            def opt_step():
                with K.GradientTape() as tape:
                    l = loss(vparams)
                grads = tape.gradient(l, [vparams])
                optimizer.apply_gradients(zip(grads, [vparams]))
                return l, vparams

            if compile:
                opt_step = K.function(opt_step)

            l_optimal, params_optimal = float('inf'), self.params  # best seen so far
            for e in range(sgd_options["nepochs"]):
                l, vparams = opt_step()
                if l < l_optimal:
                    l_optimal, params_optimal = l, vparams
                if e % sgd_options["nmessage"] == 0:
                    print('iteration %d : loss %f' % (e, K.to_numpy(l)))

            result = K.to_numpy(self.cost_function(params_optimal))
            parameters = K.to_numpy(params_optimal)

        else:
            from scipy.optimize import minimize
            m = minimize(lambda p: K.to_numpy(loss(p)),
                         self.params,
                         method=method,
                         options=options)
            result = m.fun
            parameters = m.x

        return result, parameters
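Assuming an instance of the class above (called model here purely for illustration; the class name is not shown in this snippet), the dispatcher can be exercised with any of the three method values:

# Hypothetical usage; `model` is a placeholder for an instance of the class.
best_loss, best_params = model.minimize(method='cma')     # CMA-ES branch
# best_loss, best_params = model.minimize(method='sgd',
#                                         options={'nepochs': 2001})
# best_loss, best_params = model.minimize(method='BFGS')  # scipy fallback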
Example #28
square = np.vectorize(lambda x: x**2)

X = np.random.randint(low=0, high=3, size=10)
#X = [5 for _ in range(200)]
print(X)


def obj_fun(X):
    X = square(X)
    return X.sum()


print(obj_fun(X))
solution, d = cma.fmin2(
    obj_fun, X, 1, {
        'BoundaryHandler': 'BoundTransform',
        'bounds': [0, 3],
        'popsize': 1000,
        'CMA_mu': 10,
        'ftarget': 2
    })

print(d)
solution = np.floor(solution)  # map the continuous optimum back to integers
print(solution)

# cma.fmin(obj_fun, X, 0.2, {'boundary_handling': 'BoundPenalty',
#                             'bounds':[0,3],
#                             })
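Flooring the continuous optimum, as above, is the simplest way to recover integers after the fact. Recent pycma releases also expose an 'integer_variables' option that treats selected coordinates as integers during the search itself; the sketch below applies it to the same problem (the option name is taken from pycma's documented options, not from this example):

# Let CMA-ES handle the integer coordinates internally (recent pycma assumed).
int_solution, int_es = cma.fmin2(
    obj_fun, X, 1, {
        'bounds': [0, 3],
        'integer_variables': list(range(len(X))),  # all coordinates integer
        'ftarget': 2,
    })
print(np.round(int_solution))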
Example #29
def optimize_acqf_cmaes_cf(acq, cf, problem_bounds, budget):
    import numpy as np
    import cma
    import torch  # needed for the tensor conversions below
    import warnings

    def initial_point_generator(cf, lb, ub):
        def wrapper():
            # np.random.uniform already respects [lb, ub], so only the
            # constraint function (if one was given) needs checking
            while True:
                x = np.random.uniform(lb, ub)
                if cf is None or cf(x):
                    return x
        return wrapper

    def acq_wrapper(acq, cf):
        got_cf = cf is not None

        def func(X):
            if isinstance(X, list):
                X = np.reshape(np.array(X), (len(X), -1))

            # convert to torch
            Xt = torch.from_numpy(X).unsqueeze(-2)

            # negate: the acquisition is maximised but CMA-ES minimises
            Y = -acq(Xt)

            # convert back to numpy and then to list (for CMA-ES)
            y = Y.double().numpy().ravel().tolist()

            # evaluate constraint function for each decision vector
            if got_cf:
                for i, x in enumerate(X):
                    if not cf(x):
                        y[i] = np.inf

            # CMA-ES expects a float if it gives one decision vector
            if len(y) == 1:
                return y[0]

            return y
        return func

    lb = problem_bounds[0].numpy()
    ub = problem_bounds[1].numpy()

    cma_options = {'bounds': [list(lb), list(ub)],
                   'tolfun': 1e-7,
                   'maxfevals': budget,
                   'verb_disp': 0,
                   'verb_log': 0,
                   'verbose': -1,
                   'CMA_stds': np.abs(ub - lb),
                   }

    x0 = initial_point_generator(cf, lb, ub)
    f = acq_wrapper(acq, cf)

    # ignore warnings about flat fitness (i.e. starting in a flat EI location)
    # catch_warnings restores the filter state on exit, so no explicit
    # resetwarnings() call is needed afterwards
    with torch.no_grad(), warnings.catch_warnings():
        warnings.simplefilter('ignore')
        xopt, es = cma.fmin2(objective_function=None,
                             parallel_objective=f,
                             x0=x0, sigma0=0.25, options=cma_options,
                             bipop=True, restarts=9)

    train_xnew = torch.from_numpy(es.best.x).unsqueeze(0)
    acq_f = -torch.from_numpy(np.array(es.best.f))

    return train_xnew, acq_f
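The call above feeds the batched objective to CMA-ES through parallel_objective, so the whole population is evaluated in one call and infeasible points come back masked with np.inf. A minimal standalone sketch of the same mechanism on a toy function (names are illustrative):

import numpy as np
import cma

def batched_sphere(X):
    # X is a list of candidate vectors; return one fitness per candidate
    return [float(np.sum(np.asarray(x) ** 2)) for x in X]

xopt, es = cma.fmin2(objective_function=None,
                     parallel_objective=batched_sphere,
                     x0=3 * [0.5], sigma0=0.25,
                     options={'verb_disp': 0})
print(es.best.x, es.best.f)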
Example #30
def main(K, population_size, nbre_iteration):
    # K = (Kp, Ki, Kd) is the vector of PID controller gains to tune.
    # cma.fmin2(objective, x0, sigma0, options) returns (xbest, es); es is
    # the CMAEvolutionStrategy instance, not the best fitness value itself.
    best_solution, es = cma.fmin2(
        cma_optimize, [K[0], K[1], K[2]], 15,
        options={'maxiter': nbre_iteration,
                 'bounds': [[0, 0, 0], [50, 50, 50]],
                 'popsize': population_size})
    print("best solution:", best_solution)  # tuned PID gains
    print("best fitness:", es.result[1])    # objective value at the optimum