Example #1
    def __createOptimizer(self, n, tolerance, lower_bounds, upper_bounds,
                          local_max_eval, global_max_eval):
        """Creates an optimizer to find joint configuration that achieves specified tolerance, the number of joints
        could be less than the number of joints in robot if some joints are locked. Since its not possible to change
        the optimizer size after creation, re-creating the optimizer was the simplest way to accommodate locked joints

        :param n: number of joints in the configuration
        :type n: int
        :param tolerance: stopping criterion for optimizer
        :type tolerance: float
        :param lower_bounds: lower joint bounds
        :type lower_bounds: numpy.ndarray
        :param upper_bounds: upper joint bounds
        :type upper_bounds: numpy.ndarray
        :param local_max_eval: number of evaluations for local optimization
        :type local_max_eval: int
        :param global_max_eval: number of evaluations for global optimization
        :type global_max_eval: int
        """
        # fixed seed so repeated solves are reproducible
        nlopt.srand(10)
        # MLSL: multi-start global search over the joint space
        self.optimizer = nlopt.opt(nlopt.G_MLSL, n)
        self.optimizer.set_lower_bounds(lower_bounds)
        self.optimizer.set_upper_bounds(upper_bounds)
        self.optimizer.set_min_objective(self.objective)
        self.optimizer.set_stopval(tolerance)
        self.optimizer.set_maxeval(global_max_eval)
        self.optimizer.set_ftol_abs(1e-6)

        # gradient-based SLSQP performs the local refinement for each start
        opt = nlopt.opt(nlopt.LD_SLSQP, n)
        opt.set_maxeval(local_max_eval)
        opt.set_ftol_abs(1e-6)
        self.optimizer.set_local_optimizer(opt)
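
For reference, here is the same MLSL-plus-SLSQP pattern as a standalone sketch; the sphere objective, bounds, and evaluation budget are illustrative, not taken from the original class:

import numpy as np
import nlopt

def sphere(x, grad):
    # SLSQP is gradient-based, so fill `grad` in place when it is requested
    if grad.size > 0:
        grad[:] = 2.0 * x
    return float(np.dot(x, x))

n = 3
opt = nlopt.opt(nlopt.G_MLSL, n)
opt.set_lower_bounds([-np.pi] * n)
opt.set_upper_bounds([np.pi] * n)
opt.set_min_objective(sphere)
opt.set_maxeval(500)  # MLSL needs an explicit budget (or stopval) to terminate

local = nlopt.opt(nlopt.LD_SLSQP, n)
local.set_ftol_abs(1e-6)
opt.set_local_optimizer(local)

x_best = opt.optimize(np.random.uniform(-1.0, 1.0, n))
print(x_best, opt.last_optimum_value())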
Example #2
import numpy as np
import nlopt


def optimize_obj(obj_val, num_parameters, params=None):
    # `params` is effectively required despite the None default: 'ansatz',
    # 'seed', 'ftol_rel' and 'xtol_rel' are read from it unconditionally below
    options = {}
    try:
        init_points = params['sample_points'][0]
    except (KeyError, TypeError):
        init_points = np.random.uniform(-np.pi, np.pi, num_parameters)
    try:
        options['maxiter'] = params['n_iter'] + params['init_points']
    except (KeyError, TypeError):
        options['maxiter'] = 100

    def objective(x, grad):
        # nlopt objectives take (x, grad); grad is left untouched here, so a
        # gradient-based (LD_*) local method would need it filled in
        f = obj_val(x)
        return f

    if params['ansatz'] == 'QAOA':
        lb = np.array([0, 0] * params['ansatz_depth'])
        ub = np.array([np.pi, 2*np.pi] * params['ansatz_depth'])
    elif params['ansatz'] == 'RYRZ':
        lb = np.array([-np.pi] * num_parameters)
        ub = np.array([np.pi] * num_parameters)
    else:
        raise ValueError("Unsupported ansatz: {}".format(params['ansatz']))

    nlopt.srand(params['seed'])
    opt = nlopt.opt(nlopt.G_MLSL_LDS, num_parameters)
    try:
        local_opt_method = getattr(nlopt, params['localopt_method'])
    except AttributeError:
        raise ValueError("Incorrect local opt method: {}".format(
            params['localopt_method']))
    local_opt = nlopt.opt(local_opt_method, num_parameters)
    local_opt.set_lower_bounds(lb)
    local_opt.set_upper_bounds(ub)
    opt.set_ftol_rel(params['ftol_rel'])
    opt.set_xtol_rel(params['xtol_rel'])
    opt.set_local_optimizer(local_opt)
    opt.set_min_objective(objective)
    opt.set_population(params['max_active_runs'])
    opt.set_maxeval(options['maxiter'])
    opt.set_lower_bounds(lb)
    opt.set_upper_bounds(ub)
    x = opt.optimize(init_points)
    return x
Example #3

import numpy as np
import nlopt


def optimize_obj(obj_val, num_parameters, params=None):
    # as in the variant above, `params` is effectively required despite the
    # None default
    options = {}
    try:
        init_points = params['sample_points'][0]
    except (KeyError, TypeError):
        init_points = np.random.uniform(-np.pi, np.pi, num_parameters)
    try:
        options['maxiter'] = params['n_iter'] + params['init_points']
    except (KeyError, TypeError):
        options['maxiter'] = 100

    def objective(x, grad):
        # nlopt objectives take (x, grad); COBYLA never uses grad
        f = obj_val(x)
        return f

    nlopt.srand(params['seed'])
    opt = nlopt.opt(nlopt.LN_COBYLA, num_parameters)
    opt.set_min_objective(objective)
    opt.set_maxeval(options['maxiter'])

    if params['ansatz'] == 'QAOA':
        lb = np.array([0, 0] * params['ansatz_depth'])
        ub = np.array([np.pi, 2 * np.pi] * params['ansatz_depth'])
    elif params['ansatz'] == 'RYRZ':
        lb = np.array([-np.pi] * num_parameters)
        ub = np.array([np.pi] * num_parameters)
    else:
        raise ValueError("Unsupported ansatz: {}".format(params['ansatz']))

    #dist_to_bound = min(min(ub-init_points),min(init_points-lb))
    #opt.set_initial_step(dist_to_bound)
    opt.set_ftol_rel(params['ftol_rel'])
    opt.set_xtol_rel(params['xtol_rel'])

    opt.set_lower_bounds(lb)
    opt.set_upper_bounds(ub)
    x = opt.optimize(init_points)
    return x
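
A usage sketch for the two optimize_obj variants above (the toy objective and parameter values are illustrative; LN_BOBYQA is one valid nlopt algorithm name for the first variant's localopt_method):

import numpy as np

params = {
    'ansatz': 'RYRZ',            # selects the [-pi, pi] box bounds
    'ansatz_depth': 1,
    'seed': 42,
    'n_iter': 90,
    'init_points': 10,           # maxiter = n_iter + init_points = 100
    'sample_points': None,       # falls back to a random starting point
    'ftol_rel': 1e-6,
    'xtol_rel': 1e-6,
    'localopt_method': 'LN_BOBYQA',   # used by the G_MLSL_LDS variant only
    'max_active_runs': 10,            # used by the G_MLSL_LDS variant only
}
x = optimize_obj(lambda theta: float(np.sum(np.cos(theta))), 4, params)
print(x)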
Example #4

import numpy as np
from scipy.optimize import fmin_l_bfgs_b
# fmin_cma (a CMA-ES minimizer) is assumed to be provided elsewhere in the
# source module; it is not part of numpy or scipy.


def global_optimization(objective_function, boundaries, optimizer, maxf,
                        x0=None, approx_grad=True, random=np.random,
                        *args, **kwargs):
    """Maximize objective_function within given boundaries.

    This function optimizes an objective function in a search space with the
    given boundaries. The optimizer may use up to maxf evaluations of the
    objective function. The optimizer is specified by a string which may be
    any of "direct", "direct+lbfgs", "random", "random+lbfgs", "cmaes", or
    "cmaes+lbfgs".
    """
    if optimizer in ["direct", "direct+lbfgs"]:
        # Use DIRECT to perform approximate global optimization of
        # objective_function
        try:
            import nlopt
        except ImportError:
            raise Exception("'direct' optimizer requires the package nlopt."
                            "You may install it using "
                            "'sudo apt-get install python-nlopt'")
        nlopt.srand(0)
        opt = nlopt.opt(nlopt.GN_DIRECT_L_RAND, boundaries.shape[0])
        opt.set_lower_bounds(boundaries[:, 0])
        opt.set_upper_bounds(boundaries[:, 1])
        opt.set_maxeval(maxf)

        def prox_func(params, grad):
            # Note: nlopt minimizes function, hence the minus
            func_value = -objective_function(params)
            if np.iterable(func_value):
                return func_value[0]
            else:
                return func_value
        opt.set_min_objective(prox_func)
        x0 = opt.optimize(boundaries.mean(1))
    elif optimizer in ["random", "random+lbfgs"]:
        # Sample maxf points uniform randomly from the search space and
        # remember the one with maximal objective value
        if x0 is not None:
            f_opt = objective_function(x0)
        else:
            f_opt = -np.inf
        for _ in range(maxf):
            x0_trial = \
                random.uniform(size=boundaries.shape[0]) \
                * (boundaries[:, 1] - boundaries[:, 0]) \
                + boundaries[:, 0]
            f_trial = objective_function(x0_trial)
            if f_trial > f_opt:
                f_opt = f_trial
                x0 = x0_trial
    elif optimizer in ["cmaes", "cmaes+lbfgs"]:
        # Use CMAES to perform approximate global optimization of
        # objective_function
        if x0 is None:
            x0 = boundaries.mean(1)
        x0 = fmin_cma(lambda x, compute_gradient=False: -objective_function(x),
                      x0=x0, xL=boundaries[:, 0], xU=boundaries[:, 1],
                      sigma0=kwargs.get("sigma0", 0.01), maxfun=maxf)
    elif x0 is None:
        raise Exception("Unknown optimizer %s and x0 is None."
                        % optimizer)

    if optimizer in ["direct", "random", "cmaes"]:
        # return DIRECT/Random/CMAES solution without refinement
        return x0
    elif optimizer in ["lbfgs", "direct+lbfgs", "random+lbfgs", "cmaes+lbfgs"]:
        # refine solution with L-BFGS
        def proxy_function(x):
            return -objective_function(x)
        # gradients are always approximated numerically here; the
        # `approx_grad` argument is accepted for signature compatibility only
        res = fmin_l_bfgs_b(proxy_function, x0,
                            approx_grad=True,
                            bounds=boundaries, disp=0)
        return res[0]
    else:
        raise Exception("Unknown optimizer %s" % optimizer)
Example #5
import random

from modules.clusterization import clusterize
from numba import jit
from termcolor import colored
import nlopt
# load_arguments, SetOfMolecules, Calculation and Parameterization are assumed
# to come from the project's other modules; their imports are not part of this
# snippet.


# Numba's nopython mode keeps its own RNG state, separate from CPython's
# `random` module, so seeding must be done inside a jitted function.
@jit(nopython=True, cache=True)
def numba_seed(seed):
    random.seed(seed)


if __name__ == '__main__':
    args = load_arguments()
    if args.random_seed != 0:
        random.seed(args.random_seed)
        numba_seed(args.random_seed)
        nlopt.srand(args.random_seed)
    print(
        colored("\nMACH is running with mode: {}\n".format(args.mode), "blue"))
    if args.mode == "set_of_molecules_info":
        set_of_molecules = SetOfMolecules(args.sdf, args.num_of_molecules)
        set_of_molecules.info(args.atomic_types_pattern)

    elif args.mode == "calculation":
        Calculation(args.sdf, args.method, args.parameters, args.charges,
                    args.atomic_types_pattern, args.rewriting_with_force)

    elif args.mode == "parameterization":
        Parameterization(args.sdf, args.ref_charges, args.parameters,
                         args.method, args.optimization_method,
                         args.minimization_method, args.atomic_types_pattern,
                         args.num_of_molecules, args.num_of_samples,
Example #6
    def _run(self):
        # initialize RNG with seed from config
        nlopt.srand(self.seed)
        self._get_initial_data()

        for step in range(self.num_steps):
            self.cur_step = step
            self.cur_itr = 0

            # disable range sliders for each step once that step has begun
            self.optparamwin.toggle_enable_user_fields(self.cur_step,
                                                       enable=False)

            self.step_sims = self.optparamwin.get_sims_for_chunk(step)

            if self.step_sims == 0:
                txt = "Skipping optimization step %d (0 simulations)" % \
                    (step + 1)
                self._updatewaitsimwin(txt)
                continue

            self.step_ranges = self.optparamwin.get_chunk_ranges(step)
            if len(self.step_ranges) == 0:
                txt = "Skipping optimization step %d (0 parameters)" % \
                    (step + 1)
                self._updatewaitsimwin(txt)
                continue

            txt = "Starting optimization step %d/%d" % (step + 1,
                                                        self.num_steps)
            self._updatewaitsimwin(txt)
            print(txt)

            self.opt_start = self.optparamwin.get_chunk_start(self.cur_step)
            self.opt_end = self.optparamwin.get_chunk_end(self.cur_step)
            txt = 'Optimizing from [%3.3f-%3.3f] ms' % (self.opt_start,
                                                        self.opt_end)
            self._updatewaitsimwin(txt)
            print(txt)

            # weights calculated once per step
            self.opt_weights = \
                self.optparamwin.get_chunk_weights(self.cur_step)

            # run an opt step
            algorithm = nlopt.LN_COBYLA
            self.num_params = len(self.step_ranges)
            self.opt = nlopt.opt(algorithm, self.num_params)
            opt_results = self._run_opt_step(self.step_ranges, self.step_sims,
                                             algorithm)

            # update with optimized params for the next round
            for var_name, new_value in zip(self.step_ranges, opt_results):
                old_value = self.step_ranges[var_name]['initial']

                # only change the parameter value if it changed significantly
                if not isclose(old_value, new_value, abs_tol=1e-9):
                    self.step_ranges[var_name]['final'] = new_value
                else:
                    self.step_ranges[var_name]['final'] = \
                        self.step_ranges[var_name]['initial']

            # push into GUI and save to param file so that next simulation
            # starts from there.
            push_values = {}
            for param_name in self.step_ranges.keys():
                push_values[param_name] = self.step_ranges[param_name]['final']
            self.baseparamwin.update_gui_params(push_values)

            # update optimization dialog window
            self.optparamwin.push_chunk_ranges(push_values)

        # update opt_data with the final best
        update_event = Event()
        self.update_sim_data_from_opt_data.esig.emit(update_event,
                                                     self.paramfn)
        update_event.wait()
        self.refresh_signal.sig.emit()  # redraw with updated RMSE

        # check that optimization improved RMSE
        err_queue = Queue()
        self.get_err_from_sim_data.qsig.emit(err_queue, self.paramfn,
                                             self.params['tstop'])
        final_err = err_queue.get()
        print("Best RMSE: %f" % final_err)
        if final_err > self.initial_err:
            txt = "Warning: optimization failed to improve RMSE below" + \
                  " %.2f. Reverting to old parameters." % \
                        round(self.initial_err, 2)
            self._updatewaitsimwin(txt)
            print(txt)

            initial_params = self.optparamwin.get_initial_params()
            # populate param values into GUI and save params to file
            self.baseparamwin.update_gui_params(initial_params)

            # update optimization dialog window
            self.optparamwin.push_chunk_ranges(initial_params)

            # run a full length simulation
            self.sim_thread = SimThread(self.ncore,
                                        self.params,
                                        self.result_callback,
                                        mainwin=self.mainwin)
            self.sim_running = True
            self.sim_thread.run()
            self.sim_thread.wait()
            with self.killed_lock:
                if self.killed:
                    self.quit()
            self.sim_running = False
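
The _run_opt_step helper is not shown in this snippet; a plausible standalone sketch of the per-chunk COBYLA call, with hypothetical key and callback names, might look like:

import numpy as np
import nlopt

def run_opt_step(step_ranges, step_sims, eval_rmse):
    # hypothetical sketch: `step_ranges` maps parameter names to dicts with
    # 'initial', 'minval' and 'maxval' entries (assumed keys), and
    # `eval_rmse(x)` runs one simulation and returns its RMSE
    names = list(step_ranges)
    opt = nlopt.opt(nlopt.LN_COBYLA, len(names))
    opt.set_lower_bounds([step_ranges[n]['minval'] for n in names])
    opt.set_upper_bounds([step_ranges[n]['maxval'] for n in names])
    # nlopt objectives take (x, grad); COBYLA never touches grad
    opt.set_min_objective(lambda x, grad: eval_rmse(x))
    opt.set_maxeval(step_sims)  # one simulation per objective evaluation
    return opt.optimize(np.array([step_ranges[n]['initial'] for n in names]))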
Example #7
    def __init__(self, method, submethod=None, seed=None, *args, **kwargs):
        """
        Initialize the non-linear optimizer.

        Args:
            method (str): primary optimization method to be used.
            submethod (str): sub-optimization method to be used in the primary optimization method.
            seed (None, int): random seed
            *args:
            **kwargs:
        """
        super(NLopt, self).__init__(*args, **kwargs)

        # define useful variables
        self.results = {
            1: 'success',
            2: 'stop_val reached',
            3: 'ftol reached',
            4: 'xtol reached',
            5: 'maxeval reached',
            6: 'maxtime reached',
            -1: 'failure',
            -2: 'invalid args',
            -3: 'out of memory',
            -4: 'roundoff limited',
            -5: 'forced stop'
        }

        # define random seed (nlopt.srand expects an integer; skip when None)
        if seed is not None:
            nlopt.srand(seed)

        # define which solver to use
        def get_optimizer(method):
            """
            Get the optimizer associated with the given method.

            Args:
                method (str): optimizer string

            Returns:
                nlopt.opt: the nlopt optimizer associated with the given method
            """
            if method == 'ISRES':
                return nlopt.opt(nlopt.GN_ISRES, M)
            elif method == 'COBYLA':
                return nlopt.opt(nlopt.LN_COBYLA, M)
            elif method == 'SLSQP':
                return nlopt.opt(nlopt.LD_SLSQP, M)
            elif method == 'AUGLAG':
                return nlopt.opt(nlopt.AUGLAG, M)
            else:
                raise NotImplementedError(
                    "The given method '{}' has not been implemented".format(method))

        if method is None:
            method = 'SLSQP'
        self.optimizer = get_optimizer(method)

        # define subsolver to use (if we use the AUGLAG method)
        if method == 'AUGLAG':
            if submethod is None:
                submethod = 'SLSQP'
            elif submethod == 'AUGLAG':
                raise ValueError("Submethod should be different from AUGLAG")
            subopt = get_optimizer(submethod)
            subopt.set_lower_bounds(-1)
            subopt.set_upper_bounds(1)
            # subopt.set_ftol_rel(1e-2)
            # subopt.set_maxeval(100)
            self.optimizer.set_local_optimizer(subopt)
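
A usage sketch of the results mapping above (the bounds and objective are illustrative; last_optimize_result() is nlopt's standard accessor for the integer return code):

import numpy as np
import nlopt

results = {1: 'success', 2: 'stop_val reached', 3: 'ftol reached',
           4: 'xtol reached', 5: 'maxeval reached', 6: 'maxtime reached',
           -1: 'failure', -2: 'invalid args', -3: 'out of memory',
           -4: 'roundoff limited', -5: 'forced stop'}

opt = nlopt.opt(nlopt.LN_COBYLA, 2)
opt.set_lower_bounds([-1.0, -1.0])
opt.set_upper_bounds([1.0, 1.0])
opt.set_min_objective(lambda x, grad: float(np.sum(x ** 2)))
opt.set_maxeval(100)
x = opt.optimize(np.array([0.5, -0.5]))
# translate nlopt's integer return code into the label used by the class
print(x, results.get(opt.last_optimize_result(), 'unknown'))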