def test_create_constant_theta(self):
    constant = 3
    dimension = 4
    finite_elements = 5

    # test normal use
    res_dict = dict(
        zip(range(finite_elements),
            [constant * DM.ones(dimension, 1)] * finite_elements))
    theta = create_constant_theta(constant, dimension, finite_elements)
    for el in range(finite_elements):
        self.assertTrue(is_equal(theta[el], res_dict[el]))

    # test 0 dimension
    res_dict = dict(
        zip(range(finite_elements),
            [constant * DM.ones(0, 1)] * finite_elements))
    theta = create_constant_theta(constant, 0, finite_elements)
    for el in range(finite_elements):
        self.assertTrue(is_equal(theta[el], res_dict[el]))
def test_join_thetas(self):
    # with 3 dicts with the same number of finite elements
    dimension = 2
    finite_elements = 5
    theta_1 = create_constant_theta(1, dimension, finite_elements)
    theta_2 = create_constant_theta(2, dimension + 1, finite_elements)
    theta_3 = create_constant_theta(3, dimension + 2, finite_elements)

    res = join_thetas(theta_1, theta_2, theta_3)

    self.assertEqual(len(res), finite_elements)
    for el in range(finite_elements):
        self.assertEqual(res[el].shape, (3 * dimension + 3, 1))

    # with a dict and an empty dict
    dimension = 2
    finite_elements = 5
    theta_1 = create_constant_theta(1, dimension, finite_elements)
    theta_2 = {}

    res = join_thetas(theta_1, theta_2)

    self.assertEqual(len(res), finite_elements)
    for el in range(finite_elements):
        self.assertEqual(res[el].shape, (dimension, 1))

    # with a dict and None
    dimension = 2
    finite_elements = 5
    theta_1 = create_constant_theta(1, dimension, finite_elements)
    theta_2 = None

    res = join_thetas(theta_1, theta_2)

    self.assertEqual(len(res), finite_elements)
    for el in range(finite_elements):
        self.assertEqual(res[el].shape, (dimension, 1))

    # with dicts of different numbers of finite elements (should raise)
    dimension = 2
    finite_elements = 5
    theta_1 = create_constant_theta(1, dimension, finite_elements)
    theta_2 = create_constant_theta(1, dimension, finite_elements + 5)

    self.assertRaises(ValueError, join_thetas, theta_1, theta_2)
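# Hedged usage sketch (not part of the original suite; the helper name below is
# illustrative only): create_constant_theta builds a dict mapping each finite element
# to a constant DM column, and join_thetas concatenates the entries element-wise.
def _join_thetas_usage_sketch():
    theta_a = create_constant_theta(constant=1, dimension=2, finite_elements=3)
    theta_b = create_constant_theta(constant=2, dimension=1, finite_elements=3)
    joined = join_thetas(theta_a, theta_b)
    # every element is now a (2 + 1) x 1 column: [1, 1, 2]
    assert all(joined[el].shape == (3, 1) for el in range(3))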
def call_solver(self,
                initial_guess=None,
                p=None,
                theta=None,
                x_0=None,
                last_u=None,
                initial_guess_dict=None):
    """Solve the relaxed optimization problem repeatedly, updating the multiplier
    estimate (nu) and the penalty parameter (mu) between solves, until the violation
    error is below the tolerance or the maximum number of iterations is reached.
    """
    if self.opt_problem is None:
        self.create_optimization_problem()

    if x_0 is None:
        x_0 = self.problem.x_0
    if not vertcat(x_0).numel() == self.model.n_x:
        raise ValueError(
            'Size of given x_0 (or obtained from problem.x_0) is different from model.n_x, '
            'x_0.numel() = {}, model.n_x = {}'.format(
                vertcat(x_0).numel(), self.model.n_x))

    # parameters
    if p is None:
        if self.problem.n_p_opt == self.model.n_p:
            p = repmat(0, self.problem.n_p_opt)
        elif self.problem.model.n_p - 1 > 0:
            raise ValueError(
                "A parameter 'p' of size {} should be given.".format(
                    self.problem.model.n_p))
        else:
            p = []

    # theta
    if theta is None:
        if self.problem.n_theta_opt == self.model.n_theta:
            theta = create_constant_theta(0, self.problem.n_theta_opt,
                                          self.finite_elements)
        elif self.problem.model.n_theta - self.n_relax * self.degree > 0:
            raise ValueError(
                "A parameter 'theta' of size {} should be given".format(
                    self.problem.model.n_theta))

    # last control
    if not self.last_control_as_parameter and last_u is not None:
        warnings.warn(
            'solution_method.last_control_as_parameter is False, but last_u was passed. '
            'last_u will be ignored.')
    else:
        if last_u is not None:
            if isinstance(last_u, list):
                last_u = vertcat(*last_u)
        elif self.problem.last_u is not None:
            last_u = self.problem.last_u

    # initialize variables
    it = 0
    t_0 = time.time()

    while True:
        t1 = time.time()
        if self.nu_tilde is not None:
            self.nu = self.nu_tilde

        theta_k = self.join_nu_to_theta(theta, self.nu)
        p_k = vertcat(p, self.mu)

        # build the optimization parameter vector
        if theta_k is not None:
            theta_k_vector = vertcat(*theta_k.values())
            par = vertcat(p_k, theta_k_vector)
        else:
            par = p_k

        # initial condition
        if self.initial_condition_as_parameter:
            par = vertcat(par, x_0)

        if last_u is not None:
            par = vertcat(par, last_u)
        elif self.problem.last_u is not None:
            par = vertcat(par, self.problem.last_u)

        if initial_guess_dict is None:
            args = dict(initial_guess=initial_guess, p=par)
        else:
            args = dict(initial_guess=initial_guess_dict['x'],
                        p=par,
                        lam_x=initial_guess_dict['lam_x'],
                        lam_g=initial_guess_dict['lam_g'])

        # solve the optimization problem
        raw_solution_dict = self.opt_problem.solve(**args)
        initial_guess_dict = raw_solution_dict
        it += 1

        # update parameters
        if not self.no_update_after_solving:
            error = self._compute_new_nu_and_error(
                p=p_k, theta=theta_k, raw_solution_dict=raw_solution_dict)
            self._update_mu()
            self.last_violation_error = error
        else:
            error = None

        if self.verbose >= 2:
            if it == 1:
                print('{} | {} | {}'.format('Iter.', ' Viol. Error', 'Sol. Time'))
            if error is not None:
                print('{:>5} | {:e} | {:>9.3f}'.format(
                    it, float(error), time.time() - t1))
            else:
                print('{:>5} | {} | {:>9.3f}'.format(
                    it, 'Not computed', time.time() - t1))

        # Exit condition: error < tol
        if error is not None and error < self.tol:
            if self.verbose:
                print('=== Exiting: {} | Viol. Error: {} | Total time: {} ==='
                      .format('Tolerance met', error, time.time() - t_0))
            break

        # Exit condition: max_iter
        if it == self.max_iter:
            if self.verbose:
                print('=== Exiting: {} | Viol. Error: {} | Total time: {} ==='
                      .format('Max iteration reached', error, time.time() - t_0))
            break

    return raw_solution_dict, p_k, theta_k, x_0, last_u
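# Hedged sketch of the outer loop implemented in call_solver above: repeatedly solve
# the relaxed NLP, update the multiplier estimate (nu) and penalty (mu), and stop once
# the violation error drops below the tolerance or max_iter is reached. The callables
# and names below are illustrative placeholders, not library API.
def _augmented_lagrangian_outer_loop_sketch(solve_nlp, update_multipliers_and_error,
                                            tol=1e-6, max_iter=20):
    solution, error, it = None, None, 0
    for it in range(1, max_iter + 1):
        solution = solve_nlp()                            # inner NLP solve
        error = update_multipliers_and_error(solution)    # nu/mu update, returns violation
        if error is not None and error < tol:
            break
    return solution, error, it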
def __init__(self, problem, ocp_solver_class, solver_options=None, **kwargs):
    """ Augmented Lagrange Method (Aguiar 2016)

    :param yaocptool.modelling.OptimalControlProblem problem: Optimal Control Problem
    :param type ocp_solver_class: Class of Solution Method (Direct/Indirect Method)
    :param solver_options: Options for the Solution Method class given
    :param relax_algebraic_index: Indices of the algebraic equations that will be relaxed;
        if not given, all the algebraic equations will be relaxed
    :param relax_algebraic_var_index: Indices of the algebraic variables that will be relaxed;
        if not given, assumed to be the same as 'relax_algebraic_index'
    :param relax_state_bounds: Relax the state bounds and put them in the objective,
        via an algebraic variable
    :param kwargs:
    """
    if solver_options is None:
        solver_options = {}

    self.degree = 3
    self.degree_control = 3
    self.n_relax = 0
    self.mu_sym = None
    self.nu_sym = DM([])
    self.nu_par = DM([])
    self.nu_pol = DM([])
    self.max_iter = 20
    self.mu_0 = 1.
    self.beta = 4.
    self.mu_max = self.mu_0 * self.beta ** 10
    self.nu = None
    self.nu_tilde = None
    self.last_violation_error = -1

    self.alg_violation = None
    self.eq_violation = None

    self.new_nu_func = None
    self.tol = 1e-6
    self.last_solution = ()

    self.solver = None
    self.ocp_solver = None
    self.solver_initialized = False

    self.relax_algebraic_index = None
    self.relax_algebraic_var_index = None
    self.relax_time_equality_index = None
    self.relax_time_equality_var_index = None
    self.relax_state_bounds = False

    self.relaxed_alg = []
    self.relaxed_eq = []

    self.no_update_after_solving = False
    self.verbose = 1

    self._debug_skip_parametrize = False
    self._debug_skip_initialize = False
    self._debug_skip_compute_nu_and_error = False
    self._debug_skip_update_nu = False
    self._debug_skip_update_mu = False

    super(AugmentedLagrangian, self).__init__(problem, **kwargs)

    self.mu = self.mu_0

    # RELAXATION
    self.mu_sym = self.problem.create_parameter('mu')

    if self.relax_algebraic_index is None:
        self.relax_algebraic_index = range(self.model.n_y)
    if self.relax_algebraic_var_index is None:
        self.relax_algebraic_var_index = self.relax_algebraic_index
    if self.relax_time_equality_index is None:
        self.relax_time_equality_index = []
    if self.relax_time_equality_var_index is None:
        self.relax_time_equality_var_index = []

    if self.model.alg[self.relax_algebraic_index].numel() > 0:
        self._relax_algebraic_equations()

    if self.model.alg[self.relax_time_equality_index].numel() > 0:
        self._relax_time_equalities()

    if self.relax_state_bounds:
        self._relax_states_constraints()

    if not self._debug_skip_initialize:
        if not self._debug_skip_parametrize:
            self._parametrize_nu()

        if self.nu is None:
            self.nu = self.create_nu_initial_guess()

        if self.alg_violation is None:
            self.alg_violation = create_constant_theta(
                constant=0,
                dimension=len(self.relax_algebraic_index) * self.degree,
                finite_elements=self.finite_elements)
        if self.eq_violation is None:
            self.eq_violation = create_constant_theta(
                constant=0,
                dimension=len(self.relax_time_equality_index) * self.degree,
                finite_elements=self.finite_elements)

    # make sure that the ocp_solver and the augmented_lagrangian have the same options
    for attr in [
            'degree', 'finite_elements', 'degree_control', 'integrator_type'
    ]:
        if (attr in solver_options and attr in kwargs
                and solver_options[attr] != kwargs[attr]):
            exc_mess = ("Trying to pass attribute '{}' for '{}' and '{}' "
                        "that are not equal: {} != {}")
            raise Exception(
                exc_mess.format(attr, self.__class__.__name__,
                                ocp_solver_class.__name__, kwargs[attr],
                                solver_options[attr]))
        elif attr in solver_options:
            setattr(self, attr, solver_options[attr])
        else:
            solver_options[attr] = getattr(self, attr)

    solver_options['integrator_type'] = self.integrator_type

    # Initialize OCP solver
    self.ocp_solver = ocp_solver_class(self.problem, **solver_options)
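# Hedged usage sketch for the constructor above (the object names are assumptions, not
# confirmed yaocptool API): the problem instance and the inner solver class come from the
# surrounding project, and the shared discretization options must agree between the two
# layers, otherwise __init__ raises.
#
#     solution_method = AugmentedLagrangian(
#         my_ocp,                                    # an OptimalControlProblem instance
#         InnerOCPSolverClass,                       # hypothetical direct/indirect method class
#         solver_options={'finite_elements': 20, 'degree': 3},
#         finite_elements=20, degree=3,
#         max_iter=30, mu_0=1.0, beta=4.0)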
def create_nu_initial_guess(self):
    nu = create_constant_theta(constant=0,
                               dimension=self.n_relax * self.degree,
                               finite_elements=self.finite_elements)
    return nu
def call_solver(self,
                initial_guess=None,
                p=None,
                theta=None,
                x_0=None,
                last_u=None,
                initial_guess_dict=None):
    """Assemble the NLP parameter vector (p, theta, initial condition, last control),
    build an initial guess if none was given, and solve the optimization problem once.
    """
    if self.opt_problem is None:
        self.create_optimization_problem()

    # initial conditions
    if x_0 is None:
        x_0 = self.problem.x_0
    if isinstance(x_0, list):
        x_0 = vertcat(x_0)
    if not vertcat(x_0).numel() == self.model.n_x:
        raise Exception(
            'Size of given x_0 (or obtained from problem.x_0) is different from model.n_x, '
            'x_0.numel() = {}, model.n_x = {}'.format(
                vertcat(x_0).numel(), self.model.n_x))

    # parameters
    if p is None:
        if self.problem.n_p_opt == self.model.n_p:
            p = DM.zeros(self.problem.n_p_opt)
        elif self.problem.model.n_p > 0:
            raise Exception(
                "A parameter 'p' of size {} should be given".format(
                    self.problem.model.n_p))
    if isinstance(p, list):
        p = DM(p)

    # theta
    if theta is None:
        if self.problem.n_theta_opt == self.model.n_theta:
            theta = create_constant_theta(0, self.problem.n_theta_opt,
                                          self.finite_elements)
        elif self.problem.model.n_theta > 0:
            raise Exception(
                "A parameter 'theta' of size {} should be given".format(
                    self.problem.model.n_theta))

    # prepare the NLP parameter vector
    theta_vector, par_x_0, par_last_u = [], [], []
    if theta is not None:
        theta_vector = vertcat(
            *[theta[i] for i in range(self.finite_elements)])

    if self.initial_condition_as_parameter:
        par_x_0 = x_0

    # last control
    if not self.last_control_as_parameter and last_u is not None:
        warnings.warn(
            'solution_method.last_control_as_parameter is False, but last_u was passed. '
            'last_u will be ignored.')
    else:
        if last_u is not None:
            if isinstance(last_u, list):
                last_u = vertcat(*last_u)
        elif self.problem.last_u is not None:
            last_u = self.problem.last_u
        elif self.last_control_as_parameter and last_u is None:
            raise Exception(
                'last_control_as_parameter is True, but no "last_u" was passed and '
                'the "ocp.last_u" is None.')

    if self.last_control_as_parameter:
        par_last_u = last_u

    par = vertcat(p, theta_vector, par_x_0, par_last_u)

    if initial_guess_dict is None:
        if initial_guess is None:
            if self.initial_guess_heuristic == 'simulation':
                initial_guess = self.discretizer.create_initial_guess_with_simulation(
                    p=p, theta=theta)
            elif self.initial_guess_heuristic == 'problem_info':
                initial_guess = self.discretizer.create_initial_guess(p, theta)
            else:
                raise ValueError(
                    'initial_guess_heuristic not recognized, available options: '
                    '"simulation" and "problem_info". Given: {}'.format(
                        self.initial_guess_heuristic))
        args = dict(initial_guess=initial_guess, p=par)
    else:
        args = dict(initial_guess=initial_guess_dict['x'],
                    p=par,
                    lam_x=initial_guess_dict['lam_x'],
                    lam_g=initial_guess_dict['lam_g'])

    sol = self.opt_problem.solve(**args)

    return sol, p, theta, x_0, last_u
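# Hedged usage sketch for call_solver above ('solution_method' is a placeholder for an
# instance of this class): any argument may be omitted and is then filled from the
# problem/model defaults, while a wrongly sized x_0 or a missing required p/theta raises.
#
#     raw_sol, p, theta, x_0, last_u = solution_method.call_solver(
#         x_0=[0.0, 1.0],      # overrides problem.x_0; must have model.n_x entries
#         p=[2.5])             # required when the model has parameters not being optimized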