def include_algebraic(self, var, alg=None, y_min=None, y_max=None, y_guess=None):
    """Include an algebraic variable (and optionally its algebraic equation) in the problem.

    :param var: algebraic variable to include (symbolic vector, or list of them)
    :param alg: algebraic equation associated with ``var``, forwarded to the model
    :param y_min: lower bound for ``var`` (default: -inf for every element)
    :param y_max: upper bound for ``var`` (default: +inf for every element)
    :param y_guess: initial guess for ``var``
    :raises ValueError: if the bounds do not match the size of ``var``
    """
    # Accept lists the same way include_control does.
    if isinstance(var, list):
        var = vertcat(*var)

    # Default bounds: unbounded.
    if y_min is None:
        y_min = -DM.inf(var.numel())
    if y_max is None:
        y_max = DM.inf(var.numel())
    if isinstance(y_min, list):
        y_min = vertcat(*y_min)
    if isinstance(y_max, list):
        y_max = vertcat(*y_max)
    if isinstance(y_guess, list):
        y_guess = vertcat(*y_guess)

    if not var.numel() == y_min.numel():
        raise ValueError(
            "Given 'var' and 'y_min' does not have the same size, {}!={}".
            format(var.numel(), y_min.numel()))
    if not var.numel() == y_max.numel():
        raise ValueError(
            "Given 'var' and 'y_max' does not have the same size, {}!={}".
            format(var.numel(), y_max.numel()))

    if self.y_guess is not None:
        if y_guess is None:
            # No guess for the new variable: pad with zeros to keep sizes consistent.
            self.y_guess = vertcat(self.y_guess, DM.zeros(var.numel()))
        else:
            self.y_guess = vertcat(self.y_guess, y_guess)
    elif y_guess is not None:
        # BUG FIX: the original silently discarded a y_guess passed while
        # self.y_guess was None. NOTE(review): if algebraics were previously
        # included without guesses this may be shorter than the full y vector —
        # confirm against how y_guess is consumed downstream.
        self.y_guess = vertcat(y_guess)

    self.model.include_algebraic(var, alg)
    self.y_min = vertcat(self.y_min, y_min)
    self.y_max = vertcat(self.y_max, y_max)
def set_theta_as_optimization_theta(self, new_theta_opt, new_theta_opt_min=None, new_theta_opt_max=None):
    """Mark an existing theta variable as an optimization theta, with bounds.

    :param new_theta_opt: theta variable(s) to be optimized
    :param new_theta_opt_min: lower bound (default: -inf for every element)
    :param new_theta_opt_max: upper bound (default: +inf for every element)
    :return: ``new_theta_opt`` as a single (vertcat) vector
    :raises ValueError: if a bound's size differs from ``new_theta_opt``'s
    """
    # Default bounds: unbounded.
    if new_theta_opt_min is None:
        new_theta_opt_min = -DM.inf(new_theta_opt.numel())
    if new_theta_opt_max is None:
        new_theta_opt_max = DM.inf(new_theta_opt.numel())

    # Normalize to casadi column vectors.
    new_theta_opt = vertcat(new_theta_opt)
    new_theta_opt_min = vertcat(new_theta_opt_min)
    new_theta_opt_max = vertcat(new_theta_opt_max)

    if not new_theta_opt.numel() == new_theta_opt_max.numel():
        raise ValueError(
            'Size of "new_theta_opt" and "new_theta_opt_max" differ. new_theta_opt.numel()={} '
            'and new_theta_opt_max.numel()={}'.format(
                new_theta_opt.numel(), new_theta_opt_max.numel()))
    if not new_theta_opt.numel() == new_theta_opt_min.numel():
        # BUG FIX: this message previously named "new_theta_opt_max" although
        # the check (and the reported sizes) are about new_theta_opt_min.
        raise ValueError(
            'Size of "new_theta_opt" and "new_theta_opt_min" differ. new_theta_opt.numel()={} '
            'and new_theta_opt_min.numel()={}'.format(
                new_theta_opt.numel(), new_theta_opt_min.numel()))

    self.theta_opt = vertcat(self.theta_opt, new_theta_opt)
    self.theta_opt_min = vertcat(self.theta_opt_min, new_theta_opt_min)
    self.theta_opt_max = vertcat(self.theta_opt_max, new_theta_opt_max)
    return new_theta_opt
def include_optimization_parameter(self, var, p_opt_min=None, p_opt_max=None):
    """Include a new parameter in the model and register it as an optimization parameter.

    :param var: parameter variable to include
    :param p_opt_min: lower bound (default: -inf for every element)
    :param p_opt_max: upper bound (default: +inf for every element)
    """
    n_el = var.numel()
    # Missing bounds default to unbounded.
    if p_opt_min is None:
        p_opt_min = -DM.inf(n_el)
    if p_opt_max is None:
        p_opt_max = DM.inf(n_el)
    self.model.include_parameter(var)
    self.set_parameter_as_optimization_parameter(var, p_opt_min, p_opt_max)
def include_optimization_theta(self, var, theta_opt_min=None, theta_opt_max=None):
    """Include a new theta variable in the model and register it as an optimization theta.

    :param var: theta variable to include
    :param theta_opt_min: lower bound (default: -inf for every element)
    :param theta_opt_max: upper bound (default: +inf for every element)
    """
    n_el = var.numel()
    # Missing bounds default to unbounded.
    if theta_opt_min is None:
        theta_opt_min = -DM.inf(n_el)
    if theta_opt_max is None:
        theta_opt_max = DM.inf(n_el)
    self.model.include_theta(var)
    self.set_theta_as_optimization_theta(var,
                                         new_theta_opt_min=theta_opt_min,
                                         new_theta_opt_max=theta_opt_max)
def include_state(self, var, ode=None, x_0=None, x_min=None, x_max=None, h_initial=None, x_0_sym=None, suppress=False):
    """Include a state variable, its ODE, and its initial condition in the problem.

    :param var: state variable to include
    :param ode: ODE right-hand side associated with ``var`` (forwarded to the model)
    :param x_0: numeric initial condition; when given, the initial constraint
        ``x_0_sym - var`` is appended (overriding any ``h_initial`` passed in)
    :param x_min: lower bound (default: -inf for every element)
    :param x_max: upper bound (default: +inf for every element)
    :param h_initial: implicit initial-condition expression, used when ``x_0`` is not given
    :param x_0_sym: symbolic variable for the initial condition (created by the
        model when None)
    :param suppress: if True, allow including a state with no initial condition
    :raises ValueError: if ``x_min``/``x_max``/``x_0`` sizes differ from ``var``'s
    """
    if x_0 is not None:
        x_0 = vertcat(x_0)
    # Default bounds: unbounded.
    if x_min is None:
        x_min = -DM.inf(var.numel())
    if x_max is None:
        x_max = DM.inf(var.numel())
    if x_0 is None and h_initial is None and not suppress:
        raise Exception('No initial condition given')

    # Normalize to casadi column vectors.
    var = vertcat(var)
    x_min = vertcat(x_min)
    x_max = vertcat(x_max)

    if not var.numel() == x_max.numel():
        raise ValueError('Size of "x" and "x_max" differ. x.numel()={} '
                         'and x_max.numel()={}'.format(
                             var.numel(), x_max.numel()))
    if not var.numel() == x_min.numel():
        raise ValueError('Size of "x" and "x_min" differ. x.numel()={} '
                         'and x_min.numel()={}'.format(
                             var.numel(), x_min.numel()))
    # BUG FIX: the original re-tested x_min.numel() here (dead check after the
    # one above) and would have crashed on x_0.numel() when x_0 is None.
    if x_0 is not None and not var.numel() == x_0.numel():
        raise ValueError('Size of "x" and "x_0" differ. x.numel()={} '
                         'and x_0.numel()={}'.format(
                             var.numel(), x_0.numel()))

    x_0_sym = self.model.include_state(var, ode, x_0_sym)

    if x_0 is not None:
        self.x_0 = vertcat(self.x_0, x_0)
        # Explicit numeric initial condition wins over a passed h_initial.
        h_initial = x_0_sym - var
    else:
        # No numeric value known: store zeros as a placeholder guess for x_0.
        x_0 = DM.zeros(var.shape)
        self.x_0 = vertcat(self.x_0, x_0)

    if h_initial is not None:
        self.h_initial = vertcat(self.h_initial, h_initial)

    self.x_min = vertcat(self.x_min, x_min)
    self.x_max = vertcat(self.x_max, x_max)
def include_control(self, var, u_min=None, u_max=None, delta_u_min=None, delta_u_max=None, u_guess=None):
    """Include a control variable with bounds, rate-of-change bounds, and an optional guess.

    :param var: control variable to include (symbolic vector, or list of them)
    :param u_min: lower bound; a scalar is broadcast to the size of ``var``
        (default: -inf for every element)
    :param u_max: upper bound; scalar broadcast as above (default: +inf)
    :param delta_u_min: lower bound on the control change between samples
        (default: -inf)
    :param delta_u_max: upper bound on the control change between samples
        (default: +inf)
    :param u_guess: initial guess for ``var``; required if the problem already
        has a control guess
    :raises ValueError: if any bound/guess size differs from ``var``'s, or a
        guess is missing while ``self.u_guess`` is already set
    """
    if isinstance(var, list):
        var = vertcat(*var)

    # if not given
    if u_min is None:
        u_min = -DM.inf(var.numel())
    if u_max is None:
        u_max = DM.inf(var.numel())
    if delta_u_min is None:
        delta_u_min = -DM.inf(var.numel())
    if delta_u_max is None:
        delta_u_max = DM.inf(var.numel())
    if u_guess is None and self.u_guess is not None:
        # Appending a guessless control would desynchronize u_guess from u.
        raise ValueError(
            'The OptimalControlProblem already has a control guess ("ocp.u_guess"), but no guess was '
            'passed for the new variable (parameter "u_guess" is None). Either remove all guesses '
            '("ocp.u_guess = None") before including the new variable, or pass a guess for the control'
            ' using the parameter "u_guess"')

    # if given as a list
    if isinstance(u_min, list):
        u_min = vertcat(*u_min)
    if isinstance(u_max, list):
        u_max = vertcat(*u_max)
    if isinstance(delta_u_min, list):
        delta_u_min = vertcat(*delta_u_min)
    if isinstance(delta_u_max, list):
        delta_u_max = vertcat(*delta_u_max)
    if u_guess is not None and isinstance(u_guess, list):
        u_guess = vertcat(*u_guess)

    # if not a casadi type
    u_min = vertcat(u_min)
    u_max = vertcat(u_max)
    delta_u_min = vertcat(delta_u_min)
    delta_u_max = vertcat(delta_u_max)
    if u_guess is not None:
        u_guess = vertcat(u_guess)

    # if passed a scalar but meant a vector of that scalar
    if u_min.numel() == 1 and var.numel() > 1:
        u_min = repmat(u_min, var.numel())
    if u_max.numel() == 1 and var.numel() > 1:
        u_max = repmat(u_max, var.numel())
    if delta_u_min.numel() == 1 and var.numel() > 1:
        delta_u_min = repmat(delta_u_min, var.numel())
    if delta_u_max.numel() == 1 and var.numel() > 1:
        delta_u_max = repmat(delta_u_max, var.numel())
    # CONSISTENCY FIX: broadcast a scalar guess like the bounds above
    # (previously a scalar u_guess with a vector var raised a size error).
    if u_guess is not None and u_guess.numel() == 1 and var.numel() > 1:
        u_guess = repmat(u_guess, var.numel())

    # if passed but has a wrong size
    if not var.numel() == u_min.numel():
        raise ValueError(
            "Given 'var' and 'u_min' does not have the same size, {}!={}".
            format(var.numel(), u_min.numel()))
    if not var.numel() == u_max.numel():
        raise ValueError(
            "Given 'var' and 'u_max' does not have the same size, {}!={}".
            format(var.numel(), u_max.numel()))
    if not var.numel() == delta_u_min.numel():
        raise ValueError(
            "Given 'var' and 'delta_u_min' does not have the same size, {}!={}"
            .format(var.numel(), delta_u_min.numel()))
    if not var.numel() == delta_u_max.numel():
        raise ValueError(
            "Given 'var' and 'delta_u_max' does not have the same size, {}!={}"
            .format(var.numel(), delta_u_max.numel()))
    if u_guess is not None and var.numel() != u_guess.numel():
        raise ValueError(
            "Given 'var' and 'u_guess' does not have the same size, {}!={}"
            .format(var.numel(), u_guess.numel()))

    self.u_min = vertcat(self.u_min, u_min)
    self.u_max = vertcat(self.u_max, u_max)
    self.delta_u_min = vertcat(self.delta_u_min, delta_u_min)
    self.delta_u_max = vertcat(self.delta_u_max, delta_u_max)

    if u_guess is not None:
        if self.model.n_u == 0:
            self.u_guess = u_guess
        else:
            self.u_guess = vertcat(self.u_guess, u_guess)

    self.model.include_control(var)
def include_inequality(self, expr, lb=None, ub=None):
    """ Include inequality to the problem with the following form
    lb <= expr <= ub

    :param expr: expression for the inequality, this is the only term that should contain symbolic variables
    :param lb: Lower bound of the inequality. If the 'expr' size is greater than one but a scalar is passed as
        lower bound, a vector of lb with size of 'expr' will be used as a lower bound. (default = [-inf]*size)
    :param ub: Upper bound of the inequality. If the 'expr' size is greater than one but a scalar is passed as
        upper bound, a vector of ub with size of 'expr' will be used as a upper bound. (default = [inf]*size)
    :raises ValueError: on size mismatch, bounds containing problem variables,
        or a constant lower bound exceeding the upper bound
    """
    # check expr
    if isinstance(expr, list):
        # BUG FIX: was vertcat(expr) on the list object; unpack like the
        # other include_* methods do.
        expr = vertcat(*expr)
    if expr.size2() > 1:
        raise Exception(
            "Given expression is not a vector, number of columns is {}".
            format(expr.size2()))

    # check lower bound
    if lb is None:
        lb = -DM.inf(expr.size1())
    else:
        lb = vertcat(lb)
        # Broadcast a scalar bound to the size of expr.
        if lb.numel() == 1 and expr.numel() > 1:
            lb = repmat(lb, expr.numel())

    # check lb correct size
    if not expr.shape == lb.shape:
        raise ValueError(
            "Expression and lower bound does not have the same size: "
            "expr.shape={}, lb.shape=={}".format(expr.shape, lb.shape))

    # check upper bound
    if ub is None:
        ub = DM.inf(expr.size1())
    else:
        ub = vertcat(ub)
        # Broadcast a scalar bound to the size of expr.
        if ub.numel() == 1 and expr.numel() > 1:
            ub = repmat(ub, expr.numel())

    # check ub correct size
    if not expr.shape == ub.shape:
        # BUG FIX: message previously said "lower bound"/lb.shape while
        # reporting the upper bound's shape.
        raise ValueError(
            "Expression and upper bound does not have the same size: "
            "expr.shape={}, ub.shape=={}".format(expr.shape, ub.shape))

    # check for if lb or ub have 'x's and 'p's
    if depends_on(vertcat(lb, ub), vertcat(self.x, self.p)):
        raise ValueError(
            "The lower and upper bound cannot contain variables from the optimization problem."
            "LB: {}, UB: {}".format(lb, ub))

    # Feasibility check: only meaningful for purely numeric bounds.
    # (is_constant() is loop-invariant; hoisted out of the element loop.)
    if lb.is_constant() and ub.is_constant():
        for i in range(expr.numel()):
            if lb[i] > ub[i]:
                # BUG FIX: format string had 3 placeholders for 4 arguments,
                # dropping ub[i] from the message.
                raise ValueError(
                    'Lower bound is greater than upper bound for index {}. '
                    'The inequality {} <= {} <= {} is infeasible'.format(
                        i, lb[i], expr[i], ub[i]))

    self.g = vertcat(self.g, expr)
    self.g_lb = vertcat(self.g_lb, lb)
    self.g_ub = vertcat(self.g_ub, ub)