def proc_model(self):
    f = c.Function('f', [self.x, self.u], [self.x + self.u*self.dt])
    A = c.Function('A', [self.x, self.u], [c.jacobian(f(self.x, self.u), self.x)])  # linearization
    B = c.Function('B', [self.x, self.u], [c.jacobian(f(self.x, self.u), self.u)])
    return f, A, B
def set_discrete_time_system(self):
    """
    Set discrete-time system matrices from linear continuous dynamics.
    """
    # Check for integrator definition
    if self.Integrator_lin is None:
        print("Integrator_lin not defined. Set integrators first.")
        exit()

    # Set CasADi variables
    x = ca.MX.sym('x', 4)
    u = ca.MX.sym('u', 1)
    w = ca.MX.sym('w', 1)

    # Jacobian of exact discretization
    self.Ad = ca.Function('jac_x_Ad', [x, u, w], [ca.jacobian(
        self.Integrator_lin(x0=x, p=ca.vertcat(u, w))['xf'], x)])
    self.Bd = ca.Function('jac_u_Bd', [x, u, w], [ca.jacobian(
        self.Integrator_lin(x0=x, p=ca.vertcat(u, w))['xf'], u)])
    self.Bw = ca.Function('jac_w_Bw', [x, u, w], [ca.jacobian(
        self.Integrator_lin(x0=x, p=ca.vertcat(u, w))['xf'], w)])

    # C matrix does not depend on the state
    # TODO: put this in a better place later!
    Cd_eq = ca.DM.zeros(1, 4)
    Cd_eq[0, 0] = 1
    Cd_eq[0, 1] = 1
    Cd_eq[0, 2] = 1
    Cd_eq[0, 3] = 1
    self.Cd_eq = Cd_eq
def writeObjective(ocp, out0, exportName):
    dae = ocp.dae

    # first make out not a function of xDot or z
    inputs0 = [dae.xDotVec(), dae.xVec(), dae.zVec(), dae.uVec(), dae.pVec()]
    outputFun0 = C.SXFunction(inputs0, [out0])

    (xDotDict, zDict) = dae.solveForXDotAndZ()
    xDot = C.veccat([xDotDict[name] for name in dae.xNames()])
    z = C.veccat([zDict[name] for name in dae.zNames()])

    # plug in xdot, z solution to outputs fun
    outputFun0.init()
    [out] = outputFun0.eval([xDot, dae.xVec(), z, dae.uVec(), dae.pVec()])

    # make sure each element in the output is only a function of x or u, not both
    testSeparation(dae, out, exportName)

    # make new SXFunction that is only fcn of [x, u, p]
    if exportName == 'lsqExtern':
        inputs = C.veccat([dae.xVec(), dae.uVec(), dae.pVec()])
        outs = C.veccat([out,
                         C.jacobian(out, dae.xVec()).T,
                         C.jacobian(out, dae.uVec()).T])
        outputFun = C.SXFunction([inputs], [C.densify(outs)])
        outputFun.init()
        assert len(outputFun.getFree()) == 0, 'the "impossible" happened >_<'
    elif exportName == 'lsqEndTermExtern':
        inputs = C.veccat([dae.xVec(), dae.pVec()])
        outs = C.veccat([out, C.jacobian(out, dae.xVec()).T])
        outputFun = C.SXFunction([inputs], [C.densify(outs)])
        outputFun.init()
        assert len(outputFun.getFree()) == 0, \
            'lsqEndTermExtern cannot be a function of controls u, saw: ' + str(outputFun.getFree())
    else:
        raise Exception('unrecognized name "' + exportName + '"')

    return codegen.writeCCode(outputFun, exportName)
def set_discrete_time_system(self):
    """
    Set discrete-time system matrices from linear continuous dynamics.
    """
    # Check for integrator definition
    if self.Integrator_lin is None:
        print("Integrator_lin not defined. Set integrators first.")
        exit()

    # Set CasADi variables
    x = ca.MX.sym('x', 4)
    u = ca.MX.sym('u', 1)
    w = ca.MX.sym('w', 1)

    # Jacobian of exact discretization
    self.Ad = ca.Function('jac_x_Ad', [x, u, w], [
        ca.jacobian(self.Integrator_lin(x0=x, p=ca.vertcat(u, w))['xf'], x)])
    self.Bd = ca.Function('jac_u_Bd', [x, u, w], [
        ca.jacobian(self.Integrator_lin(x0=x, p=ca.vertcat(u, w))['xf'], u)])
    self.Bw = ca.Function('jac_w_Bw', [x, u, w], [
        ca.jacobian(self.Integrator_lin(x0=x, p=ca.vertcat(u, w))['xf'], w)])
def __symbolic_setup(self):
    # build composite symbolic expression
    self._calc_lengths()
    expr = cas.MX.nan(2, 1)
    for i in range(len(self._segments) - 1, -1, -1):
        s_max = self._fractions[i]
        s_min = self._fractions[i - 1] if i > 0 else 0
        s_loc = (self._s - s_min) / self._lengths[i]
        expr = cas.if_else(self._s <= s_max,
                           self._segments[i].point(s_loc),
                           expr)
    self._expr = expr
    self._point = cas.Function('point', [self._s], [self._expr])

    dp_ds = cas.jacobian(self._expr, self._s)

    # unit tangent vector
    self._tangent_expr = cas.if_else(
        cas.norm_2(dp_ds) > 0,
        dp_ds / cas.norm_2(dp_ds),
        cas.DM([0, 0]))
    self._tangent = cas.Function('tangent', [self._s], [self._tangent_expr])

    dt_ds = cas.jacobian(self._tangent_expr, self._s)

    # unit normal vector
    self._normal_expr = cas.if_else(
        cas.norm_2(dt_ds) > 0,
        dt_ds / cas.norm_2(dt_ds),
        cas.vertcat(self._tangent_expr[1], -self._tangent_expr[0]))
    self._normal = cas.Function('normal', [self._s], [self._normal_expr])

    # curvature value
    self._curvature_expr = cas.if_else(
        cas.norm_2(dp_ds) > 0,
        cas.norm_2(dt_ds) / cas.norm_2(dp_ds),
        cas.DM(0))
    self._curvature = cas.Function('curvature', [self._s], [self._curvature_expr])
def linearize(x0, u0, p0):
    """
    A function to perform linearization of the f16 model

    @param x0: state
    @param u0: input
    @param p0: parameters
    """
    x0 = x0.to_casadi()
    u0 = u0.to_casadi()

    x_sym = ca.MX.sym('x', x0.shape[0])
    u_sym = ca.MX.sym('u', u0.shape[0])
    x = State.from_casadi(x_sym)
    u = Control.from_casadi(u_sym)
    dx = dynamics(x, u, p0)

    A = ca.jacobian(dx.to_casadi(), x_sym)
    B = ca.jacobian(dx.to_casadi(), u_sym)
    f_A = ca.Function('A', [x_sym, u_sym], [A])
    f_B = ca.Function('B', [x_sym, u_sym], [B])
    A = f_A(x0, u0)
    B = f_B(x0, u0)

    n = A.shape[0]
    p = B.shape[1]
    C = np.eye(n)
    D = np.zeros((n, p))
    return StateSpace(A=A, B=B, C=C, D=D,
                      x=[f.name for f in x.fields()],
                      u=[f.name for f in u.fields()],
                      y=[f.name for f in x.fields()])
def PMSM_dynamics_disc(w, Ts, par):
    # parameters
    p = par.p
    # import pdb; pdb.set_trace()
    theta = par.theta
    Rs = par.Rs
    Ld = par.Ld
    Lq = par.Lq
    psi_pm = par.psi_pm
    m_load = par.m_load
    u_max = par.u_max
    J_par = par.J

    i_d = ca.MX.sym('i_d', 1, 1)
    i_q = ca.MX.sym('i_q', 1, 1)
    u_d = ca.MX.sym('u_d', 1, 1)
    u_q = ca.MX.sym('u_q', 1, 1)
    x = ca.vertcat(i_d, i_q)
    u = ca.vertcat(u_d, u_q)

    xdot = PMSM_dynamics_cont(i_d, i_q, u_d, u_q, par, w)

    # fixed step Runge-Kutta 4 integrator
    M = 100  # RK4 steps per interval
    DT = Ts / M
    f = ca.Function('f', [x, u], [xdot])
    X0 = ca.MX.sym('X0', 2, 1)
    U = ca.MX.sym('U', 2, 1)
    X = X0
    for j in range(M):
        k1 = f(X, U)
        k2 = f(X + DT / 2.0 * k1, U)
        k3 = f(X + DT / 2.0 * k2, U)
        k4 = f(X + DT * k3, U)
        X = X + DT / 6.0 * (k1 + 2 * k2 + 2 * k3 + k4)

    # x_{k+1} = Ax_k + Bu_k
    A_exp = ca.jacobian(X, X0)
    B_exp = ca.jacobian(X, U)
    A_fun = ca.Function('A_fun', [X0, U], [A_exp])
    B_fun = ca.Function('B_fun', [X0, U], [B_exp])
    x_plus = ca.Function('x_plus', [X0, U], [X])

    # A_exp_c = ca.jacobian(xdot, x)
    # B_exp_c = ca.jacobian(xdot, u)
    # A_fun_c = ca.Function('A_fun', [x, u], [A_exp_c])
    # B_fun_c = ca.Function('B_fun', [x, u], [B_exp_c])
    # A_c = A_fun_c(np.zeros((2,1)), np.zeros((2,1)))
    # B_c = B_fun_c(np.zeros((2,1)), np.zeros((2,1)))
    # Ad, Bd, Cd, Dd, dt = sp.signal.cont2discrete((A_c.full(), B_c.full(),
    #                                               np.zeros(2), np.zeros(2)), dt=Ts)

    A = A_fun(np.zeros((2, 1)), np.zeros((2, 1)))
    B = B_fun(np.zeros((2, 1)), np.zeros((2, 1)))
    c = x_plus(np.zeros((2, 1)), np.zeros((2, 1))).full()

    return A, B, c, x_plus
def __construct_sensitivities(self):
    """ Construct NLP sensitivities
    """
    # convenience
    w = self.__w
    p = self.__p

    # cost function
    self.__f_fun = ca.Function('f_fun', [w, p], [self.__f])
    self.__jacf_fun = ca.Function('jacf_fun', [w, p], [ca.jacobian(self.__f, self.__w)])

    # constraints
    self.__g_fun = ca.Function('g_fun', [w, p], [self.__g])
    self.__jacg_fun = ca.Function('jacg_fun', [w, p], [ca.jacobian(self.__g, self.__w)])
    self.__gzeros = np.zeros((self.__g.shape[0], 1))

    # exact hessian
    lam_g = ca.MX.sym('lam_g', self.__g.shape)
    lag = self.__f + ct.mtimes(lam_g.T, self.__g)
    self.__jlag_fun = ca.Function('jLag', [w, p, lam_g], [ca.jacobian(lag, w)])

    if self.__options['hessian_approximation'] == 'exact':
        self.__H_fun = ca.Function('H_fun', [w, p, lam_g], [ca.hessian(lag, w)[0]])
    else:
        self.__H_fun = self.__options['hessian_approximation']
def proc_model(self):
    # kinematic bicycle model in control-affine form: xdot = g(x) * u
    g = c.MX(self.nx, self.nu)
    g[0, 0] = c.cos(self.x[2])
    g[0, 1] = 0
    g[1, 0] = c.sin(self.x[2])
    g[1, 1] = 0
    g[2, 0] = c.tan(self.x[3]) / self.length
    g[2, 1] = 0
    g[3, 0] = 0
    g[3, 1] = 1

    # forward-Euler discretization
    f = c.Function('f', [self.x, self.u], [self.x + c.mtimes(g, self.u) * self.dt])

    # linearization
    A = c.Function('A', [self.x, self.u], [c.jacobian(f(self.x, self.u), self.x)])
    B = c.Function('B', [self.x, self.u], [c.jacobian(f(self.x, self.u), self.u)])
    return f, A, B
def proc_model(self):
    # drift dynamics
    g1 = c.MX.zeros(self.nx, self.nx)
    g1[0, 3] = 1
    g1[1, 4] = 1
    g1[2, 5] = 1
    g1[6, 9] = 1
    g1[6, 10] = c.sin(self.x[6]) * c.tan(self.x[7])
    g1[6, 11] = c.cos(self.x[6]) * c.tan(self.x[7])
    g1[7, 10] = c.cos(self.x[6])
    g1[7, 11] = -c.sin(self.x[6])
    g1[8, 10] = c.sin(self.x[6]) / c.cos(self.x[7])
    g1[8, 11] = c.cos(self.x[6]) / c.cos(self.x[7])

    # input matrix
    g2 = c.MX.zeros(self.nx, self.nu)
    g2[3, 0] = (c.cos(self.x[8]) * c.sin(self.x[7]) * c.cos(self.x[6])
                + c.sin(self.x[8]) * c.sin(self.x[6])) / self.m
    g2[4, 0] = (c.sin(self.x[8]) * c.sin(self.x[7]) * c.cos(self.x[6])
                - c.cos(self.x[8]) * c.sin(self.x[6])) / self.m
    g2[5, 0] = c.cos(self.x[7]) * c.cos(self.x[6]) / self.m
    g2[9:, 1:] = c.inv(self.Ic)

    # gravity direction vector
    g3 = c.MX.zeros(self.nx, 1)

    # forward-Euler discretization
    f = c.Function('f', [self.x, self.u],
                   [self.x + c.mtimes(g1, self.x) * self.dt
                    + c.mtimes(g2, self.u) * self.dt - g3 * self.g * self.dt])

    # linearization
    A = c.Function('A', [self.x, self.u], [c.jacobian(f(self.x, self.u), self.x)])
    B = c.Function('B', [self.x, self.u], [c.jacobian(f(self.x, self.u), self.u)])
    return f, A, B
def get_lqr_backward_func(f, x, u, S, Cs, Cu):
    # linearize the dynamics about (x, u)
    A = cs.jacobian(f(x, u), x)
    B = cs.jacobian(f(x, u), u)
    # one backward Riccati recursion step: gain L and previous cost-to-go S_prev
    L = cs.mldivide(Cu + (B.T) @ S @ B, (B.T) @ S @ A)
    S_prev = (A.T) @ S @ A - (A.T) @ S @ B @ L + Cs
    S_prev = (S_prev + S_prev.T) / 2  # symmetrize for numerical robustness
    lqr_backward = cs.Function('lqr_backward', [x, u, S], [L, S_prev],
                               ['x', 'u', 'S'], ['L', 'S_prev'])
    return lqr_backward
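# --- Hedged usage sketch for get_lqr_backward_func (not part of the original source). ---
# It assumes the function above is available in the same module and uses a made-up
# discrete-time double integrator (A0, B0) plus identity cost weights for illustration.
import casadi as cs
import numpy as np

x = cs.MX.sym('x', 2)
u = cs.MX.sym('u', 1)
A0 = cs.DM([[1.0, 0.1], [0.0, 1.0]])
B0 = cs.DM([[0.005], [0.1]])
f = cs.Function('f', [x, u], [A0 @ x + B0 @ u])

S = cs.MX.sym('S', 2, 2)
Cs_w, Cu_w = cs.DM.eye(2), cs.DM.eye(1)      # stage cost weights (illustrative)
lqr_backward = get_lqr_backward_func(f, x, u, S, Cs_w, Cu_w)

S_k = np.eye(2)                               # terminal cost-to-go
for _ in range(50):                           # backward pass
    L_k, S_k = lqr_backward(np.zeros(2), np.zeros(1), S_k)
print(L_k)                                    # approximately converged feedback gain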
def variable_metadata_function(self):
    in_var = ca.veccat(*self._symbols(self.parameters))
    out = []
    is_affine = True
    zero, one = ca.MX(0), ca.MX(1)  # Recycle these common nodes as much as possible.
    for variable_list in [self.states, self.alg_states, self.inputs, self.parameters, self.constants]:
        attribute_lists = [[] for i in range(len(ast.Symbol.ATTRIBUTES))]
        for variable in variable_list:
            for attribute_list_index, attribute in enumerate(ast.Symbol.ATTRIBUTES):
                value = ca.MX(getattr(variable, attribute))
                if value.is_zero():
                    value = zero
                elif value.is_one():
                    value = one
                value = value if value.numel() != 1 else ca.repmat(value, *variable.symbol.size())
                attribute_lists[attribute_list_index].append(value)
        expr = ca.horzcat(*[ca.veccat(*attribute_list) for attribute_list in attribute_lists])
        if len(self.parameters) > 0 and isinstance(expr, ca.MX):
            f = ca.Function('f', [in_var], [expr])
            contains_if_else = ca.OP_IF_ELSE_ZERO in [
                f.instruction_id(k) for k in range(f.n_instructions())]
            zero_hessian = ca.jacobian(ca.jacobian(expr, in_var), in_var).is_zero()
            if contains_if_else or not zero_hessian:
                is_affine = False
        out.append(expr)
    if len(self.parameters) > 0 and is_affine:
        # Rebuild variable metadata as a single affine expression, if all
        # subexpressions are affine.
        in_var_ = ca.MX.sym('in_var', in_var.shape)
        out_ = []
        for o in out:
            Af = ca.Function('Af', [in_var], [ca.jacobian(o, in_var)])
            bf = ca.Function('bf', [in_var], [o])

            A = Af(0)
            A = ca.sparsify(A)

            b = bf(0)
            b = ca.sparsify(b)

            o_ = ca.reshape(ca.mtimes(A, in_var_), o.shape) + b
            out_.append(o_)
        out = out_
        in_var = in_var_
    return ca.Function('variable_metadata', [in_var], out)
def generateCModel(dae, ag):
    writer = AlgorithmWriter()

    inputs = C.veccat([ag['x'], ag['z'], ag['u'], ag['p'], ag['xdot']])

    # dae residual
    f = ag['f']
    rhs = C.SXFunction([inputs], [f])
    rhs.init()
    # handle time scaling
    [f] = rhs.eval([C.veccat([ag['x'], ag['z'], ag['u'], ag['p'], ag['xdot']/ag['timeScaling']])])
    rhs = C.SXFunction([inputs], [f])
    rhs.init()
    rhs_string = [writer.writePrototype('rhs')]
    rhs_string.extend(writer.convertAlgorithm(rhs))
    rhs_string.append('}')

    # dae residual jacobian
    jf = C.veccat([C.jacobian(f, inputs).T])
    rhs_jacob = C.SXFunction([inputs], [jf])
    rhs_jacob.init()
    rhs_jacob_string = [writer.writePrototype('rhs_jac')]
    rhs_jacob_string.extend(writer.convertAlgorithm(rhs_jacob))
    rhs_jacob_string.append('}')

    # outputs
    o = C.veccat([dae[outname] for outname in dae.outputNames()])
    outputs = C.SXFunction([inputs], [o])
    outputs.init()
    outputs_string = [writer.writePrototype('out')]
    outputs_string.extend(writer.convertAlgorithm(outputs))
    outputs_string.append('}')

    # outputs jacobian
    jo = C.veccat([C.jacobian(o, inputs).T])
    outputs_jacob = C.SXFunction([inputs], [jo])
    outputs_jacob.init()
    outputs_jacob_string = [writer.writePrototype('out_jac')]
    outputs_jacob_string.extend(writer.convertAlgorithm(outputs_jacob))
    outputs_jacob_string.append('}')

    # model file
    modelFile = ['#include "acado.h"']
    modelFile.append('')
    modelFile.extend(rhs_string)
    modelFile.append('')
    modelFile.extend(rhs_jacob_string)
    modelFile.append('')
    modelFile.append('')
    modelFile.extend(outputs_string)
    modelFile.append('')
    modelFile.extend(outputs_jacob_string)

    return {'modelFile': '\n'.join(modelFile), 'rhs': rhs, 'rhsJacob': rhs_jacob}
def variable_metadata_function(self):
    in_var = ca.veccat(*self._symbols(self.parameters))
    out = []
    is_affine = True
    zero, one = ca.MX(0), ca.MX(1)  # Recycle these common nodes as much as possible.
    for variable_list in [self.states, self.alg_states, self.inputs, self.parameters, self.constants]:
        attribute_lists = [[] for i in range(len(CASADI_ATTRIBUTES))]
        for variable in variable_list:
            for attribute_list_index, attribute in enumerate(CASADI_ATTRIBUTES):
                value = ca.MX(getattr(variable, attribute))
                if value.is_zero():
                    value = zero
                elif value.is_one():
                    value = one
                value = value if value.numel() != 1 else ca.repmat(value, *variable.symbol.size())
                attribute_lists[attribute_list_index].append(value)
        expr = ca.horzcat(*[ca.veccat(*attribute_list) for attribute_list in attribute_lists])
        if len(self.parameters) > 0 and isinstance(expr, ca.MX):
            f = ca.Function('f', [in_var], [expr])

            # NOTE: This is not a complete list of operations that can be
            # handled in an affine expression. That said, it should
            # capture the most common ways variable attributes are
            # expressed as a function of parameters.
            allowed_ops = {ca.OP_INPUT, ca.OP_OUTPUT, ca.OP_CONST,
                           ca.OP_SUB, ca.OP_ADD, ca.OP_MUL, ca.OP_DIV, ca.OP_NEG}
            f_ops = {f.instruction_id(k) for k in range(f.n_instructions())}
            contains_unallowed_ops = not f_ops.issubset(allowed_ops)

            zero_hessian = ca.jacobian(ca.jacobian(expr, in_var), in_var).is_zero()
            if contains_unallowed_ops or not zero_hessian:
                is_affine = False
        out.append(expr)
    if len(self.parameters) > 0 and is_affine:
        # Rebuild variable metadata as a single affine expression, if all
        # subexpressions are affine.
        in_var_ = ca.MX.sym('in_var', in_var.shape)
        out_ = []
        for o in out:
            Af = ca.Function('Af', [in_var], [ca.jacobian(o, in_var)])
            bf = ca.Function('bf', [in_var], [o])

            A = Af(0)
            A = ca.sparsify(A)

            b = bf(0)
            b = ca.sparsify(b)

            o_ = ca.reshape(ca.mtimes(A, in_var_), o.shape) + b
            out_.append(o_)
        out = out_
        in_var = in_var_
    return self._expand_mx_func(ca.Function('variable_metadata', [in_var], out))
def get_cov_trans_func(f, h, x, x_new, u, p, Q, R):
    # unpack the vectorized covariance
    P = cs.reshape(p, 11, 11)
    # linearized dynamics and measurement model (EKF-style)
    A = cs.jacobian(f(x, u), x)
    H = cs.jacobian(h(x_new, u), x_new)
    # prediction step
    P_pred = A @ P @ (A.T) + Q
    # update step
    S = H @ P_pred @ (H.T) + R
    K = cs.mrdivide(P_pred @ (H.T), S)
    P_updated = (cs.MX.eye(11) - K @ H) @ P_pred
    P_updated = (P_updated + P_updated.T) / 2  # symmetrize
    p_updated = cs.vec(P_updated)
    cov_trans = cs.Function('cov_trans', [x, x_new, u, p], [p_updated],
                            ['x', 'x_new', 'u', 'p'], ['p_updated'])
    return cov_trans
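# --- Hedged usage sketch for get_cov_trans_func (not part of the original source). ---
# It assumes the function above is importable and uses a made-up identity dynamics model
# and a position-only measurement of the first three states; all names and dimensions
# below are illustrative, matching only the hard-coded 11-state covariance.
import casadi as cs
import numpy as np

x = cs.MX.sym('x', 11)
x_new = cs.MX.sym('x_new', 11)
u = cs.MX.sym('u', 2)
p = cs.MX.sym('p', 121)
f = cs.Function('f', [x, u], [x])              # random-walk state model (assumption)
h = cs.Function('h', [x_new, u], [x_new[:3]])  # measure the first three states (assumption)
Q = 0.01 * cs.DM.eye(11)
R = 0.1 * cs.DM.eye(3)

cov_trans = get_cov_trans_func(f, h, x, x_new, u, p, Q, R)
p_next = cov_trans(np.zeros(11), np.zeros(11), np.zeros(2), np.eye(11).flatten())
print(cs.reshape(p_next, 11, 11))              # propagated covariance matrix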
def test_jacobian():
    x_sym = ca.MX.sym('x', 16)
    u_sym = ca.MX.sym('u', 4)
    x = f16.State.from_casadi(x_sym)
    u = f16.Control.from_casadi(u_sym)
    p = f16.Parameters()
    dx = f16.dynamics(x, u, p)
    A = ca.jacobian(dx.to_casadi(), x_sym)
    B = ca.jacobian(dx.to_casadi(), u_sym)
    f_A = ca.Function('A', [x_sym, u_sym], [A])
    f_B = ca.Function('B', [x_sym, u_sym], [B])
    print('A', f_A(np.ones(16), np.ones(4)))
    print('B', f_B(np.ones(16), np.ones(4)))
def _check_for_lineq(self):
    g = []
    for con in self.global_constraints:
        lb, ub = con[1], con[2]
        g = vertcat(g, con[0] - lb)
        if not isinstance(lb, np.ndarray):
            lb, ub = [lb], [ub]
        for k, _ in enumerate(lb):
            if lb[k] != ub[k]:
                return False, None, None
    sym, jac = [], []
    for child, q_i in self.q_i.items():
        for name, ind in q_i.items():
            var = self.distr_problem.father.get_variables(
                child, name, spline=False, symbolic=True, substitute=False)
            jj = jacobian(g, var)
            jac = horzcat(jac, jj[:, ind])
            sym.append(var)
    for nghb in self.q_ij.keys():
        for child, q_ij in self.q_ij[nghb].items():
            for name, ind in q_ij.items():
                var = self.distr_problem.father.get_variables(
                    child, name, spline=False, symbolic=True, substitute=False)
                jj = jacobian(g, var)
                jac = horzcat(jac, jj[:, ind])
                sym.append(var)
    for sym in symvar(jac):
        if sym not in self.par_global.values():
            return False, None, None
    par = struct_symMX(self.par_global_struct)
    A, b = jac, -g
    for s in sym:
        A = substitute(A, s, np.zeros(s.shape))
        b = substitute(b, s, np.zeros(s.shape))
    dep_b = [s.name() for s in symvar(b)]
    dep_A = [s.name() for s in symvar(A)]
    for name, sym in self.par_global.items():
        if sym.name() in dep_b:
            b = substitute(b, sym, par[name])
        if sym.name() in dep_A:
            A = substitute(A, sym, par[name])
    A = Function('A', [par], [A]).expand()
    b = Function('b', [par], [b]).expand()
    return True, A, b
def linearize():
    eqs = rocket_equations()
    x = eqs['x']
    u = eqs['u']
    p = eqs['p']
    y = x  # state feedback
    rhs = eqs['rhs']
    xdot = rhs(x, u, p)
    A = ca.jacobian(xdot, x)
    B = ca.jacobian(xdot, u)
    C = ca.jacobian(y, x)
    D = ca.jacobian(y, u)
    return ca.Function('ss', [x, u, p], [A, B, C, D],
                       ['x', 'u', 'p'], ['A', 'B', 'C', 'D'])
def buildAutomaticDifferentiationTree(self):
    # Define variables
    n = self.n
    d = self.d
    X = SX.sym('X', n)
    U = SX.sym('U', d)

    X_next = self.dynamics(X, U)
    self.constraint = []
    for i in range(0, n):
        self.constraint = vertcat(self.constraint, X_next[i])

    self.A_Eval = Function('A', [X, U], [jacobian(self.constraint, X)])
    self.B_Eval = Function('B', [X, U], [jacobian(self.constraint, U)])
    self.f_Eval = Function('f', [X, U], [self.constraint])
def set_path(self, path, r2r=False):
    """Define an analytic expression of the geometric path.

    Note: The path must be defined as a function of self.s[0].

    Args:
        path (list of SXMatrix): An expression of the geometric path as a
            function of self.s[0]. Its dimension must equal self.sys.ny

        r2r (boolean): Reparameterize path such that a rest to rest
            transition is performed

    Example:
        >>> S = FlatSystem(2, 4)
        >>> P = PathFollowing(S)
        >>> P.set_path([P.s[0], P.s[0]])
    """
    if isinstance(path, list):
        path = cas.vertcat(path)
    if r2r:
        path = cas.substitute(path, self.s[0], self._r2r())
    self.path[:, 0] = path
    dot_s = cas.vertcat([self.s[1:], 0])
    for i in range(1, self.sys.order + 1):
        self.path[:, i] = cas.mul(cas.jacobian(self.path[:, i - 1], self.s), dot_s)
def getScalarDerivative(f, nargs=1, wrt=(0,), vectorize=True):
    """
    Returns a function that gives the derivative of the scalar function f.

    f must be a function that takes nargs scalar entries and returns a single
    scalar. Derivatives are taken with respect to the variables specified in
    wrt, which must be a tuple of integers. E.g., to take a second derivative
    with respect to the first argument, specify wrt=(0,0).

    vectorize is a boolean flag to determine whether or not the function
    should be wrapped with numpy's vectorize. Note that vectorized functions
    do not play well with Casadi symbolics, so set vectorize=False if you wish
    to use the function later on with Casadi symbolics.
    """
    x = [casadi.SX.sym("x" + str(n)) for n in range(nargs)]
    dfdx_expression = f(*x)
    for i in wrt:
        dfdx_expression = casadi.jacobian(dfdx_expression, x[i])
    dfcasadi = casadi.Function("dfdx", x, [dfdx_expression])

    def dfdx(*x):
        return dfcasadi(*x)

    if len(wrt) > 1:
        funcstr = "d^%df/%s" % (len(wrt), "".join(["x%d" % (i,) for i in wrt]))
    else:
        funcstr = "df/dx"
    dfdx.__doc__ = "\n%s = %s" % (funcstr, repr(dfdx_expression))
    if vectorize:
        ret = np.vectorize(dfdx, otypes=[float])
    else:
        ret = dfdx
    return ret
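# --- Hedged usage sketch for getScalarDerivative (not part of the original source). ---
# Differentiates a made-up two-argument scalar function twice with respect to its
# first argument; the function g and the evaluation point are purely illustrative.
import casadi

def g(a, b):
    return casadi.sin(a) * b  # works on CasADi symbols, so SX tracing succeeds

d2g_da2 = getScalarDerivative(g, nargs=2, wrt=(0, 0), vectorize=False)
print(d2g_da2(0.5, 2.0))  # expect -2*sin(0.5)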
def jacobian(self, var):
    """Returns the partial derivative of the expression with respect to var.

    Return:
        cs.MX: expression of partial derivative
    """
    return cs.jacobian(self.expression, var)
def create_function_f_J(self):
    """Jacobian for state integration"""
    return ca.Function(
        'J',
        [self.t, self.x, self.y, self.m, self.p, self.c, self.ng, self.nu],
        [ca.jacobian(self.f_x_rhs, self.x)],
        ['t', 'x', 'y', 'm', 'p', 'c', 'ng', 'nu'], ['J'],
        self.func_opt)
def getDiscreteLinearSystem(self, ode, xLin, uLin, samplingTime):
    NX = xLin.size
    NU = uLin.size
    # cache the Jacobians the first time (note: `not`, rather than bitwise `~`,
    # which is always truthy on booleans)
    if not hasattr(self, 'dfdx') or not hasattr(self, 'dfdu'):
        x = ca.SX.sym('x', NX)
        u = ca.SX.sym('u', NU)
        self.dfdx = np.array(
            ca.Function('dfdx', [x, u], [ca.jacobian(ode(x, u), x)])(xLin, uLin))
        self.dfdu = np.array(
            ca.Function('dfdu', [x, u], [ca.jacobian(ode(x, u), u)])(xLin, uLin))
    # forward-Euler discretization of the linearized continuous dynamics
    A = np.eye(NX) + self.dfdx * samplingTime
    B = self.dfdu * samplingTime
    return (A, B)
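# --- Hedged usage sketch for getDiscreteLinearSystem (not part of the original source). ---
# The original is a method, so a throwaway SimpleNamespace stands in for self here.
# The damped-pendulum ODE, its constants, and the sampling time are all made up.
from types import SimpleNamespace
import casadi as ca
import numpy as np

def pendulum_ode(x, u):
    # x = [angle, angular rate], u = [torque] (illustrative model)
    return ca.vertcat(x[1], -9.81 * ca.sin(x[0]) - 0.1 * x[1] + u[0])

A, B = getDiscreteLinearSystem(SimpleNamespace(), pendulum_ode,
                               np.zeros(2), np.zeros(1), 0.05)
print(A)  # approx. [[1, 0.05], [-0.4905, 0.995]]
print(B)  # approx. [[0], [0.05]]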
def __init__(self, name, syms, expression):
    # info
    self.name = name
    self.sizes = (syms[0].numel(), expression.numel(), sum([s.numel() for s in syms[1:]]))

    # use a single parameter for the expression (because julia sucks sometimes)
    self.param_sym = oc.MX.sym("P", self.sizes[2])
    self.param_names = [s.name() for s in syms[1:]]
    expression_ = cs.substitute(expression, cs.vcat(syms[1:]), self.param_sym)

    # expression evaluation
    self.eval = cs.Function('eval', [syms[0], self.param_sym], [expression_]).expand()

    # collect the sx version of the symbols
    sx_syms = self.eval.sx_in()
    self.main_sym = sx_syms[0]

    # expression jacobian
    jacobian = cs.jacobian(expression_, syms[0])
    self.eval_jac = cs.Function('eval_jac', [syms[0], self.param_sym], [jacobian]).expand()

    # hessian of each of the elements of the expression
    split_eval = [cs.Function('eval', [syms[0], self.param_sym], [expression_[i]]).expand()
                  for i in range(expression_.shape[0])]
    hessian = [cs.hessian(split_eval[i](*sx_syms), sx_syms[0])[0]
               for i in range(expression_.shape[0])]
    self.eval_hes = [cs.Function('eval_hes' + str(i), sx_syms, [hessian[i]]).expand()
                     for i in range(expression.shape[0])]

    # location of the compiled library
    self.lib_path = None
def mvg_moment_array_functions(max_order, dimension, symbolic_vars=None):
    """
    Args:
        max_order ([type]): [description]
        dimension ([type]): [description]

    Returns:
        [type]: Resulting functions have signature (t, mu, sigma) where t and
        mu are vectors, and sigma is the covariance matrix.
    """
    if symbolic_vars is None:
        # Declare variables.
        t = casadi.MX.sym('t', dimension, 1)
        mu = casadi.MX.sym("mu", dimension, 1)
        sigma = casadi.MX.sym("sigma", dimension, dimension)
    else:
        t, mu, sigma = symbolic_vars

    # This dictionary stores our resulting functions.
    moment_array_functions = dict()
    moment_array_syms = dict()

    # Start auto-differentiating the multivariate Gaussian MGF.
    foo = mvg_mgf(t, mu, sigma)
    for i in range(max_order):
        order = i + 1
        foo = casadi.jacobian(foo, t)  # TODO: current matrix output form doesn't quite make sense
        fun = casadi.Function('moment_array_order' + str(order), [t, mu, sigma], [foo])
        moment_array_syms[order] = foo
        moment_array_functions[order] = fun
    return moment_array_functions, moment_array_syms
def _check_var_existence(self):
    """Internal function to set _has_virtual, and _has_input.

    Loops over constraints to see if the derivatives are non-zero."""
    self._has_virtual = False
    if self.virtual_var is not None:
        virtual_var = self.virtual_var
        for cnstr in self.constraints:
            if cs.jacobian(cnstr.expression, virtual_var).nnz() > 0:
                self._has_virtual = True
            if hasattr(cnstr, "target"):
                if isinstance(cnstr.target, cs.MX):
                    if cs.jacobian(cnstr.target, virtual_var).nnz() > 0:
                        self._has_virtual = True
            if hasattr(cnstr, "set_min"):
                if isinstance(cnstr.set_min, cs.MX):
                    if cs.jacobian(cnstr.set_min, virtual_var).nnz() > 0:
                        self._has_virtual = True
            if hasattr(cnstr, "set_max"):
                if isinstance(cnstr.set_max, cs.MX):
                    if cs.jacobian(cnstr.set_max, virtual_var).nnz() > 0:
                        self._has_virtual = True
            if hasattr(cnstr, "gain"):
                if isinstance(cnstr.gain, cs.MX):
                    if cs.jacobian(cnstr.gain, virtual_var).nnz() > 0:
                        self._has_virtual = True

    self._has_input = False
    if self.input_var is not None:
        input_var = self.input_var
        for cnstr in self.constraints:
            if cs.jacobian(cnstr.expression, input_var).nnz() > 0:
                self._has_input = True
            if hasattr(cnstr, "target"):
                if isinstance(cnstr.target, cs.MX):
                    if cs.jacobian(cnstr.target, input_var).nnz() > 0:
                        self._has_input = True
            if hasattr(cnstr, "set_min"):
                if isinstance(cnstr.set_min, cs.MX):
                    if cs.jacobian(cnstr.set_min, input_var).nnz() > 0:
                        self._has_input = True
            if hasattr(cnstr, "set_max"):
                if isinstance(cnstr.set_max, cs.MX):
                    if cs.jacobian(cnstr.set_max, input_var).nnz() > 0:
                        self._has_input = True
            if hasattr(cnstr, "gain"):
                if isinstance(cnstr.gain, cs.MX):
                    if cs.jacobian(cnstr.gain, input_var).nnz() > 0:
                        self._has_input = True
def maybeAddBoxConstraint():
    inputs = C.veccat([self.dae.xVec(), self.dae.uVec()])
    # make sure only x and u are in rhs,lhs
    rml = rhs - lhs
    f = C.SXFunction([inputs], [rml])
    f.init()
    if len(f.getFree()) != 0:
        return

    # take jacobian of rhs-lhs
    jac = C.jacobian(rml, inputs)

    # fail if any jacobian element is not constant
    coeffs = {}
    for j in range(inputs.size()):
        if not jac[0, j].toScalar().isZero():
            if not jac[0, j].toScalar().isConstant():
                return
            coeffs[j] = jac[0, j]
    if len(coeffs) == 0:
        raise Exception("constraint has no design variables in it")
    if len(coeffs) > 1:
        self.debug("found linear constraint that is not box constraint")
        return

    # alright, we've found a box constraint!
    j = coeffs.keys()[0]
    coeff = coeffs[j]
    name = (self.dae.xNames() + self.dae.uNames())[j]
    [f0] = f.eval([0 * inputs])

    # if we just divided by a negative number (coeff), flip the comparison
    if not coeff.toScalar().isNonNegative():
        # lhs `cmp` rhs
        # 0 `cmp` rhs - lhs
        # 0 `cmp` coeff*x + f0
        # -f0 `cmp` coeff*x
        # -f0/coeff `FLIP(cmp)` x
        if comparison == '>=':
            newComparison = '<='
        elif comparison == '<=':
            newComparison = '>='
        else:
            newComparison = comparison
    else:
        newComparison = comparison

    c = -f0 / coeff
    self.debug('found linear constraint: ' + str(c) + ' ' + newComparison + ' ' + name)
    if newComparison == '==':
        self._bound(name, c, 'equality', when=when)
    elif newComparison == '<=':
        self._bound(name, c, 'lower', when=when)
    elif newComparison == '>=':
        self._bound(name, c, 'upper', when=when)
    else:
        raise Exception('the "impossible" happened, comparison "' + str(comparison) +
                        "\" not in ['==','>=','<=']")
    return 'found box constraint'
def get_derivative(self, s):
    # Case 1: s is a constant, e.g. MX(5)
    if ca.MX(s).is_constant():
        return 0

    # Case 2: s is a symbol, e.g. MX(x)
    elif s.is_symbolic():
        if s.name() not in self.derivative:
            if len(self.for_loops) > 0 and s in self.for_loops[-1].indexed_symbols:
                # Create a new indexed symbol, referencing to the for loop index inside the vector derivative symbol.
                for_loop_symbol = self.for_loops[-1].indexed_symbols[s]
                s_without_index = self.get_mx(ast.ComponentRef(name=for_loop_symbol.tree.name))
                der_s_without_index = self.get_derivative(s_without_index)
                if ca.MX(der_s_without_index).is_symbolic():
                    return self.get_indexed_symbol(
                        ast.ComponentRef(name=der_s_without_index.name(),
                                         indices=for_loop_symbol.tree.indices),
                        der_s_without_index)
                else:
                    return 0
            else:
                der_s = _new_mx("der({})".format(s.name()), s.size())
                self.derivative[s.name()] = der_s
                self.nodes[self.current_class][der_s.name()] = der_s
                return der_s
        else:
            return self.derivative[s.name()]

    # Case 3: s is an already indexed symbol, e.g. MX(x[1])
    elif s.is_op(ca.OP_GETNONZEROS) and s.dep().is_symbolic():
        slice_info = s.info()['slice']
        dep = s.dep()
        if dep.name() not in self.derivative:
            der_dep = _new_mx("der({})".format(dep.name()), dep.size())
            self.derivative[dep.name()] = der_dep
            return der_dep[slice_info['start']:slice_info['stop']:slice_info['step']]
        else:
            return self.derivative[dep.name()][slice_info['start']:slice_info['stop']:slice_info['step']]

    # Case 4: s is an expression that requires differentiation, e.g. MX(x2 * x2)
    # Need to do this sort of expansion: der(x1 * x2) = der(x1) * x2 + x1 * der(x2)
    else:
        # Differentiate expression using CasADi
        orig_deps = ca.symvar(s)
        deps = ca.vertcat(*orig_deps)
        J = ca.Function('J', [deps], [ca.jacobian(s, deps)])
        J_sparsity = J.sparsity_out(0)
        der_deps = [self.get_derivative(dep) if J_sparsity.has_nz(0, j) else ca.DM.zeros(dep.size())
                    for j, dep in enumerate(orig_deps)]
        return ca.mtimes(J(deps), ca.vertcat(*der_deps))
def gradient_control(cov_trans, c_term_grad, x, p):
    v = cs.MX.zeros(x.shape[0] - 2)
    J = cs.jacobian(cov_trans(x, cs.MX.zeros(2), v, p), x)[:, :2]
    grad = J.T @ c_term_grad(cov_trans(x, cs.MX.zeros(2), v, p)).T
    u = -1.0 * grad / (cs.sqrt(cs.sum1(grad**2)) + 1e-10)
    gradient_control = cs.Function('gradient_control', [x, p], [u], ['x', 'p'], ['u'])
    return gradient_control
def generateCModel(dae, timeScaling, measurements):
    xdot = C.veccat([dae.ddt(name) for name in dae.xNames()])
    inputs = C.veccat([dae.xVec(), dae.zVec(), dae.uVec(), dae.pVec(), xdot])
    jacobian_inputs = C.veccat([dae.xVec(), dae.zVec(), dae.uVec(), xdot])
    f = dae.getResidual()

    # dae residual
    rhs = C.SXFunction([inputs], [f])
    rhs.init()
    # handle time scaling
    [f] = rhs([C.veccat([dae.xVec(), dae.zVec(), dae.uVec(), dae.pVec(), xdot/timeScaling])])
    rhs = C.SXFunction([inputs], [C.dense(f)])
    rhs.init()
    rhsString = codegen.writeCCode(rhs, 'rhs')

    # dae residual jacobian
    jf = C.veccat([C.jacobian(f, jacobian_inputs).T])
    rhsJacob = C.SXFunction([inputs], [C.dense(jf)])
    rhsJacob.init()
    rhsJacobString = codegen.writeCCode(rhsJacob, 'rhsJacob')

    ret = {'rhs': rhs,
           'rhsJacob': rhsJacob,
           'rhsFile': rhsString,
           'rhsJacobFile': rhsJacobString}

    if measurements is not None:
        # measurements
        measurementsFun = C.SXFunction([inputs], [measurements])
        measurementsFun.init()
        [measurements] = measurementsFun([C.veccat([dae.xVec(), dae.zVec(), dae.uVec(), dae.pVec(), xdot/timeScaling])])
        measurementsFun = C.SXFunction([inputs], [C.dense(measurements)])
        measurementsFun.init()
        measurementsString = codegen.writeCCode(measurementsFun, 'measurements')
        ret['measurements'] = measurementsFun
        ret['measurementsFile'] = measurementsString

        # measurements jacobian
        jo = C.veccat([C.jacobian(measurements, jacobian_inputs).T])
        measurementsJacobFun = C.SXFunction([inputs], [C.dense(jo)])
        measurementsJacobFun.init()
        measurementsJacobString = codegen.writeCCode(measurementsJacobFun, 'measurementsJacob')
        ret['measurementsJacob'] = measurementsJacobFun
        ret['measurementsJacobFile'] = measurementsJacobString

    return ret
def T2W(T, p, dp):
    """
    w_101 = T2W(T_10, p, dp)
    """
    R = T2R(T)
    dR = c.reshape(c.mul(c.jacobian(R, p), dp), (3, 3))
    return invskew(c.mul(R.T, dR))
def maybeAddBoxConstraint():
    inputs = C.veccat([self.dae.xVec(), self.dae.uVec()])
    # make sure only x and u are in rhs,lhs
    rml = rhs - lhs
    f = C.SXFunction([inputs], [rml])
    f.init()
    if f.getFree().shape[0] != 0:
        return

    # take jacobian of rhs-lhs
    jac = C.jacobian(rml, inputs)

    # fail if any jacobian element is not constant
    coeffs = {}
    for j in range(inputs.size()):
        if jac.hasNZ(0, j):
            if not jac[0, j].isConstant():
                return
            coeffs[j] = jac[0, j]
    if len(coeffs) == 0:
        raise Exception("constraint has no design variables in it")
    if len(coeffs) > 1:
        self.debug("found linear constraint that is not box constraint")
        return

    # alright, we've found a box constraint!
    j = coeffs.keys()[0]
    coeff = coeffs[j]
    name = (self.dae.xNames() + self.dae.uNames())[j]
    [f0] = f([0*inputs])

    # if we just divided by a negative number (coeff), flip the comparison
    #if not coeff.toScalar().isNonNegative():
    if not coeff >= 0.0:
        # lhs `cmp` rhs
        # 0 `cmp` rhs - lhs
        # 0 `cmp` coeff*x + f0
        # -f0 `cmp` coeff*x
        # -f0/coeff `FLIP(cmp)` x
        if comparison == '>=':
            newComparison = '<='
        elif comparison == '<=':
            newComparison = '>='
        else:
            newComparison = comparison
    else:
        newComparison = comparison

    c = -f0/coeff
    self.debug('found linear constraint: ' + str(c) + ' ' + newComparison + ' ' + name)
    if newComparison == '==':
        self._bound(name, c, 'equality', when=when)
    elif newComparison == '<=':
        self._bound(name, c, 'lower', when=when)
    elif newComparison == '>=':
        self._bound(name, c, 'upper', when=when)
    else:
        raise Exception('the "impossible" happened, comparison "' + str(comparison) +
                        "\" not in ['==','>=','<=']")
    return 'found box constraint'
def get_derivative(self, s):
    # Case 1: s is a constant, e.g. MX(5)
    if ca.MX(s).is_constant():
        return 0

    # Case 2: s is a symbol, e.g. MX(x)
    elif s.is_symbolic():
        if s.name() not in self.derivative:
            if len(self.for_loops) > 0 and s in self.for_loops[-1].indexed_symbols:
                # Create a new indexed symbol, referencing to the for loop index inside the vector derivative symbol.
                for_loop_symbol = self.for_loops[-1].indexed_symbols[s]
                s_without_index = self.get_mx(ast.ComponentRef(name=for_loop_symbol.tree.name))
                der_s_without_index = self.get_derivative(s_without_index)
                if ca.MX(der_s_without_index).is_symbolic():
                    return self.get_indexed_symbol(
                        ast.ComponentRef(name=der_s_without_index.name(),
                                         indices=for_loop_symbol.tree.indices),
                        der_s_without_index)
                else:
                    return 0
            else:
                der_s = _new_mx("der({})".format(s.name()), s.size())
                # If the derivative contains an expression (e.g. der(x + y)) this method is
                # called with MX variables that are the result of a ca.symvar call. This
                # ca.symvar call strips the _modelica_shape field from the MX variable,
                # therefore we need to find the original MX to get the modelica shape.
                der_s._modelica_shape = \
                    self.nodes[self.current_class][s.name()]._modelica_shape
                self.derivative[s.name()] = der_s
                self.nodes[self.current_class][der_s.name()] = der_s
                return der_s
        else:
            return self.derivative[s.name()]

    # Case 3: s is an already indexed symbol, e.g. MX(x[1])
    elif s.is_op(ca.OP_GETNONZEROS) and s.dep().is_symbolic():
        slice_info = s.info()['slice']
        dep = s.dep()
        if dep.name() not in self.derivative:
            der_dep = _new_mx("der({})".format(dep.name()), dep.size())
            der_dep._modelica_shape = \
                self.nodes[self.current_class][dep.name()]._modelica_shape
            self.derivative[dep.name()] = der_dep
            self.nodes[self.current_class][der_dep.name()] = der_dep
            return der_dep[slice_info['start']:slice_info['stop']:slice_info['step']]
        else:
            return self.derivative[dep.name()][slice_info['start']:slice_info['stop']:slice_info['step']]

    # Case 4: s is an expression that requires differentiation, e.g. MX(x2 * x2)
    # Need to do this sort of expansion: der(x1 * x2) = der(x1) * x2 + x1 * der(x2)
    else:
        # Differentiate expression using CasADi
        orig_deps = ca.symvar(s)
        deps = ca.vertcat(*orig_deps)
        J = ca.Function('J', [deps], [ca.jacobian(s, deps)])
        J_sparsity = J.sparsity_out(0)
        der_deps = [self.get_derivative(dep) if J_sparsity.has_nz(0, j) else ca.DM.zeros(dep.size())
                    for j, dep in enumerate(orig_deps)]
        return ca.mtimes(J(deps), ca.vertcat(*der_deps))
def _check_for_lineq(self):
    g = []
    for con in self.constraints:
        lb, ub = con[1], con[2]
        g = vertcat([g, con[0] - lb])
        if not isinstance(lb, np.ndarray):
            lb, ub = [lb], [ub]
        for k in range(len(lb)):
            if lb[k] != ub[k]:
                return False, None, None
    sym, jac = [], []
    for child, q_i in self.q_i.items():
        for name, ind in q_i.items():
            var = child.get_variable(name, spline=False)
            jj = jacobian(g, var)
            jac = horzcat([jac, jj[:, ind]])
            sym.append(var)
    for nghb in self.q_ij.keys():
        for child, q_ij in self.q_ij[nghb].items():
            for name, ind in q_ij.items():
                var = child.get_variable(name, spline=False)
                jj = jacobian(g, var)
                jac = horzcat([jac, jj[:, ind]])
                sym.append(var)
    for sym in symvar(jac):
        if sym not in self.par_i.values():
            return False, None, None
    par = struct_symMX(self.par_struct)
    A, b = jac, -g
    for s in sym:
        A = substitute(A, s, np.zeros(s.shape))
        b = substitute(b, s, np.zeros(s.shape))
    dep_b = [s.getName() for s in symvar(b)]
    dep_A = [s.getName() for s in symvar(A)]
    for name, sym in self.par_i.items():
        if sym.getName() in dep_b:
            b = substitute(b, sym, par[name])
        if sym.getName() in dep_A:
            A = substitute(A, sym, par[name])
    A = MXFunction('A', [par], [A]).expand()
    b = MXFunction('b', [par], [b]).expand()
    return True, A, b
def set_path(self, paths):
    """The path is defined as the convex combination of the paths in paths.

    Args:
        paths (list of lists of SXMatrix): The path is taken as the convex
            combination of the paths in paths.

    Example:
        The path is defined as the convex combination of (s, 0.5*s) and
        (2, 2*s):

        >>> P.set_path([(P.s[0], 0.5 * P.s[0]), [P.s[0], 2 * P.s[0]]])
    """
    l = len(paths)
    self.h = cas.ssym("h", l, self.sys.order + 1)
    self.path[:, 0] = np.sum(cas.SXMatrix(paths) *
                             cas.horzcat([self.h[:, 0]] * len(paths[0])), axis=0)
    dot_s = cas.vertcat([self.s[1:], 0])
    dot_h = cas.horzcat([self.h[:, 1:], cas.SXMatrix.zeros(l, 1)])
    for i in range(1, self.sys.order + 1):
        # Chainrule
        self.path[:, i] = (cas.mul(cas.jacobian(self.path[:, i - 1], self.s), dot_s) +
                           sum([cas.mul(cas.jacobian(self.path[:, i - 1], self.h[j, :]),
                                        dot_h[j, :].trans()) for j in range(l)]) * self.s[1])
def _make_path(self):
    """Rewrite the path as a function of the optimization variables.

    Substitutes the time derivatives of s in the expression of the path by
    expressions that are function of b and its path derivatives by
    repeatedly applying the chainrule

    Returns:
        * SXMatrix. The substituted path
        * SXMatrix. b and the path derivatives
        * SXMatrix. The derivatives of s as a function of b
    """
    b = cas.ssym("b", self.sys.order)
    db = cas.vertcat((b[1:], 0))
    Ds = cas.SXMatrix.nan(self.sys.order)  # Time derivatives of s
    Ds[0] = cas.sqrt(b[0])
    Ds[1] = b[1] / 2
    # Apply chainrule for finding higher order derivatives
    for i in range(1, self.sys.order - 1):
        Ds[i + 1] = (cas.mul(cas.jacobian(Ds[i], b), db) * self.s[1] +
                     cas.jacobian(Ds[i], self.s[1]) * Ds[1])
    Ds = cas.substitute(Ds, self.s[1], cas.sqrt(b[0]))
    return cas.substitute(self.path, self.s[1:], Ds), b, Ds
def T2WJ(T, p):
    """
    w_101 = T2WJ(T_10, p).diff(p, t)
    """
    R = T2R(T)
    RT = R.T
    temp = []
    for i, k in [(2, 1), (0, 2), (1, 0)]:
        #temp.append(c.mul(c.jacobian(R[:,k],p).T,R[:,i]).T)
        temp.append(c.mul(RT[i, :], c.jacobian(R[:, k], p)))
    return c.vertcat(temp)
def blt(f: List[SYM], x: List[SYM]) -> Dict[str, Any]:
    """
    Sort equations by dependence
    """
    J = ca.jacobian(f, x)
    nblock, rowperm, colperm, rowblock, colblock, coarserow, coarsecol = J.sparsity().btf()
    return {
        'J': J,
        'nblock': nblock,
        'rowperm': rowperm,
        'colperm': colperm,
        'rowblock': rowblock,
        'colblock': colblock,
        'coarserow': coarserow,
        'coarsecol': coarsecol,
    }
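# --- Hedged usage sketch for blt (not part of the original source). ---
# It assumes blt and its type aliases are defined as above, and builds a tiny made-up
# triangular equation system, passing the residuals and unknowns as stacked SX vectors.
import casadi as ca

x1, x2, x3 = ca.SX.sym('x1'), ca.SX.sym('x2'), ca.SX.sym('x3')
f_sys = ca.vertcat(x1 - 1,        # depends on x1 only
                   x2 - x1**2,    # depends on x1, x2
                   x3 + x2 * x1)  # depends on all three
res = blt(f_sys, ca.vertcat(x1, x2, x3))
print(res['nblock'])              # number of diagonal blocks in the BTF
print(res['rowperm'], res['colperm'])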
def dt(self, expr):
    """Return time derivative of expr

    The time derivative is computed using the chainrule:
    df/dt = df/dy * dy/dt

    Args:
        expr (SXMatrix): casadi expression that is differentiated wrt time

    Returns:
        SXMatrix. The time derivative of expr

    Example:
        >>> S = FlatSystem(2, 4)
        >>> dy00 = S.dt(S.y[0, 0])
    """
    return cas.mul(cas.jacobian(expr, self.y), self._dy[:])
def tangent_approx(f: SYM, x: SYM, a: SYM = None, assert_linear: bool = False) -> Dict[str, SYM]:
    """
    Create a tangent approximation of a non-linear function f(x) about point a
    using a block lower triangular solver

    0 = f(x) = f(a) + J*x   # taylor series about a (if f(x) linear in x, then globally valid)
    J*x = -f(a)             # solve for x
    x = -J^{-1} f(a)        # but inverse is slow, so we use solve

    where J = df/dx
    """
    # find f(a)
    if a is None:
        a = ca.DM.zeros(x.numel(), 1)
    f_a = ca.substitute(f, x, a)  # f(a)
    J = ca.jacobian(f, x)
    if assert_linear and ca.depends_on(J, x):
        raise AssertionError('not linear')
    # solve is smart enough to convert to blt if necessary
    return ca.solve(J, -f_a)
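# --- Hedged usage sketch for tangent_approx (not part of the original source). ---
# It assumes tangent_approx is defined as above and solves the made-up linear residual
# 0 = A x - b; for a linear residual the tangent approximation is exact, so the result
# equals A^{-1} b.
import casadi as ca

x = ca.SX.sym('x', 2)
A = ca.DM([[2.0, 0.0], [1.0, 3.0]])
b = ca.DM([4.0, 5.0])
f_lin = ca.mtimes(A, x) - b

x_sol = tangent_approx(f_lin, x, assert_linear=True)
print(ca.evalf(x_sol))   # expect [2, 1]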
def generateOctaveSim(dae, functionName):
    # get the residual fg(xdot, x, z)
    fg = dae.getResidual()

    # take the jacobian w.r.t. xdot, z
    z = dae.zVec()
    jac = C.jacobian(fg, C.veccat([dae.xDotVec(), z]))

    # make sure that it was linear in {xdot,z}, i.e. the jacobian is not a function of {xdot,z}
    testJac = C.SXFunction([dae.xVec(), dae.uVec(), dae.pVec()], [jac])
    testJac.init()
    assert len(testJac.getFree()) == 0, "can't generate octave sim, jacobian a function of {xdot,z}"

    # it was linear, so export the jacobian
    fg_fun = C.SXFunction([dae.xVec(), dae.zVec(), dae.uVec(), dae.pVec(), dae.xDotVec()], [fg])
    fg_fun.init()

    # get the constant term
    [fg_zero] = fg_fun.eval([dae.xVec(), 0*dae.zVec(), dae.uVec(), dae.pVec(), 0*dae.xDotVec()])
    testFun = C.SXFunction([dae.xVec(), dae.uVec(), dae.pVec()], [jac])
    testFun.init()
    assert len(testFun.getFree()) == 0, "can't generate octave sim, function not linear in {xdot,z}"

    fm = C.SXFunction([dae.xVec(), dae.uVec(), dae.pVec()], [fg_zero, jac])
    fm.init()

    lines = []
    lines.append('function [f,MM] = ' + functionName + '_modelAndJacob(x,u,p)')
    lines.append('')
    lines.append('MM = zeros' + str(jac.shape) + ';')
    lines.append('f = zeros(' + str(fg_zero.size()) + ',1);')
    lines.append('')

    # dae residual
    lines.extend(writeAcadoAlgorithm(dae, fm))
    lines.append('')
    lines.append('end\n')
    return ('\n'.join(lines))
def getRobustSteadyStateNlpFunctions(self, dae, ref_dict={}):
    xDotSol, zSol = dae.solveForXDotAndZ()

    ginv = Constraints()

    def constrainInvariantErrs():
        R_c2b = dae['R_c2b']
        self.makeOrthonormal(ginv, R_c2b)
        ginv.add(dae['c'], '==', 0, tag=('c(0) == 0', None))
        ginv.add(dae['cdot'], '==', 0, tag=('cdot( 0 ) == 0', None))
        di = dae['cos_delta'] ** 2 + dae['sin_delta'] ** 2 - 1
        ginv.add(di, '==', 0, tag=('delta invariant', None))
        ginv.add(C.mul(dae['R_c2b'].T, dae['w_bn_b']) - C.veccat([0, 0, dae['ddelta']]),
                 '==', 0, tag=("Rotational velocities", None))
    constrainInvariantErrs()

    invariants = ginv.getG()
    J = C.jacobian(invariants, dae.xVec())

    # make steady state model
    g = Constraints()
    xds = C.vertcat([xDotSol[name] for name in dae.xNames()])
    jInv = C.mul(J.T, C.solve(C.mul(J, J.T), invariants))
    g.add(xds - jInv - dae.xDotVec(), '==', 0, tag=('dae residual', None))

    for name in ['alpha_deg', 'beta_deg', 'cL']:
        if name in ref_dict:
            g.addBnds(dae[name], ref_dict[name], tag=(name, None))

    dvs = C.veccat([dae.xVec(), dae.uVec(), dae.pVec(), dae.xDotVec()])

    obj = 0
    for name in dae.uNames() + ['aileron', 'elevator']:
        if name in dae:
            obj += dae[name] ** 2

    return dvs, obj, g.getG(), g.getLb(), g.getUb(), zSol
def solveForXDotAndZ(self):
    '''
    returns (xDotDict, zDict) where these dictionaries contain symbolic
    xdot and z which are only a function of x, u, p
    '''
    # get the residual fg(xdot, x, z)
    fg = self.getResidual()

    # take the jacobian w.r.t. xdot, z
    jac = C.jacobian(fg, C.veccat([self.xDotVec(), self.zVec()]))

    # make sure that it was linear in {xdot,z}, i.e. the jacobian is not a function of {xdot,z}
    testJac = C.SXFunction([self.xVec(), self.uVec(), self.pVec()], [jac])
    testJac.init()
    assert len(testJac.getFree()) == 0, \
        "can't convert dae to ode, residual jacobian is a function of {xdot,z}"

    # get the constant term
    fg_fun = C.SXFunction([self.xVec(), self.zVec(), self.uVec(), self.pVec(), self.xDotVec()], [fg])
    fg_fun.init()
    [fg_zero] = fg_fun([self.xVec(), 0*self.zVec(), self.uVec(), self.pVec(), 0*self.xDotVec()])
    testFun = C.SXFunction([self.xVec(), self.uVec(), self.pVec()], [fg_zero])
    testFun.init()
    assert len(testFun.getFree()) == 0, \
        "the \"impossible\" happened in solveForXDotAndZ"

    xDotAndZ = C.solve(jac, -fg_zero)
    xDot = xDotAndZ[0:len(self.xNames())]
    z = xDotAndZ[len(self.xNames()):]

    xDotDict = {}
    for k, name in enumerate(self.xNames()):
        xDotDict[name] = xDot[k]
    zDict = {}
    for k, name in enumerate(self.zNames()):
        zDict[name] = z[k]

    return (xDotDict, zDict)
V0 = ca.vertcat([pl.ones(3),
                 pl.zeros(N),
                 ydata_noise[0, :].T])

sol = nlpsolver(x0=V0)

p_est_single_shooting = sol["x"][:3]

tstart_Sigma_p = time()

J_s = ca.jacobian(r, V)
F_s = ca.mul(J_s.T, J_s)

beta = (ca.mul(r.T, r) / (r.size() - V.size()))
Sigma_p_s = beta * ca.solve(F_s, ca.MX.eye(F_s.shape[0]), "csparse")

beta_fcn = ca.MXFunction("beta_fcn", [V], [beta])
print beta_fcn([sol["x"]])[0]

Sigma_p_s_fcn = ca.MXFunction("Sigma_p_s_fcn", [V], [Sigma_p_s])

Cov_p = Sigma_p_s_fcn([sol["x"]])[0][:3, :3]

tend_Sigma_p = time()
def compute_covariance_matrix(self):
    r'''
    This function computes the covariance matrix of the estimated parameters
    from the inverse of the KKT matrix for the parameter estimation problem.
    This allows then for statements on the quality of the values of the
    estimated parameters.

    For efficiency, only the inverse of the relevant part of the matrix is
    computed using the Schur complement.

    A more detailed description of this function will follow in future
    versions.
    '''
    intro.pecas_intro()
    print('\n' + 20 * '-' + ' PECas covariance matrix computation ' + 21 * '-')
    print('''
Computing the covariance matrix for the estimated parameters,
this might take some time ...
''')

    self.tstart_cov_computation = time.time()

    try:
        N1 = ca.MX(self.Vars.shape[0] - self.w.shape[0],
                   self.w.shape[0])
        N2 = ca.MX(self.Vars.shape[0] - self.w.shape[0],
                   self.Vars.shape[0] - self.w.shape[0])
        hess = ca.blockcat([[N2, N1], [N1.T, ca.diag(self.w)]])
        # hess = hess + 1e-10 * ca.diag(self.Vars)

        # J2 can be re-used from parameter estimation, right?
        J2 = ca.jacobian(self.g, self.Vars)

        kkt = ca.blockcat(
            [[hess, J2.T],
             [J2, ca.MX(self.g.size1(), self.g.size1())]])

        B1 = kkt[:self.pesetup.np, :self.pesetup.np]
        E = kkt[self.pesetup.np:, :self.pesetup.np]
        D = kkt[self.pesetup.np:, self.pesetup.np:]

        Dinv = ca.solve(D, E, "csparse")
        F11 = B1 - ca.mul([E.T, Dinv])

        self.fbeta = ca.MXFunction("fbeta", [self.Vars],
            [ca.mul([self.R.T, self.R]) /
             (self.yN.size + self.g.size1() - self.Vars.size())])
        [self.beta] = self.fbeta([self.Varshat])

        self.fcovp = ca.MXFunction("fcovp", [self.Vars],
            [self.beta * ca.solve(F11, ca.MX.eye(F11.size1()))])
        [self.Covp] = self.fcovp([self.Varshat])

        print('''Covariance matrix computation finished, run show_results() to visualize.''')

    except AttributeError as err:
        errmsg = '''
You must execute run_parameter_estimation() first before the covariance
matrix for the estimated parameters can be computed.
'''
        raise AttributeError(errmsg)

    finally:
        self.tend_cov_computation = time.time()
        self.duration_cov_computation = self.tend_cov_computation - \
            self.tstart_cov_computation
def codgen_model(self, model, opts):
    # from casadi import *  # syntax valid only for the entire module
    import casadi

    # x
    if model.x is None:
        x = casadi.SX.sym('x', 0, 1)
    else:
        x = model.x
    # xdot
    if model.xdot is None:
        xdot = casadi.SX.sym('xdot', 0, 1)
    else:
        xdot = model.xdot
    # u
    if model.u is None:
        u = casadi.SX.sym('u', 0, 1)
    else:
        u = model.u
    # z
    if model.z is None:
        z = casadi.SX.sym('z', 0, 1)
    else:
        z = model.z

    # fun
    fun = model.ode_expr

    # sizes
    nx = model.nx
    nu = model.nu
    nz = model.nz

    # define functions & generate C code
    casadi_opts = dict(casadi_int='int', casadi_real='double')
    c_sources = ' '

    if opts.scheme == 'erk':
        if opts.sens_forw == 'false':
            fun_name = 'expl_ode_fun'
            casadi_fun = casadi.Function(fun_name, [x, u], [fun])
            casadi_fun.generate(casadi_opts)
            c_sources = c_sources + fun_name + '.c '
        else:
            fun_name = 'expl_vde_for'
            Sx = casadi.SX.sym('Sx', nx, nx)
            Su = casadi.SX.sym('Su', nx, nu)
            vde_x = casadi.jtimes(fun, x, Sx)
            vde_u = casadi.jacobian(fun, u) + casadi.jtimes(fun, x, Su)
            casadi_fun = casadi.Function(fun_name, [x, Sx, Su, u], [fun, vde_x, vde_u])
            casadi_fun.generate(casadi_opts)
            c_sources = c_sources + fun_name + '.c '

    elif opts.scheme == 'irk':
        fun_name = 'impl_ode_fun'
        casadi_fun = casadi.Function(fun_name, [x, xdot, u, z], [fun])
        casadi_fun.generate(casadi_opts)
        c_sources = c_sources + fun_name + '.c '

        fun_name = 'impl_ode_fun_jac_x_xdot_z'
        jac_x = casadi.jacobian(fun, x)
        jac_xdot = casadi.jacobian(fun, xdot)
        jac_z = casadi.jacobian(fun, z)
        casadi_fun = casadi.Function(fun_name, [x, xdot, u, z], [fun, jac_x, jac_xdot, jac_z])
        casadi_fun.generate(casadi_opts)
        c_sources = c_sources + fun_name + '.c '

        if opts.sens_forw == 'true':
            fun_name = 'impl_ode_jac_x_xdot_u_z'
            jac_x = casadi.jacobian(fun, x)
            jac_xdot = casadi.jacobian(fun, xdot)
            jac_u = casadi.jacobian(fun, u)
            jac_z = casadi.jacobian(fun, z)
            casadi_fun = casadi.Function(fun_name, [x, xdot, u, z], [jac_x, jac_xdot, jac_u, jac_z])
            casadi_fun.generate(casadi_opts)
            c_sources = c_sources + fun_name + '.c '

    # create model library
    lib_name = model.model_name
    # lib_name = lib_name + '_' + str(id(self))
    if opts.scheme == 'erk':
        lib_name = lib_name + '_erk'
    elif opts.scheme == 'irk':
        lib_name = lib_name + '_irk'
    if opts.sens_forw == 'false':
        lib_name = lib_name + '_0'
    else:
        lib_name = lib_name + '_1'
    lib_name = lib_name + '_' + str(model.ode_expr_hash)
    lib_name = lib_name + '.so'
    system('gcc -fPIC -shared ' + c_sources + ' -o ' + lib_name)
def jacobian(a, b):
    return ca.jacobian(a, b)
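# --- Hedged usage note (not part of the original source): ca.jacobian of an m-vector
# expression with respect to an n-vector symbol returns an m-by-n matrix expression.
# The symbols and expression below are purely illustrative.
import casadi as ca

_x = ca.SX.sym('x', 2)
_e = ca.vertcat(_x[0] * _x[1], ca.sin(_x[0]))
print(jacobian(_e, _x))   # 2x2 SX, roughly [[x_1, x_0], [cos(x_0), 0]]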
def total_derivative(M, q, dq):
    J = casadi.jacobian(M, q)
    dM = casadi.mul(J, dq).reshape(M.shape)
    return dM
def makeSolver(self, endTime, traj=None):
    # make sure all bounds are set
    (xMissing, pMissing) = self._boundMap.getMissing()
    msg = []
    for name in xMissing:
        msg.append("you forgot to set a bound on \"" + name + "\" at timesteps: " + str(xMissing[name]))
    for name in pMissing:
        msg.append("you forgot to set a bound on \"" + name + "\"")
    if len(msg) > 0:
        raise ValueError('\n'.join(msg))

    # constraints:
    g = self._constraints.getG()
    glb = self._constraints.getLb()
    gub = self._constraints.getUb()

    gDyn = self._setupDynamicsConstraints(endTime, traj)
    gDynLb = gDynUb = [C.DMatrix.zeros(gg.shape) for gg in gDyn]

    g = C.veccat([g] + gDyn)
    glb = C.veccat([glb] + gDynLb)
    gub = C.veccat([gub] + gDynUb)

    self.glb = glb
    self.gub = gub

    # design vars
    V = self._dvMap.vectorize()

    # gradient of arbitraryObj
    if hasattr(self, '_obj'):
        arbitraryObj = self._obj
    else:
        arbitraryObj = 0
    gradF = C.gradient(arbitraryObj, V)

    # hessian of lagrangian:
    Js = [C.jacobian(gnf, V) for gnf in self._gaussNewtonObjF]
    gradFgns = [C.mul(J.T, F) for (F, J) in zip(self._gaussNewtonObjF, Js)]
    gaussNewtonHess = sum([C.mul(J.T, J) for J in Js])
    hessL = gaussNewtonHess + C.jacobian(gradF, V)

    gradF += sum(gradFgns)

    # equality/inequality constraint jacobian
    gfcn = C.MXFunction([V, self._U], [g])
    gfcn.init()
    jacobG = gfcn.jacobian(0, 0)
    jacobG.init()

    # function which generates everything needed
    f = sum([f_ * f_ for f_ in self._gaussNewtonObjF])
    if hasattr(self, '_obj'):
        f += self._obj

    self.masterFun = C.MXFunction([V, self._U], [hessL, gradF, g, jacobG.call([V, self._U])[0], f])
    self.masterFun.init()

    # self.qp = C.CplexSolver(hessL.sparsity(), jacobG.output(0).sparsity())
    self.qp = C.NLPQPSolver(hessL.sparsity(), jacobG.output(0).sparsity())
    self.qp.setOption('nlp_solver', C.IpoptSolver)
    self.qp.setOption('nlp_solver_options', {'print_level': 0, 'print_time': False})
    self.qp.init()
def makeSolver(self):
    # make sure all bounds are set
    (xuMissing, pMissing) = self._boundMap.getMissing()
    msg = []
    for name in xuMissing:
        msg.append("you forgot to set a bound on \"" + name + "\" at timesteps: " + str(xuMissing[name]))
    for name in pMissing:
        msg.append("you forgot to set a bound on \"" + name + "\"")
    if len(msg) > 0:
        raise ValueError('\n'.join(msg))

    # constraints:
    constraints = self._constraints._g
    constraintLbgs = self._constraints._glb
    constraintUbgs = self._constraints._gub

    g = [self._setupDynamicsConstraints()]
    g = []
    h = []
    hlbs = []
    hubs = []
    for k in range(len(constraints)):
        lb = constraintLbgs[k]
        ub = constraintUbgs[k]
        if all(lb == ub):
            g.append(constraints[k] - lb)  # constrain to be zero
        else:
            h.append(constraints[k])
            hlbs.append(lb)
            hubs.append(ub)
    g = C.veccat(g)
    h = C.veccat(h)
    hlbs = C.veccat(hlbs)
    hubs = C.veccat(hubs)

    # design vars
    V = self._dvMap.vectorize()

    # gradient of arbitraryObj
    if hasattr(self, '_obj'):
        arbitraryObj = self._obj
    else:
        arbitraryObj = 0
    gradF = C.gradient(arbitraryObj, V)

    # hessian of lagrangian:
    J = 0
    for gnf in self._gaussNewtonObjF:
        J += C.jacobian(gnf, V)
    hessL = C.mul(J.T, J) + C.jacobian(gradF, V)

    # equality constraint jacobian
    jacobG = C.jacobian(g, V)

    # inequality constraint jacobian
    jacobH = C.jacobian(h, V)

    # function which generates everything needed
    masterFun = C.MXFunction([V], [hessL, gradF, g, jacobG, h, jacobH])
    masterFun.init()

    class JorisError(Exception):
        pass
    raise JorisError('JORIS, please read the following comment')
def detectLinearSubsystems(dae):
    f = dae.getResidual()
    xdot = C.veccat([dae.ddt(name) for name in dae.xNames()])
    x = dae.xVec()
    z = dae.zVec()
    p = dae.pVec()
    u = dae.uVec()
    nx = x.size()
    nz = z.size()
    nup = u.size() + p.size()
    inputs = C.veccat([x, z, u, p, xdot])

    # take jacobian and find which entries are constant, zero, or nonzero
    jac = C.jacobian(f, inputs)

    def qualifyM(m):
        M = C.IMatrix.zeros(m.size1(), m.size2())
        for i in range(m.size1()):
            for j in range(m.size2()):
                M[i, j] = qualify(m[i, j].toScalar())
        return M

    def qualify(e):
        if e.isZero():
            return 0
        f = C.SXFunction([p], [e])
        f.init()
        if len(f.getFree()) > 0:
            return 2
        else:
            return 1

    MA = qualifyM(jac[:, :nx])
    MZ = qualifyM(jac[:, nx:nx+nz])
    MU = qualifyM(jac[:, nx+nz:nx+nz+nup])
    MC = qualifyM(jac[:, nx+nz+nup:])
    M = qualifyM(jac)

    # which equations have nonlinearity
    fi_nonlinear = set()
    fi_linear = set()
    for i in range(f.shape[0]):
        if any(M[i, :] > 1) or any(MZ[i, :] > 0):
            fi_nonlinear.add(i)
        else:
            fi_linear.add(i)

    # which variables are in the linear set
    xj_linear = set()
    for i in fi_linear:
        for j in range(nx):
            if MA[i, j] == 2 or MC[i, j] == 2:
                raise Exception('the "impossible" happened')
            # if MC[i,j] == 1 or MA[i,j] == 1:
            if MC[i, j] == 1:  # poor man's C1 square constraint
                xj_linear.add(j)

    # which variables are in the nonlinear set
    xj_nonlinear = set()
    for i in fi_nonlinear:
        for j in range(nx):
            if MA[i, j] >= 1 or MC[i, j] >= 1:
                xj_nonlinear.add(j)

    x23_candidates = xj_nonlinear - xj_linear
    x1_candidates = set(range(nx)) - x23_candidates

    # kick out linear variables which depend on nonlinear ones
    changed = True
    niters = 0
    while changed == True:
        niters += 1
        changed = False
        badRows = set()
        for i in fi_linear:
            if any(MZ[i, :] > 0):
                raise Exception('the "impossible" happened')
            for j in x23_candidates:
                if MA[i, j] > 0 or MC[i, j] > 0:
                    badRows.add(i)
            # oh shit, a tainted row, blow away everything here
            if i in badRows:
                removeUs = set()
                for j in x1_candidates:
                    if MA[i, j] > 0 or MC[i, j] > 0:
                        changed = True
                        removeUs.add(j)
                for j in removeUs:
                    x1_candidates.remove(j)
                    x23_candidates.add(j)
        for i in badRows:
            fi_nonlinear.add(i)
            fi_linear.remove(i)

    f1 = fi_linear
    f23 = fi_nonlinear
    x1 = x1_candidates
    x23 = x23_candidates
    print "finished in", niters, "iterations"
    print "LOL HERE IS WHERE WE CHECK IF C1 IS SQUARE (yolo)"

    # separate x23 into x2 and x3
    x3_candidates = set(x23)
    for i in f23:
        not_x3 = set()
        for j in x3_candidates:
            if MC[i, j] == 2 or MA[i, j] == 2:
                not_x3.add(j)
        for j in not_x3:
            x3_candidates.remove(j)
    x3 = x3_candidates
    x2 = x23 - x3

    # separate f23 into f2 and f3
    not_f3 = set()
    for i in f23:
        for j in x3:
            if MA[i, j] > 0 or MC[i, j] > 0:
                not_f3.add(i)
    f2 = f23 - not_f3
    f3 = f23 - f2
    print "LOL HERE IS WHERE WE CHECK IF C3 IS SQUARE (yolo)"

    print "x1", [dae.xNames()[j] for j in x1]
    print "x2", [dae.xNames()[j] for j in x2]
    print "x3", [dae.xNames()[j] for j in x3]
    print "f1", f1
    print "f2", f2
    print "f3", f3
f_w = -CD*speed*v_w

# Generalized aerodynamic force
dxdq = casadi.vertcat([r*-s_theta, r*c_theta])
f_gen = mul(dxdq.T, f_w)

# Formulate the Lagrangian
KE = 0.5*m*vdotv
PE = 0
L = KE - PE

####################
# Derive the implicit ODE (DAE-like) form of the equations of motion
####################
dLdq = jacobian(L, q)    # d/d(q) L. Contains q,dq. 1xlen(q)
dLddq = jacobian(L, dq)  # d/d(dq) L. Contains q,dq. 1xlen(q)

q_and_dq = q[:]
q_and_dq.append(dq)      # [q; dq]
dq_and_ddq = dq[:]
dq_and_ddq.append(ddq)   # [dq; ddq]

#LHS1 = total_derivative(dLddq,q_and_dq,dq_and_ddq)  # d/dt dL/d(dq)
J_temp = jacobian(dLddq, q_and_dq)
LHS1_ = mul(J_temp, dq_and_ddq).reshape(dLddq.shape)
LHS1 = jacobianTimesVector(dLddq.T, q_and_dq, dq_and_ddq).reshape(dLddq.shape)
LHS = (LHS1 - dLdq).T
RHS = f_gen
ll, (p_1, p_2, h_1, h_2) = loglikilihood(r, params1, params2)

if not asVals:
    with Timer('subs model'):
        ll = params1.subsForModel(ll)
        #ll = params2.subsForModel(ll)

    params = casadi.vertcat(params1.vertcat)  #, params2.vertcat)
    param_vals = casadi.vertcat(params1.vertcatvals)  #, params2.vertcatvals)

    with Timer('subs ll'):
        print casadi.substitute(ll, params, param_vals)

    #with Timer('calc hess'):
    #    hess = casadi.hessian(ll, params)
    #with Timer('subs hess'):
    #    hessret = casadi.substitute(hess[0], params, param_vals)
    #    hessret2 = casadi.substitute(hess[1], params, param_vals)

    with Timer('calc jacob'):
        jacob = casadi.jacobian(ll, params)
    with Timer('subs jacob'):
        jacobret = casadi.substitute(jacob, params, param_vals)

    #import pdb; pdb.set_trace()

    bounds = []
    paramsFlat = [params[x] for x in xrange(params.shape[0])]
    for count, value in enumerate(paramsFlat):
        bounds.append(params1.bound(value))

    def optim(x):
        #for x_i, value in zip(x, paramsFlat):
        #    print '%s: %s' % (x_i, value)
        ret = casadi.substitute(ll, params, casadi.vertcat(*x))
        return float(ret)
rhs = cat.struct_SX(state)
rhs['x'] = control['v'] * ca.cos(state['phi'])
rhs['y'] = control['v'] * ca.sin(state['phi'])
rhs['phi'] = control['w']
f = ca.SXFunction('Continuous dynamics', [state, control], [rhs])

# Discrete dynamics
state_next = state + dt_sym * f([state, control])[0]
op = {'input_scheme': ['state', 'control', 'dt'],
      'output_scheme': ['state_next']}
F = ca.SXFunction('Discrete dynamics', [state, control, dt_sym], [state_next], op)
Fj_x = F.jacobian('state')
Fj_u = F.jacobian('control')

F_xx = ca.SXFunction('F_xx', [state, control, dt_sym],
                     [ca.jacobian(F.jac('state')[i, :].T, state) for i in range(nx)])
F_uu = ca.SXFunction('F_uu', [state, control, dt_sym],
                     [ca.jacobian(F.jac('control')[i, :].T, control) for i in range(nx)])
F_ux = ca.SXFunction('F_ux', [state, control, dt_sym],
                     [ca.jacobian(F.jac('control')[i, :].T, state) for i in range(nx)])

# Cost functions
Qf = ca.diagcat([1., 1., 0.])
final_cost = 0.5 * ca.mul([state.cat.T, Qf, state.cat])
op = {'input_scheme': ['state'],
      'output_scheme': ['cost']}
lf = ca.SXFunction('Final cost', [state], [final_cost], op)