def momentum_correction(options, generalized_coordinates, system_variables, node_masses, outputs, architecture):
    """Compute the momentum correction for the translational Lagrangian dynamics of an open system.

    Here the system is "open" because the main tether mass changes in time. During reel-out,
    momentum is injected into the system; during reel-in, momentum is extracted. It is assumed
    that the tether mass is concentrated at the main tether node.

    See "Lagrangian Dynamics for Open Systems", R. L. Greene and J. J. Matese 1981,
    Eur. J. Phys. 2, 103.

    @return: lagrangian_momentum_correction - correction term that can be added directly to the
        rhs of the translational dynamics
    """

    # initialize
    xgcdot = generalized_coordinates['scaled']['xgcdot'].cat
    lagrangian_momentum_correction = cas.DM.zeros(xgcdot.shape)

    use_wound_tether = options['tether']['use_wound_tether']
    if not use_wound_tether:

        for node in range(1, architecture.number_of_nodes):
            label = str(node) + str(architecture.parent_map[node])
            mass = node_masses['m' + label]
            mass_flow = tools.time_derivative(mass, system_variables, architecture)

            # velocity of the mass particles leaving the system
            velocity = system_variables['SI']['xd']['dq' + label]

            # see formula in reference
            lagrangian_momentum_correction += mass_flow * cas.mtimes(
                velocity.T, cas.jacobian(velocity, xgcdot)).T

    return lagrangian_momentum_correction
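# ----------------------------------------------------------------------
# Illustrative sketch (not part of the original module): the Greene & Matese
# correction term above, written out for a single point mass with velocity dq
# and mass flow mdot. For this trivial case jacobian(dq, dq) is the identity,
# so the correction reduces to mdot * dq, i.e. the momentum carried by the
# mass entering or leaving the system. All names below are local to the sketch.
def _sketch_momentum_correction_single_mass():
    import casadi as ca
    dq = ca.SX.sym('dq', 3)       # velocity of the (open) point mass
    mdot = ca.SX.sym('mdot')      # mass flow in/out of the system
    correction = mdot * ca.mtimes(dq.T, ca.jacobian(dq, dq)).T
    # jacobian(dq, dq) is the 3x3 identity, so correction == mdot * dq
    return ca.Function('sketch_correction', [dq, mdot], [correction])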
def get_induced_velocity_at_kite(model_options, variables, parameters, architecture, kite, outputs):

    lin_params = parameters['lin']

    # build a symbolic copy of the model variables to linearize around
    var_sym = {}
    var_sym_cat = []
    var_actual_cat = []
    for var_type in variables.keys():
        var_sym[var_type] = variables[var_type](cas.SX.sym(var_type, variables[var_type].cat.shape))
        var_sym_cat = cas.vertcat(var_sym_cat, var_sym[var_type].cat)
        var_actual_cat = cas.vertcat(var_actual_cat, variables[var_type].cat)

    columnized_list = outputs['vortex']['filament_list']
    filament_list = vortex_filament_list.decolumnize(model_options, architecture, columnized_list)
    uind_sym = vortex_flow.get_induced_velocity_at_kite(model_options, filament_list, variables, architecture, kite)

    jac_sym = cas.jacobian(uind_sym, var_sym_cat)

    uind_fun = cas.Function('uind_fun', [var_sym_cat], [uind_sym])
    jac_fun = cas.Function('jac_fun', [var_sym_cat], [jac_sym])

    # evaluate the induced velocity and its jacobian at the linearization point
    slope = jac_fun(lin_params)
    const = uind_fun(lin_params)

    uind_lin = cas.mtimes(slope, var_actual_cat) + const

    return uind_lin
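# ----------------------------------------------------------------------
# Illustrative sketch (not part of the original module): the jacobian-based
# linearization pattern used above, on a generic casadi expression. A standard
# first-order (Taylor) model around a linearization point x0 is
# f(x0) + J(x0) (x - x0). All names below are local to the sketch.
def _sketch_first_order_linearization():
    import casadi as ca
    x = ca.SX.sym('x', 2)
    f = ca.vertcat(x[0] * x[1], ca.sin(x[0]))      # nonlinear stand-in for the induced velocity
    f_fun = ca.Function('f_fun', [x], [f])
    jac_fun = ca.Function('jac_fun', [x], [ca.jacobian(f, x)])
    x0 = ca.DM([1.0, 0.5])                         # linearization point
    x_act = ca.SX.sym('x_act', 2)                  # actual (symbolic) variables
    f_lin = f_fun(x0) + ca.mtimes(jac_fun(x0), x_act - x0)
    return ca.Function('f_lin_fun', [x_act], [f_lin])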
def time_derivative(expr, variables, architecture):

    # notice that the chain rule
    # df/dt = partial f / partial t
    #       + (partial f / partial x1)(partial x1 / partial t)
    #       + (partial f / partial x2)(partial x2 / partial t) + ...
    # does not care about the scaling of the component functions, as long as
    # the units of (partial xi) cancel.
    # it is assumed here that (partial f / partial t) = 0.

    vars_scaled = variables['scaled']
    deriv = 0.

    # (partial f / partial xi) for variables with trivial derivatives
    deriv_vars = struct_op.subkeys(vars_scaled, 'xddot')
    for deriv_name in deriv_vars:

        deriv_type = struct_op.get_variable_type(variables['SI'], deriv_name)

        var_name = deriv_name[1:]
        var_type = struct_op.get_variable_type(variables['SI'], var_name)

        q_sym = vars_scaled[var_type, var_name]
        dq_sym = vars_scaled[deriv_type, deriv_name]
        deriv += cas.mtimes(cas.jacobian(expr, q_sym), dq_sym)

    # (partial f / partial xi) for kite rotation matrices
    kite_nodes = architecture.kite_nodes
    for kite in kite_nodes:
        parent = architecture.parent_map[kite]

        r_name = 'r{}{}'.format(kite, parent)
        kite_has_6dof = r_name in struct_op.subkeys(vars_scaled, 'xd')
        if kite_has_6dof:
            r_kite = vars_scaled['xd', r_name]
            dcm_kite = cas.reshape(r_kite, (3, 3))

            omega = vars_scaled['xd', 'omega{}{}'.format(kite, parent)]

            dexpr_dr = cas.jacobian(expr, r_kite)
            # time derivative of the flattened rotation matrix, driven by the angular velocity
            dr_dt = cas.reshape(cas.mtimes(vect_op.skew(omega), cas.inv(dcm_kite.T)), (9, 1))
            deriv += cas.mtimes(dexpr_dr, dr_dt)

    return deriv
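# ----------------------------------------------------------------------
# Illustrative sketch (not part of the original module): the chain rule used
# above, on a scalar function of a single coordinate:
# df/dt = (partial f / partial q) * dq/dt. All names below are local.
def _sketch_chain_rule_time_derivative():
    import casadi as ca
    q = ca.SX.sym('q', 3)        # coordinate
    dq = ca.SX.sym('dq', 3)      # its time derivative
    f = ca.dot(q, q)             # example expression, f = |q|^2
    df_dt = ca.mtimes(ca.jacobian(f, q), dq)    # expected: 2 * q^T dq
    return ca.Function('df_dt_fun', [q, dq], [df_dt])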
def __poly_coeffs(self):
    """Compute coefficients of interpolating polynomials and their derivatives
    """

    # discretization info
    nk = self.__n_k
    d = self.__d

    # choose collocation points
    tau_root = cas.vertcat(0.0, cas.collocation_points(d, self.__scheme))

    # coefficients of the collocation equation
    coeff_collocation = np.zeros((d + 1, d + 1))

    # coefficients of the continuity equation
    coeff_continuity = np.zeros(d + 1)

    # dimensionless time inside one control interval
    tau = cas.SX.sym('tau')

    # all collocation time points
    t = np.zeros((nk, d + 1))
    for k in range(nk):
        for j in range(d + 1):
            t[k, j] = (k + tau_root[j])

    # for all collocation points
    ls = []
    for j in range(d + 1):

        # construct lagrange polynomials to get the polynomial basis at the collocation point
        l = 1
        for r in range(d + 1):
            if r != j:
                l *= (tau - tau_root[r]) / (tau_root[j] - tau_root[r])
        lfcn = cas.Function('lfcn', [tau], [l])
        ls = cas.vertcat(ls, l)

        # evaluate the polynomial at the final time to get the coefficients of the continuity equation
        coeff_continuity[j] = lfcn([1.0])

        # evaluate the time derivative of the polynomial at all collocation points to get the
        # coefficients of the collocation equation
        tfcn = cas.Function('lfcntan', [tau], [cas.jacobian(l, tau)])
        for r in range(d + 1):
            coeff_collocation[j][r] = tfcn(tau_root[r])

    # interpolating function for all polynomials
    lfcns = cas.Function('lfcns', [tau], [ls])

    self.__coeff_continuity = coeff_continuity
    self.__coeff_collocation = coeff_collocation
    self.__coeff_fun = lfcns

    return None
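# ----------------------------------------------------------------------
# Illustrative sketch (not part of the original module): the Lagrange basis
# construction above, for d = 2 Radau points, together with a property the
# collocation scheme relies on: the basis polynomials sum to 1 at any tau
# (and l_j(tau_root[r]) = delta_jr by construction). All names are local.
def _sketch_lagrange_basis(d=2, scheme='radau'):
    import casadi as ca
    tau_root = [0.0] + ca.collocation_points(d, scheme)
    tau = ca.SX.sym('tau')
    basis = []
    for j in range(d + 1):
        l = 1
        for r in range(d + 1):
            if r != j:
                l *= (tau - tau_root[r]) / (tau_root[j] - tau_root[r])
        basis.append(l)
    basis_fun = ca.Function('basis_fun', [tau], [ca.vertcat(*basis)])
    # partition of unity: the basis values sum to 1 at any tau, e.g. tau = 0.3
    assert abs(float(ca.sum1(basis_fun(0.3))) - 1.0) < 1e-10
    return basis_fun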
def create_constraint_outputs(g_list, g_bounds, g_struct, V, P):

    g = g_struct(cas.vertcat(*g_list))
    g_fun = cas.Function('g_fun', [V, P], [g.cat])
    g_jacobian_fun = cas.Function('g_jacobian_fun', [V, P], [g.cat, cas.jacobian(g.cat, V.cat)])

    g_bounds['lb'] = cas.vertcat(*g_bounds['lb'])
    g_bounds['ub'] = cas.vertcat(*g_bounds['ub'])

    return g, g_fun, g_jacobian_fun, g_bounds
def jacobian_dcm(expr, xd_si, variables_scaled, kite, parent):
    """ Differentiate an expression w.r.t. the kite direction cosine matrix"""
    dcm_si = xd_si['r{}{}'.format(kite, parent)]
    dcm_scaled = variables_scaled['xd', 'r{}{}'.format(kite, parent)]
    jac_dcm = rotation(
        cas.reshape(dcm_si, (3, 3)),
        cas.reshape(cas.jacobian(expr, dcm_scaled), (3, 3))
    ).T
    return jac_dcm
def sosc_check(health_solver_options, nlp, solution, arg):

    logging.debug('sosc check...')

    V = nlp.V
    P = nlp.P

    V_vals = V(solution['x'])
    lambda_g_vals = solution['lam_g']
    lambda_x_vals = solution['lam_x']
    p_fix_num = nlp.P(arg['p'])

    logging.debug('get constraints...')

    [stacked_cstr_fun, stacked_lam_fun, stacked_labels] = collect_equality_and_active_inequality_constraints(
        health_solver_options, nlp, solution, arg)
    relevant_constraints = stacked_cstr_fun(V, P, arg['lbx'], arg['ubx'])

    logging.debug('get jacobian...')

    linearization = cas.jacobian(relevant_constraints, V)
    linearization_fun = cas.Function('linearization_fun', [V, P], [linearization])
    linearization_eval = np.array(linearization_fun(V_vals, p_fix_num))

    logging.debug('find the null space...')

    null = vect_op.null(linearization_eval, health_solver_options['sosc']['reduced_hessian_null_tol'])

    logging.debug('make lagrangian')

    [lagr_fun, lagr_jacobian_fun, lagr_hessian_fun] = generate_lagrangian(health_solver_options, nlp, arg, solution)
    lagr_hessian = lagr_hessian_fun(V_vals, p_fix_num, lambda_g_vals, lambda_x_vals)

    logging.debug('get reduced hessian')

    reduced_hessian = cas.mtimes(cas.mtimes(null.T, lagr_hessian), null)

    logging.debug('rank check on reduced hessian')

    full_rank_check(health_solver_options, reduced_hessian, 'reduced hessian', 'SOSC')
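# ----------------------------------------------------------------------
# Illustrative sketch (not part of the original module): the reduced-Hessian
# test behind the SOSC check above, on small dense numpy matrices. Z spans the
# null space of the (equality + active inequality) constraint jacobian A, and
# second-order sufficiency requires Z^T H Z to be positive definite.
# All names below are local to the sketch.
def _sketch_reduced_hessian_check():
    import numpy as np
    H = np.diag([4.0, 1.0, -2.0])          # Hessian of the Lagrangian (stand-in)
    A = np.array([[0.0, 0.0, 1.0]])        # jacobian of the active constraints
    # null-space basis of A from the SVD (right singular vectors beyond rank(A))
    _, _, vt = np.linalg.svd(A)
    Z = vt[np.linalg.matrix_rank(A):].T
    reduced_hessian = Z.T @ H @ Z
    eigenvalues = np.linalg.eigvalsh(reduced_hessian)
    return bool(np.all(eigenvalues > 0.0))  # True: positive curvature on ker(A)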
def build_integrator(self, options, model):

    print('Building integrator...')

    # get model variables
    variables = model.variables

    # construct the DAE variables
    x = cas.struct_SX([cas.entry('xd', expr=variables['xd'])])          # differential states
    z = cas.struct_SX([cas.entry('xddot', expr=variables['xddot']),     # state derivatives
                       cas.entry('xa', expr=variables['xa']),           # algebraic variables
                       cas.entry('xl', expr=variables['xl']),           # lifted variables
                       ])
    p = cas.struct_SX([cas.entry('u', expr=variables['u']),             # dae parameters
                       cas.entry('theta', expr=variables['theta']),
                       cas.entry('phi', expr=model.parameters)])

    # scale xddot with t_f
    time_scaled_variables = scale_xddot(variables)

    # model equations
    alg = model.dynamics(time_scaled_variables, model.parameters)
    ode = variables['xddot']

    # create dae
    dae = {'x': x.cat, 'z': z.cat, 'p': p.cat, 'alg': alg, 'ode': ode}

    # system dynamics
    f = cas.Function('f', [x, z, p], [ode, alg], ['x', 'z', 'p'], ['ode', 'alg'])

    # create integrator and rootfinder
    if cas.sprank(cas.jacobian(alg, z)) < z.cat.size()[0]:  # check dae index
        raise ValueError('jacobian of dynamics is structurally rank-deficient: DAE is not of index 1!')
    else:
        # create integrator
        I = cas.integrator('I', options['integrator']['type'], dae, {'tf': 1.0 / self.__N_sim})

        # create rootfinder
        g = cas.Function('g', [z.cat, x.cat, p.cat], [alg])
        G = cas.rootfinder('G', 'newton', g, {'linear_solver': 'csparse'})

        self.__integrator = I
        self.__rootfinder = G
        self.__variables_dict = model.variables_dict
        self.__phi = model.parameters
        self.__dae = dae
        self.__f = f
        self.__x = x
        self.__z = z
        self.__p = p
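# ----------------------------------------------------------------------
# Illustrative sketch (not part of the original module): the structural
# index-1 test used above, on a toy semi-explicit DAE
#     xdot = -x + z,   0 = z - x**2.
# The DAE is index 1 exactly when jacobian(alg, z) has full structural rank,
# which is what the integrator expects. The 'tf' option mirrors the option
# style used above; all names below are local to the sketch.
def _sketch_index1_dae_integrator():
    import casadi as ca
    x = ca.SX.sym('x')
    z = ca.SX.sym('z')
    dae = {'x': x, 'z': z, 'ode': -x + z, 'alg': z - x**2}
    assert ca.sprank(ca.jacobian(dae['alg'], z)) == z.numel()   # index-1 check
    integ = ca.integrator('integ', 'idas', dae, {'tf': 1.0})
    return integ(x0=1.0, z0=1.0)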
def licq_check(health_solver_options, nlp, solution, arg):

    V = nlp.V
    P = nlp.P
    p_fix_num = nlp.P(arg['p'])

    [stacked_cstr_fun, stacked_lam_fun, stacked_labels] = collect_equality_and_active_inequality_constraints(
        health_solver_options, nlp, solution, arg)
    relevant_constraints = stacked_cstr_fun(V, P, arg['lbx'], arg['ubx'])

    cstr_block = cas.jacobian(relevant_constraints, V)
    cstr_block_fun = cas.Function('cstr_block_fun', [V, P], [cstr_block])
    cstr_block_eval = np.array(cstr_block_fun(V(solution['x']), p_fix_num))

    full_rank_check(health_solver_options, cstr_block_eval, 'constraint block', 'LICQ')
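# ----------------------------------------------------------------------
# Illustrative sketch (not part of the original module): the rank test behind
# the LICQ check above. LICQ holds at a point when the jacobian of the
# equality and active inequality constraints has full row rank.
# All names below are local to the sketch.
def _sketch_licq_rank_check():
    import numpy as np
    A = np.array([[1.0, 0.0, 0.0],
                  [0.0, 1.0, 0.0]])   # two linearly independent constraint gradients
    return np.linalg.matrix_rank(A) == A.shape[0]   # True: LICQ holds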
def get_jacobian_of_eq_and_active_ineq_constraints(nlp, solution, arg, cstr_fun):

    awelogger.logger.info('compute jacobian of equality and active inequality constraints...')

    V = nlp.V
    P = nlp.P
    p_fix_num = nlp.P(arg['p'])

    relevant_constraints = cstr_fun(V, P, arg['lbx'], arg['ubx'])

    cstr_jacobian = cas.jacobian(relevant_constraints, V)
    cstr_jacobian_fun = cas.Function('cstr_jacobian_fun', [V, P], [cstr_jacobian])
    cstr_jacobian_eval = np.array(cstr_jacobian_fun(V(solution['x']), p_fix_num))

    return cstr_jacobian_eval
def __init__(self, variables, parameters, dynamics, integral_outputs_fun, param='sym'):
    """ Constructor.

    :type variables: casadi.tools.structure.ssymStruct
    :param variables: model variables

    :type parameters: casadi.tools.structure.ssymStruct
    :param parameters: model parameters

    :type dynamics: casadi.Function
    :param dynamics: fully implicit dae dynamics

    :type integral_outputs_fun: casadi.Function
    :param integral_outputs_fun: quadrature state dynamics

    :raises ValueError: if the DAE-index is higher than 1

    :rtype: None
    """

    # construct the DAE variables
    x, z, p = self.__build_dae_variables(variables, parameters, param)

    # model equations
    alg = dynamics(variables, parameters)
    ode = variables['theta', 't_f'] * variables['xddot']
    quad = variables['theta', 't_f'] * integral_outputs_fun(variables, parameters)

    # create dae dictionary
    dae = {'x': x, 'z': z, 'p': p, 'alg': alg, 'ode': ode, 'quad': quad}

    if cas.sprank(cas.jacobian(alg, z)) < z.cat.size()[0]:  # check dae index
        raise ValueError('jacobian of dynamics is structurally rank-deficient: DAE is not of index 1!')

    self.__x = x
    self.__z = z
    self.__p = p
    self.__dae = dae

    return None
def __generate_discretization(self, nlp_options, model, formulation):

    awelogger.logger.info('discretize problem... ')

    timer = time.time()
    [V, P, Xdot, Xdot_fun, ocp_cstr_list, Outputs, Outputs_fun, Integral_outputs,
     Integral_outputs_fun, time_grids, Collocation, Multiple_shooting] = discretization.discretize(
        nlp_options, model, formulation)
    self.__timings['discretization'] = time.time() - timer

    self.__V = V
    self.__P = P
    self.__Xdot = Xdot
    self.__Xdot_fun = Xdot_fun
    self.__ocp_cstr_list = ocp_cstr_list
    self.__Outputs = Outputs
    self.__Outputs_fun = Outputs_fun
    self.__Integral_outputs = Integral_outputs
    self.__Integral_outputs_fun = Integral_outputs_fun
    self.__n_k = nlp_options['n_k']
    self.__d = nlp_options['collocation']['d']
    self.__options = nlp_options
    self.__discretization = nlp_options['discretization']
    self.__time_grids = time_grids
    self.__Collocation = Collocation
    self.__Multiple_shooting = Multiple_shooting

    self.__g = ocp_cstr_list.get_expression_list('all')
    self.__g_fun = ocp_cstr_list.get_function(nlp_options, V, P, 'all')
    self.__g_jacobian_fun = cas.jacobian(self.__g, V)
    self.__g_bounds = {'lb': ocp_cstr_list.get_lb('all'),
                       'ub': ocp_cstr_list.get_ub('all')}

    return None
def make_kkt_matrix_function(health_solver_options, nlp, solution, arg):

    V = nlp.V
    P = nlp.P

    lam_x_sym = cas.MX.sym('lam_x_sym', nlp.V.shape)
    lam_g_sym = cas.MX.sym('lam_g_sym', nlp.g.shape)

    [stacked_cstr_fun, stacked_lam_fun, stacked_labels] = collect_equality_and_active_inequality_constraints(
        health_solver_options, nlp, solution, arg)
    relevant_constraints = stacked_cstr_fun(V, P, arg['lbx'], arg['ubx'])

    [lagr_fun, lagr_jacobian_fun, lagr_hessian_fun] = generate_lagrangian(health_solver_options, nlp, arg, solution)

    lagr_block = lagr_hessian_fun(V, P, lam_g_sym, lam_x_sym)
    constraint_block = cas.jacobian(relevant_constraints, V)
    zeros_block = np.zeros((constraint_block.shape[0], constraint_block.shape[0]))

    # kkt_matrix =
    # [[ nabla^2 lagr   nabla g^top   nabla hA^top ]
    #  [ nabla g        0             0            ]
    #  [ nabla hA       0             0            ]]
    # where nabla g and nabla hA are stacked together in constraint_block

    kkt_matrix = cas.horzcat(lagr_block, constraint_block.T)
    kkt_matrix = cas.vertcat(kkt_matrix, cas.horzcat(constraint_block, zeros_block))

    kkt_matrix_fun = cas.Function('kkt_matrix_fun', [V, P, lam_g_sym, lam_x_sym], [kkt_matrix])

    return kkt_matrix_fun
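# ----------------------------------------------------------------------
# Illustrative sketch (not part of the original module): the saddle-point
# structure assembled above,
#     [ H   A^T ]
#     [ A   0   ],
# where H is the Hessian of the Lagrangian and A stacks the jacobians of the
# equality and active inequality constraints. All names below are local.
def _sketch_kkt_matrix():
    import numpy as np
    H = np.diag([2.0, 2.0, 2.0])            # Hessian block (stand-in)
    A = np.array([[1.0, 1.0, 0.0],          # stacked constraint jacobian
                  [0.0, 1.0, 1.0]])
    zeros = np.zeros((A.shape[0], A.shape[0]))
    kkt = np.block([[H, A.T],
                    [A, zeros]])
    return kkt                              # 5 x 5 symmetric indefinite matrix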
def licq_check_on_equality_constraints(health_solver_options, nlp, solution, p_fix_num):

    V = nlp.V
    P = nlp.P

    [equality_constraints, eq_labels, eq_fun] = collect_equality_constraints(nlp)

    cstr_block = cas.jacobian(equality_constraints, V)
    cstr_block_fun = cas.Function('cstr_block_fun', [V, P], [cstr_block])
    cstr_block_eval = np.array(cstr_block_fun(V(solution['x']), p_fix_num))

    # interactive diagnostics: visualize the sparsity pattern and report the
    # variable with the largest constraint-jacobian entry
    vect_op.spy(cstr_block_eval)
    poss_trouble_var = np.argmax(np.max(cstr_block_eval, axis=0))
    print(poss_trouble_var)
    print(nlp.V.getCanonicalIndex(poss_trouble_var))
    print(np.max(cstr_block_eval[:, poss_trouble_var]))

    full_rank_check(health_solver_options, cstr_block_eval, 'equality constraint block', 'LICQ')
def get_dynamics(options, atmos, wind, architecture, system_variables, system_gc, parameters, outputs):

    parent_map = architecture.parent_map
    number_of_nodes = architecture.number_of_nodes

    # generalized coordinates, velocities and accelerations
    generalized_coordinates = {}
    generalized_coordinates['scaled'] = generate_generalized_coordinates(system_variables['scaled'], system_gc)
    generalized_coordinates['SI'] = generate_generalized_coordinates(system_variables['SI'], system_gc)

    # -------------------------------
    # mass distribution in the system
    # -------------------------------
    node_masses_si, outputs = mass_comp.generate_m_nodes(options, system_variables['SI'], outputs, parameters, architecture)

    # --------------------------------
    # lagrangian
    # --------------------------------
    outputs = energy_comp.energy_outputs(options, parameters, outputs, node_masses_si, system_variables['SI'], architecture)
    e_kinetic = sum(outputs['e_kinetic'][nodes] for nodes in list(outputs['e_kinetic'].keys()))
    e_potential = sum(outputs['e_potential'][nodes] for nodes in list(outputs['e_potential'].keys()))

    holonomic_constraints, outputs, g, gdot, gddot = holonomic_comp.generate_holonomic_constraints(
        architecture, outputs, system_variables, parameters, options)

    # lagrangian function
    lag = e_kinetic - e_potential - holonomic_constraints

    # --------------------------------
    # generalized forces in the system
    # --------------------------------
    f_nodes, outputs = forces_comp.generate_f_nodes(options, atmos, wind, system_variables['SI'], parameters, outputs, architecture)
    outputs = forces_comp.generate_tether_moments(options, system_variables['SI'], system_variables['scaled'],
                                                  holonomic_constraints, outputs, architecture)

    cstr_list = mdl_constraint.MdlConstraintList()

    # --------------------------------
    # translational dynamics
    # --------------------------------

    # lhs of lagrange equations
    dlagr_dqdot = cas.jacobian(lag, generalized_coordinates['scaled']['xgcdot'].cat).T
    dlagr_dqdot_dt = tools.time_derivative(dlagr_dqdot, system_variables, architecture)
    dlagr_dq = cas.jacobian(lag, generalized_coordinates['scaled']['xgc'].cat).T

    lagrangian_lhs_translation = dlagr_dqdot_dt - dlagr_dq
    lagrangian_lhs_constraints = holonomic_comp.get_constraint_lhs(g, gdot, gddot, parameters)

    # lagrangian momentum correction
    if options['tether']['use_wound_tether']:
        lagrangian_momentum_correction = 0.
    else:
        lagrangian_momentum_correction = momentum_correction(
            options, generalized_coordinates, system_variables, node_masses_si, outputs, architecture)

    # rhs of lagrange equations
    lagrangian_rhs_translation = cas.vertcat(
        *[f_nodes['f' + str(n) + str(parent_map[n])] for n in range(1, number_of_nodes)]) + \
        lagrangian_momentum_correction
    lagrangian_rhs_constraints = np.zeros(g.shape)

    # scaling
    holonomic_scaling = holonomic_comp.generate_holonomic_scaling(options, architecture, system_variables['SI'], parameters)
    node_masses_scaling = mass_comp.generate_m_nodes_scaling(options, system_variables['SI'], outputs, parameters, architecture)
    forces_scaling = node_masses_scaling * options['scaling']['other']['g'] * options['model_bounds']['acceleration']['acc_max']

    dynamics_translation = (lagrangian_lhs_translation - lagrangian_rhs_translation) / forces_scaling
    dynamics_translation_cstr = cstr_op.Constraint(expr=dynamics_translation,
                                                   cstr_type='eq',
                                                   name='dynamics_translation')
    cstr_list.append(dynamics_translation_cstr)

    dynamics_constraints = (lagrangian_lhs_constraints - lagrangian_rhs_constraints) / holonomic_scaling
    dynamics_constraint_cstr = cstr_op.Constraint(expr=dynamics_constraints,
                                                  cstr_type='eq',
                                                  name='dynamics_constraint')
    cstr_list.append(dynamics_constraint_cstr)

    # --------------------------------
    # rotational dynamics
    # --------------------------------
    kite_has_6dof = (int(options['kite_dof']) == 6)
    if kite_has_6dof:
        rotation_dynamics_cstr, outputs = generate_rotational_dynamics(
            system_variables, f_nodes, parameters, outputs, architecture)
        cstr_list.append(rotation_dynamics_cstr)

    # --------------------------------
    # trivial kinematics
    # --------------------------------
    for name in system_variables['SI']['xddot'].keys():

        name_in_xd = name in system_variables['SI']['xd'].keys()
        name_in_u = name in system_variables['SI']['u'].keys()

        if name_in_xd:
            trivial_dyn = cas.vertcat(*[
                system_variables['scaled']['xddot', name] - system_variables['scaled']['xd', name]])
        elif name_in_u:
            trivial_dyn = cas.vertcat(*[
                system_variables['scaled']['xddot', name] - system_variables['scaled']['u', name]])

        if name_in_xd or name_in_u:
            trivial_dyn_cstr = cstr_op.Constraint(expr=trivial_dyn,
                                                  cstr_type='eq',
                                                  name='trivial_' + name)
            cstr_list.append(trivial_dyn_cstr)

    return cstr_list, outputs
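# ----------------------------------------------------------------------
# Illustrative sketch (not part of the original module): the Euler-Lagrange
# construction used for the translational dynamics above,
#     d/dt (dL/dqdot) - dL/dq = F,
# written out for a single point mass in a gravity field. The time derivative
# of dL/dqdot is expanded with the same chain-rule pattern as
# tools.time_derivative. All names below are local to the sketch.
def _sketch_euler_lagrange_point_mass():
    import casadi as ca
    m, grav = 1.0, 9.81
    q = ca.SX.sym('q', 3)          # position
    dq = ca.SX.sym('dq', 3)        # velocity
    ddq = ca.SX.sym('ddq', 3)      # acceleration
    lag = 0.5 * m * ca.dot(dq, dq) - m * grav * q[2]      # L = T - V
    dlag_ddq = ca.jacobian(lag, dq).T
    # chain rule: d/dt (dL/dqdot) = (d/dq)(dL/dqdot) qdot + (d/dqdot)(dL/dqdot) qddot
    dlag_ddq_dt = ca.mtimes(ca.jacobian(dlag_ddq, q), dq) \
                + ca.mtimes(ca.jacobian(dlag_ddq, dq), ddq)
    lhs = dlag_ddq_dt - ca.jacobian(lag, q).T             # expected: m*ddq + [0, 0, m*grav]
    return ca.Function('lhs_fun', [q, dq, ddq], [lhs])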