def get_primitives(el, *args):
    """
    Out of a list of expressions, retrieve all primitive expressions.

    The result is sorted into a dictionary with the key originating from the
    'shorthand' class attribute of OptimizationObject subclasses.

    :param el: list of CasADi expressions.
    :param args: optional positional flag; accepted for backward
        compatibility but not used (the original parsed it into ``dep``
        without ever reading it).
    :return: dict mapping shorthand -> list of OptimizationObject instances.
    """
    try:
        symbols = C.symvar(C.veccat(*el))
    except Exception:
        # veccat/symvar can fail on empty or purely numeric input; treat that
        # as "no primitive symbols" (the original fell back to {} here, which
        # also iterates as empty).
        symbols = []
    syms = {}
    for v in symbols:
        vobj = OptimizationObject.mapping[hash(v)]
        # Group objects by their class shorthand.
        syms.setdefault(vobj.shorthand, []).append(vobj)
    return syms
def get_dependency(expression):
    """
    Map each primitive symbol of `expression` to the sorted row indices of
    the expression that depend on it (CasADi 3.x API).
    """
    symbols = symvar(expression)
    func = Function('f', symbols, [expression])
    dependency = col.OrderedDict()
    for position, symbol in enumerate(symbols):
        # Jacobian sparsity of output 0 w.r.t. input `position`; its
        # transposed row indices are the dependent output rows.
        sparsity = func.sparsity_jac(position, 0)
        dependency[symbol] = sorted(set(sparsity.T.row()))
    return dependency
def get_dependency(expression):
    """
    Map each primitive symbol of `expression` to the sorted indices of the
    expression entries that depend on it (legacy CasADi 2.x API).
    """
    symbols = symvar(expression)
    func = MXFunction('f', symbols, [expression])
    dependency = {}
    for position, symbol in enumerate(symbols):
        # Nonzero columns of the jacobian sparsity mark dependent entries.
        jac_sparsity = func.jacSparsity(position, 0)
        dependency[symbol] = sorted(sumRows(jac_sparsity).find())
    return dependency
def get_derivative(self, s):
    """
    Return the time derivative of the MX expression ``s``.

    New ``der(name)`` symbols are created on demand and cached in
    ``self.derivative``; they are also registered in ``self.nodes`` so later
    lookups resolve them. Returns 0 for constants, a (possibly indexed)
    derivative symbol for symbols, and a chain-rule expansion for compound
    expressions.
    """
    # Case 1: s is a constant, e.g. MX(5)
    if ca.MX(s).is_constant():
        return 0
    # Case 2: s is a symbol, e.g. MX(x)
    elif s.is_symbolic():
        if s.name() not in self.derivative:
            if len(self.for_loops) > 0 and s in self.for_loops[-1].indexed_symbols:
                # Create a new indexed symbol, referencing to the for loop index inside the vector derivative symbol.
                for_loop_symbol = self.for_loops[-1].indexed_symbols[s]
                s_without_index = self.get_mx(ast.ComponentRef(name=for_loop_symbol.tree.name))
                der_s_without_index = self.get_derivative(s_without_index)
                if ca.MX(der_s_without_index).is_symbolic():
                    return self.get_indexed_symbol(ast.ComponentRef(name=der_s_without_index.name(), indices=for_loop_symbol.tree.indices), der_s_without_index)
                else:
                    return 0
            else:
                der_s = _new_mx("der({})".format(s.name()), s.size())
                # If the derivative contains an expression (e.g. der(x + y)) this method is
                # called with MX variables that are the result of a ca.symvar call. This
                # ca.symvar call strips the _modelica_shape field from the MX variable,
                # therefore we need to find the original MX to get the modelica shape.
                der_s._modelica_shape = \
                    self.nodes[self.current_class][s.name()]._modelica_shape
                self.derivative[s.name()] = der_s
                self.nodes[self.current_class][der_s.name()] = der_s
                return der_s
        else:
            # Derivative symbol already created earlier; reuse it.
            return self.derivative[s.name()]
    # Case 3: s is an already indexed symbol, e.g. MX(x[1])
    elif s.is_op(ca.OP_GETNONZEROS) and s.dep().is_symbolic():
        slice_info = s.info()['slice']
        dep = s.dep()
        if dep.name() not in self.derivative:
            # Create the derivative of the *whole* underlying symbol, then
            # apply the same slice to it.
            der_dep = _new_mx("der({})".format(dep.name()), dep.size())
            der_dep._modelica_shape = \
                self.nodes[self.current_class][dep.name()]._modelica_shape
            self.derivative[dep.name()] = der_dep
            self.nodes[self.current_class][der_dep.name()] = der_dep
            return der_dep[slice_info['start']:slice_info['stop']:slice_info['step']]
        else:
            return self.derivative[dep.name()][slice_info['start']:slice_info['stop']:slice_info['step']]
    # Case 4: s is an expression that requires differentiation, e.g. MX(x2 * x2)
    # Need to do this sort of expansion: der(x1 * x2) = der(x1) * x2 + x1 * der(x2)
    else:
        # Differentiate expression using CasADi
        orig_deps = ca.symvar(s)
        deps = ca.vertcat(*orig_deps)
        J = ca.Function('J', [deps], [ca.jacobian(s, deps)])
        J_sparsity = J.sparsity_out(0)
        # Only recurse into dependencies that actually appear in the jacobian
        # sparsity; others contribute zero.
        der_deps = [self.get_derivative(dep) if J_sparsity.has_nz(0, j) else ca.DM.zeros(dep.size()) for j, dep in enumerate(orig_deps)]
        return ca.mtimes(J(deps), ca.vertcat(*der_deps))
def _substitute_symbols(self, expr, variables, parameters):
    """Replace each symbol in `expr` by its entry in `variables` or `parameters`."""
    # Plain numbers carry no symbols; return them untouched.
    if isinstance(expr, (int, float)):
        return expr
    for symbol in symvar(expr):
        child, name = self.symbol_dict[symbol.getName()]
        if name in child._variables:
            replacement = variables[child.label, name]
        elif name in child._parameters:
            replacement = parameters[child.label, name]
        else:
            # Neither a variable nor a parameter of this child: skip,
            # as the original implicitly did.
            continue
        expr = substitute(expr, symbol, replacement)
    return expr
def _check_for_lineq(self):
    """
    Check whether the constraints form a linear equality system.

    :return: (True, A, b) with expanded CasADi functions A(par), b(par)
        describing the system, or (False, None, None) when a constraint is
        an inequality or the jacobian depends on non-parameter symbols.
    """
    g = []
    for con in self.constraints:
        lb, ub = con[1], con[2]
        g = vertcat([g, con[0] - lb])
        if not isinstance(lb, np.ndarray):
            lb, ub = [lb], [ub]
        for k in range(len(lb)):
            if lb[k] != ub[k]:
                # Inequality constraint -> not an equality system.
                return False, None, None
    sym, jac = [], []
    for child, q_i in self.q_i.items():
        for name, ind in q_i.items():
            var = child.get_variable(name, spline=False)
            jj = jacobian(g, var)
            jac = horzcat([jac, jj[:, ind]])
            sym.append(var)
    for nghb in self.q_ij.keys():
        for child, q_ij in self.q_ij[nghb].items():
            for name, ind in q_ij.items():
                var = child.get_variable(name, spline=False)
                jj = jacobian(g, var)
                jac = horzcat([jac, jj[:, ind]])
                sym.append(var)
    # NOTE(review): this loop rebinds `sym` (previously the list of decision
    # variables), so the zeroing loop below iterates over the *last* jacobian
    # symbol only. This matches the historical behavior of both versions of
    # this method in the codebase -- confirm whether iterating the original
    # variable list was intended.
    for sym in symvar(jac):
        if sym not in self.par_i.values():
            # Jacobian depends on a non-parameter symbol -> not linear.
            return False, None, None
    par = struct_symMX(self.par_struct)
    A, b = jac, -g
    for s in sym:
        A = substitute(A, s, np.zeros(s.shape))
        b = substitute(b, s, np.zeros(s.shape))
    dep_b = [s.getName() for s in symvar(b)]
    # Bug fix: A's dependencies were previously computed from symvar(b);
    # they must come from symvar(A).
    dep_A = [s.getName() for s in symvar(A)]
    for name, sym in self.par_i.items():
        if sym.getName() in dep_b:
            b = substitute(b, sym, par[name])
        if sym.getName() in dep_A:
            A = substitute(A, sym, par[name])
    A = MXFunction('A', [par], [A]).expand()
    b = MXFunction('b', [par], [b]).expand()
    return True, A, b
def _check_for_lineq(self):
    """
    Check whether the global constraints form a linear equality system.

    :return: (True, A, b) with expanded CasADi functions A(par), b(par)
        describing the system, or (False, None, None) when a constraint is
        an inequality or the jacobian depends on non-parameter symbols.
    """
    g = []
    for con in self.global_constraints:
        lb, ub = con[1], con[2]
        g = vertcat(g, con[0] - lb)
        if not isinstance(lb, np.ndarray):
            lb, ub = [lb], [ub]
        for k, _ in enumerate(lb):
            if lb[k] != ub[k]:
                # Inequality constraint -> not an equality system.
                return False, None, None
    sym, jac = [], []
    for child, q_i in self.q_i.items():
        for name, ind in q_i.items():
            var = self.distr_problem.father.get_variables(child, name, spline=False, symbolic=True, substitute=False)
            jj = jacobian(g, var)
            jac = horzcat(jac, jj[:, ind])
            sym.append(var)
    for nghb in self.q_ij.keys():
        for child, q_ij in self.q_ij[nghb].items():
            for name, ind in q_ij.items():
                var = self.distr_problem.father.get_variables(child, name, spline=False, symbolic=True, substitute=False)
                jj = jacobian(g, var)
                jac = horzcat(jac, jj[:, ind])
                sym.append(var)
    # NOTE(review): this loop rebinds `sym` (previously the list of decision
    # variables), so the zeroing loop below iterates over the *last* jacobian
    # symbol only. This matches the historical behavior of both versions of
    # this method in the codebase -- confirm whether iterating the original
    # variable list was intended.
    for sym in symvar(jac):
        if sym not in self.par_global.values():
            # Jacobian depends on a non-parameter symbol -> not linear.
            return False, None, None
    par = struct_symMX(self.par_global_struct)
    A, b = jac, -g
    for s in sym:
        A = substitute(A, s, np.zeros(s.shape))
        b = substitute(b, s, np.zeros(s.shape))
    dep_b = [s.name() for s in symvar(b)]
    # Bug fix: A's dependencies were previously computed from symvar(b);
    # they must come from symvar(A).
    dep_A = [s.name() for s in symvar(A)]
    for name, sym in self.par_global.items():
        if sym.name() in dep_b:
            b = substitute(b, sym, par[name])
        if sym.name() in dep_A:
            A = substitute(A, sym, par[name])
    A = Function('A', [par], [A]).expand()
    b = Function('b', [par], [b]).expand()
    return True, A, b
def _evaluate_symbols(self, expression, variables, parameters):
    """Numerically evaluate `expression` using the given variable/parameter values."""
    symbols = symvar(expression)
    func = MXFunction(symbols, [expression])
    func.init()
    inputs = []
    for symbol in symbols:
        child, name = self.symbol_dict[symbol.getName()]
        # Pick the value source that declares this name; symbols that are
        # neither variables nor parameters contribute no input (as before).
        if name in child._variables:
            source = variables
        elif name in child._parameters:
            source = parameters
        else:
            continue
        inputs.append(source[child.label, name])
    return evalf(func, inputs)[0]
def get_integer(self, tree: Union[ast.Primary, ast.ComponentRef, ast.Expression, ast.Slice]) -> Union[int, ca.MX, np.ndarray]:
    """
    Evaluate an AST node that defines a symbol dimension.

    Returns an int when the value can be fully evaluated, an MX expression
    when it still depends on other symbols, or a slice for Slice nodes.
    Raises for unexpected node types.
    """
    # CasADi needs to know the dimensions of symbols at instantiation.
    # We therefore need a mechanism to evaluate expressions that define dimensions of symbols.
    if isinstance(tree, ast.Primary):
        return None if tree.value is None else int(tree.value)
    if isinstance(tree, ast.ComponentRef):
        # Resolve a reference to an Integer symbol of the current class.
        s = self.current_class.symbols[tree.name]
        assert (s.type.name == 'Integer')
        return self.get_integer(s.value)
    if isinstance(tree, ast.Expression):
        # Make sure that the expression has been converted to MX by (re)visiting the
        # relevant part of the AST.
        ast_walker = TreeWalker()
        ast_walker.walk(self, tree)
        # Obtain expression
        expr = self.get_mx(tree)
        # Obtain the symbols it depends on
        free_vars = ca.symvar(expr)
        # Find the values of the symbols
        vals = []
        for free_var in free_vars:
            if free_var.is_symbolic():
                if (len(self.for_loops) > 0) and (free_var.name() == self.for_loops[-1].name):
                    # The innermost for-loop index supplies its own value.
                    vals.append(self.for_loops[-1].index_variable)
                else:
                    vals.append(self.get_integer(self.current_class.symbols[free_var.name()].value))
        # Evaluate the expression
        F = ca.Function('get_integer', free_vars, [expr])
        ret = F.call(vals, *self.function_mode)
        if ret[0].is_constant():
            # We managed to evaluate the expression. Assume the result to be integer.
            return int(ret[0])
        else:
            # Expression depends on other symbols. Could not extract integer value.
            return ret[0]
    if isinstance(tree, ast.Slice):
        start = self.get_integer(tree.start)
        step = self.get_integer(tree.step)
        stop = self.get_integer(tree.stop)
        return slice(start, stop, step)
    else:
        raise Exception('Unexpected node type {}'.format(tree.__class__.__name__))
def exitForStatement(self, tree):
    """
    Unroll a Modelica for-statement into a list of Assignment objects.

    The loop body is compiled once into a CasADi Function and evaluated for
    all loop values via Function.map; an empty loop range yields [].
    """
    logger.debug('exitForStatement')

    f = self.for_loops.pop()
    if len(f.values) > 0:
        indexed_symbols = list(f.indexed_symbols.keys())
        args = [f.index_variable] + indexed_symbols
        expr = ca.vcat([ca.vec(self.get_mx(e.right)) for e in tree.statements])
        free_vars = ca.symvar(expr)
        # Free variables are everything the body reads that is not a loop
        # argument; they are passed through unmapped.
        arg_names = [arg.name() for arg in args]
        free_vars = [e for e in free_vars if e.name() not in arg_names]
        all_args = args + free_vars
        F = ca.Function('loop_body', all_args, [expr])
        indexed_symbols_full = []
        for k in indexed_symbols:
            s = f.indexed_symbols[k]
            orig_symbol = self.nodes[self.current_class][s.tree.name]
            indexed_symbol = orig_symbol[s.indices]
            if s.transpose:
                indexed_symbol = ca.transpose(indexed_symbol)
            indexed_symbols_full.append(indexed_symbol)
        # Map over the loop values; trailing free_vars indices are repeated
        # (not mapped) inputs.
        Fmap = F.map("map", self.map_mode, len(f.values), list(
            range(len(args), len(all_args))), [])
        res = Fmap.call([f.values] + indexed_symbols_full + free_vars)
        # Split into a list of statements
        variables = [assignment.left for statement in tree.statements for assignment in self.get_mx(statement)]
        all_assignments = []
        for i in range(len(f.values)):
            for j, variable in enumerate(variables):
                all_assignments.append(Assignment(variable, res[0][j, i].T))
        self.src[tree] = all_assignments
    else:
        self.src[tree] = []
def exitForEquation(self, tree):
    """
    Unroll a Modelica for-equation into a single MX residual expression.

    The equation body is compiled once into a CasADi Function and evaluated
    for all loop values via Function.map. Indexed symbols that are delay
    states get a freshly shaped replacement symbol. An empty loop range
    yields an empty MX.
    """
    logger.debug('exitForEquation')

    f = self.for_loops.pop()
    if len(f.values) > 0:
        indexed_symbols = list(f.indexed_symbols.keys())
        args = [f.index_variable] + indexed_symbols
        expr = ca.vcat([ca.vec(self.get_mx(e)) for e in tree.equations])
        free_vars = ca.symvar(expr)
        # Free variables are body inputs that are not loop arguments.
        arg_names = [arg.name() for arg in args]
        free_vars = [e for e in free_vars if e.name() not in arg_names]
        all_args = args + free_vars
        F = ca.Function('loop_body', all_args, [expr])

        indexed_symbols_full = []
        for k in indexed_symbols:
            s = f.indexed_symbols[k]
            indices = s.indices
            try:
                i = self.model.delay_states.index(k.name())
            except ValueError:
                orig_symbol = self.nodes[self.current_class][s.tree.name]
            else:
                # We are missing a similarly shaped delayed symbol. Make a new one with the appropriate shape.
                delay_symbol = self.model.delay_arguments[i]

                # We need to figure out the shape of the expression that
                # we are delaying. The symbols that can occur in the delay
                # expression should have been encountered before this
                # iteration of the loop. The assert statement below covers
                # this.
                delay_expr_args = free_vars + all_args[:len(indexed_symbols_full)+1]
                assert set(ca.symvar(delay_symbol.expr)).issubset(delay_expr_args)

                f_delay_expr = ca.Function('delay_expr', delay_expr_args, [delay_symbol.expr])
                f_delay_map = f_delay_expr.map("map", self.map_mode, len(f.values), list(
                    range(len(free_vars))), [])
                [res] = f_delay_map.call(free_vars + [f.values] + indexed_symbols_full)
                res = res.T

                # Make the symbol with the appropriate size, and replace the old symbol with the new one.
                orig_symbol = _new_mx(k.name(), *res.size())
                assert res.size1() == 1 or res.size2() == 1, "Slicing does not yet work with 2-D indices"
                indices = slice(None, None)

                model_input = next(x for x in self.model.inputs if x.symbol.name() == k.name())
                model_input.symbol = orig_symbol
                self.model.delay_arguments[i] = DelayArgument(res, delay_symbol.duration)

            indexed_symbol = orig_symbol[indices]
            if s.transpose:
                indexed_symbol = ca.transpose(indexed_symbol)
            indexed_symbols_full.append(indexed_symbol)

        # Map over the loop values; trailing free_vars indices are repeated
        # (not mapped) inputs.
        Fmap = F.map("map", self.map_mode, len(f.values), list(
            range(len(args), len(all_args))), [])
        res = Fmap.call([f.values] + indexed_symbols_full + free_vars)

        self.src[tree] = res[0].T
    else:
        self.src[tree] = ca.MX()
def exitExpression(self, tree):
    """
    Translate a Modelica expression AST node into a CasADi MX expression.

    Dispatches on the operator name: built-in Modelica operators/functions
    (der, fill, zeros, cat, delay, interpolation, ...) are handled
    explicitly, binary/unary operators go through OP_MAP, and anything else
    falls back to MX elementary functions or a user-defined function call.
    The result is stored in self.src[tree].
    """
    if isinstance(tree.operator, ast.ComponentRef):
        op = tree.operator.name
    else:
        op = tree.operator

    if op == '*':
        op = 'mtimes'  # .* differs from *
    if op.startswith('.'):
        op = op[1:]

    logger.debug('exitExpression')

    n_operands = len(tree.operands)
    if op == 'der':
        v = self.get_mx(tree.operands[0])
        src = self.get_derivative(v)
    elif op == '-' and n_operands == 1:
        src = -self.get_mx(tree.operands[0])
    elif op == 'not' and n_operands == 1:
        # Boolean negation encoded as if_else (short-circuit form).
        src = ca.if_else(self.get_mx(tree.operands[0]), 0, 1, True)
    elif op == 'mtimes':
        assert n_operands >= 2
        src = self.get_mx(tree.operands[0])
        for i in tree.operands[1:]:
            src = ca.mtimes(src, self.get_mx(i))
    elif op == 'transpose' and n_operands == 1:
        src = self.get_mx(tree.operands[0]).T
    elif op == 'sum' and n_operands == 1:
        v = self.get_mx(tree.operands[0])
        src = ca.sum1(v)
    elif op == 'linspace' and n_operands == 3:
        a = self.get_mx(tree.operands[0])
        b = self.get_mx(tree.operands[1])
        n_steps = self.get_integer(tree.operands[2])
        src = ca.linspace(a, b, n_steps)
    elif op == 'fill' and n_operands == 2:
        val = self.get_mx(tree.operands[0])
        n_row = self.get_integer(tree.operands[1])
        src = val * ca.DM.ones(n_row)
    elif op == 'fill' and n_operands == 3:
        val = self.get_mx(tree.operands[0])
        n_row = self.get_integer(tree.operands[1])
        n_col = self.get_integer(tree.operands[2])
        src = val * ca.DM.ones(n_row, n_col)
    elif op == 'zeros' and n_operands == 1:
        n_row = self.get_integer(tree.operands[0])
        src = ca.DM.zeros(n_row)
    elif op == 'zeros' and n_operands == 2:
        n_row = self.get_integer(tree.operands[0])
        n_col = self.get_integer(tree.operands[1])
        src = ca.DM.zeros(n_row, n_col)
    elif op == 'ones' and n_operands == 1:
        n_row = self.get_integer(tree.operands[0])
        src = ca.DM.ones(n_row)
    elif op == 'ones' and n_operands == 2:
        n_row = self.get_integer(tree.operands[0])
        n_col = self.get_integer(tree.operands[1])
        src = ca.DM.ones(n_row, n_col)
    elif op == 'identity' and n_operands == 1:
        n = self.get_integer(tree.operands[0])
        src = ca.DM.eye(n)
    elif op == 'diagonal' and n_operands == 1:
        diag = self.get_mx(tree.operands[0])
        n = len(diag)
        indices = list(range(n))
        # Sparse diagonal matrix built from (row, col, value) triplets.
        src = ca.DM.triplet(indices, indices, diag, n, n)
    elif op == 'cat':
        axis = self.get_integer(tree.operands[0])
        assert axis == 1, "Currently only concatenation on first axis is supported"
        entries = []
        for sym in [self.get_mx(op) for op in tree.operands[1:]]:
            if isinstance(sym, list):
                for e in sym:
                    entries.append(e)
            else:
                entries.append(sym)
        src = ca.vertcat(*entries)
    elif op == 'delay' and n_operands == 2:
        expr = self.get_mx(tree.operands[0])
        duration = self.get_mx(tree.operands[1])
        # A delay becomes a fresh input symbol; the delayed expression and
        # duration are recorded on the model for later processing.
        src = _new_mx('_pymoca_delay_{}'.format(self.delay_counter), *expr.size())
        self.delay_counter += 1
        for f in self.for_loops:
            syms = set(ca.symvar(expr))
            if syms.intersection(f.indexed_symbols):
                f.register_indexed_symbol(src, lambda i: i, True, tree.operands[0], f.index_variable)
        self.model.delay_states.append(src.name())
        self.model.inputs.append(Variable(src))
        delay_argument = DelayArgument(expr, duration)
        self.model.delay_arguments.append(delay_argument)
    elif op == '_pymoca_interp1d' and n_operands >= 3 and n_operands <= 4:
        entered_class = self.entered_classes[-1]
        if isinstance(tree.operands[0], ast.ComponentRef):
            xp = self.get_mx(entered_class.symbols[tree.operands[0].name].value)
        else:
            xp = self.get_mx(tree.operands[0])
        if isinstance(tree.operands[1], ast.ComponentRef):
            yp = self.get_mx(entered_class.symbols[tree.operands[1].name].value)
        else:
            yp = self.get_mx(tree.operands[1])
        arg = self.get_mx(tree.operands[2])
        if n_operands == 4:
            assert isinstance(tree.operands[3], ast.Primary)
            mode = tree.operands[3].value
        else:
            mode = 'linear'
        func = ca.interpolant('interpolant', mode, [xp], yp)
        src = func(arg)
    elif op == '_pymoca_interp2d' and n_operands >= 5 and n_operands <= 6:
        entered_class = self.entered_classes[-1]
        if isinstance(tree.operands[0], ast.ComponentRef):
            xp = self.get_mx(entered_class.symbols[tree.operands[0].name].value)
        else:
            xp = self.get_mx(tree.operands[0])
        if isinstance(tree.operands[1], ast.ComponentRef):
            yp = self.get_mx(entered_class.symbols[tree.operands[1].name].value)
        else:
            yp = self.get_mx(tree.operands[1])
        if isinstance(tree.operands[2], ast.ComponentRef):
            zp = self.get_mx(entered_class.symbols[tree.operands[2].name].value)
        else:
            zp = self.get_mx(tree.operands[2])
        arg_1 = self.get_mx(tree.operands[3])
        arg_2 = self.get_mx(tree.operands[4])
        if n_operands == 6:
            assert isinstance(tree.operands[5], ast.Primary)
            mode = tree.operands[5].value
        else:
            mode = 'linear'
        # Grid values are flattened column-major, as ca.interpolant expects.
        func = ca.interpolant('interpolant', mode, [xp, yp], np.array(zp).ravel(order='F'))
        src = func(ca.vertcat(arg_1, arg_2))
    elif op in OP_MAP and n_operands == 2:
        lhs = ca.MX(self.get_mx(tree.operands[0]))
        rhs = ca.MX(self.get_mx(tree.operands[1]))
        lhs_op = getattr(lhs, OP_MAP[op])
        src = lhs_op(rhs)
    elif op in OP_MAP and n_operands == 1:
        lhs = ca.MX(self.get_mx(tree.operands[0]))
        lhs_op = getattr(lhs, OP_MAP[op])
        src = lhs_op()
    else:
        src = ca.MX(self.get_mx(tree.operands[0]))
        # Check for built-in operations, such as the
        # elementary functions, first.
        if hasattr(src, op) and n_operands <= 2:
            if n_operands == 1:
                src = ca.MX(self.get_mx(tree.operands[0]))
                src = getattr(src, op)()
            else:
                lhs = ca.MX(self.get_mx(tree.operands[0]))
                rhs = ca.MX(self.get_mx(tree.operands[1]))
                lhs_op = getattr(lhs, op)
                src = lhs_op(rhs)
        else:
            # Fall back to a user-defined function call.
            func = self.get_function(op)
            src = ca.vertcat(*func.call([self.get_mx(operand) for operand in tree.operands], *self.function_mode))

    self.src[tree] = src
def get_derivative(self, s):
    """
    Return the time derivative of the MX expression ``s``.

    New ``der(name)`` symbols are created on demand and cached in
    ``self.derivative``; they are also registered in ``self.nodes`` so later
    lookups resolve them. Returns 0 for constants, a (possibly indexed)
    derivative symbol for symbols, and a chain-rule expansion for compound
    expressions.
    """
    # Case 1: s is a constant, e.g. MX(5)
    if ca.MX(s).is_constant():
        return 0
    # Case 2: s is a symbol, e.g. MX(x)
    elif s.is_symbolic():
        if s.name() not in self.derivative:
            if len(self.for_loops) > 0 and s in self.for_loops[-1].indexed_symbols:
                # Create a new indexed symbol, referencing to the for loop index inside the vector derivative symbol.
                for_loop_symbol = self.for_loops[-1].indexed_symbols[s]
                s_without_index = self.get_mx(
                    ast.ComponentRef(name=for_loop_symbol.tree.name))
                der_s_without_index = self.get_derivative(s_without_index)
                if ca.MX(der_s_without_index).is_symbolic():
                    return self.get_indexed_symbol(
                        ast.ComponentRef(
                            name=der_s_without_index.name(),
                            indices=for_loop_symbol.tree.indices),
                        der_s_without_index)
                else:
                    return 0
            else:
                der_s = _new_mx("der({})".format(s.name()), s.size())
                # If the derivative contains an expression (e.g. der(x + y)) this method is
                # called with MX variables that are the result of a ca.symvar call. This
                # ca.symvar call strips the _modelica_shape field from the MX variable,
                # therefore we need to find the original MX to get the modelica shape.
                der_s._modelica_shape = \
                    self.nodes[self.current_class][s.name()]._modelica_shape
                self.derivative[s.name()] = der_s
                self.nodes[self.current_class][der_s.name()] = der_s
                return der_s
        else:
            # Derivative symbol already created earlier; reuse it.
            return self.derivative[s.name()]
    # Case 3: s is an already indexed symbol, e.g. MX(x[1])
    elif s.is_op(ca.OP_GETNONZEROS) and s.dep().is_symbolic():
        slice_info = s.info()['slice']
        dep = s.dep()
        if dep.name() not in self.derivative:
            # Create the derivative of the *whole* underlying symbol, then
            # apply the same slice to it.
            der_dep = _new_mx("der({})".format(dep.name()), dep.size())
            der_dep._modelica_shape = \
                self.nodes[self.current_class][dep.name()]._modelica_shape
            self.derivative[dep.name()] = der_dep
            self.nodes[self.current_class][der_dep.name()] = der_dep
            return der_dep[
                slice_info['start']:slice_info['stop']:slice_info['step']]
        else:
            return self.derivative[dep.name(
            )][slice_info['start']:slice_info['stop']:slice_info['step']]
    # Case 4: s is an expression that requires differentiation, e.g. MX(x2 * x2)
    # Need to do this sort of expansion: der(x1 * x2) = der(x1) * x2 + x1 * der(x2)
    else:
        # Differentiate expression using CasADi
        orig_deps = ca.symvar(s)
        deps = ca.vertcat(*orig_deps)
        J = ca.Function('J', [deps], [ca.jacobian(s, deps)])
        J_sparsity = J.sparsity_out(0)
        # Only recurse into dependencies that actually appear in the jacobian
        # sparsity; others contribute zero.
        der_deps = [
            self.get_derivative(dep) if J_sparsity.has_nz(0, j)
            else ca.DM.zeros(dep.size())
            for j, dep in enumerate(orig_deps)
        ]
        return ca.mtimes(J(deps), ca.vertcat(*der_deps))
def exitForEquation(self, tree):
    """
    Unroll a Modelica for-equation into a single MX residual expression.

    The equation body is compiled once into a CasADi Function and evaluated
    for all loop values via Function.map. Indexed symbols that are delay
    states get a freshly shaped replacement symbol. An empty loop range
    yields an empty MX.
    """
    logger.debug('exitForEquation')

    f = self.for_loops.pop()
    if len(f.values) > 0:
        indexed_symbols = list(f.indexed_symbols.keys())
        args = [f.index_variable] + indexed_symbols
        expr = ca.vcat([ca.vec(self.get_mx(e)) for e in tree.equations])
        free_vars = ca.symvar(expr)
        # Free variables are body inputs that are not loop arguments.
        arg_names = [arg.name() for arg in args]
        free_vars = [e for e in free_vars if e.name() not in arg_names]
        all_args = args + free_vars
        F = ca.Function('loop_body', all_args, [expr])

        indexed_symbols_full = []
        for k in indexed_symbols:
            s = f.indexed_symbols[k]
            indices = s.indices
            try:
                i = self.model.delay_states.index(k.name())
            except ValueError:
                orig_symbol = self.nodes[self.current_class][s.tree.name]
            else:
                # We are missing a similarly shaped delayed symbol. Make a new one with the appropriate shape.
                delay_symbol = self.model.delay_arguments[i]

                # We need to figure out the shape of the expression that
                # we are delaying. The symbols that can occur in the delay
                # expression should have been encountered before this
                # iteration of the loop. The assert statement below covers
                # this.
                delay_expr_args = free_vars + all_args[:len(
                    indexed_symbols_full) + 1]
                assert set(ca.symvar(
                    delay_symbol.expr)).issubset(delay_expr_args)

                f_delay_expr = ca.Function('delay_expr', delay_expr_args,
                                           [delay_symbol.expr])
                f_delay_map = f_delay_expr.map("map", self.map_mode,
                                               len(f.values),
                                               list(range(len(free_vars))), [])
                [res] = f_delay_map.call(free_vars + [f.values] +
                                         indexed_symbols_full)
                res = res.T

                # Make the symbol with the appropriate size, and replace the old symbol with the new one.
                orig_symbol = _new_mx(k.name(), *res.size())
                assert res.size1() == 1 or res.size2(
                ) == 1, "Slicing does not yet work with 2-D indices"
                indices = slice(None, None)

                model_input = next(x for x in self.model.inputs
                                   if x.symbol.name() == k.name())
                model_input.symbol = orig_symbol
                self.model.delay_arguments[i] = DelayArgument(
                    res, delay_symbol.duration)

            indexed_symbol = orig_symbol[indices]
            if s.transpose:
                indexed_symbol = ca.transpose(indexed_symbol)
            indexed_symbols_full.append(indexed_symbol)

        # Map over the loop values; trailing free_vars indices are repeated
        # (not mapped) inputs.
        Fmap = F.map("map", self.map_mode, len(f.values),
                     list(range(len(args), len(all_args))), [])
        res = Fmap.call([f.values] + indexed_symbols_full + free_vars)

        self.src[tree] = res[0].T
    else:
        self.src[tree] = ca.MX()
def exitExpression(self, tree):
    """
    Translate a Modelica expression AST node into a CasADi MX expression.

    Dispatches on the operator name: built-in Modelica operators/functions
    (der, fill, zeros, cat, delay, interpolation, ...) are handled
    explicitly, binary/unary operators go through OP_MAP, and anything else
    falls back to MX elementary functions or a user-defined function call.
    The result is stored in self.src[tree].
    """
    if isinstance(tree.operator, ast.ComponentRef):
        op = tree.operator.name
    else:
        op = tree.operator

    if op == '*':
        op = 'mtimes'  # .* differs from *
    if op.startswith('.'):
        op = op[1:]

    logger.debug('exitExpression')

    n_operands = len(tree.operands)
    if op == 'der':
        v = self.get_mx(tree.operands[0])
        src = self.get_derivative(v)
    elif op == '-' and n_operands == 1:
        src = -self.get_mx(tree.operands[0])
    elif op == 'not' and n_operands == 1:
        # Boolean negation encoded as if_else (short-circuit form).
        src = ca.if_else(self.get_mx(tree.operands[0]), 0, 1, True)
    elif op == 'mtimes':
        assert n_operands >= 2
        src = self.get_mx(tree.operands[0])
        for i in tree.operands[1:]:
            src = ca.mtimes(src, self.get_mx(i))
    elif op == 'transpose' and n_operands == 1:
        src = self.get_mx(tree.operands[0]).T
    elif op == 'sum' and n_operands == 1:
        v = self.get_mx(tree.operands[0])
        src = ca.sum1(v)
    elif op == 'linspace' and n_operands == 3:
        a = self.get_mx(tree.operands[0])
        b = self.get_mx(tree.operands[1])
        n_steps = self.get_integer(tree.operands[2])
        src = ca.linspace(a, b, n_steps)
    elif op == 'fill' and n_operands == 2:
        val = self.get_mx(tree.operands[0])
        n_row = self.get_integer(tree.operands[1])
        src = val * ca.DM.ones(n_row)
    elif op == 'fill' and n_operands == 3:
        val = self.get_mx(tree.operands[0])
        n_row = self.get_integer(tree.operands[1])
        n_col = self.get_integer(tree.operands[2])
        src = val * ca.DM.ones(n_row, n_col)
    elif op == 'zeros' and n_operands == 1:
        n_row = self.get_integer(tree.operands[0])
        src = ca.DM.zeros(n_row)
    elif op == 'zeros' and n_operands == 2:
        n_row = self.get_integer(tree.operands[0])
        n_col = self.get_integer(tree.operands[1])
        src = ca.DM.zeros(n_row, n_col)
    elif op == 'ones' and n_operands == 1:
        n_row = self.get_integer(tree.operands[0])
        src = ca.DM.ones(n_row)
    elif op == 'ones' and n_operands == 2:
        n_row = self.get_integer(tree.operands[0])
        n_col = self.get_integer(tree.operands[1])
        src = ca.DM.ones(n_row, n_col)
    elif op == 'identity' and n_operands == 1:
        n = self.get_integer(tree.operands[0])
        src = ca.DM.eye(n)
    elif op == 'diagonal' and n_operands == 1:
        diag = self.get_mx(tree.operands[0])
        n = len(diag)
        indices = list(range(n))
        # Sparse diagonal matrix built from (row, col, value) triplets.
        src = ca.DM.triplet(indices, indices, diag, n, n)
    elif op == 'cat':
        axis = self.get_integer(tree.operands[0])
        assert axis == 1, "Currently only concatenation on first axis is supported"
        entries = []
        for sym in [self.get_mx(op) for op in tree.operands[1:]]:
            if isinstance(sym, list):
                for e in sym:
                    entries.append(e)
            else:
                entries.append(sym)
        src = ca.vertcat(*entries)
    elif op == 'delay' and n_operands == 2:
        expr = self.get_mx(tree.operands[0])
        duration = self.get_mx(tree.operands[1])
        # A delay becomes a fresh input symbol; the delayed expression and
        # duration are recorded on the model for later processing.
        src = _new_mx('_pymoca_delay_{}'.format(self.delay_counter),
                      *expr.size())
        self.delay_counter += 1
        for f in self.for_loops:
            syms = set(ca.symvar(expr))
            if syms.intersection(f.indexed_symbols):
                f.register_indexed_symbol(src, lambda i: i, True,
                                          tree.operands[0], f.index_variable)
        self.model.delay_states.append(src.name())
        self.model.inputs.append(Variable(src))
        delay_argument = DelayArgument(expr, duration)
        self.model.delay_arguments.append(delay_argument)
    elif op == '_pymoca_interp1d' and n_operands >= 3 and n_operands <= 4:
        entered_class = self.entered_classes[-1]
        if isinstance(tree.operands[0], ast.ComponentRef):
            xp = self.get_mx(
                entered_class.symbols[tree.operands[0].name].value)
        else:
            xp = self.get_mx(tree.operands[0])
        if isinstance(tree.operands[1], ast.ComponentRef):
            yp = self.get_mx(
                entered_class.symbols[tree.operands[1].name].value)
        else:
            yp = self.get_mx(tree.operands[1])
        arg = self.get_mx(tree.operands[2])
        if n_operands == 4:
            assert isinstance(tree.operands[3], ast.Primary)
            mode = tree.operands[3].value
        else:
            mode = 'linear'
        func = ca.interpolant('interpolant', mode, [xp], yp)
        src = func(arg)
    elif op == '_pymoca_interp2d' and n_operands >= 5 and n_operands <= 6:
        entered_class = self.entered_classes[-1]
        if isinstance(tree.operands[0], ast.ComponentRef):
            xp = self.get_mx(
                entered_class.symbols[tree.operands[0].name].value)
        else:
            xp = self.get_mx(tree.operands[0])
        if isinstance(tree.operands[1], ast.ComponentRef):
            yp = self.get_mx(
                entered_class.symbols[tree.operands[1].name].value)
        else:
            yp = self.get_mx(tree.operands[1])
        if isinstance(tree.operands[2], ast.ComponentRef):
            zp = self.get_mx(
                entered_class.symbols[tree.operands[2].name].value)
        else:
            zp = self.get_mx(tree.operands[2])
        arg_1 = self.get_mx(tree.operands[3])
        arg_2 = self.get_mx(tree.operands[4])
        if n_operands == 6:
            assert isinstance(tree.operands[5], ast.Primary)
            mode = tree.operands[5].value
        else:
            mode = 'linear'
        # Grid values are flattened column-major, as ca.interpolant expects.
        func = ca.interpolant('interpolant', mode, [xp, yp],
                              np.array(zp).ravel(order='F'))
        src = func(ca.vertcat(arg_1, arg_2))
    elif op in OP_MAP and n_operands == 2:
        lhs = ca.MX(self.get_mx(tree.operands[0]))
        rhs = ca.MX(self.get_mx(tree.operands[1]))
        lhs_op = getattr(lhs, OP_MAP[op])
        src = lhs_op(rhs)
    elif op in OP_MAP and n_operands == 1:
        lhs = ca.MX(self.get_mx(tree.operands[0]))
        lhs_op = getattr(lhs, OP_MAP[op])
        src = lhs_op()
    else:
        src = ca.MX(self.get_mx(tree.operands[0]))
        # Check for built-in operations, such as the
        # elementary functions, first.
        if hasattr(src, op) and n_operands <= 2:
            if n_operands == 1:
                src = ca.MX(self.get_mx(tree.operands[0]))
                src = getattr(src, op)()
            else:
                lhs = ca.MX(self.get_mx(tree.operands[0]))
                rhs = ca.MX(self.get_mx(tree.operands[1]))
                lhs_op = getattr(lhs, op)
                src = lhs_op(rhs)
        else:
            # Fall back to a user-defined function call.
            func = self.get_function(op)
            src = ca.vertcat(*func.call(
                [self.get_mx(operand) for operand in tree.operands],
                *self.function_mode))

    self.src[tree] = src
def add_to_dict(self, symbol, child, name):
    """Register every primitive symbol of `symbol` under the pair (child, name)."""
    for primitive in symvar(symbol):
        self.symbol_dict[primitive.getName()] = [child, name]
def save_model(model_folder: str, model_name: str, model: Model, compiler_options: Dict[str, str]) -> None:
    """
    Saves a CasADi model to disk.

    :param model_folder: Folder where the precompiled CasADi model will be stored.
    :param model_name: Name of the model.
    :param model: Model instance.
    :param compiler_options: Dictionary of compiler options.
    """
    # Gather the model's CasADi functions, either code-generated to shared
    # libraries or kept as pickled function objects.
    objects = {'dae_residual': None, 'initial_residual': None, 'variable_metadata': None, 'delay_arguments': None}
    for o in objects.keys():
        f = getattr(model, o + '_function')

        if compiler_options.get('codegen', False):
            objects[o] = _codegen_model(model_folder, f, '{}_{}'.format(model_name, o))
        else:
            objects[o] = f

    # Output metadata
    db_file = os.path.join(model_folder, model_name + ".pymoca_cache")
    with open(db_file, 'wb') as f:
        db = {}

        # Store version
        db['version'] = __version__

        # Include references to the shared libraries (codegen) or pickled functions (cache)
        db.update(objects)

        db['library_os'] = os.name

        db['options'] = compiler_options

        # Describe variables per category
        for key in ['states', 'der_states', 'alg_states', 'inputs', 'parameters', 'constants']:
            db[key] = [e.to_dict() for e in getattr(model, key)]

        # Caching using CasADi functions will lead to constants seemingly
        # depending on MX variables. Figuring out that they do not is slow,
        # especially when doing it on a lazy function call, as would be the
        # case when reading from cache. So instead, we do the depency check
        # once when saving the model.

        # Metadata dependency checking
        parameter_vector = ca.veccat(*[v.symbol for v in model.parameters])

        for k, key in enumerate(['states', 'alg_states', 'inputs', 'parameters', 'constants']):
            metadata_shape = (len(getattr(model, key)), len(CASADI_ATTRIBUTES))
            m = db[key + "__metadata_dependent"] = np.zeros(metadata_shape, dtype=bool)
            for i, v in enumerate(getattr(model, key)):
                for j, tmp in enumerate(CASADI_ATTRIBUTES):
                    attr = getattr(v, tmp)
                    if (isinstance(attr, ca.MX)
                            and not attr.is_constant()
                            and ca.depends_on(attr, parameter_vector)):
                        # Mark attribute (i, j) as parameter-dependent.
                        m[i, j] = True

        # Delay dependency checking
        if model.delay_states:
            # For each delay duration, record the indices of the model
            # symbols it depends on.
            all_symbols = [model.time,
                           *model._symbols(model.states),
                           *model._symbols(model.der_states),
                           *model._symbols(model.alg_states),
                           *model._symbols(model.inputs),
                           *model._symbols(model.constants),
                           *model._symbols(model.parameters)]
            symbol_to_index = {x: i for i, x in enumerate(all_symbols)}

            expressions, durations = zip(*model.delay_arguments)

            duration_dependencies = []
            for dur in durations:
                duration_dependencies.append(
                    [symbol_to_index[var] for var in ca.symvar(dur) if ca.depends_on(dur, var)])
            db['__delay_duration_dependent'] = duration_dependencies

        db['outputs'] = model.outputs

        db['delay_states'] = model.delay_states

        db['alias_relation'] = model.alias_relation

        pickle.dump(db, f, protocol=-1)
def add_to_dict(self, symbol, name):
    """Register each primitive symbol of `symbol` under this object and `name`."""
    for primitive in symvar(symbol):
        # Refuse duplicate registration to keep the mapping unambiguous.
        if primitive in self.symbol_dict:
            raise ValueError('Symbol already added for %s' % self.label)
        self.symbol_dict[primitive.name()] = [self, name]
def _construct_upd_z_nlp(self):
    """Build the NLP for the ADMM z-update step.

    Constructs symbolic variables (z_i, z_ij), parameters (x, multipliers,
    time info, rho), rewrites the stored constraints in terms of the new
    z variables via symbol substitution, and assembles the augmented
    Lagrangian objective. Uses the legacy CasADi API (getName, mul).
    """
    # construct variables
    self._var_struct_updz = struct([entry('z_i', struct=self.q_i_struct),
                                    entry('z_ij', struct=self.q_ij_struct)])
    var = struct_symMX(self._var_struct_updz)
    z_i = self.q_i_struct(var['z_i'])
    z_ij = self.q_ij_struct(var['z_ij'])
    # construct parameters
    self._par_struct_updz = struct([entry('x_i', struct=self.q_i_struct),
                                    entry('x_j', struct=self.q_ij_struct),
                                    entry('l_i', struct=self.q_i_struct),
                                    entry('l_ij', struct=self.q_ij_struct),
                                    entry('t'), entry('T'), entry('rho'),
                                    entry('par', struct=self.par_struct)])
    par = struct_symMX(self._par_struct_updz)
    x_i, x_j = self.q_i_struct(par['x_i']), self.q_ij_struct(par['x_j'])
    l_i, l_ij = self.q_i_struct(par['l_i']), self.q_ij_struct(par['l_ij'])
    t, T, rho = par['t'], par['T'], par['rho']
    # normalized current time within the horizon
    t0 = t/T
    # transform spline variables: only consider future piece of spline
    tf = lambda cfs, basis: shift_knot1_fwd(cfs, basis, t0)
    self._transform_spline([x_i, z_i, l_i], tf, self.q_i)
    self._transform_spline([x_j, z_ij, l_ij], tf, self.q_ij)
    # construct constraints: replace each free symbol in each constraint by
    # the matching slice of the z variables (own group first, then
    # neighbours), or by the corresponding parameter.
    constraints, lb, ub = [], [], []
    for con in self.constraints:
        c = con[0]
        for sym in symvar(c):
            for label, child in self.group.items():
                if sym.getName() in child.symbol_dict:
                    name = child.symbol_dict[sym.getName()][1]
                    v = z_i[label, name]
                    ind = self.q_i[child][name]
                    # scatter the selected entries back into the original
                    # symbol's shape before substituting
                    sym2 = MX.zeros(sym.size())
                    sym2[ind] = v
                    sym2 = reshape(sym2, sym.shape)
                    c = substitute(c, sym, sym2)
                    break
            for nghb in self.q_ij.keys():
                for label, child in nghb.group.items():
                    if sym.getName() in child.symbol_dict:
                        name = child.symbol_dict[sym.getName()][1]
                        v = z_ij[nghb.label, label, name]
                        ind = self.q_ij[nghb][child][name]
                        sym2 = MX.zeros(sym.size())
                        sym2[ind] = v
                        sym2 = reshape(sym2, sym.shape)
                        c = substitute(c, sym, sym2)
                        break
            for name, s in self.par_i.items():
                if s.getName() == sym.getName():
                    c = substitute(c, sym, par['par', name])
        constraints.append(c)
        lb.append(con[1])
        ub.append(con[2])
    self.lb_updz, self.ub_updz = lb, ub
    # construct objective: augmented Lagrangian terms
    # l'(x-z) + (rho/2)*||x-z||^2 for own and neighbouring variables
    obj = 0.
    for child, q_i in self.q_i.items():
        for name in q_i.keys():
            x = x_i[child.label, name]
            z = z_i[child.label, name]
            l = l_i[child.label, name]
            obj += mul(l.T, x-z) + 0.5*rho*mul((x-z).T, (x-z))
    for nghb in self.q_ij.keys():
        for child, q_ij in self.q_ij[nghb].items():
            for name in q_ij.keys():
                x = x_j[str(nghb), child.label, name]
                z = z_ij[str(nghb), child.label, name]
                l = l_ij[str(nghb), child.label, name]
                obj += mul(l.T, x-z) + 0.5*rho*mul((x-z).T, (x-z))
    # construct problem
    prob, compile_time = self.father.create_nlp(var, par, obj, constraints, self.options)
    self.problem_upd_z = prob
def load_model(model_folder: str, model_name: str, compiler_options: Dict[str, str]) -> CachedModel:
    """
    Loads a precompiled CasADi model into a CachedModel instance.

    :param model_folder: Folder where the precompiled CasADi model is located.
    :param model_name: Name of the model.
    :param compiler_options: Dictionary of compiler options.

    :returns: CachedModel instance.

    :raises InvalidCacheError: If the cache is stale, was generated by a
        different pymoca/CasADi version, for different compiler options, or
        for an incompatible OS.
    """
    compiler_options = _merge_default_options(compiler_options)

    db_file = os.path.join(model_folder, model_name + ".pymoca_cache")

    if compiler_options['mtime_check']:
        # Mtime check: the cache is invalid if any .mo source file is newer
        cache_mtime = os.path.getmtime(db_file)
        for folder in [model_folder] + compiler_options['library_folders']:
            for root, dir, files in os.walk(folder, followlinks=True):
                for item in fnmatch.filter(files, "*.mo"):
                    filename = os.path.join(root, item)
                    if os.path.getmtime(filename) > cache_mtime:
                        raise InvalidCacheError("Cache out of date")

    # Create empty model object
    model = CachedModel()

    # Load metadata
    with open(db_file, 'rb') as f:
        try:
            db = pickle.load(f)
        except RuntimeError as e:
            # Pickled CasADi Functions fail to deserialize across
            # incompatible CasADi versions with this error
            if "DeserializingStream" in str(e):
                raise InvalidCacheError(
                    'Cache generated for incompatible CasADi version')
            else:
                raise

        if db['version'] != __version__:
            raise InvalidCacheError(
                'Cache generated for a different version of pymoca')

        # Check compiler options. We ignore the library folders, as they have
        # already been checked, and checking them will impede platform
        # portability of the cache.
        exclude_options = ['library_folders']
        old_opts = {
            k: v
            for k, v in db['options'].items() if k not in exclude_options
        }
        new_opts = {
            k: v
            for k, v in compiler_options.items() if k not in exclude_options
        }

        if old_opts != new_opts:
            raise InvalidCacheError(
                'Cache generated for different compiler options')

        # Pickles are platform independent, but dynamic libraries are not
        if compiler_options['codegen']:
            if db['library_os'] != os.name:
                raise InvalidCacheError('Cache generated for incompatible OS')

        # Include references to the shared libraries
        for o in [
                'dae_residual', 'initial_residual', 'variable_metadata',
                'delay_arguments'
        ]:
            if isinstance(db[o], str):
                # Path to codegen'd library
                f = ca.external(o, db[o])
            else:
                # Pickled CasADi Function; use as is
                assert isinstance(db[o], ca.Function)
                f = db[o]

            setattr(model, '_' + o + '_function', f)

        # Load variables per category
        variables_with_metadata = [
            'states', 'alg_states', 'inputs', 'parameters', 'constants'
        ]
        variable_dict = {}
        for key in variables_with_metadata:
            variables = getattr(model, key)
            for i, d in enumerate(db[key]):
                variable = Variable.from_dict(d)
                variables.append(variable)
                variable_dict[variable.symbol.name()] = variable

        model.der_states = [Variable.from_dict(d) for d in db['der_states']]
        model.outputs = db['outputs']
        model.delay_states = db['delay_states']
        model.alias_relation = db['alias_relation']

        # Evaluate variable metadata:
        parameter_vector = ca.veccat(*[v.symbol for v in model.parameters])
        metadata = dict(
            zip(variables_with_metadata,
                model.variable_metadata_function(parameter_vector)))
        # NaN parameters give the parameter-independent metadata values
        independent_metadata = dict(
            zip(variables_with_metadata,
                (ca.MX(x) for x in model.variable_metadata_function(
                    ca.veccat(*[np.nan for v in model.parameters])))))

        for k, key in enumerate(variables_with_metadata):
            # Per-attribute dependency flags recorded by save_model
            m = db[key + "__metadata_dependent"]
            for i, d in enumerate(db[key]):
                variable = variable_dict[d['name']]
                for j, tmp in enumerate(CASADI_ATTRIBUTES):
                    if m[i, j] == _DepMeta.MX_DEPENDENT:
                        setattr(variable, tmp, metadata[key][i, j])
                    elif m[i, j] == _DepMeta.MX_INDEPENDENT:
                        setattr(variable, tmp, independent_metadata[key][i, j])
                    else:
                        # Already handled as part of Variable dict. That way
                        # we also do not have to worry about making sure the
                        # type of the cached model is exactly the same, as
                        # pickling ensures that.
                        pass

        # Evaluate delay arguments:
        if model.delay_states:
            args = [
                model.time,
                ca.veccat(*model._symbols(model.states)),
                ca.veccat(*model._symbols(model.der_states)),
                ca.veccat(*model._symbols(model.alg_states)),
                ca.veccat(*model._symbols(model.inputs)),
                ca.veccat(*model._symbols(model.constants)),
                ca.veccat(*model._symbols(model.parameters))
            ]
            delay_arguments_raw = model.delay_arguments_function(*args)

            nan_args = [ca.repmat(np.nan, *arg.size()) for arg in args]
            independent_delay_arguments_raw = model.delay_arguments_function(
                *nan_args)

            # Function outputs alternate (expression, duration) pairs
            delay_expressions_raw = delay_arguments_raw[::2]
            delay_durations_raw = delay_arguments_raw[1::2]
            independent_delay_durations_raw = independent_delay_arguments_raw[
                1::2]

            assert 1 == len({
                len(delay_expressions_raw),
                len(delay_durations_raw),
                len(independent_delay_durations_raw)
            })

            # NOTE: must match the symbol ordering used in save_model
            all_symbols = [
                model.time, *model._symbols(model.states),
                *model._symbols(model.der_states),
                *model._symbols(model.alg_states),
                *model._symbols(model.inputs),
                *model._symbols(model.constants),
                *model._symbols(model.parameters)
            ]

            duration_dependencies = db['__delay_duration_dependent']

            # Get rid of false dependency symbols not used in any delay
            # durations. This significantly reduces the work the (slow)
            # substitute() calls have to do later on.
            actual_deps = sorted(set(np.array(duration_dependencies).ravel()))

            actual_dep_symbols = [np.nan] * len(all_symbols)
            for i in actual_deps:
                actual_dep_symbols[i] = all_symbols[i]

            delay_durations_simplified = ca.Function(
                'replace_false_deps', all_symbols,
                delay_durations_raw).call(actual_dep_symbols)

            # Get rid of remaining hidden dependencies in the delay durations
            for i, expr in enumerate(delay_expressions_raw):
                if duration_dependencies[i]:
                    dur = delay_durations_simplified[i]

                    if len(duration_dependencies[i]) < len(actual_deps):
                        deps = set(ca.symvar(dur))
                        actual_deps = {
                            all_symbols[j]
                            for j in duration_dependencies[i]
                        }
                        false_deps = deps - actual_deps

                        if false_deps:
                            [dur] = ca.substitute([dur], list(false_deps),
                                                  [np.nan] * len(false_deps))
                    else:
                        # Already removed all false dependencies
                        pass
                else:
                    # Duration has no symbolic dependencies at all
                    dur = independent_delay_durations_raw[i]

                model.delay_arguments.append(DelayArgument(expr, dur))

    # Done
    return model
def load_model(model_folder: str, model_name: str, compiler_options: Dict[str, str]) -> CachedModel:
    """
    Loads a precompiled CasADi model into a CachedModel instance.

    :param model_folder: Folder where the precompiled CasADi model is located.
    :param model_name: Name of the model.
    :param compiler_options: Dictionary of compiler options.

    :returns: CachedModel instance.

    :raises InvalidCacheError: If the cache is stale, was generated by a
        different pymoca version, for different compiler options, or for an
        incompatible OS.
    """
    db_file = os.path.join(model_folder, model_name + ".pymoca_cache")

    if compiler_options.get('mtime_check', True):
        # Mtime check: the cache is invalid if any .mo source file is newer
        cache_mtime = os.path.getmtime(db_file)
        for folder in [model_folder] + compiler_options.get('library_folders', []):
            for root, dir, files in os.walk(folder, followlinks=True):
                for item in fnmatch.filter(files, "*.mo"):
                    filename = os.path.join(root, item)
                    if os.path.getmtime(filename) > cache_mtime:
                        raise InvalidCacheError("Cache out of date")

    # Create empty model object
    model = CachedModel()

    # Load metadata
    with open(db_file, 'rb') as f:
        db = pickle.load(f)

        if db['version'] != __version__:
            raise InvalidCacheError('Cache generated for a different version of pymoca')

        # Check compiler options. We ignore the library folders, as they have
        # already been checked, and checking them will impede platform
        # portability of the cache.
        exclude_options = ['library_folders']
        old_opts = {k: v for k, v in db['options'].items() if k not in exclude_options}
        new_opts = {k: v for k, v in compiler_options.items() if k not in exclude_options}

        if old_opts != new_opts:
            raise InvalidCacheError('Cache generated for different compiler options')

        # Pickles are platform independent, but dynamic libraries are not
        if compiler_options.get('codegen', False):
            if db['library_os'] != os.name:
                raise InvalidCacheError('Cache generated for incompatible OS')

        # Include references to the shared libraries
        for o in ['dae_residual', 'initial_residual', 'variable_metadata', 'delay_arguments']:
            if isinstance(db[o], str):
                # Path to codegen'd library
                f = ca.external(o, db[o])
            else:
                # Pickled CasADi Function; use as is
                assert isinstance(db[o], ca.Function)
                f = db[o]

            setattr(model, '_' + o + '_function', f)

        # Load variables per category
        variables_with_metadata = ['states', 'alg_states', 'inputs', 'parameters', 'constants']
        variable_dict = {}
        for key in variables_with_metadata:
            variables = getattr(model, key)
            for i, d in enumerate(db[key]):
                variable = Variable.from_dict(d)
                variables.append(variable)
                variable_dict[variable.symbol.name()] = variable

        model.der_states = [Variable.from_dict(d) for d in db['der_states']]
        model.outputs = db['outputs']
        model.delay_states = db['delay_states']
        model.alias_relation = db['alias_relation']

        # Evaluate variable metadata:
        parameter_vector = ca.veccat(*[v.symbol for v in model.parameters])
        metadata = dict(zip(variables_with_metadata, model.variable_metadata_function(parameter_vector)))
        # NaN parameters give the parameter-independent metadata values
        independent_metadata = dict(zip(
            variables_with_metadata,
            (np.array(x) for x in model.variable_metadata_function(ca.veccat(*[np.nan for v in model.parameters])))))

        for k, key in enumerate(variables_with_metadata):
            # Boolean dependency flags recorded by save_model
            m = db[key + "__metadata_dependent"]
            for i, d in enumerate(db[key]):
                variable = variable_dict[d['name']]
                for j, tmp in enumerate(CASADI_ATTRIBUTES):
                    if m[i, j]:
                        # Parameter-dependent: keep the symbolic expression
                        setattr(variable, tmp, metadata[key][i, j])
                    else:
                        setattr(variable, tmp, independent_metadata[key][i, j])

    # Evaluate delay arguments:
    if model.delay_states:
        args = [model.time,
                ca.veccat(*model._symbols(model.states)),
                ca.veccat(*model._symbols(model.der_states)),
                ca.veccat(*model._symbols(model.alg_states)),
                ca.veccat(*model._symbols(model.inputs)),
                ca.veccat(*model._symbols(model.constants)),
                ca.veccat(*model._symbols(model.parameters))]
        delay_arguments_raw = model.delay_arguments_function(*args)

        nan_args = [ca.repmat(np.nan, *arg.size()) for arg in args]
        independent_delay_arguments_raw = model.delay_arguments_function(*nan_args)

        # Function outputs alternate (expression, duration) pairs
        delay_expressions_raw = delay_arguments_raw[::2]
        delay_durations_raw = delay_arguments_raw[1::2]
        independent_delay_durations_raw = independent_delay_arguments_raw[1::2]

        assert 1 == len({len(delay_expressions_raw), len(delay_durations_raw),
                         len(independent_delay_durations_raw)})

        # NOTE: must match the symbol ordering used in save_model
        all_symbols = [model.time,
                       *model._symbols(model.states),
                       *model._symbols(model.der_states),
                       *model._symbols(model.alg_states),
                       *model._symbols(model.inputs),
                       *model._symbols(model.constants),
                       *model._symbols(model.parameters)]

        duration_dependencies = db['__delay_duration_dependent']

        # Get rid of false dependency symbols not used in any delay
        # durations. This significantly reduces the work the (slow)
        # substitute() calls have to do later on.
        actual_deps = sorted(set(np.array(duration_dependencies).ravel()))

        actual_dep_symbols = [np.nan] * len(all_symbols)
        for i in actual_deps:
            actual_dep_symbols[i] = all_symbols[i]

        delay_durations_simplified = ca.Function(
            'replace_false_deps',
            all_symbols,
            delay_durations_raw).call(
                actual_dep_symbols)

        # Get rid of remaining hidden dependencies in the delay durations
        for i, expr in enumerate(delay_expressions_raw):
            if duration_dependencies[i]:
                dur = delay_durations_simplified[i]

                if len(duration_dependencies[i]) < len(actual_deps):
                    deps = set(ca.symvar(dur))
                    actual_deps = {all_symbols[j] for j in duration_dependencies[i]}
                    false_deps = deps - actual_deps

                    if false_deps:
                        [dur] = ca.substitute(
                            [dur],
                            list(false_deps),
                            [np.nan] * len(false_deps))
                else:
                    # Already removed all false dependencies
                    pass
            else:
                # Duration has no symbolic dependencies at all
                dur = independent_delay_durations_raw[i]

            model.delay_arguments.append(DelayArgument(expr, dur))

    # Try to coerce parameters into their Python types
    for p in model.parameters:
        for attr in CASADI_ATTRIBUTES:
            v = getattr(p, attr)
            v_mx = ca.MX(v)
            if v_mx.is_constant() and v_mx.is_regular():
                setattr(p, attr, p.python_type(v))

    # Done
    return model
def save_model(model_folder: str, model_name: str, model: Model, compiler_options: Dict[str, str]) -> None:
    """
    Saves a CasADi model to disk.

    :param model_folder: Folder where the precompiled CasADi model will be stored.
    :param model_name: Name of the model.
    :param model: Model instance.
    :param compiler_options: Dictionary of compiler options.
    """
    # The four CasADi Functions that make up the model. Each is either
    # code-generated to a shared library (codegen) or pickled as-is.
    objects = {
        'dae_residual': None,
        'initial_residual': None,
        'variable_metadata': None,
        'delay_arguments': None
    }
    for o in objects.keys():
        f = getattr(model, o + '_function')

        if compiler_options.get('codegen', False):
            # _codegen_model returns a path to the generated library
            objects[o] = _codegen_model(model_folder, f,
                                        '{}_{}'.format(model_name, o))
        else:
            objects[o] = f

    # Output metadata
    db_file = os.path.join(model_folder, model_name + ".pymoca_cache")
    with open(db_file, 'wb') as f:
        db = {}

        # Store version so load_model can reject caches from other pymoca versions
        db['version'] = __version__

        # Include references to the shared libraries (codegen) or pickled functions (cache)
        db.update(objects)

        # Shared libraries are OS-specific; record the OS for a load-time check
        db['library_os'] = os.name

        db['options'] = compiler_options

        # Describe variables per category
        for key in [
                'states', 'der_states', 'alg_states', 'inputs', 'parameters',
                'constants'
        ]:
            db[key] = [e.to_dict() for e in getattr(model, key)]

        # Caching using CasADi functions will lead to constants seemingly
        # depending on MX variables. Figuring out that they do not is slow,
        # especially when doing it on a lazy function call, as would be the
        # case when reading from cache. So instead, we do the dependency check
        # once when saving the model.

        # Metadata dependency checking: record, per variable and per CasADi
        # attribute, whether the attribute expression depends on parameters.
        parameter_vector = ca.veccat(*[v.symbol for v in model.parameters])
        for k, key in enumerate(
            ['states', 'alg_states', 'inputs', 'parameters', 'constants']):
            metadata_shape = (len(getattr(model, key)), len(CASADI_ATTRIBUTES))
            m = db[key + "__metadata_dependent"] = np.zeros(metadata_shape,
                                                            dtype=bool)
            for i, v in enumerate(getattr(model, key)):
                for j, tmp in enumerate(CASADI_ATTRIBUTES):
                    attr = getattr(v, tmp)
                    if (isinstance(attr, ca.MX) and not attr.is_constant()
                            and ca.depends_on(attr, parameter_vector)):
                        m[i, j] = True

        # Delay dependency checking: store, per delay duration, the indices
        # (into the canonical symbol ordering below) it actually depends on.
        if model.delay_states:
            # NOTE: this ordering must match the one used in load_model.
            all_symbols = [
                model.time, *model._symbols(model.states),
                *model._symbols(model.der_states),
                *model._symbols(model.alg_states),
                *model._symbols(model.inputs),
                *model._symbols(model.constants),
                *model._symbols(model.parameters)
            ]

            symbol_to_index = {x: i for i, x in enumerate(all_symbols)}

            expressions, durations = zip(*model.delay_arguments)

            duration_dependencies = []
            for dur in durations:
                duration_dependencies.append([
                    symbol_to_index[var] for var in ca.symvar(dur)
                    if ca.depends_on(dur, var)
                ])
            db['__delay_duration_dependent'] = duration_dependencies

        db['outputs'] = model.outputs
        db['delay_states'] = model.delay_states
        db['alias_relation'] = model.alias_relation

        # protocol=-1: highest available pickle protocol
        pickle.dump(db, f, protocol=-1)
def load_model(model_folder: str, model_name: str, compiler_options: Dict[str, str]) -> CachedModel:
    """
    Loads a precompiled CasADi model into a CachedModel instance.

    :param model_folder: Folder where the precompiled CasADi model is located.
    :param model_name: Name of the model.
    :param compiler_options: Dictionary of compiler options.

    :returns: CachedModel instance.

    :raises InvalidCacheError: If the cache is stale, was generated by a
        different pymoca version, for different compiler options, or for an
        incompatible OS.
    """
    db_file = os.path.join(model_folder, model_name + ".pymoca_cache")

    if compiler_options.get('mtime_check', True):
        # Mtime check: the cache is invalid if any .mo source file is newer
        cache_mtime = os.path.getmtime(db_file)
        for folder in [model_folder] + compiler_options.get(
                'library_folders', []):
            for root, dir, files in os.walk(folder, followlinks=True):
                for item in fnmatch.filter(files, "*.mo"):
                    filename = os.path.join(root, item)
                    if os.path.getmtime(filename) > cache_mtime:
                        raise InvalidCacheError("Cache out of date")

    # Create empty model object
    model = CachedModel()

    # Load metadata
    with open(db_file, 'rb') as f:
        db = pickle.load(f)

        if db['version'] != __version__:
            raise InvalidCacheError(
                'Cache generated for a different version of pymoca')

        # Check compiler options. We ignore the library folders, as they have
        # already been checked, and checking them will impede platform
        # portability of the cache.
        exclude_options = ['library_folders']
        old_opts = {
            k: v
            for k, v in db['options'].items() if k not in exclude_options
        }
        new_opts = {
            k: v
            for k, v in compiler_options.items() if k not in exclude_options
        }

        if old_opts != new_opts:
            raise InvalidCacheError(
                'Cache generated for different compiler options')

        # Pickles are platform independent, but dynamic libraries are not
        if compiler_options.get('codegen', False):
            if db['library_os'] != os.name:
                raise InvalidCacheError('Cache generated for incompatible OS')

        # Include references to the shared libraries
        for o in [
                'dae_residual', 'initial_residual', 'variable_metadata',
                'delay_arguments'
        ]:
            if isinstance(db[o], str):
                # Path to codegen'd library
                f = ca.external(o, db[o])
            else:
                # Pickled CasADi Function; use as is
                assert isinstance(db[o], ca.Function)
                f = db[o]

            setattr(model, '_' + o + '_function', f)

        # Load variables per category
        variables_with_metadata = [
            'states', 'alg_states', 'inputs', 'parameters', 'constants'
        ]
        variable_dict = {}
        for key in variables_with_metadata:
            variables = getattr(model, key)
            for i, d in enumerate(db[key]):
                variable = Variable.from_dict(d)
                variables.append(variable)
                variable_dict[variable.symbol.name()] = variable

        model.der_states = [Variable.from_dict(d) for d in db['der_states']]
        model.outputs = db['outputs']
        model.delay_states = db['delay_states']
        model.alias_relation = db['alias_relation']

        # Evaluate variable metadata:
        parameter_vector = ca.veccat(*[v.symbol for v in model.parameters])
        metadata = dict(
            zip(variables_with_metadata,
                model.variable_metadata_function(parameter_vector)))
        # NaN parameters give the parameter-independent metadata values
        independent_metadata = dict(
            zip(variables_with_metadata,
                (np.array(x) for x in model.variable_metadata_function(
                    ca.veccat(*[np.nan for v in model.parameters])))))

        for k, key in enumerate(variables_with_metadata):
            # Boolean dependency flags recorded by save_model
            m = db[key + "__metadata_dependent"]
            for i, d in enumerate(db[key]):
                variable = variable_dict[d['name']]
                for j, tmp in enumerate(CASADI_ATTRIBUTES):
                    if m[i, j]:
                        # Parameter-dependent: keep the symbolic expression
                        setattr(variable, tmp, metadata[key][i, j])
                    else:
                        setattr(variable, tmp,
                                independent_metadata[key][i, j])

    # Evaluate delay arguments:
    if model.delay_states:
        args = [
            model.time,
            ca.veccat(*model._symbols(model.states)),
            ca.veccat(*model._symbols(model.der_states)),
            ca.veccat(*model._symbols(model.alg_states)),
            ca.veccat(*model._symbols(model.inputs)),
            ca.veccat(*model._symbols(model.constants)),
            ca.veccat(*model._symbols(model.parameters))
        ]
        delay_arguments_raw = model.delay_arguments_function(*args)

        nan_args = [ca.repmat(np.nan, *arg.size()) for arg in args]
        independent_delay_arguments_raw = model.delay_arguments_function(
            *nan_args)

        # Function outputs alternate (expression, duration) pairs
        delay_expressions_raw = delay_arguments_raw[::2]
        delay_durations_raw = delay_arguments_raw[1::2]
        independent_delay_durations_raw = independent_delay_arguments_raw[
            1::2]

        assert 1 == len({
            len(delay_expressions_raw),
            len(delay_durations_raw),
            len(independent_delay_durations_raw)
        })

        # NOTE: must match the symbol ordering used in save_model
        all_symbols = [
            model.time, *model._symbols(model.states),
            *model._symbols(model.der_states),
            *model._symbols(model.alg_states),
            *model._symbols(model.inputs),
            *model._symbols(model.constants),
            *model._symbols(model.parameters)
        ]

        duration_dependencies = db['__delay_duration_dependent']

        # Get rid of hidden (false) dependencies in the delay durations by
        # substituting NaN for symbols the duration does not really use
        for i, expr in enumerate(delay_expressions_raw):
            if duration_dependencies[i]:
                dur = delay_durations_raw[i]

                deps = set(ca.symvar(dur))
                actual_deps = {
                    all_symbols[j]
                    for j in duration_dependencies[i]
                }
                false_deps = deps - actual_deps

                if false_deps:
                    [dur] = ca.substitute(
                        [dur], list(false_deps),
                        [ca.repmat(np.nan, *d.size()) for d in false_deps])
            else:
                # Duration has no symbolic dependencies at all
                dur = independent_delay_durations_raw[i]

            model.delay_arguments.append(DelayArgument(expr, dur))

    # Try to coerce parameters into their Python types
    for p in model.parameters:
        for attr in CASADI_ATTRIBUTES:
            v = getattr(p, attr)
            v_mx = ca.MX(v)
            if v_mx.is_constant() and v_mx.is_regular():
                setattr(p, attr, p.python_type(v))

    # Done
    return model
def construct_upd_xz(self, problem=None):
    """Construct the combined x/z update problem for the distributed scheme.

    Defines the z_ij variables and multiplier parameters, builds the linear
    multiplier objective, rewrites the global constraints in terms of the
    local x/z variables via symbol substitution, and constructs the final
    problem. Uses the modern CasADi API (name, mtimes).

    :param problem: Optional pre-built problem; when None, spline variables
        are additionally transformed to the future piece of the horizon.
    :returns: Build time reported by the problem constructor.
    """
    # construct optifather & give reference to problem
    self.father_updx = OptiFather(self.group.values())
    self.problem.father = self.father_updx
    # define z_ij variables, initialized from the current child values
    init = self.q_ij_struct(0)
    for nghb, q_ij in self.q_ij.items():
        for child, q_j in q_ij.items():
            for name, ind in q_j.items():
                var = np.array(child._values[name])
                # column-major flattening to match the struct layout
                v = var.T.flatten()[ind]
                init[nghb.label, child.label, name, ind] = v
    z_ij = self.define_variable(
        'z_ij', self.q_ij_struct.shape[0], value=np.array(init.cat))
    # define parameters
    l_ij = self.define_parameter('l_ij', self.q_ij_struct.shape[0])
    l_ji = self.define_parameter('l_ji', self.q_ji_struct.shape[0])
    # put them in the struct format
    z_ij = self.q_ij_struct(z_ij)
    l_ij = self.q_ij_struct(l_ij)
    l_ji = self.q_ji_struct(l_ji)
    # get (part of) variables
    x_i = self._get_x_variables(symbolic=True)
    # construct local copies of parameters
    par = {}
    for name, s in self.par_global.items():
        par[name] = self.define_parameter(name, s.shape[0], s.shape[1])
    if problem is None:
        # get time info
        t = self.define_symbol('t')
        T = self.define_symbol('T')
        # normalized current time within the horizon
        t0 = t/T
        # transform spline variables: only consider future piece of spline
        tf = lambda cfs, basis: shift_knot1_fwd(cfs, basis, t0)
        self._transform_spline(x_i, tf, self.q_i)
        self._transform_spline([z_ij, l_ij], tf, self.q_ij)
        self._transform_spline(l_ji, tf, self.q_ji)
    # construct objective: multiplier terms l_ji'*x - l_ij'*z
    obj = 0.
    for child, q_i in self.q_i.items():
        for name in q_i.keys():
            x = x_i[child.label][name]
            for nghb in self.q_ji.keys():
                l = l_ji[str(nghb), child.label, name]
                obj += mtimes(l.T, x)
    for nghb, q_j in self.q_ij.items():
        for child in q_j.keys():
            for name in q_j[child].keys():
                z = z_ij[str(nghb), child.label, name]
                l = l_ij[str(nghb), child.label, name]
                obj -= mtimes(l.T, z)
    self.define_objective(obj)
    # construct constraints: replace each free symbol in each global
    # constraint by the matching x_i slice (own group first) or z_ij slice
    # (neighbours), or by the corresponding local parameter copy.
    for con in self.global_constraints:
        c = con[0]
        for sym in symvar(c):
            for label, child in self.group.items():
                if sym.name() in child.symbol_dict:
                    name = child.symbol_dict[sym.name()][1]
                    v = x_i[label][name]
                    ind = self.q_i[child][name]
                    # scatter the selected entries back into the original
                    # symbol's shape before substituting
                    sym2 = MX.zeros(sym.size())
                    sym2[ind] = v
                    sym2 = reshape(sym2, sym.shape)
                    c = substitute(c, sym, sym2)
                    break
            for nghb in self.q_ij.keys():
                for label, child in nghb.group.items():
                    if sym.name() in child.symbol_dict:
                        name = child.symbol_dict[sym.name()][1]
                        v = z_ij[nghb.label, label, name]
                        ind = self.q_ij[nghb][child][name]
                        sym2 = MX.zeros(sym.size())
                        sym2[ind] = v
                        sym2 = reshape(sym2, sym.shape)
                        c = substitute(c, sym, sym2)
                        break
            for name, s in self.par_global.items():
                if s.name() == sym.name():
                    c = substitute(c, sym, par[name])
        lb, ub = con[1], con[2]
        self.define_constraint(c, lb, ub)
    # construct problem
    prob, buildtime = self.father_updx.construct_problem(
        self.options, str(self._index), problem)
    self.problem_upd_xz = prob
    self.father_updx.init_transformations(self.problem.init_primal_transform,
        self.problem.init_dual_transform)
    self.init_var_dd()
    return buildtime
def add_to_dict(self, symbol, name): for sym in symvar(symbol): if sym in self.symbol_dict: raise ValueError('Symbol already added for %s' % self.label) self.symbol_dict[sym.getName()] = [self, name]
def free_symbols(expression): return ca.symvar(expression)
def add_to_dict(self, symbol, child, name): for sym in symvar(symbol): self.symbol_dict[sym.name()] = [child, name]