def RBF(self, n):
    # Note: this is a method (it reads self.n_features and self.model), and
    # Python 3's range() replaces the original xrange().
    if n == 1:
        sX = 1
        sY = n
        n_features = self.n_features
        X = cs.SX.sym('X', sX, n_features)
        Y = cs.SX.sym('Y', sY, n_features)
        length_scale = cs.SX.sym('l', 1, n_features)
        X_ = X / cs.repmat(length_scale, sX, 1)
        Y_ = Y / cs.repmat(length_scale, sY, 1)
        dist = cs.SX.zeros(sX, sY)
        for i in range(sX):
            for j in range(sY):
                dist[i, j] = cs.sum2((X_[i, :] - Y_[j, :])**2)
        K = cs.exp(-.5 * dist)
        self.RBF1 = cs.Function('RBF1', [X, Y, length_scale], [K])
    else:
        sX = 1
        sY = n
        n_features = self.n_features
        X = cs.SX.sym('X', sX, n_features)
        Y = self.model.X_train_  # training inputs are fixed numeric data here
        length_scale = cs.SX.sym('l', 1, n_features)
        X_ = X / cs.repmat(length_scale, sX, 1)
        Y_ = Y / cs.repmat(length_scale, sY, 1)
        dist = cs.SX.zeros(sX, sY)
        for i in range(sX):
            for j in range(sY):
                dist[i, j] = cs.sum2((X_[i, :] - Y_[j, :])**2)
        K = cs.exp(-.5 * dist)
        self.RBFn = cs.Function('RBFn', [X, length_scale], [K])
def __init__(self, dae, t, poly_order=5, tdp_fun=None):
    '''Constructor
    '''
    pdq = Pdq(t, poly_order)
    N = len(pdq.collocationPoints)
    scheme = CollocationScheme(dae, pdq, tdp_fun=tdp_fun)

    x0 = cs.MX.sym('x0', dae.nx)
    X = scheme.x
    Z = scheme.z
    z0 = dae.z

    # Solve the collocation equations w.r.t. (X, Z)
    var = scheme.combine(['x', 'z'])
    eq = cs.Function('eq', [var, x0, scheme.u, scheme.p],
                     [cs.vertcat(scheme.eq, scheme.x[:, 0] - x0)])
    rf = cs.rootfinder('rf', 'newton', eq)

    # Initial point for the rootfinder
    w0 = ce.struct_MX(var)
    w0['x'] = cs.repmat(x0, 1, N)
    w0['z'] = cs.repmat(z0, 1, N - 1)

    sol = var(rf(w0, x0, scheme.u, scheme.p))
    sol_X = sol['x']
    sol_Z = sol['z']
    [sol_Q] = cs.substitute([scheme.q], [X, Z], [sol_X, sol_Z])

    self._simulate = cs.Function('CollocationSimulator',
                                 [x0, z0, scheme.u, scheme.p],
                                 [sol_X[:, -1], sol_Z[:, -1], sol_Q[:, -1],
                                  sol_X, sol_Z, sol_Q],
                                 ['x0', 'z0', 'u', 'p'],
                                 ['xf', 'zf', 'qf', 'X', 'Z', 'Q'])

    self._pdq = pdq
    self._dae = dae
def _make_casadi_types_broadcastable(x1, x2):
    def shape_2D(object: Union[float, int, Iterable, _onp.ndarray]) -> Tuple:
        shape = _onp.shape(object)
        if len(shape) == 0:
            return (1, 1)
        elif len(shape) == 1:
            return (1, shape[0])
        elif len(shape) == 2:
            return shape
        else:
            raise ValueError(
                "CasADi can't handle arrays with >2 dimensions, unfortunately."
            )

    x1_shape = shape_2D(x1)
    x2_shape = shape_2D(x2)
    shape = _onp.broadcast_shapes(x1_shape, x2_shape)

    x1_tiled = _cas.repmat(
        x1,
        shape[0] // x1_shape[0],
        shape[1] // x1_shape[1],
    )
    x2_tiled = _cas.repmat(
        x2,
        shape[0] // x2_shape[0],
        shape[1] // x2_shape[1],
    )

    return x1_tiled, x2_tiled
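# A minimal usage sketch (not part of the original module), relying on the
# module aliases assumed above (`import casadi as _cas`, `import numpy as
# _onp`): a column and a row are tiled to a common shape, mirroring NumPy
# broadcasting rules.
col = _cas.DM([[1], [2], [3]])        # shape (3, 1)
row = _cas.DM([[10, 20, 30, 40]])     # shape (1, 4)
a, b = _make_casadi_types_broadcastable(col, row)
assert a.shape == (3, 4) and b.shape == (3, 4)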
def _k_mat52(x, y=None, variance=1., lengthscale=None, diag_only=False,
             ARD=False):
    """ Evaluate the Matern52 kernel function symbolically using Casadi"""
    n_x, dim_x = x.shape
    if diag_only:
        # The diagonal of a stationary kernel is constant and equal to the
        # signal variance.
        ret = SX(n_x, 1)
        ret[:] = variance
        return ret

    if y is None:
        y = x
    n_y, _ = np.shape(y)

    if lengthscale is None:
        if ARD:
            lengthscale = np.ones((dim_x, ))
        else:
            lengthscale = 1.

    if ARD is False:
        lengthscale = lengthscale * np.ones((dim_x, ))

    lens_x = repmat(lengthscale.reshape(1, -1), n_x)
    lens_y = repmat(lengthscale.reshape(1, -1), n_y)

    r = _unscaled_dist(x / lens_x, y / lens_y)

    # GPy: self.variance*(1+np.sqrt(5.)*r+5./3*r**2)*np.exp(-np.sqrt(5.)*r)
    return variance * (1. + sqrt(5.) * r + 5. / 3 * r**2) * exp(-sqrt(5.) * r)
def test_include_equality_scalar_bound(self):
    rhs = 2
    aop = AbstractOptimizationProblem()
    x = aop.create_variable('x', 2)
    g = 2 * x
    aop.include_equality(g, rhs=rhs)
    self.assertTrue(is_equal(aop.g_lb, repmat(rhs, 2)))
    self.assertTrue(is_equal(aop.g_ub, repmat(rhs, 2)))
def test_include_inequality_scalar_bound(self):
    lb = 1
    ub = 4
    aop = AbstractOptimizationProblem()
    x = aop.create_variable('x', 2)
    g = 2 * x
    aop.include_inequality(g, lb=lb, ub=ub)
    self.assertTrue(is_equal(aop.g_lb, repmat(lb, 2)))
    self.assertTrue(is_equal(aop.g_ub, repmat(ub, 2)))
def maha(a1, b1, Q1, N):
    """Calculate the Mahalanobis distance

    Copyright (c) 2018, Eric Bradford
    """
    aQ = ca.mtimes(a1, Q1)
    bQ = ca.mtimes(b1, Q1)
    K1 = ca.repmat(ca.sum2(aQ * a1), 1, N) \
        + ca.repmat(ca.transpose(ca.sum2(bQ * b1)), N, 1) \
        - 2 * ca.mtimes(aQ, ca.transpose(b1))
    return K1
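# A quick numeric sanity check (not part of the original module): with
# Q1 = I, maha() reduces to the pairwise squared Euclidean distance matrix
# K1[i, j] = ||a_i - b_j||^2.
import numpy as np
import casadi as ca

a = np.random.randn(4, 3)
b = np.random.randn(4, 3)
K = maha(ca.DM(a), ca.DM(b), ca.DM.eye(3), N=4)
ref = np.array([[np.sum((a[i] - b[j])**2) for j in range(4)]
                for i in range(4)])
assert np.allclose(np.array(K), ref)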
def CasadiRBF(X, Y, model):
    """RBF kernel in CasADi"""
    sX = X.shape[0]
    sY = Y.shape[0]
    length_scale = model.kernel_.get_params()['k1__k2__length_scale'].reshape(1, -1)
    constant = model.kernel_.get_params()['k1__k1__constant_value']
    X = X / cs.repmat(length_scale, sX, 1)
    Y = Y / cs.repmat(length_scale, sY, 1)
    dist = cs.repmat(cs.sum1(X.T**2).T, 1, sY) \
        + cs.repmat(cs.sum1(Y.T**2), sX, 1) \
        - 2 * cs.mtimes(X, Y.T)
    K = constant * cs.exp(-.5 * dist)
    return K
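# A usage sketch (an assumption about the surrounding code): `model` is a
# fitted sklearn GaussianProcessRegressor whose kernel has the structure
# ConstantKernel() * RBF() + WhiteKernel(), which is what the
# 'k1__k1__constant_value' / 'k1__k2__length_scale' parameter paths above
# imply.
import numpy as np
import casadi as cs
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF, ConstantKernel, WhiteKernel

Xtr = np.random.rand(20, 2)
ytr = np.sin(Xtr).sum(axis=1)
kernel = ConstantKernel() * RBF(length_scale=[1.0, 1.0]) + WhiteKernel()
model = GaussianProcessRegressor(kernel=kernel).fit(Xtr, ytr)

K = CasadiRBF(cs.DM(Xtr[:5]), cs.DM(model.X_train_), model)  # 5 x 20 kernel matrix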
def _unscaled_dist(x, y):
    """ Calculate the Euclidean distance between two sets of datapoints

    Source: https://github.com/SheffieldML/GPy/blob/devel/GPy/kern/src/stationary.py
    """
    n_x, _ = np.shape(x)
    n_y, _ = np.shape(y)
    x1sq = sum2(x**2)
    x2sq = sum2(y**2)
    r2 = -2 * mtimes(x, y.T) + repmat(x1sq, 1, n_y) + repmat(x2sq.T, n_x, 1)
    return sqrt(r2)
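# A numeric sanity check (not part of the original module): compare against
# a direct NumPy computation of the pairwise Euclidean distances.
import numpy as np
from casadi import DM

x = DM(np.random.rand(5, 2))
y = DM(np.random.rand(3, 2))
r = _unscaled_dist(x, y)
diff = np.array(x)[:, None, :] - np.array(y)[None, :, :]
ref = np.sqrt((diff**2).sum(axis=-1))
assert np.allclose(np.array(r), ref)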
def expandInput(self, u):
    '''Return input at collocation points given the input u on collocation intervals.
    '''
    n = self.numIntervals
    assert u.shape[1] == n
    return cs.horzcat(*[cs.repmat(u[:, k], 1, len(self._collocationGroups[k]) - 1)
                        for k in range(n)])
def interpolate(ts, xs, t, equidistant, mode=0):
    if interp1d is not None:
        if mode == 0:
            mode_str = 'linear'
        elif mode == 1:
            mode_str = 'floor'
        else:
            mode_str = 'ceil'
        return interp1d(ts, xs, t, mode_str, equidistant)
    else:
        if mode == 1:
            xs = xs[:-1]  # block-forward
        else:
            xs = xs[1:]  # block-backward
        t = ca.MX(t)
        if t.size1() > 1:
            t_ = ca.MX.sym('t')
            xs_ = ca.MX.sym('xs', xs.size1())
            f = ca.Function('interpolant', [t_, xs_], [
                ca.mtimes(ca.transpose((t_ >= ts[:-1]) * (t_ < ts[1:])), xs_)
            ])
            f = f.map(t.size1(), 'serial')
            return ca.transpose(f(ca.transpose(t), ca.repmat(xs, 1, t.size1())))
        else:
            return ca.mtimes(ca.transpose((t >= ts[:-1]) * (t < ts[1:])), xs)
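# A minimal usage sketch (an assumption about how this helper is called),
# exercising the fallback branch where `interp1d` is unavailable. With
# mode=1 ("floor"/block-forward), the value on [1, 2) is the one attached to
# the left breakpoint; evalf() evaluates the resulting constant MX.
from casadi import DM, evalf

ts = DM([0.0, 1.0, 2.0, 3.0])
xs = DM([10.0, 20.0, 30.0, 40.0])  # one value per breakpoint
val = interpolate(ts, xs, 1.5, True, mode=1)
print(evalf(val))  # 20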
def set_initial(self, stage, opti, initial):
    initial = dict(initial)
    algs = get_ranges_dict(stage.algebraics)
    for a, v in list(initial.items()):
        if a in algs:
            for k in range(self.N):
                for e in self.Zc[k]:
                    e_shape = e[algs[a], :].shape
                    opti.set_initial(e[algs[a], :], repmat(v, 1, e_shape[1]))
            del initial[a]
    super().set_initial(stage, opti, initial)
    for k in range(self.N):
        x0 = DM(opti.debug.value(self.X[k], opti.initial()))
        for e in self.Xc[k]:
            opti.set_initial(e, repmat(x0, 1, e.shape[1] // x0.shape[1]))
def set_initial(self, stage, opti, initial):
    for var, expr in initial.items():
        for k in list(range(self.N)) + [-1]:
            target = self.eval_at_control(stage, var, k)
            value = DM(opti.debug.value(self.eval_at_control(stage, expr, k),
                                        opti.initial()))
            # If the provided value spans all N (or N+1) control points,
            # pick out the row/column corresponding to this k.
            if target.numel() * self.N == value.numel():
                if repmat(target, self.N, 1).shape == value.shape:
                    value = value[k, :]
                elif repmat(target, 1, self.N).shape == value.shape:
                    value = value[:, k]
            if target.numel() * (self.N + 1) == value.numel():
                if repmat(target, self.N + 1, 1).shape == value.shape:
                    value = value[k, :]
                elif repmat(target, 1, self.N + 1).shape == value.shape:
                    value = value[:, k]
            opti.set_initial(target, value)
def test_cartpole_integrator(self):
    ########### PyTorch system ###############
    pyTorchEnv = CartPoleModel(initRandom=False)
    param_truth = pyTorchEnv.theta.detach().numpy()
    # set the variance to 0 to have a deterministic environment
    pyTorchEnv.set_param_values(
        torch.from_numpy(np.concatenate([param_truth, np.zeros(2)])).float())

    ########### casadi system ################
    casadiEnv = CartpoleModelCasadi()
    states, states_d, controls, params = casadiEnv.buildDynamicalSystem()
    euler_func = euler_integration(states, states_d, controls, pyTorchEnv.tau,
                                   integrator_stepsize=1, angular_idx=[2])
    step = ca.Function("step", [states, controls, params], [euler_func])

    ############ Simulating the system ##########
    for traj in range(10):  # simulate 10 different trajectories in total
        timesteps = 500  # simulate the system for 500 timesteps

        # simulate 1 trajectory
        sim_one_traj = step.mapaccum("all_steps", timesteps)

        actions = ca.DM(np.random.rand(timesteps))
        policy = ReplayControlPolicy(pyTorchEnv.spec, np.array(actions))
        policy.normalized_input = [False, False, False, False]
        policy.normalized_output = [False]
        path = rollout_torch(pyTorchEnv, policy, timesteps,
                             terminate_only_max_path=True)

        x0 = ca.DM(path["observations"][0, :])
        sim_states = sim_one_traj(x0, actions,
                                  ca.repmat(param_truth, 1, timesteps))

        np.testing.assert_allclose(np.array(sim_states.T),
                                   path["next_observations"],
                                   rtol=1e-2, atol=1e-4)
def create_initial_guess(self, p=None, theta=None):
    """Create an initial guess for the optimal control problem using
    problem.x_0, problem.y_guess, problem.u_guess, and the given p and theta
    (for p_opt and theta_opt), if provided.
    If y_guess or u_guess is None, the initial guess uses a vector of zeros
    of appropriate size.

    :param p: Optimization parameters
    :param theta: Optimization theta
    :return:
    """
    x_init = repmat(self.problem.x_0,
                    (self.degree + 1) * self.finite_elements)

    if self.problem.y_guess is not None:
        y_init = repmat(self.problem.y_guess,
                        self.degree * self.finite_elements)
    else:
        y_init = repmat(DM.zeros(self.model.n_y),
                        self.degree * self.finite_elements)

    if self.model.n_u_par > 0:
        if self.problem.u_guess is not None:
            u_init = repmat(self.problem.u_guess,
                            self.degree_control * self.finite_elements)
        else:
            u_init = repmat(DM.zeros(self.model.n_u),
                            self.degree_control * self.finite_elements)
    else:
        u_init = []

    eta_init = DM.zeros(self.problem.n_eta, 1)
    p_opt_init = DM.zeros(self.problem.n_p_opt, 1)
    theta_opt_init = DM.zeros(
        self.problem.n_theta_opt * self.finite_elements, 1)

    if p is not None:
        for k, ind in enumerate(self.problem.get_p_opt_indices()):
            p_opt_init[k] = p[ind]

    if theta is not None:
        for el in range(self.finite_elements):
            for k, ind in enumerate(self.problem.get_theta_opt_indices()):
                theta_opt_init[k + el * self.problem.n_theta_opt] = theta[el][ind]

    return vertcat(x_init, y_init, u_init, eta_init, p_opt_init,
                   theta_opt_init)
def phi_cas(self, x, v=None):
    if v is None:
        v = self.v[None, :]
    else:
        v = cas.repmat(v, self.W.shape[0], 1)
    assert x.shape[1] == self.x_dim
    y = cas.sin(cas.mtimes(self.W / v, x.T) + self.psi)
    return y.T
def variable_metadata_function(self):
    in_var = ca.veccat(*self._symbols(self.parameters))
    out = []
    is_affine = True
    zero, one = ca.MX(0), ca.MX(1)  # Recycle these common nodes as much as possible.
    for variable_list in [self.states, self.alg_states, self.inputs,
                          self.parameters, self.constants]:
        attribute_lists = [[] for i in range(len(ast.Symbol.ATTRIBUTES))]
        for variable in variable_list:
            for attribute_list_index, attribute in enumerate(ast.Symbol.ATTRIBUTES):
                value = ca.MX(getattr(variable, attribute))
                if value.is_zero():
                    value = zero
                elif value.is_one():
                    value = one
                value = value if value.numel() != 1 else ca.repmat(
                    value, *variable.symbol.size())
                attribute_lists[attribute_list_index].append(value)
        expr = ca.horzcat(*[ca.veccat(*attribute_list)
                            for attribute_list in attribute_lists])
        if len(self.parameters) > 0 and isinstance(expr, ca.MX):
            f = ca.Function('f', [in_var], [expr])
            contains_if_else = ca.OP_IF_ELSE_ZERO in [
                f.instruction_id(k) for k in range(f.n_instructions())
            ]
            zero_hessian = ca.jacobian(ca.jacobian(expr, in_var), in_var).is_zero()
            if contains_if_else or not zero_hessian:
                is_affine = False
        out.append(expr)
    if len(self.parameters) > 0 and is_affine:
        # Rebuild variable metadata as a single affine expression, if all
        # subexpressions are affine.
        in_var_ = ca.MX.sym('in_var', in_var.shape)
        out_ = []
        for o in out:
            Af = ca.Function('Af', [in_var], [ca.jacobian(o, in_var)])
            bf = ca.Function('bf', [in_var], [o])

            A = Af(0)
            A = ca.sparsify(A)

            b = bf(0)
            b = ca.sparsify(b)

            o_ = ca.reshape(ca.mtimes(A, in_var_), o.shape) + b
            out_.append(o_)
        out = out_
        in_var = in_var_
    return ca.Function('variable_metadata', [in_var], out)
def include_variable(self, variable, lb=-inf, ub=inf):
    """Include a symbolic variable in the optimization problem

    :param variable: variable to be included
    :param lb: Lower bound of the variable. If the given variable size is
        greater than one but a scalar is passed as lower bound, a vector of
        lb with the size of the given variable will be used as the lower
        bound. (default = [-inf]*size)
    :param ub: Upper bound of the variable. If the given variable size is
        greater than one but a scalar is passed as upper bound, a vector of
        ub with the size of the given variable will be used as the upper
        bound. (default = [inf]*size)
    """
    lb = vertcat(lb)
    ub = vertcat(ub)

    if lb.numel() == 1 and variable.numel() > 1:
        lb = repmat(lb, variable.numel())
    if ub.numel() == 1 and variable.numel() > 1:
        ub = repmat(ub, variable.numel())

    if not variable.numel() == lb.numel() or not variable.numel() == ub.numel():
        raise ValueError(
            "Lower bound or upper bound has a different size from the given variable")

    if (not lb.is_constant() and depends_on(lb, self.p)) \
            or (not ub.is_constant() and depends_on(ub, self.p)):
        raise ValueError(
            "Neither the lower nor the upper bound can depend on the optimization "
            "problem parameter. lb={}, ub={}".format(lb, ub))

    for i in range(variable.numel()):
        if lb[i] > ub[i]:
            raise ValueError(
                'Lower bound is greater than upper bound for index {}. '
                'The inequality {} <= {} <= {} is infeasible'.format(
                    i, lb[i], variable[i], ub[i]))

    self.x = vertcat(self.x, variable)
    self.x_lb = vertcat(self.x_lb, lb)
    self.x_ub = vertcat(self.x_ub, ub)
def create_variable(self, name, size=1, lb=-inf, ub=inf):
    """Create an optimization variable

    :param str name: Name of the optimization variable.
    :param int size: Size of the variable (default = 1)
    :param MX|SX lb: Lower bound of the variable. If the given 'size' is
        greater than one but a scalar is passed as lower bound, a vector of
        lb of size 'size' will be used as the lower bound.
        (default = [-inf]*size)
    :param MX|SX ub: Upper bound of the variable. If the given 'size' is
        greater than one but a scalar is passed as upper bound, a vector of
        ub of size 'size' will be used as the upper bound.
        (default = [inf]*size)
    :return: Return the variable
    :rtype: MX
    """
    if isinstance(lb, Number):
        lb = repmat(lb, size)
    if isinstance(ub, Number):
        ub = repmat(ub, size)

    new_x = MX.sym(name, size)
    self.include_variable(new_x, lb=lb, ub=ub)
    return new_x
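# A usage sketch (mirroring the tests elsewhere in this collection): scalar
# bounds are broadcast to the size of the new variable.
aop = AbstractOptimizationProblem()
x = aop.create_variable('x', 3, lb=0, ub=10)
assert x.shape == (3, 1)
assert aop.x_lb.numel() == 3 and aop.x_ub.numel() == 3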
def variable_metadata_function(self):
    in_var = ca.veccat(*self._symbols(self.parameters))
    out = []
    is_affine = True
    zero, one = ca.MX(0), ca.MX(1)  # Recycle these common nodes as much as possible.
    for variable_list in [self.states, self.alg_states, self.inputs,
                          self.parameters, self.constants]:
        attribute_lists = [[] for i in range(len(CASADI_ATTRIBUTES))]
        for variable in variable_list:
            for attribute_list_index, attribute in enumerate(CASADI_ATTRIBUTES):
                value = ca.MX(getattr(variable, attribute))
                if value.is_zero():
                    value = zero
                elif value.is_one():
                    value = one
                value = value if value.numel() != 1 else ca.repmat(
                    value, *variable.symbol.size())
                attribute_lists[attribute_list_index].append(value)
        expr = ca.horzcat(*[ca.veccat(*attribute_list)
                            for attribute_list in attribute_lists])
        if len(self.parameters) > 0 and isinstance(expr, ca.MX):
            f = ca.Function('f', [in_var], [expr])
            # NOTE: This is not a complete list of operations that can be
            # handled in an affine expression. That said, it should
            # capture the most common ways variable attributes are
            # expressed as a function of parameters.
            allowed_ops = {ca.OP_INPUT, ca.OP_OUTPUT, ca.OP_CONST,
                           ca.OP_SUB, ca.OP_ADD, ca.OP_MUL, ca.OP_DIV,
                           ca.OP_NEG}
            f_ops = {f.instruction_id(k) for k in range(f.n_instructions())}
            contains_unallowed_ops = not f_ops.issubset(allowed_ops)
            zero_hessian = ca.jacobian(ca.jacobian(expr, in_var), in_var).is_zero()
            if contains_unallowed_ops or not zero_hessian:
                is_affine = False
        out.append(expr)
    if len(self.parameters) > 0 and is_affine:
        # Rebuild variable metadata as a single affine expression, if all
        # subexpressions are affine.
        in_var_ = ca.MX.sym('in_var', in_var.shape)
        out_ = []
        for o in out:
            Af = ca.Function('Af', [in_var], [ca.jacobian(o, in_var)])
            bf = ca.Function('bf', [in_var], [o])

            A = Af(0)
            A = ca.sparsify(A)

            b = bf(0)
            b = ca.sparsify(b)

            o_ = ca.reshape(ca.mtimes(A, in_var_), o.shape) + b
            out_.append(o_)
        out = out_
        in_var = in_var_
    return self._expand_mx_func(ca.Function('variable_metadata', [in_var], out))
def gen(NX, NU, M, stateBounds=None, controlBounds=None):
    """gen

    Generates a casadi struct containing the symbolic optimization variables
    required for direct multiple shooting. x is a <NX>x<M+1> matrix, u is a
    <NU>x<M> matrix.

    :param NX: Number of state variables
    :param NU: Number of control variables
    :param M: Prediction horizon length
    :returns: A (variables, lbw, ubw) tuple: the struct_symSX of decision
        variables and the corresponding lower and upper bound vectors
    """
    # decision (free) variables
    variables = struct_symSX(
        [entry('x', shape=(NX, M + 1)),
         entry('u', shape=(NU, M))])

    # symbolic bounds
    bx = ca.SX.sym('bx', NX)
    bu = ca.SX.sym('bu', NU)

    # bounds struct, must be identical to variables struct in dimensions and keys
    bounds = struct_SX([
        entry('x', expr=ca.repmat(bx, 1, M + 1)),
        entry('u', expr=ca.repmat(bu, 1, M))
    ])
    boundsFun = ca.Function('varBoundsFun', [bx, bu], [bounds.cat])

    if stateBounds is None:
        stateBounds = np.multiply(np.ones((2, NX)),
                                  np.array((-np.inf, np.inf), ndmin=2).T)
    if controlBounds is None:
        controlBounds = np.multiply(np.ones((2, NU)),
                                    np.array((-np.inf, np.inf), ndmin=2).T)

    lbw = boundsFun(stateBounds[0, :], controlBounds[0, :])
    ubw = boundsFun(stateBounds[1, :], controlBounds[1, :])

    return variables, lbw, ubw
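# A usage sketch (hypothetical numbers): decision variables for a double
# integrator with NX=2 states, NU=1 control and a horizon of M=20 steps.
import numpy as np

variables, lbw, ubw = gen(
    NX=2, NU=1, M=20,
    stateBounds=np.array([[-1.0, -5.0], [1.0, 5.0]]),  # rows: lower, upper
    controlBounds=np.array([[-0.5], [0.5]]))
assert variables['x'].shape == (2, 21)
assert variables['u'].shape == (1, 20)
assert lbw.numel() == 2 * 21 + 1 * 20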
def __init__(self, name, dae, t, order, method='legendre', tdp_fun=None):
    """Make an integrator based on collocation method
    """
    N = order
    scheme = CollocationScheme(dae, t=t, order=order, method=method,
                               tdp_fun=tdp_fun)

    x0 = cs.MX.sym('x0', dae.nx)
    z0 = dae.z

    # Solve the collocation equations w.r.t. (x, K, Z)
    var = scheme.combine(['x', 'K', 'Z'])
    eq = cs.Function('eq', [var, x0, scheme.u, scheme.p],
                     [cs.vertcat(scheme.eq, scheme.x[:, 0] - x0)])
    rf = cs.rootfinder('rf', 'newton', eq)

    # Initial point for the rootfinder
    w0 = ce.struct_MX(var)
    w0['x'] = cs.repmat(x0, 1, scheme.x.shape[1])
    w0['K'] = cs.MX.zeros(scheme.K.shape)
    w0['Z'] = cs.repmat(z0, 1, scheme.Z.shape[1])

    sol = var(rf(w0, x0, scheme.u, dae.p))
    sol_x = sol['x']
    sol_K = sol['K']
    sol_Z = sol['Z']
    [sol_q, sol_Q, sol_X] = cs.substitute(
        [scheme.q, scheme.Q, scheme.X],
        [scheme.x, scheme.K, scheme.Z],
        [sol_x, sol_K, sol_Z])

    # TODO: return correct value for zf!
    # TODO: return only x instead of x and xf?
    super().__init__(name, [x0, z0, scheme.u, dae.p],
                     [sol_x[:, 1:], np.repeat(np.nan, dae.nz), sol_q[:, -1],
                      sol_X, sol_Z, sol_Q, sol_x, sol_K, scheme.tc],
                     ['x0', 'z0', 'u', 'p'],
                     ['xf', 'zf', 'qf', 'X', 'Z', 'Q', 'x', 'K', 'tc'])

    self._scheme = scheme
def _k_rbf(x, y=None, variance=1., lengthscale=None, diag_only=False):
    """ Evaluate the RBF kernel function symbolically using Casadi """
    n_x, dim_x = x.shape

    if diag_only:
        # The diagonal of a stationary kernel is constant and equal to the
        # signal variance.
        ret = SX(n_x, 1)
        ret[:] = variance
        return ret

    if y is None:
        y = x
    n_y, _ = np.shape(y)

    if lengthscale is None:
        lengthscale = np.ones((dim_x, ))

    lens_x = repmat(lengthscale.reshape(1, -1), n_x)
    lens_y = repmat(lengthscale.reshape(1, -1), n_y)

    r = _unscaled_dist(x / lens_x, y / lens_y)

    return variance * exp(-0.5 * r**2)
def _k_lin(x, y=None, variances=None, diag_only=False):
    """ Evaluate the Linear kernel function symbolically using Casadi """
    n_x, dim_x = np.shape(x)

    if variances is None:
        variances = np.ones((dim_x, ))

    if diag_only:
        var = repmat(variances.reshape(1, -1), n_x)
        ret = sum2(var * x**2)
        return ret

    var_x = sqrt(repmat(variances.reshape(1, -1), n_x))
    if y is None:
        var_y = var_x
        y = x
    else:
        n_y, _ = np.shape(y)
        var_y = sqrt(repmat(variances.reshape(1, -1), n_y))

    return mtimes(x * var_x, (y * var_y).T)
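# A numeric sanity check (not part of the original module): the linear
# kernel equals x diag(variances) y^T.
import numpy as np
from casadi import DM

x = DM(np.random.rand(4, 3))
v = np.array([0.5, 1.0, 2.0])
K = _k_lin(x, variances=v)
ref = np.array(x) @ np.diag(v) @ np.array(x).T
assert np.allclose(np.array(K), ref)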
def CasadiMatern(X, Y, model):
    """Matern kernel in CasADi"""
    length_scale = model.kernel_.get_params()['k2__length_scale'].reshape(1, -1)
    constant = model.kernel_.get_params()['k1__constant_value']
    nu = model.kernel_.get_params()['k2__nu']
    sX = X.shape[0]
    sY = Y.shape[0]
    X = X / cs.repmat(length_scale, sX, 1)
    Y = Y / cs.repmat(length_scale, sY, 1)
    dist = cs.repmat(cs.sum1(X.T**2).T, 1, sY) \
        + cs.repmat(cs.sum1(Y.T**2), sX, 1) \
        - 2 * cs.mtimes(X, Y.T)
    if nu == 0.5:
        K = constant * cs.exp(-dist**0.5)
    elif nu == 1.5:
        K = np.sqrt(3) * dist**0.5
        K = constant * (1. + K) * cs.exp(-K)
    elif nu == 2.5:
        K = np.sqrt(5) * dist**0.5
        K = constant * (1. + K + 5 / 3 * dist) * cs.exp(-K)
    else:
        raise NotImplementedError
    return K
def casadi_struct(s, default=0, **kwargs):
    ret = OrderedDict()
    args = dict(kwargs)
    for k in s.keys():
        if k in kwargs:
            e = args[k]
            del args[k]
            if not hasattr(e, 'shape'):
                e = C.DM(e)
            if e.is_scalar():
                e = C.repmat(e, *s[k].shape)
            assert e.shape == s[k].shape
        else:
            e = default * C.DM.ones(*s[k].shape)
        ret[k] = e
    return ret
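# A usage sketch, assuming the casadi.tools struct helpers: scalars are
# broadcast to the entry shape, and missing keys fall back to `default`.
import casadi as C
from casadi.tools import struct_symSX, entry

s = struct_symSX([entry('x', shape=(2, 1)), entry('u', shape=(1, 1))])
vals = casadi_struct(s, default=0, x=1.5)
assert vals['x'].shape == (2, 1)   # 1.5 tiled to a 2x1 DM
assert float(vals['u']) == 0.0     # filled with the default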
def add_inf_constraints(self, stage, opti, c, k, l, meta):
    coeff = stage._method.poly_coeff[k * self.M + l]
    degree = coeff.shape[1] - 1
    basis = BSplineBasis([0] * (degree + 1) + [1] * (degree + 1), degree)
    tscale = self.T / self.N / self.M
    tpower = vcat([tscale**i for i in range(degree + 1)])
    coeff = coeff * repmat(tpower.T, stage.nx, 1)
    # TODO: bernstein transformation as function of degree
    Poly_to_Bernstein_matrix_4 = DM(
        [[1, 0, 0, 0, 0],
         [1, 1.0 / 4, 0, 0, 0],
         [1, 1.0 / 2, 1.0 / 6, 0, 0],
         [1, 3.0 / 4, 1.0 / 2, 1.0 / 4, 0],
         [1, 1, 1, 1, 1]])
    state_coeff = Poly_to_Bernstein_matrix_4 @ coeff.T

    statesize = [0] + [elem.nnz() for elem in stage.states]
    statessizecum = np.cumsum(statesize)

    subst_from = stage.states
    state_coeff_split = horzsplit(state_coeff, statessizecum)
    subst_to = [BSpline(basis, coeff) for coeff in state_coeff_split]
    c_spline = reinterpret_expr(c, subst_from, subst_to)
    opti.subject_to(self.eval_at_control(stage, c_spline, k), meta=meta)
def chain_dyn(x, u, model_parameters):
    """ returns the derivative of the state dx=f(x) """
    dim = model_parameters.dimension
    n_balls = model_parameters.number_of_balls

    positions = cd.reshape(x[0:dim * (n_balls + 1), 0], (dim, n_balls + 1))
    velocities = cd.reshape(x[dim * (n_balls + 1):, 0], (dim, n_balls))

    # compute distance between masses
    distance_between_balls = positions[0:dim, 1:n_balls + 1] \
        - positions[0:dim, 0:n_balls]

    # add the distance (and its norm) between the first ball and the fixed wall
    distance_between_balls = cd.horzcat(positions[0:dim, 0],
                                        distance_between_balls)
    distance_between_balls_norm = cd.sqrt(cd.sum1(distance_between_balls**2))

    # calculate force between balls on springs
    F = model_parameters.spring_constant \
        * (1 - model_parameters.rest_length_of_springs
           / cd.repmat(distance_between_balls_norm, dim, 1)) \
        * distance_between_balls

    gravitational_force = np.concatenate(
        (np.zeros((1, n_balls)),
         -np.ones((1, n_balls)) * model_parameters.gravity_acceleration),
        axis=0)

    # find acceleration
    acceleration = (1 / model_parameters.ball_mass) \
        * (F[:, 1:] - F[:, 0:n_balls]) + gravitational_force

    x_dot = cd.horzcat(velocities, u, acceleration)
    return cd.reshape(x_dot, (model_parameters.number_of_outputs, 1))
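# A dimension-check sketch with hypothetical parameters (SimpleNamespace
# stands in for whatever parameter object the original code uses): a planar
# chain with 3 free balls gives a 14-dimensional state derivative.
from types import SimpleNamespace
import numpy as np
import casadi as cd

p = SimpleNamespace(dimension=2, number_of_balls=3, spring_constant=100.0,
                    rest_length_of_springs=0.05, ball_mass=0.1,
                    gravity_acceleration=9.81,
                    number_of_outputs=2 * (2 * 3 + 1))
x = cd.DM(np.random.rand(p.number_of_outputs, 1))
u = cd.DM([0.0, 0.0])  # velocity input applied to the free chain end
assert chain_dyn(x, u, p).shape == (14, 1)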
def include_equality(self, expr, rhs=None):
    """Include an equality constraint of the form: expr = rhs

    :param expr: expression, this is the only term that should contain
        symbolic variables
    :param rhs: right-hand side, by default it is a vector of zeros with the
        same size as expr. If the 'expr' size is greater than one but a
        scalar is passed as 'rhs', a vector of 'rhs' with the size of 'expr'
        will be used as the right-hand side. (default = [0]*size)
    """
    if isinstance(expr, list):
        expr = vertcat(expr)
    if expr.size2() > 1:
        raise Exception(
            "Given expression is not a vector, number of columns is {}".format(
                expr.size2()))

    if rhs is None:
        rhs = DM.zeros(expr.shape)
    else:
        rhs = vertcat(rhs)
        if rhs.numel() == 1 and expr.numel() > 1:
            rhs = repmat(rhs, expr.numel())

        if not expr.shape == rhs.shape:
            msg = "Expression and the right-hand side do not have the same size: " \
                  "expr.shape={}, rhs.shape={}".format(expr.shape, rhs.shape)
            raise ValueError(msg)

    # check that rhs contains no 'x's or 'p's
    if depends_on(rhs, vertcat(self.x, self.p)):
        raise ValueError(
            "Right-hand side cannot contain variables from the optimization "
            "problem. RHS = {}".format(rhs))

    self.g = vertcat(self.g, expr)
    self.g_lb = vertcat(self.g_lb, rhs)
    self.g_ub = vertcat(self.g_ub, rhs)
def expm(a_matrix):
    """Since casadi does not have native support for the matrix exponential,
    this is a trick to compute it. It can be quite expensive, especially for
    large matrices.
    THIS ONLY SUPPORTS NUMERIC MATRICES AND MX VARIABLES, IT DOES NOT SUPPORT
    SX SYMBOLIC VARIABLES.

    :param DM a_matrix: matrix
    :return:
    """
    dim = a_matrix.shape[1]

    # Create the integrator for x' = A x over t in [0, 1]; propagating each
    # unit vector through it yields the corresponding column of e^A.
    x_mx = MX.sym('x', a_matrix.shape[1])
    a_mx = MX.sym('a', a_matrix.shape)
    ode = mtimes(a_mx, x_mx)
    dae_system_dict = {'x': x_mx, 'ode': ode, 'p': vec(a_mx)}

    integrator_ = integrator("integrator", "cvodes", dae_system_dict,
                             {'tf': 1})
    integrator_map = integrator_.map(a_matrix.shape[1], 'thread')

    res = integrator_map(x0=DM.eye(dim),
                         p=repmat(vec(a_matrix), (1, a_matrix.shape[1])))['xf']
    return res
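# A sanity-check sketch against SciPy's dense matrix exponential (an extra
# dependency, used here only for the comparison):
import numpy as np
from scipy.linalg import expm as scipy_expm

A = DM([[0.0, 1.0], [-1.0, -0.1]])
assert np.allclose(np.array(expm(A)), scipy_expm(np.array(A)), atol=1e-6)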
def _simulate_with_casadi_with_inputs(self, initcon, tsim, varying_inputs,
                                      integrator, integrator_options):
    xalltemp = [self._templatemap[i] for i in self._diffvars]
    xall = casadi.vertcat(*xalltemp)

    time = casadi.SX.sym('time')

    odealltemp = [time * convert_pyomo2casadi(self._rhsdict[i])
                  for i in self._derivlist]
    odeall = casadi.vertcat(*odealltemp)

    # Time-varying inputs
    ptemp = [self._templatemap[i] for i in self._siminputvars.values()]
    pall = casadi.vertcat(time, *ptemp)

    dae = {'x': xall, 'p': pall, 'ode': odeall}

    if len(self._algvars) != 0:
        zalltemp = [self._templatemap[i] for i in self._simalgvars]
        zall = casadi.vertcat(*zalltemp)
        # Need to do anything special with time scaling??
        algalltemp = [convert_pyomo2casadi(i) for i in self._alglist]
        algall = casadi.vertcat(*algalltemp)
        dae['z'] = zall
        dae['alg'] = algall

    integrator_options['tf'] = 1.0
    F = casadi.integrator('F', integrator, dae, integrator_options)
    N = len(tsim)

    # This approach removes the time scaling from tsim so must
    # create an array with the time step between consecutive
    # time points
    tsimtemp = np.hstack([0, tsim[1:] - tsim[0:-1]])
    tsimtemp.shape = (1, len(tsimtemp))

    palltemp = [casadi.DM(tsimtemp)]

    # Need a similar np array for each time-varying input
    for p in self._siminputvars.keys():
        profile = varying_inputs[p]
        tswitch = list(profile.keys())
        tswitch.sort()
        tidx = [tsim.searchsorted(i) for i in tswitch] + [len(tsim) - 1]
        ptemp = [profile[0]] + \
                [casadi.repmat(profile[tswitch[i]], 1, tidx[i + 1] - tidx[i])
                 for i in range(len(tswitch))]
        temp = casadi.horzcat(*ptemp)
        palltemp.append(temp)

    I = F.mapaccum('simulator', N)
    sol = I(x0=initcon, p=casadi.vertcat(*palltemp))
    profile = sol['xf'].full().T

    if len(self._algvars) != 0:
        algprofile = sol['zf'].full().T
        profile = np.concatenate((profile, algprofile), axis=1)

    return [tsim, profile]
def load_model(model_folder: str, model_name: str,
               compiler_options: Dict[str, str]) -> CachedModel:
    """
    Loads a precompiled CasADi model into a CachedModel instance.

    :param model_folder: Folder where the precompiled CasADi model is located.
    :param model_name: Name of the model.
    :param compiler_options: Dictionary of compiler options.

    :returns: CachedModel instance.
    """
    db_file = os.path.join(model_folder, model_name + ".pymoca_cache")

    if compiler_options.get('mtime_check', True):
        # Mtime check
        cache_mtime = os.path.getmtime(db_file)
        for folder in [model_folder] + compiler_options.get('library_folders', []):
            for root, dir, files in os.walk(folder, followlinks=True):
                for item in fnmatch.filter(files, "*.mo"):
                    filename = os.path.join(root, item)
                    if os.path.getmtime(filename) > cache_mtime:
                        raise InvalidCacheError("Cache out of date")

    # Create empty model object
    model = CachedModel()

    # Load metadata
    with open(db_file, 'rb') as f:
        db = pickle.load(f)

    if db['version'] != __version__:
        raise InvalidCacheError('Cache generated for a different version of pymoca')

    # Check compiler options. We ignore the library folders, as they have
    # already been checked, and checking them will impede platform
    # portability of the cache.
    exclude_options = ['library_folders']
    old_opts = {k: v for k, v in db['options'].items() if k not in exclude_options}
    new_opts = {k: v for k, v in compiler_options.items() if k not in exclude_options}

    if old_opts != new_opts:
        raise InvalidCacheError('Cache generated for different compiler options')

    # Pickles are platform independent, but dynamic libraries are not
    if compiler_options.get('codegen', False):
        if db['library_os'] != os.name:
            raise InvalidCacheError('Cache generated for incompatible OS')

    # Include references to the shared libraries
    for o in ['dae_residual', 'initial_residual', 'variable_metadata',
              'delay_arguments']:
        if isinstance(db[o], str):
            # Path to codegen'd library
            f = ca.external(o, db[o])
        else:
            # Pickled CasADi Function; use as is
            assert isinstance(db[o], ca.Function)
            f = db[o]

        setattr(model, '_' + o + '_function', f)

    # Load variables per category
    variables_with_metadata = ['states', 'alg_states', 'inputs', 'parameters',
                               'constants']
    variable_dict = {}
    for key in variables_with_metadata:
        variables = getattr(model, key)
        for i, d in enumerate(db[key]):
            variable = Variable.from_dict(d)
            variables.append(variable)
            variable_dict[variable.symbol.name()] = variable

    model.der_states = [Variable.from_dict(d) for d in db['der_states']]
    model.outputs = db['outputs']
    model.delay_states = db['delay_states']
    model.alias_relation = db['alias_relation']

    # Evaluate variable metadata:
    parameter_vector = ca.veccat(*[v.symbol for v in model.parameters])
    metadata = dict(zip(variables_with_metadata,
                        model.variable_metadata_function(parameter_vector)))
    independent_metadata = dict(zip(
        variables_with_metadata,
        (np.array(x) for x in model.variable_metadata_function(
            ca.veccat(*[np.nan for v in model.parameters])))))

    for k, key in enumerate(variables_with_metadata):
        m = db[key + "__metadata_dependent"]
        for i, d in enumerate(db[key]):
            variable = variable_dict[d['name']]
            for j, tmp in enumerate(CASADI_ATTRIBUTES):
                if m[i, j]:
                    setattr(variable, tmp, metadata[key][i, j])
                else:
                    setattr(variable, tmp, independent_metadata[key][i, j])

    # Evaluate delay arguments:
    if model.delay_states:
        args = [model.time,
                ca.veccat(*model._symbols(model.states)),
                ca.veccat(*model._symbols(model.der_states)),
                ca.veccat(*model._symbols(model.alg_states)),
                ca.veccat(*model._symbols(model.inputs)),
                ca.veccat(*model._symbols(model.constants)),
                ca.veccat(*model._symbols(model.parameters))]
        delay_arguments_raw = model.delay_arguments_function(*args)

        nan_args = [ca.repmat(np.nan, *arg.size()) for arg in args]
        independent_delay_arguments_raw = model.delay_arguments_function(*nan_args)

        delay_expressions_raw = delay_arguments_raw[::2]
        delay_durations_raw = delay_arguments_raw[1::2]
        independent_delay_durations_raw = independent_delay_arguments_raw[1::2]

        assert 1 == len({len(delay_expressions_raw), len(delay_durations_raw),
                         len(independent_delay_durations_raw)})

        all_symbols = [model.time,
                       *model._symbols(model.states),
                       *model._symbols(model.der_states),
                       *model._symbols(model.alg_states),
                       *model._symbols(model.inputs),
                       *model._symbols(model.constants),
                       *model._symbols(model.parameters)]

        duration_dependencies = db['__delay_duration_dependent']

        # Get rid of false dependency symbols not used in any delay
        # durations. This significantly reduces the work the (slow)
        # substitute() calls have to do later on.
        actual_deps = sorted(set(np.array(duration_dependencies).ravel()))

        actual_dep_symbols = [np.nan] * len(all_symbols)
        for i in actual_deps:
            actual_dep_symbols[i] = all_symbols[i]

        delay_durations_simplified = ca.Function(
            'replace_false_deps',
            all_symbols,
            delay_durations_raw).call(actual_dep_symbols)

        # Get rid of remaining hidden dependencies in the delay durations
        for i, expr in enumerate(delay_expressions_raw):
            if duration_dependencies[i]:
                dur = delay_durations_simplified[i]

                if len(duration_dependencies[i]) < len(actual_deps):
                    deps = set(ca.symvar(dur))
                    actual_deps = {all_symbols[j] for j in duration_dependencies[i]}
                    false_deps = deps - actual_deps

                    if false_deps:
                        [dur] = ca.substitute(
                            [dur],
                            list(false_deps),
                            [np.nan] * len(false_deps))
                else:
                    # Already removed all false dependencies
                    pass
            else:
                dur = independent_delay_durations_raw[i]

            model.delay_arguments.append(DelayArgument(expr, dur))

    # Try to coerce parameters into their Python types
    for p in model.parameters:
        for attr in CASADI_ATTRIBUTES:
            v = getattr(p, attr)
            v_mx = ca.MX(v)
            if v_mx.is_constant() and v_mx.is_regular():
                setattr(p, attr, p.python_type(v))

    # Done
    return model
def repmat(inputobj, dim1, dim2):
    return ca.repmat(inputobj, dim1, dim2)
def check_and_set_initials(self,
                           uN=None,
                           pinit=None,
                           xinit=None):
    '''
    :param tbd: tbd
    :type tbd: tbd

    Define structures for the initial values for the several variables that
    build up the parameter estimation problem, and prepare the values
    provided with the arguments properly. Afterwards, the values are stored
    inside the class variable ``Varsinit``.
    '''

    # Define structures for initial values from the original
    # variable struct of the problem

    # Set control values
    # (only if the number of controls is not 0, else set nothing)
    if not self.nu == 0:
        if uN is None:
            uN = np.zeros((self.nu, self.nsteps))

        uN = np.atleast_2d(uN)

        if uN.shape == (self.nsteps, self.nu):
            uN = uN.T

        if not uN.shape == (self.nu, self.nsteps):
            raise ValueError("Wrong dimension for control values uN.")

        self.uN = uN
    else:
        # ca.DMatrix is the CasADi 2.x predecessor of ca.DM
        self.uN = ca.DMatrix(0, self.nsteps)

    # Set initials for the parameters
    if pinit is None:
        pinit = np.zeros(self.np)

    pinit = np.atleast_1d(np.squeeze(pinit))

    if not pinit.shape == (self.np,):
        raise ValueError("Wrong dimension for argument pinit.")

    self.Pinit = pinit

    # If it's a dynamic problem, set initials and bounds for the states
    if type(self.system) is not systems.BasicSystem:
        if xinit is None:
            xinit = np.zeros((self.nx, self.nsteps + 1))

        xinit = np.atleast_2d(xinit)

        if xinit.shape == (self.nsteps + 1, self.nx):
            xinit = xinit.T

        if not xinit.shape == (self.nx, self.nsteps + 1):
            raise ValueError("Wrong dimension for argument xinit.")

        self.Xinit = ca.repmat(xinit[:, :-1], self.ntauroot + 1, 1)
        self.XFinit = xinit[:, -1]
    else:
        self.Xinit = ca.DMatrix(0, 0)
        self.XFinit = ca.DMatrix(0, 0)

    self.Vinit = np.zeros(self.V.shape)
    self.EPS_Einit = np.zeros(self.EPS_E.shape)
    self.EPS_Uinit = np.zeros(self.EPS_U.shape)
def __init__(self, system=None,
             tu=None, uN=None,
             ty=None, yN=None,
             wv=None, weps_e=None, weps_u=None,
             pinit=None,
             xinit=None,
             linear_solver=None,
             scheme=None,
             order=None):

    intro.pecas_intro()
    print('\n' + 22 * '-' +
          ' PECas parameter estimation setup ' + 22 * '-')
    print('\nStarting parameter estimation problem setup ...')

    self.linear_solver = linear_solver

    if type(system) is systems.BasicSystem:
        self.pesetup = setups.BSsetup(system=system,
                                      tu=tu, uN=uN,
                                      pinit=pinit)
    elif type(system) is systems.ExplODE:
        self.pesetup = setups.ODEsetup(system=system,
                                       tu=tu, uN=uN,
                                       ty=ty, yN=yN,
                                       pinit=pinit,
                                       xinit=xinit,
                                       scheme=scheme,
                                       order=order)
    else:
        raise NotImplementedError(
            "The system type provided by the user is not supported.")

    # Check if the supplied measurement data fits the dimensions of
    # the output function
    yN = np.atleast_2d(yN)

    if yN.shape == (self.pesetup.tu.size, self.pesetup.nphi):
        yN = yN.T

    if not yN.shape == (self.pesetup.nphi, self.pesetup.tu.size):
        raise ValueError('''
The dimension of the measurement data given in yN does not match the
dimension of the output function and/or tu.

Valid dimensions for yN for the given data are:
{0} or {1}, but you supplied yN of dimension: {2}.'''.format(
            str((self.pesetup.tu.size, self.pesetup.nphi)),
            str((self.pesetup.nphi, self.pesetup.tu.size)), str(yN.shape)))

    # Check if the supplied standard deviations fit the dimensions of
    # the measurement data
    wv = np.atleast_2d(wv)

    if wv.shape == yN.T.shape:
        wv = wv.T

    if not wv.shape == yN.shape:
        raise ValueError('''
The dimension of the weights of the measurement errors given in wv does not
match the dimensions of the measurement data.

Valid dimensions for wv for the given data are:
{0} or {1}, but you supplied wv of dimension: {2}.'''.format(
            str(yN.shape), str(yN.T.shape), str(wv.shape)))

    # Get the measurement values and standard deviations into the
    # necessary order of appearance and dimensions
    self.yN = np.zeros(np.size(yN))
    self.wv = np.zeros(np.size(yN))

    for k in range(yN.shape[0]):
        self.yN[k:yN.shape[0] * yN.shape[1] + 1:yN.shape[0]] = yN[k, :]
        self.wv[k:yN.shape[0] * yN.shape[1] + 1:yN.shape[0]] = wv[k, :]

    self.weps_e = []
    try:
        if self.pesetup.neps_e != 0:
            weps_e = np.atleast_2d(weps_e)

            try:
                if weps_e.shape == (1, self.pesetup.neps_e):
                    weps_e = weps_e.T

                if not weps_e.shape == (self.pesetup.neps_e, 1):
                    raise ValueError('''
The dimensions of the weights of the equation errors given in weps_e do not
match the dimensions of the equation errors given in eps_e.''')

                self.weps_e = weps_e
            except AttributeError:
                pass

            try:
                self.weps_e = np.squeeze(ca.repmat(
                    weps_e,
                    self.pesetup.nsteps * (len(self.pesetup.tauroot) - 1), 1))
            except AttributeError:
                self.weps_e = []
    except AttributeError:
        pass

    self.weps_u = []
    try:
        if self.pesetup.neps_u != 0:
            weps_u = np.atleast_2d(weps_u)

            try:
                if weps_u.shape == (1, self.pesetup.neps_u):
                    weps_u = weps_u.T

                if not weps_u.shape == (self.pesetup.neps_u, 1):
                    raise ValueError('''
The dimensions of the weights of the input errors given in weps_u do not
match the dimensions of the input errors given in eps_u.''')

                self.weps_u = weps_u
            except AttributeError:
                pass

            try:
                self.weps_u = np.squeeze(ca.repmat(
                    weps_u,
                    self.pesetup.nsteps * (len(self.pesetup.tauroot) - 1), 1))
            except AttributeError:
                self.weps_u = []
    except AttributeError:
        pass

    # Set up the covariance matrix for the measurements
    # self.w = ca.diag(np.concatenate((self.wv, self.weps_e, self.weps_u)))
    self.w = ca.veccat((self.wv, self.weps_e, self.weps_u))

    print('Setup of the parameter estimation problem successful.')