def __cost_saturation_l(self, x, x_ref, covar_x, u, covar_u, delta_u, Q, R, S):
    """Stage cost: expected value of the saturating cost.

    Builds the closed-form expectation E[1 - exp(-e^T W e)] for a Gaussian
    variable with the given mean and covariance, where Z = I + 2*Sigma*W,
    and evaluates it for both the state error (x - x_ref) and the input u.

    Parameters
    ----------
    x, x_ref : state mean and state reference.
    covar_x : state covariance matrix (Nx x Nx).
    u, covar_u : input mean and input covariance.
    delta_u, S : accepted for interface compatibility; not used here.
    Q, R : weighting matrices for the state and input terms.

    Returns
    -------
    Symbolic expression for the expected stage cost (state term + input term).
    """
    Nx = ca.MX.size1(Q)
    Nu = ca.MX.size1(R)

    # Create symbols.
    # Fixed copy-paste defect: R_s was labelled 'Q' and u_s was labelled
    # 'x', which made printed/debugged expressions misleading.
    Q_s = ca.SX.sym('Q', Nx, Nx)
    R_s = ca.SX.sym('R', Nu, Nu)
    x_s = ca.SX.sym('x', Nx)
    u_s = ca.SX.sym('u', Nu)
    covar_x_s = ca.SX.sym('covar_z', Nx, Nx)
    covar_u_s = ca.SX.sym('covar_u', ca.MX.size(R))

    # Z = I + 2*Sigma*W, the matrix appearing in the saturating-cost
    # expectation for a Gaussian input.
    Z_x = ca.SX.eye(Nx) + 2 * covar_x_s @ Q_s
    Z_u = ca.SX.eye(Nu) + 2 * covar_u_s @ R_s

    # Expected cost: 1 - exp(-e^T (Z^-T W^T)^T e) / sqrt(det(Z));
    # solve() avoids forming an explicit inverse of Z.
    cost_x = ca.Function('cost_x', [x_s, Q_s, covar_x_s], [
        1 - ca.exp(-(x_s.T @ ca.solve(Z_x.T, Q_s.T).T @ x_s))
        / ca.sqrt(ca.det(Z_x))
    ])
    cost_u = ca.Function('cost_u', [u_s, R_s, covar_u_s], [
        1 - ca.exp(-(u_s.T @ ca.solve(Z_u.T, R_s.T).T @ u_s))
        / ca.sqrt(ca.det(Z_u))
    ])

    return cost_x(x - x_ref, Q, covar_x) + cost_u(u, R, covar_u)
def test_SXconversion(self):
    """Smoke-test conversions between SX objects and numpy arrays."""
    self.message("Conversions from and to SX")
    scalar = SX.sym("y")
    matrix = SX.sym("x", 3, 3)
    # Construction from existing symbolic objects must be accepted.
    SX(scalar)
    SX(matrix)
    c.det(matrix)
    # Round-trip through DM and a numpy array, then take det again.
    as_array = array(DM(matrix))
    c.det(as_array)
def test_SXconversion(self):
    """Smoke-test conversions between SXMatrix objects and numpy arrays (legacy API)."""
    self.message("Conversions from and to SXMatrix")
    scalar = SX("y")
    mat = ssym("x", 3, 3)
    # Wrapping existing symbolic objects must be accepted.
    SXMatrix(scalar)
    SXMatrix(mat)
    c.det(mat)
    # Conversion to a numpy array, then det on the array form.
    arr = array(mat)
    c.det(arr)
def test_SXconversion(self):
    """Smoke-test conversions between SXElement/SX and numpy arrays."""
    self.message("Conversions from and to SX")
    elem = SXElement.sym("y")
    mtx = SX.sym("x", 3, 3)
    # Constructing SX from existing symbolic objects must be accepted.
    SX(elem)
    SX(mtx)
    c.det(mtx)
    # det must also work after conversion to a numpy array.
    arr = array(mtx)
    c.det(arr)
def test_SXconversion(self):
    """Smoke-test conversions between SXMatrix and numpy arrays (legacy ssym API)."""
    self.message("Conversions from and to SXMatrix")
    sym_scalar = SX("y")
    sym_matrix = ssym("x", 3, 3)
    # Wrapping existing symbolic values must be accepted.
    SXMatrix(sym_scalar)
    SXMatrix(sym_matrix)
    c.det(sym_matrix)
    # det must also work on the numpy-array form.
    numeric = array(sym_matrix)
    c.det(numeric)
def setup_oed(self, outputs, parameters, sigma, time_points, design="A"):
    """
    Transforms an Optimization Problem into an Optimal Experimental
    Design problem by adding a design criterion on the Fisher
    information matrix to the existing objective.

    Parameters::

        outputs --
            List of names for outputs.

            Type: [string]

        parameters --
            List of names for parameters to estimate.

            Type: [string]

        sigma --
            Experiment variance matrix.

            Type: [[float]]

        time_points --
            List of measurement time points.

            Type: [float]

        design --
            Design criterion.

            Possible values: "A", "T"
    """
    # Augment sensitivities and add timed variables
    self.augment_sensitivities(parameters)
    timed_sens = self.create_timed_sensitivities(outputs, parameters,
                                                 time_points)

    # Create sensitivity and Fisher matrices
    Q = []
    for j in xrange(len(outputs)):
        Q.append(casadi.vertcat(
                [casadi.horzcat([s.getVar() for s in timed_sens[i][j]])
                 for i in xrange(len(time_points))]))
    Fisher = sum([sigma[i, j] * casadi.mul(Q[i].T, Q[j])
                  for i in xrange(len(outputs))
                  for j in xrange(len(outputs))])

    # Define the objective
    if design == "A":
        # trace(Fisher^-1) without forming an explicit inverse:
        # differentiate solve(Fisher, b) with respect to b.
        b = casadi.MX.sym("b", Fisher.shape[1], 1)
        Fisher_inv = casadi.jacobian(casadi.solve(Fisher, b), b)
        obj = casadi.trace(Fisher_inv)
    elif design == "T":
        obj = -casadi.trace(Fisher)
    elif design == "D":
        # Fixed: the original assigned obj = -casadi.det(Fisher) and then
        # unconditionally raised, leaving dead code. Raise directly.
        raise NotImplementedError("D-optimal design is not supported.")
    else:
        raise ValueError("Invalid design %s." % design)
    old_obj = self.getObjective()
    self.setObjective(old_obj + obj)
def det(A):
    """
    Returns the determinant of the matrix A.

    Dispatches to casadi for casadi types, otherwise to numpy.

    See: https://numpy.org/doc/stable/reference/generated/numpy.linalg.det.html
    """
    if is_casadi_type(A):
        return _cas.det(A)
    return _onp.linalg.det(A)
def test_SX(self):
    """Check SX unary operations, plus det/inv against numpy.linalg."""
    self.message("SX unary operations")
    sym = SX.sym("x", 3, 2)
    vals = array([[0.738, 0.2], [0.1, 0.39], [0.99, 0.999999]])
    self.numpyEvaluationCheckPool(self.pool, [sym], vals, name="SX")
    # Square matrix for determinant / inverse checks.
    sym = SX.sym("x", 3, 3)
    vals = array([[0.738, 0.2, 0.3], [0.1, 0.39, -6], [0.99, 0.999999, -12]])
    # det returns a scalar; wrap it back into an SX for the checker.
    self.numpyEvaluationCheck(lambda arg: SX(c.det(arg[0])),
                              lambda arg: linalg.det(arg),
                              [sym], vals, name="det(SX)")
    self.numpyEvaluationCheck(lambda arg: c.inv(arg[0]),
                              lambda arg: linalg.inv(arg),
                              [sym], vals, name="inv(SX)")
def test_SX(self):
    """Check SX unary operations, plus det/inv against numpy.linalg."""
    self.message("SX unary operations")
    mat = SX.sym("x", 3, 2)
    data = array([[0.738, 0.2], [0.1, 0.39], [0.99, 0.999999]])
    self.numpyEvaluationCheckPool(self.pool, [mat], data, name="SX")
    # Square matrix for determinant / inverse checks.
    mat = SX.sym("x", 3, 3)
    data = array([[0.738, 0.2, 0.3], [0.1, 0.39, -6], [0.99, 0.999999, -12]])
    # det yields a scalar; wrap it in a one-element SX for the checker.
    self.numpyEvaluationCheck(lambda inp: SX([c.det(inp[0])]),
                              lambda inp: linalg.det(inp),
                              [mat], data, name="det(SX)")
    self.numpyEvaluationCheck(lambda inp: c.inv(inp[0]),
                              lambda inp: linalg.inv(inp),
                              [mat], data, name="inv(SX)")
def __cost_saturation_lf(self, x, x_ref, covar_x, P):
    """Terminal cost: expected value of the saturating cost.

    Evaluates the closed-form expectation E[1 - exp(-e^T P e)] for the
    Gaussian terminal state error e = x - x_ref with covariance covar_x,
    using Z = I + 2*Sigma*P.
    """
    dim = ca.MX.size1(P)

    # Symbolic placeholders used to build the cost function once.
    weight = ca.SX.sym('P', dim, dim)
    state = ca.SX.sym('x', dim)
    sigma = ca.SX.sym('covar_z', dim, dim)

    # Z = I + 2*Sigma*P from the saturating-cost expectation;
    # solve() avoids forming an explicit inverse of Z.
    Z = ca.SX.eye(dim) + 2 * sigma @ weight
    expected = (1 - ca.exp(-(state.T @ ca.solve(Z.T, weight.T).T @ state))
                / ca.sqrt(ca.det(Z)))
    terminal_cost = ca.Function('cost_x', [state, weight, sigma], [expected])

    return terminal_cost(x - x_ref, P, covar_x)
def loss_sat(m, v, z, W=None):
    """
    Saturating cost function

    Computes the expected value of the saturating cost
    1 - exp(-(m - z)^T W (m - z) / 2) under a Gaussian input with
    mean m and covariance v, and wraps it in a casadi Function of (m, v).

    Parameters
    ----------
    m : dx1 ndarray[float | casadi.Sym]
        The mean of the input Gaussian
    v : dxd ndarray[float | casadi.Sym]
        The covariance of the input Gaussian
    z : dx1 ndarray[float | casadi.Sym]
        The target-state
        NOTE(review): originally documented as "[optional]" but the
        signature requires it — confirm intent with callers.
    W : dxd ndarray[float | casadi.Sym], optional
        The weighting matrix factor for the cost-function (scaling);
        defaults to the identity when None.

    Returns
    -------
    L : casadi.Function
        Function mapping (m, v) to the expected saturating loss
        (the original doc said "float"; the code returns a Function).

    Warning: Solving the Matlab system W/(eye(D)+SW) via inversion.
             Can be instable
    TO-DO: Should be fixed
    """
    D = np.shape(m)[0]
    if W is None:
        # Default weighting: identity of the input dimension.
        W = SX.eye(D)
    SW = mtimes(v, W)
    G = SX.eye(D) + SW
    # W (I + vW)^-1 via explicit inverse — see stability warning above.
    inv_G = inv(G)
    iSpW = mtimes(W, inv_G)
    # E[1 - exp(-(m-z)^T W (m-z)/2)] = 1 - exp(...) / sqrt(det(I + vW))
    L = 1 - exp(mtimes(-(m - z).T, mtimes(iSpW, (m - z) / 2))) / sqrt(det(G))

    return Function("l_sat", [m, v], [L])  # convert into MX function
def det(inputobj):
    """Thin wrapper: determinant of *inputobj* via casadi."""
    result = ca.det(inputobj)
    return result