Example #1
def blockdiag(*matrices_list):
    """Receives a list of matrices and return a block diagonal.

    :param DM|MX|SX matrices_list: list of matrices
    """

    size_1 = sum([m.size1() for m in matrices_list])
    size_2 = sum([m.size2() for m in matrices_list])
    matrix_types = [type(m) for m in matrices_list]

    if SX in matrix_types and MX in matrix_types:
        raise ValueError(
            "Cannot mix MX and SX types. Types given: {}".format(matrix_types))

    if SX in matrix_types:
        matrix = SX.zeros(size_1, size_2)
    elif MX in matrix_types:
        matrix = MX.zeros(size_1, size_2)
    else:
        matrix = DM.zeros(size_1, size_2)

    index_1 = 0
    index_2 = 0

    for m in matrices_list:
        matrix[index_1:index_1 + m.size1(), index_2:index_2 + m.size2()] = m
        index_1 += m.size1()
        index_2 += m.size2()

    return matrix
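A minimal usage sketch for blockdiag (assuming the CasADi types SX, MX and DM are imported in the module that defines it, as in the examples below; the matrices here are illustrative):

from casadi import DM, SX

A = DM.ones(2, 2)
B = SX.sym('B', 3, 3)
M = blockdiag(A, B)   # mixing DM and SX promotes the result to SX
print(M.shape)        # (5, 5)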
Example #2
def J_qi(T, Q, R, P_f, N_pred, states, controls, rhs, func_type=1):

    # get the number of rows ("shape" is a tuple)
    n_states = states.shape[0]  # p 22.

    # number of inputs
    n_controls = controls.shape[0]

    # nonlinear mapping function f(x,u)
    f = Function('f', [states, controls], [rhs])
    # Decision variables (controls) u_pwm
    U = SX.sym('U', n_controls, N_pred)
    # parameters (initial state, reference state of the robot, and reference input)
    P = SX.sym('P', n_states + n_states + n_controls)
    # number of x is always N+1   -> 0..N
    X = SX.sym('X', n_states, (N_pred + 1))

    X[:, 0] = P[0:n_states]
    obj = 0

    # compute predicted states and cost function
    for k in range(N_pred):
        st = X[:, k]
        con = U[:, k]
        f_value = f(st, con)
        # print(f_value)
        st_next = st + (T * f_value)
        X[:, k + 1] = st_next
        obj = obj + (st - P[n_states:2 * n_states]).T @ Q @ (
            st - P[n_states:2 * n_states]) + (
                con - P[2 * n_states:2 * n_states + n_controls]).T @ R @ (
                    con - P[2 * n_states:2 * n_states + n_controls])
    # print(obj)
    st = X[:, N_pred]
    obj = obj + (st - P[n_states:2 * n_states]).T @ P_f @ (
        st - P[n_states:2 * n_states])
    # create a dense matrix of state constraints. Size of g: n_states * (N_pred+1) +1
    g = SX.zeros(n_states * (N_pred + 1) + 1, 1)

    # specify the constraint variables
    for i in range(N_pred + 1):
        for j in range(n_states):
            g[n_states * i + j] = X[j, i]

    g[n_states *
      (N_pred + 1)] = (X[:, N_pred] - P[n_states:2 * n_states]).T @ P_f @ (
          X[:, N_pred] - P[n_states:2 * n_states])
    OPT_variables = reshape(U, n_controls * N_pred, 1)
    nlp_prob = {'f': obj, 'x': OPT_variables, 'g': g, 'p': P}
    opts = {}
    opts['print_time'] = False
    opts['ipopt'] = {
        'max_iter': 100,
        'print_level': 0,
        'acceptable_tol': 1e-8,
        'acceptable_obj_change_tol': 1e-6
    }
    # print(opts)
    return f, nlpsol("solver", "ipopt", nlp_prob, opts)
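A minimal sketch of how J_qi might be called for a scalar integrator model; the symbols, weights and bounds below are illustrative and not taken from the original code:

from casadi import SX, DM, inf

x = SX.sym('x')                    # single state
u = SX.sym('u')                    # single input
rhs = u                            # integrator dynamics: x_dot = u
T, N_pred = 0.1, 20
Q, R, P_f = DM([[1.0]]), DM([[0.1]]), DM([[10.0]])

f, solver = J_qi(T, Q, R, P_f, N_pred, x, u, rhs)

# p = [x0, x_ref, u_ref]; g stacks the predicted states plus one terminal term
sol = solver(x0=DM.zeros(N_pred), p=DM([0.5, 0.0, 0.0]),
             lbx=-1.0, ubx=1.0, lbg=-inf, ubg=inf)
print(sol['x'])                    # optimal input sequence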
Example #3
    def test_blockdiag(self):
        # Test blockdiag with DM
        correct_res = DM([[1, 1, 0, 0, 0], [1, 1, 0, 0, 0], [0, 0, 1, 1, 1],
                          [0, 0, 1, 1, 1], [0, 0, 1, 1, 1]])

        a = DM.ones(2, 2)
        b = DM.ones(3, 3)
        res = blockdiag(a, b)
        self.assertTrue(is_equal(res, correct_res))

        # MX and DM mix
        a = MX.sym('a', 2, 2)
        b = DM.ones(1, 1)
        correct_res = MX.zeros(3, 3)
        correct_res[:2, :2] = a
        correct_res[2:, 2:] = b

        res = blockdiag(a, b)
        self.assertTrue(is_equal(res, correct_res, 30))

        # SX and DM mix
        a = SX.sym('a', 2, 2)
        b = DM.ones(1, 1)
        correct_res = SX.zeros(3, 3)
        correct_res[:2, :2] = a
        correct_res[2:, 2:] = b

        res = blockdiag(a, b)
        self.assertTrue(is_equal(res, correct_res, 30))

        # SX and MX
        a = SX.sym('a', 2, 2)
        b = MX.sym('b', 2, 2)
        self.assertRaises(ValueError, blockdiag, a, b)
Example #4
    def _sample_parameters(self):
        n_samples = self.n_samples
        n_uncertain = self.n_uncertain

        mean = vertcat(self.socp.p_unc_mean,
                       self.socp.uncertain_initial_conditions_mean)

        if self.socp.n_p_unc > 0 and self.socp.n_uncertain_initial_condition > 0:
            covariance = diagcat(self.socp.p_unc_cov,
                                 self.socp.uncertain_initial_conditions_cov)
        elif self.socp.n_p_unc > 0:
            covariance = self.socp.p_unc_cov
        elif self.socp.n_uncertain_initial_condition > 0:
            covariance = self.socp.uncertain_initial_conditions_cov
        else:
            raise ValueError("No uncertainties found n_p_unc = {}, "
                             "n_uncertain_initial_condition={}".format(
                                 self.socp.n_p_unc,
                                 self.socp.n_uncertain_initial_condition))

        dist = self.socp.p_unc_dist + self.socp.uncertain_initial_conditions_distribution

        for d in dist:
            if not d == 'normal':
                raise NotImplementedError(
                    'Distribution "{}" not implemented, only "normal" is available.'
                    .format(d))

        sampled_epsilon = sample_parameter_normal_distribution_with_sobol(
            DM.zeros(mean.shape), DM.eye(covariance.shape[0]), n_samples)
        sampled_parameters = SX.zeros(n_uncertain, n_samples)
        for s in range(n_samples):
            sampled_parameters[:, s] = mean + mtimes(sampled_epsilon[:, s].T,
                                                     chol(covariance)).T
        return sampled_parameters
Example #5
def gp_pred(x,
            kern,
            beta=None,
            x_train=None,
            k_inv_training=None,
            pred_var=True):
    """

    """
    n_pred, _ = np.shape(x)

    if beta is None:
        pred_mu = SX.zeros(n_pred, 1)
    else:
        k_star = kern(x, y=x_train)
        pred_mu = mtimes(k_star, beta)

    if pred_var:
        pred_sigm = kern(x, diag_only=True)

        if beta is not None:
            pred_sigm = pred_sigm - sum2(
                mtimes(k_star, k_inv_training) * k_star)

        return pred_mu, pred_sigm

    return pred_mu
Example #6
def export_chain_mass_model(n_mass, m, D, L):

    model_name = 'chain_mass_' + str(n_mass)
    x0 = np.array([0, 0, 0])  # fixed mass (at the wall)

    M = n_mass - 2  # number of intermediate masses

    nx = (2 * M + 1) * 3  # differential states
    nu = 3  # control inputs

    xpos = SX.sym('xpos', (M + 1) * 3, 1)  # position of the fixed mass is eliminated
    xvel = SX.sym('xvel', M * 3, 1)
    u = SX.sym('u', nu, 1)
    xdot = SX.sym('xdot', nx, 1)

    f = SX.zeros(3 * M, 1)  # force on intermediate masses

    for i in range(M):
        f[3 * i + 2] = -9.81

    for i in range(M + 1):
        if i == 0:
            dist = xpos[i * 3:(i + 1) * 3] - x0
        else:
            dist = xpos[i * 3:(i + 1) * 3] - xpos[(i - 1) * 3:i * 3]

        scale = D / m * (1 - L / norm_2(dist))
        F = scale * dist

        # mass on the right
        if i < M:
            f[i * 3:(i + 1) * 3] -= F

        # mass on the left
        if i > 0:
            f[(i - 1) * 3:i * 3] += F

    # parameters
    p = []

    x = vertcat(xpos, xvel)

    # dynamics
    f_expl = vertcat(xvel, u, f)
    f_impl = xdot - f_expl

    model = AcadosModel()

    model.f_impl_expr = f_impl
    model.f_expl_expr = f_expl
    model.x = x
    model.xdot = xdot
    model.u = u
    model.p = p
    model.name = model_name

    return model
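A hedged usage sketch (requires acados_template for AcadosModel; the physical parameters below are placeholders, not values from the source):

model = export_chain_mass_model(n_mass=4, m=0.033, D=1.0, L=0.033)
print(model.name)       # 'chain_mass_4'
print(model.x.shape)    # ((2*M + 1)*3, 1) with M = n_mass - 2 intermediate masses
print(model.u.shape)    # (3, 1)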
Example #7
 def construct_upd_z(self):
     # check if we have linear equality constraints
     self._lineq_updz, A, b = self._check_for_lineq()
     if not self._lineq_updz:
         self._construct_upd_z_nlp()
         return
     x_i = struct_symSX(self.q_i_struct)
     x_j = struct_symSX(self.q_ij_struct)
     l_i = struct_symSX(self.q_i_struct)
     l_ij = struct_symSX(self.q_ij_struct)
     t = SX.sym('t')
     T = SX.sym('T')
     rho = SX.sym('rho')
     par = struct_symSX(self.par_struct)
     inp = [x_i.cat, l_i.cat, l_ij.cat, x_j.cat, t, T, rho, par.cat]
     t0 = t/T
     # put symbols in SX structs (necessary for transformation)
     x_i = self.q_i_struct(x_i)
     x_j = self.q_ij_struct(x_j)
     l_i = self.q_i_struct(l_i)
     l_ij = self.q_ij_struct(l_ij)
     # transform spline variables: only consider future piece of spline
     tf = lambda cfs, basis: shift_knot1_fwd(cfs, basis, t0)
     self._transform_spline([x_i, l_i], tf, self.q_i)
     self._transform_spline([x_j, l_ij], tf, self.q_ij)
     # fill in parameters
     A = A([par])[0]
     b = b([par])[0]
     # build KKT system
     E = rho*SX.eye(A.shape[1])
     l, x = vertcat([l_i.cat, l_ij.cat]), vertcat([x_i.cat, x_j.cat])
     f = -(l + rho*x)
     G = vertcat([horzcat([E, A.T]),
                  horzcat([A, SX.zeros(A.shape[0], A.shape[0])])])
     h = vertcat([-f, b])
     z = solve(G, h)
     l_qi = self.q_i_struct.shape[0]
     l_qij = self.q_ij_struct.shape[0]
     z_i_new = self.q_i_struct(z[:l_qi])
     z_ij_new = self.q_ij_struct(z[l_qi:l_qi+l_qij])
     # transform back
     tf = lambda cfs, basis: shift_knot1_bwd(cfs, basis, t0)
     self._transform_spline(z_i_new, tf, self.q_i)
     self._transform_spline(z_ij_new, tf, self.q_ij)
     out = [z_i_new.cat, z_ij_new.cat]
     # create problem
     prob, compile_time = self.father.create_function(
         'upd_z', inp, out, self.options)
     self.problem_upd_z = prob
Example #8
rhs = vertcat(h_p, k_L / m * ((k_V * (eta + eta_0) - A_B * h_p) / A_SP)**2 - g,
              -1 / T_M * eta + k_M / T_M * u_pwm)

f = Function('f', [states, controls], [rhs * T + states])

K = SX([[0.99518, 0.086597, 0.14783]])
# K = SX([[31.2043,  2.7128, 0.4824]])
phi_c = A_d - B_d @ K
print(phi_c)

P_f = SX([[5959.1, 517.93, 65.087], [517.93, 51.198, 5.6419],
          [65.087, 5.6419, 18.834]])
# P_f=SX([[68639,5891.4,616.42],[5891.4,519.84,53.599],[616.42,53.599,641.7]])
state_r = SX([[0.8], [0], [48.760258862]])
u_r = [157.291157619]
Q = SX.zeros(3, 3)
# Q[0, 0] = 1e3
Q[0, 0] = 1
Q[1, 1] = 1
Q[2, 2] = 1

R = SX.zeros(1, 1)
R[0, 0] = 1

Q_s = Q + K.T @ R @ K
# delta_Q=SX([[5000,0,0],[0,5,0],[0,0,5]])
delta_Q = SX([[50, 0, 0], [0, 5, 0], [0, 0, 5]])
K = reshape(K, 1, 3)
X = SX.sym('X', 3, 1)

psi = f(X, -K @ (X - state_r) + u_r) - ((phi_c) @ (X - state_r) + state_r)
Example #9
def sxvec(*args):
    """Stack scalar SX expressions into a column vector."""
    n = len(args)
    v = SX.zeros(n)
    for i in range(n):
        v[i] = args[i]
    return v
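For illustration, sxvec can be used to stack scalar expressions into a column vector (assuming SX is imported from casadi):

a, b = SX.sym('a'), SX.sym('b')
v = sxvec(a, b, a * b)   # 3x1 SX vector [a; b; a*b]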
Example #10
def get_flat_maps(ntrailers):
    '''
        returns expressions for state variables [x, y, phi, theta...]
        as functions of outputs [y0, y1, Dy0, Dy1, ...]
    '''
    out = SX.zeros(ntrailers + 3, 2)
    out[0, 0] = SX.sym(f'x{ntrailers - 1}')
    out[0, 1] = SX.sym(f'y{ntrailers - 1}')

    for i in range(1, ntrailers + 3):
        out[i, 0] = SX.sym(f'D{i}x{ntrailers-1}')
        out[i, 1] = SX.sym(f'D{i}y{ntrailers-1}')

    args = out[1:-1, :].T.reshape((-1, 1))
    dargs = out[2:, :].T.reshape((-1, 1))

    # 1. find all thetas
    p = out[0, :].T
    v = sxvec(args[0], args[1])
    theta = arctan2(v[1], v[0])
    dtheta = jtimes(theta, args, dargs)
    thetas = [theta]
    velocities = [v]
    positions = [p]
    dthetas = [dtheta]

    for i in range(ntrailers - 1):
        p_next = p
        v_next = v
        theta_next = theta
        dtheta_next = dtheta

        p = p_next + sxvec(cos(theta_next), sin(theta_next))
        v = v_next + sxvec(-sin(theta_next), cos(theta_next)) * dtheta_next
        theta = arctan2(v[1], v[0])
        dtheta = jtimes(theta, args, dargs)

        thetas += [theta]
        velocities += [v]
        dthetas += [dtheta]
        positions += [p]

    positions = positions[::-1]
    velocities = velocities[::-1]
    dthetas = dthetas[::-1]
    thetas = thetas[::-1]

    # 2. find phi
    v0 = velocities[0]
    dtheta0 = dthetas[0]
    theta0 = thetas[0]
    phi = arctan2(dtheta0, cos(theta0) * v0[0] + sin(theta0) * v0[1])

    # 3. find controls
    u1 = v0.T @ sxvec(cos(theta0), sin(theta0))
    u2 = jtimes(phi, args, dargs)

    return make_tuple('FlatMaps',
                      flat_out=out[0],
                      flat_derivs=out,
                      u1=u1,
                      u2=u2,
                      phi=phi,
                      theta=thetas,
                      velocities=velocities,
                      positions=positions)
Example #11
    print(rhs)

    # nonlinear mapping function f(x,u)
    f = Function('f', [states, controls], [rhs])
    # Decision variables (controls) u_pwm
    U = SX.sym('U', n_controls, N_pred)
    # parameters (initial state and reference state of the robot)
    P = SX.sym('P', n_states + n_states)
    # number of x is always N+1   -> 0..N
    X = SX.sym('X', n_states, (N_pred + 1))

    X[:, 0] = P[0:3]
    obj = 0
    g = []  # constraints vector
    Q = SX.zeros(3, 3)
    Q[0, 0] = 1e7
    Q[1, 1] = 1
    Q[2, 2] = 1e-5

    R = SX.zeros(1, 1)
    R[0, 0] = 1e-5

    P_f = SX.zeros(3, 3)
    P_f[0, 0] = 1.0439e+007
    P_f[0, 1] = -3.0878e+007
    P_f[0, 2] = -1.6277e+009
    P_f[1, 0] = -3.0878e+007
    P_f[1, 1] = 1.6483e+008
    P_f[1, 2] = 6.4157e+009
    P_f[2, 0] = -1.6277e+009
Example #12
def acados_linear_quad_model_moving_eq(num_states, g=9.81):

    model_name = 'linear_quad_v2'
    tc = 0.59375
    # scaling_control_mat = np.array([1, 1, 1, (g/tc)])
    gravity = SX.zeros(3)
    gravity[2] = g

    x = SX.sym('x', 9)
    u = SX.sym('u', 4)
    xdot = SX.sym('xdot', 9)

    r1 = SX(3, 3)
    r1[0, 0] = 1
    r1[1, 1] = cos(x[3])
    r1[1, 2] = -sin(x[3])
    r1[2, 1] = sin(x[3])
    r1[2, 2] = cos(x[3])

    r2 = SX(3, 3)
    r2[0, 0] = cos(x[4])
    r2[0, 2] = sin(x[4])
    r2[1, 1] = 1
    r2[2, 0] = -sin(x[4])
    r2[2, 2] = cos(x[4])

    r3 = SX(3, 3)
    r3[0, 0] = cos(x[5])
    r3[0, 1] = -sin(x[5])
    r3[1, 0] = sin(x[5])
    r3[1, 1] = cos(x[5])
    r3[2, 2] = 1

    tau = SX(3, 3)
    tau[:, 0] = (inv(r2))[:, 0]
    tau[:, 1] = (inv(r2))[:, 1]
    tau[:, 2] = (inv(r2 @ r1))[:, 2]

    f_nl = vertcat(x[6:9],
                   inv(tau) @ u[0:3],
                   -((r3 @ r1 @ r2)[:, 2]) * u[3] * (g / tc) + gravity)

    # f_const = 20
    # f_nl = vertcat(
    #     x[6:9],
    #     inv(tau) @ u[0:3],
    #     - ((r3 @ r1 @ r2)[:,2]) * (u[3] * ((g + f_const)/tc) - f_const) + gravity
    # )

    A = jacobian(f_nl, x)
    B = jacobian(f_nl, u)

    p = []

    f_expl = mtimes(A, x) + mtimes(B, u)

    f_impl = xdot - f_expl

    model = AcadosModel()

    model.f_impl_expr = f_impl
    model.f_expl_expr = f_expl
    model.x = x
    model.xdot = xdot
    model.u = u
    # model.z = z
    model.p = p
    model.name = model_name

    return model
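A short sketch of building the linearized model (assumes acados_template and CasADi are available; num_states is not used inside the function body):

model = acados_linear_quad_model_moving_eq(9)
print(model.name)       # 'linear_quad_v2'
print(model.x.shape)    # (9, 1)
print(model.u.shape)    # (4, 1)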
Example #13
def acados_nonlinear_quad_model(num_states, g=9.81):

    model_name = 'nonlinear_quad'

    position = SX.sym('o', 3)
    velocity = SX.sym('v', 3)
    phi = SX.sym('phi')
    theta = SX.sym('theta')
    psi = SX.sym('psi')

    gravity = SX.zeros(3)
    gravity[2] = g

    x = vertcat(position, phi, theta, psi, velocity)

    p = SX.sym('p')
    q = SX.sym('q')
    r = SX.sym('r')
    F = SX.sym('F')

    u = vertcat(p, q, r, F)

    r1 = SX(3, 3)
    r1[0, 0] = 1
    r1[1, 1] = cos(phi)
    r1[1, 2] = -sin(phi)
    r1[2, 1] = sin(phi)
    r1[2, 2] = cos(phi)

    r2 = SX(3, 3)
    r2[0, 0] = cos(theta)
    r2[0, 2] = sin(theta)
    r2[1, 1] = 1
    r2[2, 0] = -sin(theta)
    r2[2, 2] = cos(theta)

    r3 = SX(3, 3)
    r3[0, 0] = cos(psi)
    r3[0, 1] = -sin(psi)
    r3[1, 0] = sin(psi)
    r3[1, 1] = cos(psi)
    r3[2, 2] = 1

    tau = SX(3, 3)
    tau[:, 0] = (inv(r2))[:, 0]
    tau[:, 1] = (inv(r2))[:, 1]
    tau[:, 2] = (inv(r2 @ r1))[:, 2]
    # tau[:,0] = (inv(r1))[:,0]
    # tau[:,1] = (inv(r1))[:,1]
    # tau[:,2] = (inv(r1 @ r2))[:,2]

    # tau[0,0] = cos(theta)
    # tau[0,2] = -sin(theta)
    # tau[1,1] = 1
    # tau[1,2] = sin(phi) * cos(theta)
    # tau[2,0] = sin(theta)
    # tau[2,2] = cos(phi) * cos(theta)

    tc = 0.59375  # mapping control to throttle
    # f_const = 100
    f_const = 20
    # tc = 0.75
    #
    #  scaling_control_mat = np.array([1, 1, 1, (g/tc)])

    # f_expl = vertcat(
    #     velocity,
    #     inv(tau) @ u[0:3],
    #     - ((r3 @ r1 @ r2)[:,2]) * (u[3] * ((g)/tc)) + gravity
    # )

    f_expl = vertcat(
        velocity,
        inv(tau) @ u[0:3], -((r3 @ r1 @ r2)[:, 2]) *
        (u[3] * ((g + f_const) / tc) - f_const) + gravity)

    # f_expl = vertcat(
    #     velocity,
    #     inv(tau) @ u[0:3],
    #     - ((r3 @ r1 @ r2)[:,2]) * ((g - f_const)*(1 - tc) / (1 - u[3]) + f_const)  + gravity
    # )

    # xdot
    xdot = SX.sym('xdot', 9)

    #parameters
    p = []

    f_impl = xdot - f_expl

    model = AcadosModel()

    model.f_impl_expr = f_impl
    model.f_expl_expr = f_expl
    model.x = x
    model.xdot = xdot
    model.u = u
    # model.z = z
    model.p = p
    model.name = model_name

    return model
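Similarly, a minimal sketch for the nonlinear model above (same assumptions about acados_template and CasADi):

model = acados_nonlinear_quad_model(9)
print(model.name)       # 'nonlinear_quad'
print(model.x.shape)    # (9, 1): position, phi, theta, psi, velocity
print(model.u.shape)    # (4, 1): p, q, r, F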
Example #14
                  k_L / m * ((k_V * (eta + eta_0) - A_B * h_p) / A_SP)**2 - g,
                  -1 / T_M * eta + k_M / T_M * u_pwm)

    f = Function('f', [states, controls], [rhs * T + states])

    # K = SX([[0.99581, 0.086879, 0.1499]])
    K = SX([[31.2043, 2.7128, 0.4824]])

    phi_c = A_d - B_d @ K

    P_f = SX([[5959.1, 517.92, 65.058], [517.92, 51.198, 5.6392],
              [65.058, 5.6392, 641.7]])
    # P_f=SX([[2.4944e+005,20941,1932.4],[20941,1839.4,168.05],[1932.4,168.05,641.7]])
    state_r = SX([[0.8], [0], [48.760258862]])
    u_r = [157.291157619]
    Q = SX.zeros(3, 3)
    # Q[0, 0] = 1e4
    Q[0, 0] = 1
    Q[1, 1] = 1
    Q[2, 2] = 1

    R = SX.zeros(1, 1)
    R[0, 0] = 1

    Q_s = Q + K.T @ R @ K
    # delta_Q=SX([[5000,0,0],[0,5,0],[0,0,5]])
    delta_Q = SX([[5, 0, 0], [0, 5, 0], [0, 0, 5]])
    K = reshape(K, 1, 3)
    X = SX.sym('X', 3, 1)

    psi = f(X, -K @ (X - state_r) + u_r) - ((phi_c) @ (X - state_r) + state_r)
Example #15
from casadi import SX, DM, vertcat, reshape, Function, nlpsol, inf, norm_2
import matplotlib.pyplot as plt

if __name__ == "__main__":
    # alpha_1 calculation
    n_states = 3

    h_ub = 2
    h_lb = 0
    eta_ub = 200
    eta_lb = 0

    X_alpha1 = SX.sym('X', n_states, 1)
    P_f = SX.zeros(3, 3)
    P_f[0, 0] = 1.0439e+007
    P_f[0, 1] = -3.0878e+007
    P_f[0, 2] = -1.6277e+009
    P_f[1, 0] = -3.0878e+007
    P_f[1, 1] = 1.6483e+008
    P_f[1, 2] = 6.4157e+009
    P_f[2, 0] = -1.6277e+009
    P_f[2, 1] = 6.4157e+009
    P_f[2, 2] = 2.9567e+011

    obj_alpha1 = -X_alpha1.T @ P_f @ X_alpha1
    K = SX.zeros(1, 3)
    K[0, 0] = 3162.3
    K[0, 1] = 256.11
    K[0, 2] = 12.303

    g = SX.zeros(1, 1)
Example #16
 temp = Function(
     'temp', [states],
     [f((states), K @ (states - state_r) + u_r) - A @ (states - state_r)])
 print("f", f(state_r, u_r))
 state_temp = SX([0.8, 0, 48.760258862])
 print("phi_norm", phi(state_temp))
 print("x_norm", (state_temp - state_r).T @ P_cost @ (state_temp - state_r))
 print(
     "phi/x",
     phi(state_temp) /
     ((state_temp - state_r).T @ P_cost @ (state_temp - state_r)))
 print("K(X-X_r)+u_r", K @ (state_temp - state_r) + u_r)
 print("f(x,kx)", f(state_temp, K @ (state_temp - state_r) + u_r))
 print("A(X-X_r)", A @ (state_temp - state_r))
 print("phi", temp(state_temp))
 g = SX.zeros(1, 1)
 g = norm_2(states - P)
 nlp_prob = {'f': -obj, 'x': states, 'g': g, 'p': P}
 opts = {}
 opts['print_time'] = False
 opts['ipopt'] = {
     'max_iter': 200,
     'print_level': 0,
     'acceptable_tol': 1e-4,
     # 'fixed_variable_treatment':'make_constraint',
     'acceptable_constr_viol_tol': 1e-4,
     'print_info_string': 'no',
     'acceptable_obj_change_tol': 1e-5
 }
 # print(opts)
 solver = nlpsol("solver", "ipopt", nlp_prob, opts)
Example #17
    # nonlinear mapping function f(x,u)
    f = Function('f', [states, controls], [rhs])
    # Decision variables (controls) u_pwm
    V = SX.sym('V', n_controls, N_pred)
    # parameters (initial state and reference state of the robot)
    P = SX.sym('P', n_states + n_states)
    # number of x is always N+1   -> 0..N
    Z = SX.sym('Z', n_states, (N_pred + 1))

    Z[:, 0] = P[0:3]
    obj_nom = 0

    g_nom = []  # constraints vector
    Q_nom = SX.zeros(3, 3)
    Q_nom[0, 0] = 1e7
    Q_nom[1, 1] = 1
    Q_nom[2, 2] = 1

    R_nom = SX.zeros(1, 1)
    R_nom[0, 0] = 1

    P_f = SX.zeros(3, 3)
    # compute predicted states and cost function
    for k in range(N_pred):
        st = Z[:, k]
        con = V[:, k]
        f_value = f(st, con)
        # print(f_value)
        st_next = st + (T * f_value)
Example #18
def acados_nonlinear_quad_model(num_states, g=9.81):
    '''
    A non-linear model of the quadrotor. We assume the input 
    u = [phi_dot,theta_dot,psi_dot,throttle]. Therefore, the system has only 9 
    states.
    '''

    model_name = 'nonlinear_quad'

    position = SX.sym('o', 3)
    velocity = SX.sym('v', 3)
    phi = SX.sym('phi')
    theta = SX.sym('theta')
    psi = SX.sym('psi')

    gravity = SX.zeros(3)
    gravity[2] = g

    x = vertcat(position, phi, theta, psi, velocity)

    p = SX.sym('p')
    q = SX.sym('q')
    r = SX.sym('r')
    F = SX.sym('F')

    u = vertcat(p, q, r, F)

    r1 = SX(3, 3)
    r1[0, 0] = 1
    r1[1, 1] = cos(phi)
    r1[1, 2] = -sin(phi)
    r1[2, 1] = sin(phi)
    r1[2, 2] = cos(phi)

    r2 = SX(3, 3)
    r2[0, 0] = cos(theta)
    r2[0, 2] = sin(theta)
    r2[1, 1] = 1
    r2[2, 0] = -sin(theta)
    r2[2, 2] = cos(theta)

    r3 = SX(3, 3)
    r3[0, 0] = cos(psi)
    r3[0, 1] = -sin(psi)
    r3[1, 0] = sin(psi)
    r3[1, 1] = cos(psi)
    r3[2, 2] = 1

    tau = SX(3, 3)
    tau[:, 0] = (inv(r2))[:, 0]
    tau[:, 1] = (inv(r2))[:, 1]
    tau[:, 2] = (inv(r2 @ r1))[:, 2]
    # tau[:,0] = (inv(r1))[:,0]
    # tau[:,1] = (inv(r1))[:,1]
    # tau[:,2] = (inv(r1 @ r2))[:,2]

    # tau[0,0] = cos(theta)
    # tau[0,2] = -sin(theta)
    # tau[1,1] = 1
    # tau[1,2] = sin(phi) * cos(theta)
    # tau[2,0] = sin(theta)
    # tau[2,2] = cos(phi) * cos(theta)

    tc = 0.59375  # mapping control to throttle
    # tc = 0.75
    # scaling_control_mat = np.array([1, 1, 1, (g/tc)])

    f_expl = vertcat(velocity,
                     inv(tau) @ u[0:3],
                     -((r3 @ r1 @ r2)[:, 2]) * u[3] * (g / tc) + gravity)

    # xdot
    xdot = SX.sym('xdot', 9)

    #parameters
    p = []

    f_impl = xdot - f_expl

    model = AcadosModel()

    model.f_impl_expr = f_impl
    model.f_expl_expr = f_expl
    model.x = x
    model.xdot = xdot
    model.u = u
    # model.z = z
    model.p = p
    model.name = model_name

    return model
Example #19
def acados_linear_quad_model_moving_eq(num_states, g=9.81):
    '''
    A linear model linearized about a moving operating point (I am not
    certain exactly what is being linearized about here). The Jacobian is
    recomputed at every execution.
    '''

    model_name = 'linear_quad_v2'
    tc = 0.59375
    # scaling_control_mat = np.array([1, 1, 1, (g/tc)])
    gravity = SX.zeros(3)
    gravity[2] = g

    x = SX.sym('x', 9)
    u = SX.sym('u', 4)
    xdot = SX.sym('xdot', 9)

    r1 = SX(3, 3)
    r1[0, 0] = 1
    r1[1, 1] = cos(x[3])
    r1[1, 2] = -sin(x[3])
    r1[2, 1] = sin(x[3])
    r1[2, 2] = cos(x[3])

    r2 = SX(3, 3)
    r2[0, 0] = cos(x[4])
    r2[0, 2] = sin(x[4])
    r2[1, 1] = 1
    r2[2, 0] = -sin(x[4])
    r2[2, 2] = cos(x[4])

    r3 = SX(3, 3)
    r3[0, 0] = cos(x[5])
    r3[0, 1] = -sin(x[5])
    r3[1, 0] = sin(x[5])
    r3[1, 1] = cos(x[5])
    r3[2, 2] = 1

    tau = SX(3, 3)
    tau[:, 0] = (inv(r2))[:, 0]
    tau[:, 1] = (inv(r2))[:, 1]
    tau[:, 2] = (inv(r2 @ r1))[:, 2]

    f_nl = vertcat(x[6:9],
                   inv(tau) @ u[0:3],
                   -((r3 @ r1 @ r2)[:, 2]) * u[3] * (g / tc) + gravity)

    A = jacobian(f_nl, x)
    B = jacobian(f_nl, u)

    p = []

    f_expl = mtimes(A, x) + mtimes(B, u)

    f_impl = xdot - f_expl

    model = AcadosModel()

    model.f_impl_expr = f_impl
    model.f_expl_expr = f_expl
    model.x = x
    model.xdot = xdot
    model.u = u
    # model.z = z
    model.p = p
    model.name = model_name

    return model
Example #20
    u_1 = SX.sym('u_1')
    u_2 = SX.sym("u_2")
    # input vector
    controls = vertcat(u_1, u_2)

    # get the number of rows ("shape" is a tuple)
    n_states = states.shape[0]  # p 22.

    # number of inputs
    n_controls = controls.shape[0]

    # state-space representation
    rhs = vertcat(sin(theta) * u_1, cos(theta) * u_1, u_2)
    print(rhs)

    Q = SX.zeros(3, 3)
    Q[0, 0] = 1
    Q[1, 1] = 1
    Q[2, 2] = 1

    R = SX.zeros(2, 2)
    R[0, 0] = 1
    R[1, 1] = 1

    f, solver = J(T, Q, R, N_pred, states, controls, rhs)

    # specify the nonlinear optimal control problem
    nl = {}
    nl['lbx'] = [0 for i in range(n_controls * N_pred)]
    nl['ubx'] = [0 for i in range(n_controls * N_pred)]
    # ??? better method?
Example #21
def qp_solve(prob, obj, p_init, x_init, y_init, lam_opt, mu_opt, case):
    """
    QP solver for path-following algorithm
    inputs: prob - problem description
            obj - problem equations
            p_init - initial parameter
            x_init - initial primal variable
            y_init - initial dual variable
            lam_opt - Lagrange multipliers of equality and active constraints
            mu_opt - Lagrange multipliers of inequality constraints
    outputs: y - solution primal variable
            qp_val - objective function value
            qp_exit - return status of QP solver
            deriv - derivatives of the problem
            k_zero_tilde - active set index
            k_plus_tilde - inactive set index
            grad - gradient of objective function
    """
    print('Current point x:', x_init)
    #Importing problem to be solved
    nx, np, neq, niq, name = prob()
    x, p, f, f_fun, con, conf, ubx, lbx, ubg, lbg = obj(
        x_init, y_init, p_init, neq, niq, nx, np)

    #Deteriming constraint types
    eq_con_ind = array([])  #indices of equality constraints
    iq_con_ind = array([])  #indices of inequality constraints
    eq_con = array([])  #equality constraints
    iq_con = array([])  #inequality constraints

    for i in range(0, len(lbg[0])):
        if lbg[0, i] == 0:
            eq_con = vertcat(eq_con, con[i])
            eq_con_ind = append(eq_con_ind, i)
        elif lbg[0, i] < 0:
            iq_con = vertcat(iq_con, con[i])
            iq_con_ind = append(iq_con_ind, i)
#    print 'Equality Constraint:', eq_con
#    print 'Inequality Constraint:', iq_con

#    if case == 'pure-predictor':

#       return qp_exit, optimal, x_qpopt, lam_qpopt, mu_qpopt
    if case == 'predictor-corrector':
        #Evaluating constraints at current iteration point
        con_vals = conf(x_init, p_init)
        #Determining which inequality constraints are active
        k_plus_tilde = array([])  #active constraint
        k_zero_tilde = array([])  #inactive constraint
        tol = 10e-5  #tolerance
        for i in range(0, len(iq_con_ind)):
            if ubg[0, i] - tol <= con_vals[i] and con_vals[i] <= ubg[0,
                                                                     i] + tol:
                k_plus_tilde = append(k_plus_tilde, i)
            else:
                k_zero_tilde = append(k_zero_tilde, i)
#        print 'Active constraints:', k_plus_tilde
#        print 'Inactive constraints:', k_zero_tilde
#        print 'Constraint values:', con_vals

        nk_pt = len(k_plus_tilde)  #number of active constraints
        nk_zt = len(k_zero_tilde)  #number of inactive constraints

        #Calculating Lagrangian
        lam = SX.sym('lam', neq)  #Lagrangian multiplier equality constraints
        mu = SX.sym('mu', niq)  #Lagrangian multiplier inequality constraints
        lag_f = f + mtimes(lam.T, eq_con) + mtimes(
            mu.T, iq_con)  #Lagrangian equation

        #Calculating derivatives
        g = gradient(f, x)  #Derivative of objective function
        g_fun = Function('g_fun', [x, p], [gradient(f, x)])
        H = 2 * jacobian(gradient(lag_f, x),
                         x)  #Second derivative of the Lagrangian
        H_fun = Function('H_fun', [x, p, lam, mu],
                         [jacobian(jacobian(lag_f, x), x)])

        if len(eq_con_ind) > 0:
            deq = jacobian(eq_con, x)  #Derivative of equality constraints
        else:
            deq = array([])
        if len(iq_con_ind) > 0:
            diq = jacobian(iq_con, x)  #Derivative of inequality constraints
        else:
            diq = array([])
        #Creating constraint matrices
        nc = niq + neq  #Total number of constraints
        if (niq > 0) and (neq > 0):  #Equality and inequality constraints
            #this part needs to be tested
            if (nk_zt > 0):  #Inactive constraints exist
                A = SX.zeros((nc, nx))
                print(deq)
                A[0, :] = deq  #A matrix
                lba = -1e16 * SX.ones((nc, 1))
                lba[0, :] = -eq_con  #lower bound of A
                uba = 1e16 * SX.ones((nc, 1))
                uba[0, :] = -eq_con  #upper bound of A
                for j in range(0, nk_pt):  #adding active constraints
                    A[neq + j + 1, :] = diq[int(k_plus_tilde[j]), :]
                    lba[neq + j + 1] = -iq_con[int(k_plus_tilde[j])]
                    uba[neq + j + 1] = -iq_con[int(k_plus_tilde[j])]
                for i in range(0, nk_zt):  #adding inactive constraints
                    A[neq + nk_pt + i + 1, :] = diq[int(k_zero_tilde[i]), :]
                    uba[neq + nk_pt + i + 1] = -iq_con[int(k_zero_tilde[i])]
                    #inactive constraints don't have lower bounds
            else:  #Active constraints only
                A = vertcat(deq, diq)
                lba = vertcat(-eq_con, -iq_con)
                uba = vertcat(-eq_con, -iq_con)
        elif (niq > 0) and (neq == 0):  #Inquality constraints
            if (nk_zt > 0):  #Inactive constraints exist
                A = SX.zeros((nc, nx))
                lba = -1e16 * SX.ones((nc, 1))
                uba = 1e16 * SX.ones((nc, 1))
                for j in range(0, nk_pt):  #adding active constraints
                    A[j, :] = diq[int(k_plus_tilde[j]), :]
                    lba[j] = -iq_con[int(k_plus_tilde[j])]
                    uba[j] = -iq_con[int(k_plus_tilde[j])]
                for i in range(0, nk_zt):  #adding inactive constraints
                    A[nk_pt + i, :] = diq[int(k_zero_tilde[i]), :]
                    uba[nk_pt + i] = -iq_con[int(k_zero_tilde[i])]
                    #inactive constraints don't have lower bounds
            else:
                input()
                A = vertcat(deq, diq)
                lba = -iq_con
                uba = -iq_con
        elif (niq == 0) and (neq > 0):  #Equality constriants
            A = deq
            lba = -eq_con
            uba = -eq_con
        A_fun = Function('A_fun', [x, p], [A])
        lba_fun = Function('lba_fun', [x, p], [lba])
        uba_fun = Function('uba_fun', [x, p], [uba])
        #Checking that matrices are correct sizes and types
        if (H.size1() != nx) or (H.size2() != nx) or (H.is_dense() == 'False'):
            #H matrix should be a sparse (nxn) and symmetrical
            print(
                'WARNING: H matrix is not the correct dimensions or matrix type'
            )
        if (g.size1() != nx) or (g.size2() != 1) or g.is_dense() == 'True':
            #g matrix should be a dense (nx1)
            print(
                'WARNING: g matrix is not the correct dimensions or matrix type'
            )
        if (A.size1() !=
            (neq + niq)) or (A.size2() != nx) or (A.is_dense() == 'False'):
            #A should be a sparse (nc x n)
            print(
                'WARNING: A matrix is not the correct dimensions or matrix type'
            )
        if lba.size1() != (neq + niq) or (lba.size2() !=
                                          1) or lba.is_dense() == 'False':
            print(
                'WARNING: lba matrix is not the correct dimensions or matrix type'
            )
        if uba.size1() != (neq + niq) or (uba.size2() !=
                                          1) or uba.is_dense() == 'False':
            print(
                'WARNING: uba matrix is not the correct dimensions or matrix type'
            )

        #Evaluating QP matrices at optimal points
        H_opt = H_fun(x_init, p_init, lam_opt, mu_opt)
        g_opt = g_fun(x_init, p_init)
        A_opt = A_fun(x_init, p_init)
        lba_opt = lba_fun(x_init, p_init)
        uba_opt = uba_fun(x_init, p_init)

        #        print 'Lower bounds', lba_opt
        #        print 'Upper bounds', uba_opt
        #        print 'Bound matrix', A_opt

        #Defining QP structure
        qp = {}
        qp['h'] = H_opt.sparsity()
        qp['a'] = A_opt.sparsity()
        optimize = conic('optimize', 'qpoases', qp)
        optimal = optimize(h=H_opt,
                           g=g_opt,
                           a=A_opt,
                           lba=lba_opt,
                           uba=uba_opt,
                           x0=x_init)
        x_qpopt = optimal['x']
        if x_qpopt.shape == x_init.shape:
            qp_exit = 'optimal'
        else:
            qp_exit = ''
        lag_qpopt = optimal['lam_a']

        #Determing Lagrangian multipliers (lambda and mu)
        lam_qpopt = zeros(
            (nk_pt, 1))  #Lagrange multiplier of active constraints
        mu_qpopt = zeros(
            (nk_zt, 1))  #Lagrange multiplier of inactive constraints
        if nk_pt > 0:
            for j in range(0, len(k_plus_tilde)):
                lam_qpopt[j] = lag_qpopt[int(k_plus_tilde[j])]
        if nk_zt > 0:
            for k in range(0, len(k_zero_tilde)):
                print(lag_qpopt[int(k_zero_tilde[k])])
        return qp_exit, optimal, x_qpopt, lam_qpopt, mu_qpopt
Example #22
    print(rhs)

    # nonlinear mapping function f(x,u)
    f = Function('f', [states, controls], [rhs])
    # Decision variables (controls) u_pwm
    U = SX.sym('U', n_controls, N_pred)
    # parameters (initial state and reference state of the robot)
    P = SX.sym('P', n_states + n_states)
    # number of x is always N+1   -> 0..N
    X = SX.sym('X', n_states, (N_pred + 1))

    X[:, 0] = P[0:3]
    obj = 0
    g = []  # constraints vector
    Q = SX.zeros(3, 3)
    Q[0, 0] = 1000
    Q[1, 1] = 1
    Q[2, 2] = 1e-5

    R = SX.zeros(1, 1)
    R[0, 0] = 1e-5

    # compute predicted states and cost function
    for k in range(N_pred):
        st = X[:, k]
        con = U[:, k]
        f_value = f(st, con)
        # print(f_value)
        st_next = st + (T * f_value)
        X[:, k + 1] = st_next
Example #23
def get_ls_factor(n_uncertain, n_samples, pc_order, lamb=0.0):
    # Uncertain parameter design
    sobol_design = sobol_seq.i4_sobol_generate(n_uncertain, n_samples,
                                               ceil(np.log2(n_samples)))
    sobol_samples = np.transpose(sobol_design)
    for i in range(n_uncertain):
        sobol_samples[i, :] = norm(loc=0., scale=1).ppf(sobol_samples[i, :])

    # Polynomial function definition
    x = SX.sym('x')
    he0fcn = Function('He0fcn', [x], [1.])
    he1fcn = Function('He1fcn', [x], [x])
    he2fcn = Function('He2fcn', [x], [x**2 - 1])
    he3fcn = Function('He3fcn', [x], [x**3 - 3 * x])
    he4fcn = Function('He4fcn', [x], [x**4 - 6 * x**2 + 3])
    he5fcn = Function('He5fcn', [x], [x**5 - 10 * x**3 + 15 * x])
    he6fcn = Function('He6fcn', [x], [x**6 - 15 * x**4 + 45 * x**2 - 15])
    he7fcn = Function('He7fcn', [x], [x**7 - 21 * x**5 + 105 * x**3 - 105 * x])
    he8fcn = Function('He8fcn', [x],
                      [x**8 - 28 * x**6 + 210 * x**4 - 420 * x**2 + 105])
    he9fcn = Function('He9fcn', [x],
                      [x**9 - 36 * x**7 + 378 * x**5 - 1260 * x**3 + 945 * x])
    he10fcn = Function(
        'He10fcn', [x],
        [x**10 - 45 * x**8 + 630 * x**6 - 3150 * x**4 + 4725 * x**2 - 945])
    helist = [
        he0fcn, he1fcn, he2fcn, he3fcn, he4fcn, he5fcn, he6fcn, he7fcn, he8fcn,
        he9fcn, he10fcn
    ]

    # Calculation of factor for least-squares
    xu = SX.sym("xu", n_uncertain)
    exps = (p for p in product(range(pc_order + 1), repeat=n_uncertain)
            if sum(p) <= pc_order)
    next(exps)
    exps = list(exps)

    psi = SX.ones(
        int(
            factorial(n_uncertain + pc_order) /
            (factorial(n_uncertain) * factorial(pc_order))))
    for i in range(len(exps)):
        for j in range(n_uncertain):
            psi[i + 1] *= helist[exps[i][j]](xu[j])
    psi_fcn = Function('PSIfcn', [xu], [psi])

    nparameter = SX.size(psi)[0]
    psi_matrix = SX.zeros(n_samples, nparameter)
    for i in range(n_samples):
        psi_a = psi_fcn(sobol_samples[:, i])
        for j in range(SX.size(psi)[0]):
            psi_matrix[i, j] = psi_a[j]

    psi_t_psi = mtimes(psi_matrix.T, psi_matrix) + lamb * DM.eye(nparameter)
    chol_psi_t_psi = chol(psi_t_psi)
    inv_chol_psi_t_psi = solve(chol_psi_t_psi, SX.eye(nparameter))
    inv_psi_t_psi = mtimes(inv_chol_psi_t_psi, inv_chol_psi_t_psi.T)

    ls_factor = mtimes(inv_psi_t_psi, psi_matrix.T)
    ls_factor = DM(ls_factor)

    # Calculation of expectations for variance function
    n_sample_expectation_vector = 100000
    x_sample = np.random.multivariate_normal(np.zeros(n_uncertain),
                                             np.eye(n_uncertain),
                                             n_sample_expectation_vector)
    psi_squared_sum = DM.zeros(SX.size(psi)[0])
    for i in range(n_sample_expectation_vector):
        psi_squared_sum += psi_fcn(x_sample[i, :])**2
    expectation_vector = psi_squared_sum / n_sample_expectation_vector

    return ls_factor, expectation_vector, psi_fcn
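A small usage sketch for get_ls_factor (n_uncertain, n_samples and pc_order are illustrative; note the expectation vector is estimated from a 100000-sample Monte Carlo run, so the call takes a moment):

ls_factor, expectation_vector, psi_fcn = get_ls_factor(
    n_uncertain=2, n_samples=16, pc_order=2)
print(ls_factor.shape)            # (6, 16): one row per polynomial-chaos coefficient
print(expectation_vector.shape)   # (6, 1)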