def test_lqr_discrete(self):
        """Test overloading of lqr operator for discrete time systems"""
        csys = ct.rss(2, 1, 1)
        dsys = ct.drss(2, 1, 1)
        Q = np.eye(2)
        R = np.eye(1)

        # Calling with a system versus explicit A, B should be the same
        K_csys, S_csys, E_csys = ct.lqr(csys, Q, R)
        K_expl, S_expl, E_expl = ct.lqr(csys.A, csys.B, Q, R)
        np.testing.assert_almost_equal(K_csys, K_expl)
        np.testing.assert_almost_equal(S_csys, S_expl)
        np.testing.assert_almost_equal(E_csys, E_expl)

        # Calling lqr() with a discrete time system should call dlqr()
        K_lqr, S_lqr, E_lqr = ct.lqr(dsys, Q, R)
        K_dlqr, S_dlqr, E_dlqr = ct.dlqr(dsys, Q, R)
        np.testing.assert_almost_equal(K_lqr, K_dlqr)
        np.testing.assert_almost_equal(S_lqr, S_dlqr)
        np.testing.assert_almost_equal(E_lqr, E_dlqr)

        # Calling lqr() with no timebase should use the continuous-time solution
        asys = ct.ss(csys.A, csys.B, csys.C, csys.D, dt=None)
        K_asys, S_asys, E_asys = ct.lqr(asys, Q, R)
        K_expl, S_expl, E_expl = ct.lqr(csys.A, csys.B, Q, R)
        np.testing.assert_almost_equal(K_asys, K_expl)
        np.testing.assert_almost_equal(S_asys, S_expl)
        np.testing.assert_almost_equal(E_asys, E_expl)

        # Calling dlqr() with a continuous time system should raise an error
        with pytest.raises(ControlArgument, match="dsys must be discrete"):
            K, S, E = ct.dlqr(csys, Q, R)
Example #2
 def gainCallBack(self, req):
     # start_time = time.time()
     self.updateModel(req.l)
     k1, _, _ = lqr(self.sys1, self.Q1, self.R1)
     k2, _, _ = lqr(self.sys2, self.Q2, self.R2)
     res = GainResponse(k1.flatten().tolist() + k2.flatten().tolist())
     print(res)
     # rospy.loginfo("--- %s seconds ---" % (time.time() - start_time))
     return res
Example #3
 def control(self, dt, x, goal_x, est_params):
     if hasattr(self, '_N'):
         K, S, E = control.lqr(self._model.A, self._model.B, self._Q, self._R, self._N)
     else:
         K, S, E = control.lqr(self._model.A, self._model.B, self._Q, self._R)
     
     self._K = K        
     output = - self._K @ x
     return output
    def getGainK(self):
        #S = self.getRiccatiSolution()
        #S1 = self.B.T.dot(S)
        #if self.N is not None: S1 += self.N.T
        #K = np.linalg.inv(self.R).dot(S1)

        if self.N is None:
            K, S, E = control.lqr(self.A, self.B, self.Q, self.R)
        else:
            K, S, E = control.lqr(self.A, self.B, self.Q, self.R, self.N)
        return K
    def test_lqr_integral_discrete(self):
        # Generate a discrete time system for testing
        sys = ct.drss(4, 4, 2, strictly_proper=True)
        sys.C = np.eye(4)  # reset output to be full state
        C_int = np.eye(2, 4)  # integrate outputs for first two states
        nintegrators = C_int.shape[0]

        # Generate a controller with integral action
        K, _, _ = ct.lqr(sys,
                         np.eye(sys.nstates + nintegrators),
                         np.eye(sys.ninputs),
                         integral_action=C_int)
        Kp, Ki = K[:, :sys.nstates], K[:, sys.nstates:]

        # Create an I/O system for the controller
        ctrl, clsys = ct.create_statefbk_iosystem(sys,
                                                  K,
                                                  integral_action=C_int)

        # Construct the state space matrices by hand
        A_ctrl = np.eye(nintegrators)
        B_ctrl = np.block(
            [[-C_int, np.zeros((nintegrators, sys.ninputs)), C_int]])
        C_ctrl = -K[:, sys.nstates:]
        D_ctrl = np.block([[Kp, np.eye(nintegrators), -Kp]])

        # Check to make sure everything matches
        assert ct.isdtime(clsys)
        np.testing.assert_array_almost_equal(ctrl.A, A_ctrl)
        np.testing.assert_array_almost_equal(ctrl.B, B_ctrl)
        np.testing.assert_array_almost_equal(ctrl.C, C_ctrl)
        np.testing.assert_array_almost_equal(ctrl.D, D_ctrl)
Example #6
def lqr_control():
    # ignore the platform mass and inertia
    m = 10               # blue base mass
    mw = 0.4             # wheel mass
    h = 0.8              # center of mass height (roughly)
    g = 9.8              # gravitational acceleration
    r = 0.1              # wheel radius
    Iw = 0.15            # moment of inertia of the wheel about y
    Iz = 0.5 * mw * r**2 # moment of inertia of the robot base about z
    # reference: chapter 4 LQR model
    # The linearization coefficients below were left unfilled in the original
    # and must be derived from the physical parameters above.
    t1 = None
    t2 = None
    t3 = None
    t4 = None
    t5 = None
    A = np.array([[0,1,0,0,0,0],
        [t1,0,0,0,0,0],
        [0,0,0,1,0,0],
        [t2,0,0,0,0,0],
        [0,0,0,0,0,1],
        [0,0,0,0,0,0]])
    B = np.array([[0,0],
        [t3,0],
        [0,0],
        [t4,0],
        [0,0],
        [0,t5]])
    Q = np.eye(6)
    R = np.eye(2)
    K,S,E = lqr(A,B,Q,R)
    print(K)

    return K
Example #7
    def lqr_control(self, traj, t):
        """
        Get the control input for a given instant, given 
        a desired trajectory

        Based on a Taylor linearization, so we're assuming the
        actual location is pretty close to the desired one.
        """

        A = np.array([[
            0, 0, -traj.vdes(t) * np.sin(traj.theta_des(t)),
            np.cos(traj.theta_des(t)), 0
        ],
                      [
                          0, 0,
                          traj.vdes(t) * np.cos(traj.theta_des(t)),
                          np.sin(traj.theta_des(t)), 0
                      ], [0, 0, 0, 0, 1], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]])

        B = np.array([[0, 0], [0, 0], [0, 0], [1, 0], [0, 1]])

        Q = np.array([[1, 0, 0, 0, 0], [0, 1, 0, 0, 0], [0, 0, 1, 0, 0],
                      [0, 0, 0, 1, 0], [0, 0, 0, 0, 1]])

        R = np.array([[1, 0], [0, 1]])

        K, S, E = control.lqr(A, B, Q, R)

        x = self.current_extended_state(traj, t)

        u = np.matmul(-K, x)

        return u
Example #8
def lqr_sys(A, B, C, D, in_q, r=1, num_q=6):
    #parameter
    sys = control.StateSpace(A, B, C, D)
    Q = np.zeros((num_q, num_q))
    R = [r]
    for i in range(num_q):
        Q[i, i] = in_q[i]

    K, solu_R, Eig_sys = control.lqr(sys, Q, r)
    kr = -1 / np.matmul(np.matmul(C, npl.inv((A - np.matmul(B, K)))), B)[0]

    pole, eig_v = npl.eig(A - np.matmul(B, K))
    pole_real = [a.real for a in pole]
    minpol = -3 + min(pole_real)
    repol = []

    for i in range(num_q):
        repol.append(minpol + i + 1)
    repol = np.array(repol)
    L = scis.place_poles(A.transpose(), C.transpose(), repol).gain_matrix
    L = L.transpose()

    upCA = np.column_stack(((A - np.matmul(B, K)), (np.matmul(B, K))))
    downCA = np.column_stack((np.zeros((num_q, num_q)), (A - np.matmul(L, C))))
    CA = np.row_stack((upCA, downCA))
    CB = np.row_stack((B * kr, np.zeros((num_q, 1))))
    CC = np.column_stack((C, np.zeros((1, num_q))))
    CD = np.array([0])

    return CA, CB, CC, CD
def test_lqr():
    control_spec = {
        'feedback': 'LQR',
        'params': {
            'feedback': {
                'Q': np.array([[1, 0], [0, 1]]),
                'R': 1
            }
        }
    }

    model = Model(np.array([[0, 1], [1, 0]]), [[0], [1]])

    dt = 0.01
    x = np.array([[1], [1]])
    goal_x = np.array([[0], [0]])
    est_params = []

    test_controller = controller.Controller(control_spec, model)
    output = test_controller.control(dt, x, goal_x, est_params)

    # expected results
    K, S, E = control.lqr(model.A, model.B,
                          control_spec['params']['feedback']['Q'],
                          control_spec['params']['feedback']['R'])
    expec_output = -K @ x

    np.testing.assert_array_almost_equal(output, expec_output)
Example #10
    def policy_rollout(self):
        A, B = self.get_system()
        Q = np.eye(self.hidden_dim)
        R = np.array([[0.01]])

        K, _, _ = control.lqr(A, B, Q, R)

        ref = torch.FloatTensor([[1.0, 0., 0.]])
        ref = model.encoder(ref).detach().numpy()

        obs_old = self.transform_state(self.env.reset())
        #obs_old[2] = obs_old[2] / 8.0
        for _ in range(100):
            if np.random.random() > 0.5:
                state = torch.FloatTensor(obs_old.reshape((1, -1)))
                y = model.encoder(state).detach().numpy()
                action = -np.dot(K, (y-ref).T)
                action = np.clip(np.array([action.item()]), self.env.action_space.low, self.env.action_space.high)
            else:
                action = self.env.action_space.sample() * 0.4
            #self.env.render()
            obs, reward, done, info = self.env.step(action)
            #obs[2] = obs[2] / 8.0
            obs = self.transform_state(obs)
            self.replay_buffer.push((obs_old, action / 2.0, obs))
            obs_old = obs
Example #11
def LQR(plant, x):

    A = np.matrix(
        [[0, 1, 0, 0],
         [0, 0, plant.params['g'] * plant.params['m'] / plant.params['M'], 0],
         [0, 0, 0, 1],
         [
             0, 0, (plant.params['M'] + plant.params['m']) *
             plant.params['g'] / (plant.params['l'] * plant.params['M']), 0
         ]])

    B = np.matrix([[0], [1 / plant.params['M']], [0],
                   [1 / (plant.params['l'] * plant.params['M'])]])

    # The Q and R matrices are empirically tuned. This is described further in the report
    Q = np.matrix([[1, 0, 0, 0], [0, 0.01, 0, 0], [0, 0, 15, 0], [0, 0, 0,
                                                                  11]])

    R = np.matrix([5])

    # The K matrix is calculated using the lqr function from the controls library
    K, S, E = control.lqr(A, B, Q, R)
    K = np.matrix(K)  # ensure matrix semantics for the products below

    x = np.matrix([[-np.squeeze(np.asarray(x[0]))],
                   [np.squeeze(np.asarray(x[1]))],
                   [np.squeeze(np.asarray(np.arcsin(x[3])))],
                   [-np.squeeze(np.asarray(x[2]))]])

    desired = np.matrix([[0], [0], [0], [0]])

    F = (K * (x - desired))
    return np.array(F)
Example #12
 def clfqp_a(self, X, Xd, Ud):
     #Xn = np.array([1,np.pi/3,1,np.pi/3])
     #An = self.sdcA(Xn)
     #Bn = self.dynamicsg(Xn)
     e = X - Xd
     B = self.dynamicsg(X)
     Bd = self.dynamicsg(Xd)
     f = self.dynamicsf(X) + B @ Ud
     fd = self.dynamicsf(Xd) + Bd @ Ud
     Q = np.identity(self.n_states) * 10
     R = np.identity(self.m_inputs)
     A = self.sdcA(X)
     try:
         K, P, _ = control.lqr(A, B, Q, R)
         invR = np.linalg.inv(R)
         phi0 = 2 * e @ P @ (f - fd) + e @ (P @ B @ invR @ B.T @ P + Q) @ e
         phi1 = (2 * e @ P @ B).T
         if phi0 > 0:
             U = -(phi0 / (phi1.T @ phi1)) * phi1
             U = Ud + U.ravel()
         else:
             U = Ud
     except:
         U = Ud
         print("clfqp infeasible")
     return U
Example #13
    def callback(self, data):
        self.K, self.S, self.e = lqr(A, B, self.Q, self.R)
        y = data.orientation.y * 180 / 3.1416
        if y > self.yMax:
            self.yMax = y
        elif y < self.yMin:
            self.yMin = y
        vel = Twist()
        diff_yaw = y - self.y_
        np_x = np.array([[y], [diff_yaw]])

        xvel = self.K.dot(np_x)[0, 0]

        if xvel > self.xvelMax:
            self.xvelMax = xvel
        elif xvel < self.xvelMin:
            self.xvelMin = xvel
        vel.linear.x = xvel
        vel.linear.y = 0
        vel.linear.z = 0
        vel.angular.x = 0
        vel.angular.y = 0
        vel.angular.z = 0
        self.pub.publish(vel)
        #print "Max vel " + str(self.xvelMax) + " & Min vel " + str(self.xvelMin) + " Max y " + str(self.yMax*180/3.1416) +" & Min y" + str(self.yMin*180/3.1416)
        #print "Velocity "+ str(xvel)+ " & yaw " + str(y)
        self.y_ = y
        self.pub1.publish(data.orientation.y)
Example #15
def compute_whipple_lqr_gain(velocity):
    _, A, B = benchmark_state_space_vs_speed(*benchmark_matrices(), velocity)
    Q = np.diag([1e5, 1e3, 1e3, 1e2])
    R = np.eye(2)

    gains = [control.lqr(Ai, Bi, Q, R)[0] for Ai, Bi in zip(A, B)]
    return gains
    def update(self):
        t = self.simulationHandler.currentTime

        x, dx = self.controlInputStateSpaceHandler.getX_dx(t)

        q, v, a = self.stateHandler.getPositionVelocityAcceleration()

        A = self.linearizedModelHandler.getA()
        B = self.linearizedModelHandler.getB()

        F = self.constraintsHandler.getJacobian(DM(q))
        dF = self.constraintsHandler.getJacobianDerivative(DM(q), DM(v))

        G = horzcat(F, dF)

        N = scipy.linalg.null_space(G)

        An = N.T @ A @ N
        Bn = N.T @ B.T
        Qn = N.T @ self.Q @ N

        Kn, S, CLP = lqr(An, Bn, Qn, self.R)

        K = Kn @ N.T

        e = (self.stateSpaceHandler.x - x)

        u_FB = -K @ e

        u_FF = self.inverseDynamicsHandler.u

        self.u = u_FB + u_FF
Example #17
def lqr_gain(rotor):
    A = np.array([
        [0, 1, 0, 0, 0, 0],
        [0, 0, 0, 0, -rotor.g, 0],
        [0, 0, 0, 1, 0, 0],
        [0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 1],
        [0, 0, 0, 0, 0, 0],
    ],
                 dtype=float)

    B = np.array([
        [0, 0],
        [0, 0],
        [0, 0],
        [1, 1],
        [0, 0],
        [-rotor.r / rotor.I, rotor.r / rotor.I],
    ],
                 dtype=float)

    Q = np.eye(A.shape[1])
    R = np.eye(B.shape[1])

    K, _, _ = control.lqr(A, B, Q, R)

    return K
Example #18
    def diffeomorphism_full_state_control(self):
        d = 1
        xi_bar = np.array([[0, 0, 1]]).T

        zeta_bar = xi_bar + np.array([[0, 0, d]]).T

        m = self.M
        g = self.G
        R = self.rot_mat
        x = self.state
        xi = x[0:3]
        eta = x[3:6]
        xi_dot = x[6:9]
        eta_dot = x[9:12]
        phi = eta[0]
        theta = eta[1]
        psi = eta[2]
        phi_dot = eta_dot[0]
        theta_dot = eta_dot[1]
        psi_dot = eta_dot[2]

        zeta = (self * rigid.Vector(*(0, 0, -d))).array
        chi = np.vstack([zeta - zeta_bar, psi, xi_dot, psi_dot])
        W = mechanics.att_vel_to_ang_vel_jacobian(phi, theta)
        W_inv = np.linalg.inv(W)
        W_dot = mechanics.time_derivative_att_vel_to_ang_vel_jacobian(
            phi, theta, phi_dot, theta_dot)

        A = np.hstack(
            [np.zeros((8, 4)),
             np.vstack([np.identity(4), np.zeros((4, 4))])])
        b = np.array([[-1 / m * R[0, 2], d * R[0, 1], -d * R[0, 0]],
                      [-1 / m * R[1, 2], d * R[1, 1], -d * R[1, 0]],
                      [-1 / m * R[2, 2], d * R[2, 1], -d * R[2, 0]]])
        B = np.vstack([
            np.zeros((4, 4)),
            np.hstack([b, np.zeros((3, 1))]),
            np.array([[0, 0, 0, 1]])
        ])
        G = np.array([[0, 0, 0, 0, 0, 0, -g, 0]]).T

        QQ = np.diag([100, 100, 10, 1, 1, 1, 1, 1])
        RR = np.diag(np.array([1, 1, 1, 1]))
        K, _, _ = lqr(A, B, QQ, RR)
        # mu_bar = np.array([[-g*m*R[2, 2], g/d*R[2, 1], -g/d*R[2, 0], 0]]).T
        mu_bar = np.array([[g * m, 0, 0, 0]]).T
        mu_delta = -np.dot(K, chi)
        mu = mu_bar + mu_delta

        F = mu[0, 0]
        alpha = mu[1:4]

        J = generialized_coordinate_inertia(self.I, phi, theta, psi)
        C = quad_coriolis(self.I, phi, theta, phi_dot, theta_dot, psi_dot)
        tau_eta = np.dot(J, np.dot(W_inv,
                                   (alpha - np.dot(W_dot, eta_dot)))) + np.dot(
                                       C, eta_dot)

        return np.vstack([F, tau_eta])
Example #19
def LQRControl(plant, Q, R):

    plantStateSpace = control.tf2ss(plant)
    A, B, C, D = control.ssdata(plantStateSpace)
    print(A, B, C, D)
    K, P, E = control.lqr(plantStateSpace, Q, R)
    output = control.ss(A - B * K, B * K, C, D)
    return control.step_response(output, T)
Example #20
 def lqr(self,x,A,B,Q,R):
     K, S, E = control.lqr(A, B, Q, R)
     u = -scipy.linalg.inv(R)*(B.T*(S*x))
     #if B.size==2:
         #print("S",S)
         #print("x",x)
         #print("B",self.B)
     return u
Example #21
def calculate_LQR_gains(A, B, C, D, Q, R):
    # calculate LQR gains
    (K, X, E) = ctl.lqr(A, B, Q, R)

    # calculate Nu, Nx matrices for including reference input
    Nu, Nx = getInputMatrices(A, B, C, D)

    return K, Nu, Nx
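getInputMatrices() is not shown above; assuming Nu and Nx are the usual steady-state input and state maps for a constant reference r, the gains returned here are typically combined as in this illustrative sketch:

def lqr_tracking_law(K, Nu, Nx, x, r):
    # Feed forward the steady-state input Nu @ r and regulate the state
    # toward the steady-state target Nx @ r.
    return Nu @ r - K @ (x - Nx @ r)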
Example #22
def get_linear_quadratic_regulator(linear_model, q=numpy.identity(3),
                                   r=numpy.identity(1)):
    ctrb_matrix = control.ctrb(linear_model.A, linear_model.B)
    if numpy.linalg.matrix_rank(ctrb_matrix) != linear_model.A.shape[0]:
        raise ValueError("System is not controllable!")
    k_matrix, s_matrix, e_matrix = control.lqr(linear_model.A, linear_model.B,
                                               q, r)
    return k_matrix, s_matrix, e_matrix
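A usage sketch: linear_model only needs A and B attributes, so any simple container works. The triple-integrator model below is illustrative and not taken from the original code:

from types import SimpleNamespace
import numpy

model = SimpleNamespace(
    A=numpy.array([[0., 1., 0.],
                   [0., 0., 1.],
                   [0., 0., 0.]]),      # chain of three integrators
    B=numpy.array([[0.], [0.], [1.]]))

K, S, E = get_linear_quadratic_regulator(model)  # default Q = I3, R = I1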
Example #24
def _test_lqr_random(rng, n):
    # pylint: disable=import-outside-toplevel
    import control
    from jax.tree_util import tree_multimap

    A, B, Q, R, N = random_env(rng, n)
    actual = lqr_continuous_time_infinite_horizon(A, B, Q, R, N)
    expected = control.lqr(A, B, Q, R, N)
    assert tree_multimap(jp.allclose, actual, expected)
Example #25
 def one_step_sim(self,y):
     runge_num = self.runge_num
     A = self.Aekf(self.Z,self.t)
     R = 1*np.eye(1)
     Q = 100*np.eye(3)
     K,P,E = control.lqr(A.T,C.T,Q,R)
     L = K.T
     for num in range(0, runge_num):
         self.Z, self.t = self.rk4(L,y)
     return self.Z, self.t
def test_discrete_lqr():
    # oscillator model defined in 2D
    # Source: https://www.mpt3.org/UI/RegulationProblem
    A = [[0.5403, -0.8415], [0.8415, 0.5403]]
    B = [[-0.4597], [0.8415]]
    C = [[1, 0]]
    D = [[0]]

    # Linear discrete-time model with sample time 1
    sys = ct.ss2io(ct.ss(A, B, C, D, 1))

    # Include weights on states/inputs
    Q = np.eye(2)
    R = 1
    K, S, E = ct.lqr(A, B, Q, R)  # note: *continuous* time LQR

    # Compute the integral and terminal cost
    integral_cost = opt.quadratic_cost(sys, Q, R)
    terminal_cost = opt.quadratic_cost(sys, S, None)

    # Formulate finite horizon MPC problem
    time = np.arange(0, 5, 1)
    x0 = np.array([1, 1])
    optctrl = opt.OptimalControlProblem(sys,
                                        time,
                                        integral_cost,
                                        terminal_cost=terminal_cost)
    res1 = optctrl.compute_trajectory(x0, return_states=True)

    with pytest.xfail("discrete LQR not implemented"):
        # Result should match LQR
        K, S, E = ct.dlqr(A, B, Q, R)
        lqr_sys = ct.ss2io(ct.ss(A - B @ K, B, C, D, 1))
        _, _, lqr_x = ct.input_output_response(lqr_sys,
                                               time,
                                               0,
                                               x0,
                                               return_x=True)
        np.testing.assert_almost_equal(res1.states, lqr_x)

    # Add state and input constraints
    trajectory_constraints = [
        (sp.optimize.LinearConstraint, np.eye(3), [-10, -10, -1], [10, 10, 1]),
    ]

    # Re-solve
    res2 = opt.solve_ocp(sys,
                         time,
                         x0,
                         integral_cost,
                         trajectory_constraints,
                         terminal_cost=terminal_cost)

    # Make sure we got a different solution
    assert np.any(np.abs(res1.inputs - res2.inputs) > 0.1)
Example #27
    def lqr_attitude_control(self):
        Q_rot = np.diag([80, 80, 2, 1, 1, 1])
        R_rot = np.diag(np.array([1000, 1000, 1000]))
        K_rot,_,_ = lqr(self.A_rot, self.B_rot, Q_rot, R_rot)
        X_rot = np.vstack([self.state[3:6], self.state[9:12]])

        torque_attitude = -np.dot(K_rot, X_rot)

        force = self.G*self.M

        return np.vstack([force, torque_attitude])
    def __init__(self, plant, state0, t0, Q=None, R=None):
        A = plant.df(state0, t0)
        B = plant.control_matrix(state0, t0)
        Q = Q if Q is not None else np.identity(A.shape[1]) * 1.0
        R = R if R is not None else np.identity(B.shape[1]) * 0.00000001

        K, S, E = control.lqr(A, B, Q, R)
        self.state0 = state0.copy()
        self.K = K
        self.S = S
        self.E = E
Example #30
 def __init__(self, intopic, outtopic,
              A, B, target=None, Q=None, R=None, realizable=True):
     if Q is None:
         Q = np.eye(A.shape[0])
     if R is None:
         R = np.eye(B.shape[1])
     self.target_state = np.zeros(A.shape[0])
     if target is not None:
         self.target_state[:target.shape[0]] = target
     K, S, E = lqr(A,B,Q,R)
     StateFeedback.__init__(self, intopic, outtopic, K, realizable=realizable)
def LQR():
    A = np.array([[0, 1], [g / lp, 0]])

    B = np.array([[0], [1]])

    Q = np.array([[3000., 0], [0, 1.]])
    R = .1

    K, S, E = lqr(A, B, Q, R)

    return np.array(K)
Example #33
    def calc_lqr_gain(self):

        # Choose state and input weights for cost function
        self.Q = np.eye(4)
        self.R = 0.0001

        # Derive the optimal controller
        K, S, E = control.lqr(self.A, self.B, self.Q, self.R)

        # Set gain matrix
        self.gain[:] = K
Example #34
    def solve_lqr(self, A, B, Q, R):
        '''
        Returns:

            K: 2-d array : State feedback gains

            S: 2-d array : Solution to Riccati equation

            E: 1-d array : Eigenvalues of the closed loop system

        '''
        return control.lqr(A, B, Q, R)
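The method above is a thin wrapper around control.lqr(); a minimal sketch of the expected call pattern on a double integrator (the weights below are illustrative, not from the original class):

import numpy as np
import control

A = np.array([[0., 1.], [0., 0.]])   # double integrator
B = np.array([[0.], [1.]])
Q = np.diag([10., 1.])               # weight position error more than velocity
R = np.array([[1.]])

K, S, E = control.lqr(A, B, Q, R)    # same triple that solve_lqr() returns
u = -K @ np.array([1.0, 0.0])        # state-feedback law at an example state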
Example #35
def generateLQRTable(A_table, B_table, Q_table, R_table):
    count = A_table.shape[0]
    n = A_table.shape[2]
    m = B_table.shape[2]

    K_table = np.zeros((count, m, n))

    for i in range(count):
        K, S, CLP = lqr(A_table[i], B_table[i], Q_table[i], R_table[i])
        K_table[i] = K

    return K_table
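A gain-scheduling sketch built around generateLQRTable(); the scheduling variable and the tabulated linearizations below are illustrative assumptions, not taken from the original repository:

import numpy as np

speeds = np.array([1.0, 2.0, 3.0])   # operating points to tabulate
A_table = np.stack([np.array([[0., 1.], [-v, -0.1]]) for v in speeds])
B_table = np.stack([np.array([[0.], [1.]]) for _ in speeds])
Q_table = np.stack([np.eye(2) for _ in speeds])
R_table = np.stack([np.eye(1) for _ in speeds])

K_table = generateLQRTable(A_table, B_table, Q_table, R_table)

# At run time, apply the gain tabulated at the nearest operating point.
i = int(np.argmin(np.abs(speeds - 2.2)))
u = -K_table[i] @ np.array([0.1, 0.0])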
Example #36
 def construct(self):
   d_stat = 9
   d_ctrl = self.n_contacts*6
   self.A = np.zeros((d_stat,d_stat))
   self.A[:3,3:6] = 1./self.mass*np.eye(3)
   self.A[6:,:3] = self.cross_mat(self.mass*np.array([0., 0., -9.81]))
   self.B = np.zeros((d_stat,d_ctrl))
   for c in range(self.n_contacts):
     self.B[3:,6*c:6*c+6] = np.eye(6)
     self.B[6:,6*c:6*c+3] = self.cross_mat(self.cog_to_cont[c])
   self.K,self.p,self.e = lqr(self.A,self.B,self.Q,self.R)
   # dh = Gain*err + h_des
   self.Gain = np.array(np.matrix(self.B[3:,:])*np.matrix(self.K))
Example #37
    def control_eq(self):
        self.equilibrium_point = hstack(( 0, pi / 2 * ones(len(self.q) - 1), zeros(len(self.u)) ))
        equilibrium_dict = dict(zip(self.q + self.u, self.equilibrium_point))
        parameter_dict = dict(zip(self.parameters, self.parameter_vals))

        # symbolically linearize about arbitrary equilibrium
        self.linear_state_matrix, self.linear_input_matrix, inputs = self.kane.linearize()
        # sub in the equilibrium point and the parameters
        self.f_A_lin = self.linear_state_matrix.subs(parameter_dict).subs(equilibrium_dict)
        self.f_B_lin = self.linear_input_matrix.subs(parameter_dict).subs(equilibrium_dict)
        m_mat = self.kane.mass_matrix_full.subs(parameter_dict).subs(equilibrium_dict)
        # compute A and B
        from numpy import matrix
        A = matrix(m_mat.inv() * self.f_A_lin)
        B = matrix(m_mat.inv() * self.f_B_lin)

        assert matrix_rank(control.ctrb(A, B)) == A.shape[0]

        self.K, self.X, self.E = control.lqr(A, B, ones(A.shape), 1);
	def calcK(self, Plant):
		#Calculates the K matrix using python control systems and lqr pole placement
		#Define A matrix
		A = [ [0, 1], [-Plant.g / Plant.l, -Plant.b / (Plant.m * (Plant.l ** 2))] ]
		#Define B matrix
		B = [ [0], [1 / (Plant.m * (Plant.l ** 2))] ]
		#define C matrix. It's just the identity
		C = np.identity(2)
		#The D matrix is just 0
		D = [ [0], [0] ]

		#Create the statespace representation
		sys = control.ss(A, B, C, D)

		#Create the state weight matrix, weight position 20000, and speed 100
		Q = [ [20000, 0], [0, 100] ]
		#Create input weight matrix
		R = [ [1] ]
		#Compute LQR
		LQR_vals = control.lqr(sys, Q, R)
		return LQR_vals[0] #return K values of LQR return
Example #39
    def lqr_position_attitude_control(self):
        m = self.M
        g = self.G
        q = self.state

        xi = q[0:3]
        xi_dot = q[6:9]
        phi = q[3,0]
        theta = q[4,0]
        psi = mmath.wrap_to_2pi(q[5,0])
        eta = np.array([[phi, theta, psi]]).T
        chi = np.vstack([xi, xi_dot])

        xi_bar = self.state_desired[0:3]
        psi_bar = mmath.wrap_to_2pi(self.state_desired[5,0])
        xi_dot_bar = self.state_desired[6:9]
        chi_bar = np.vstack([xi_bar, xi_dot_bar])

        chi_delta = (chi - chi_bar)

        A_pos = np.vstack([np.hstack([np.zeros((3,3)), np.identity(3)]),
                           np.zeros((3, 6))])

        B_pos = np.vstack([np.zeros((3, 3)), np.identity(3)])
        Q_pos = np.diag([2, 2, 2, 20, 20, 20])
        R_pos = np.diag(np.array([5, 5, 5]))
        K_pos,_,_ = lqr(A_pos, B_pos, Q_pos, R_pos)

        acc_vec = -np.dot(K_pos, chi_delta) + np.array([[0, 0, g]]).T

        Z = -acc_vec / np.linalg.norm(acc_vec)
        a = Z[0, 0]*sin(psi_bar) + Z[1, 0]*cos(psi_bar)
        b = -Z[2, 0]
        c = -a

        if np.allclose(a, 0):
            X3 = 0
        elif abs((-b+sqrt(b**2 - 4*a*c))/(2*a)) <= 1:
            X3 = (-b+sqrt(b**2 - 4*a*c))/(2*a)
        else:
            X3 = (-b-sqrt(b**2 - 4*a*c))/(2*a)

        d = sqrt(1 - X3**2)

        if psi_bar >= 0 and psi_bar < PI:
            X1 = sqrt(d**2 * (sin(psi_bar))**2)
        else:
            X1 = -sqrt(d**2 * (sin(psi_bar))**2)
        if (psi_bar >= 0 and psi_bar < PI/2) or psi_bar >= 3*PI/2:
            X2 = sqrt(d**2 * (cos(psi_bar))**2)
        else:
            X2 = -sqrt(d**2 * (cos(psi_bar))**2)

        X = np.array([[X1, X2, X3]]).T
        X = X / np.linalg.norm(X)
        Y = np.atleast_2d(np.cross(np.squeeze(Z), np.squeeze(X))).T
        Y = Y / np.linalg.norm(Y)

        R = np.hstack([X, Y, Z])

        (phi_bar, theta_bar, _) = mmath.rot2euler(np.dot(self.nominal_frame.rot_mat, R))
        eta_bar = np.array([[phi_bar, theta_bar, psi_bar]]).T
        eta_delta = eta - eta_bar
        eta_delta = np.array([[mmath.wrap_to_pi(ang) for ang in eta_delta]]).T


        Q_rot = np.diag([1, 1, 1, 1, 1, 1])
        R_rot = np.diag(np.array([5, 5, 5]))
        K_rot,_,_ = lqr(self.A_rot, self.B_rot, Q_rot, R_rot)
        X_rot = np.vstack([eta_delta, self.state[9:12]])
        torque_attitude = -np.dot(K_rot, X_rot)

        force = np.linalg.norm(acc_vec)*m

        u = np.vstack([force, torque_attitude])

        return u
Example #40
A = array([[0,      1,              0,           0],
           [0, -(I+m*l**2)*b/p,  (m**2*g*l**2)/p,   0],
           [0,      0,              0,           1],
           [0, -(m*l*b)/p,       m*g*l*(M+m)/p,  0]])
B = array([[0],
           [(I+m*l**2)/p],
           [0],
           [m*l/p]])
C = array([[1, 0, 0, 0],
           [0, 0, 1, 0]])
D = array([[0],
           [0]])

poles,vect = eig(A)
# Print the poles to verify that the open-loop system is unstable
print(poles)

K = control.place(A,B, [-1,-1,-1,-1])
print('K(place)=', K)

Q = C.transpose().dot(C)
Q[1,1]=5000
Q[3,3] = 100
R = 1
K,S,E = control.lqr(A,B,Q,R)
print('K(lqr)=', K)

# New control matrix
Ac = A-B.dot(K)
poles,vect = eig(Ac)
print "Ac poles=", poles