# Example #1
def test_lqr_slew_rate():
    """A slew-rate penalty should (a) leave the solution unchanged as the
    penalty approaches 0 and (b) reduce the control slew when it is large.
    """
    n_batch = 2
    n_state, n_ctrl = 3, 4
    n_sc = n_state + n_ctrl
    T = 5
    alpha = 0.2

    torch.manual_seed(1)
    # Random PSD quadratic cost (C^T C) and a random affine dynamics model.
    C = torch.randn(T, n_batch, n_sc, n_sc)
    C = C.transpose(2, 3).matmul(C)
    c = torch.randn(T, n_batch, n_sc)
    x_init = torch.randn(n_batch, n_state)
    R = torch.eye(n_state) + alpha*torch.randn(n_state, n_state)
    S = torch.randn(n_state, n_ctrl)
    f = torch.randn(n_state)
    C, c, x_init, R, S, f = map(Variable, (C, c, x_init, R, S, f))

    dynamics = AffineDynamics(R, S, f)

    # Baseline solve without a slew-rate penalty.
    x, u, objs = mpc.MPC(
        n_state, n_ctrl, T,
        u_lower=None, u_upper=None, u_init=None,
        lqr_iter=10,
        backprop=False,
        verbose=1,
        exit_unconverged=False,
        eps=1e-4,
    )(x_init, QuadCost(C, c), dynamics)

    # The solution should be the same when the slew rate approaches 0.
    x_slew_eps, u_slew_eps, objs_slew_eps = mpc.MPC(
        n_state, n_ctrl, T,
        u_lower=None, u_upper=None, u_init=None,
        lqr_iter=10,
        backprop=False,
        verbose=1,
        exit_unconverged=False,
        eps=1e-4,
        slew_rate_penalty=1e-6,
    )(x_init, QuadCost(C, c), dynamics)

    npt.assert_allclose(x.data.numpy(), x_slew_eps.data.numpy(), atol=1e-3)
    npt.assert_allclose(u.data.numpy(), u_slew_eps.data.numpy(), atol=1e-3)

    # A substantial slew penalty makes the original objective strictly worse.
    x_slew, u_slew, objs_slew = mpc.MPC(
        n_state, n_ctrl, T,
        u_lower=None, u_upper=None, u_init=None,
        lqr_iter=10,
        backprop=False,
        verbose=1,
        exit_unconverged=False,
        eps=1e-4,
        slew_rate_penalty=1.,
    )(x_init, QuadCost(C, c), dynamics)

    # np.alltrue was deprecated and removed in NumPy 2.0; np.all is the
    # canonical spelling and behaves identically here.
    assert np.all((objs < objs_slew).numpy())

    # The penalty should shrink the step-to-step control differences.
    d = torch.norm(u[:-1] - u[1:]).item()
    d_slew = torch.norm(u_slew[:-1] - u_slew[1:]).item()
    assert d_slew < d

def test_lqr_linear_bounded_delta():
    """With a per-iteration trust region `delta_u` and a warm start of zero,
    a single LQR iteration must keep all controls within that trust region.
    """
    npr.seed(1)

    n_batch = 2
    n_state, n_ctrl, T = 3, 4, 5
    n_sc = n_state + n_ctrl
    # Random PSD quadratic cost and (weakly controllable) affine dynamics.
    C = npr.randn(T, n_batch, n_sc, n_sc)
    C = np.matmul(C.transpose(0, 1, 3, 2), C)
    c = npr.randn(T, n_batch, n_sc)
    alpha = 0.2
    R = np.tile(
        np.eye(n_state) + alpha * np.random.randn(n_state, n_state),
        (T, n_batch, 1, 1))
    S = 0.01 * np.tile(np.random.randn(n_state, n_ctrl), (T, n_batch, 1, 1))
    F = np.concatenate((R, S), axis=3)
    f = np.tile(npr.randn(n_state), (T, n_batch, 1))
    x_init = npr.randn(n_batch, n_state)
    u_lower = -npr.random((T, n_batch, n_ctrl))
    u_upper = npr.random((T, n_batch, n_ctrl))

    # Reference solution from the cvxpy-based solver (first batch element).
    tau_cp, objs_cp = lqr_cp(
        C[:, 0],
        c[:, 0],
        F[:, 0],
        f[:, 0],
        x_init[0],
        T,
        n_state,
        n_ctrl,
        u_lower[:, 0],
        u_upper[:, 0],
    )
    tau_cp = tau_cp.T
    x_cp = tau_cp[:, :n_state]
    u_cp = tau_cp[:, n_state:]

    C, c, R, S, F, f, x_init, u_lower, u_upper = [
        Variable(torch.Tensor(x).double()) if x is not None else None
        for x in [C, c, R, S, F, f, x_init, u_lower, u_upper]
    ]
    dynamics = AffineDynamics(R[0, 0], S[0, 0], f[0, 0])

    delta_u = 0.1
    # Consistency fix: the other tests in this file construct the solver as
    # MPC(n_state, n_ctrl, T, u_lower, u_upper, ...) and then call it with
    # (x_init, QuadCost(C, c), dynamics). The original passed x_init into the
    # constructor's u_lower slot and called with (C, c, dynamics).
    x_lqr, u_lqr, objs_lqr = mpc.MPC(
        n_state,
        n_ctrl,
        T,
        u_lower,
        u_upper,
        lqr_iter=1,
        verbose=1,
        delta_u=delta_u,
        backprop=False,
        exit_unconverged=False,
    )(x_init, QuadCost(C, c), dynamics)

    # One iteration from u=0 can move each control by at most delta_u.
    u_lqr = util.get_data_maybe(u_lqr)
    assert torch.abs(u_lqr).max() <= delta_u
# Example #3
def test_lqr_linear_unbounded():
    """The batched LQR solver should match the cvxpy reference solution,
    both with very loose (inactive) control bounds and with no bounds.
    """
    npr.seed(1)

    n_batch = 2
    n_state, n_ctrl = 3, 4
    n_sc = n_state + n_ctrl
    T = 5

    # Random PSD stage costs and an affine dynamics model tiled over
    # time and batch dimensions.
    C = npr.randn(T, n_batch, n_sc, n_sc)
    C = np.matmul(C.transpose(0, 1, 3, 2), C)
    c = npr.randn(T, n_batch, n_sc)
    alpha = 0.2
    R = np.tile(np.eye(n_state)+alpha*np.random.randn(n_state, n_state),
                (T, n_batch, 1, 1))
    S = np.tile(np.random.randn(n_state, n_ctrl), (T, n_batch, 1, 1))
    F = np.concatenate((R, S), axis=3)
    f = np.tile(npr.randn(n_state), (T, n_batch, 1))
    x_init = npr.randn(n_batch, n_state)
    # Bounds wide enough that they should never be active.
    u_lower = -1e4*np.ones((T, n_batch, n_ctrl))
    u_upper = 1e4*np.ones((T, n_batch, n_ctrl))

    # Reference trajectory from the cvxpy solver (first batch element,
    # unconstrained).
    tau_cp, objs_cp = lqr_cp(
        C[:,0], c[:,0], F[:,0], f[:,0], x_init[0], T, n_state, n_ctrl,
        None, None
    )
    tau_cp = tau_cp.T
    x_cp = tau_cp[:,:n_state]
    u_cp = tau_cp[:,n_state:]

    C, c, R, S, F, f, x_init, u_lower, u_upper = [
        Variable(torch.Tensor(x).double()) if x is not None else None
        for x in [C, c, R, S, F, f, x_init, u_lower, u_upper]
    ]

    dynamics = AffineDynamics(R[0,0], S[0,0], f[0,0])

    def solve_and_compare(lo, hi, **solver_kwargs):
        # Run the torch solver and check the first batch element against
        # the cvxpy reference trajectory.
        x_lqr, u_lqr, objs_lqr = mpc.MPC(
            n_state, n_ctrl, T, lo, hi, None,
            lqr_iter=10,
            backprop=False,
            **solver_kwargs
        )(x_init, QuadCost(C, c), dynamics)
        tau_lqr = util.get_data_maybe(torch.cat((x_lqr, u_lqr), 2))
        npt.assert_allclose(tau_cp, tau_lqr[:,0].numpy(), rtol=1e-3)

    # Loose bounds: solver must converge and match the reference.
    solve_and_compare(u_lower, u_upper, verbose=1, exit_unconverged=True)
    # No bounds at all: same solution.
    solve_and_compare(None, None, exit_unconverged=False)
# Example #4
def test_lqr_backward_cost_affine_dynamics_module_constrained():
    """The Jacobian du/dF of the optimal controls w.r.t. the dynamics
    matrix F must agree whether the dynamics are given as LinDx(F) or as
    an AffineDynamics module built from the same matrices.
    """
    npr.seed(0)
    torch.manual_seed(0)
    n_batch, n_state, n_ctrl, T = 1, 2, 2, 2
    n_sc = n_state + n_ctrl

    # Random PSD quadratic cost, scaled up to make gradients non-trivial.
    C = 10.*npr.randn(T, n_batch, n_sc, n_sc).astype(np.float64)
    C = np.matmul(C.transpose(0, 1, 3, 2), C)
    c = 10.*npr.randn(T, n_batch, n_sc).astype(np.float64)

    x_init = npr.randn(n_batch, n_state).astype(np.float64)
    # Box constraints wide enough to be occasionally active.
    beta = 2.0
    u_lower = -beta*np.ones((T, n_batch, n_ctrl)).astype(np.float64)
    u_upper = beta*np.ones((T, n_batch, n_ctrl)).astype(np.float64)

    _C, _c, _x_init, _u_lower, _u_upper = [
        Variable(torch.Tensor(x).double(), requires_grad=True)
        if x is not None else None
        for x in [C, c, x_init, u_lower, u_upper]
    ]
    # One random dynamics matrix repeated over the horizon so that the two
    # dynamics representations below describe the same system.
    F = Variable(
        torch.randn(1, 1, n_state, n_sc).repeat(T-1, 1, 1, 1).double(),
        requires_grad=True)
    dynamics = AffineDynamics(F[0,0,:,:n_state], F[0,0,:,n_state:])

    def _du_dF(dx):
        # Solve the constrained problem with dynamics `dx` and return the
        # Jacobian of the flattened controls w.r.t. F as a numpy array.
        x_lqr, u_lqr, objs_lqr = mpc.MPC(
            n_state, n_ctrl, T, _u_lower, _u_upper, None,
            lqr_iter=20,
            verbose=1,
        )(_x_init, QuadCost(_C, _c), dx)
        u_lqr_flat = u_lqr.view(-1)
        rows = [
            grad(u_lqr_flat[i], [F], create_graph=True)[0].view(-1)
            for i in range(len(u_lqr_flat))
        ]
        return torch.stack(rows).data.numpy()

    du_dF = _du_dF(LinDx(F))
    du_dF_ = _du_dF(dynamics)
    npt.assert_allclose(du_dF, du_dF_, atol=1e-4)