Example #1
# economic mpc controller
ctrls = {}
sys  = tuner.sys
ctrls['economic'] = tuner.create_mpc('economic',N = N)

# normal tracking mpc controller
tuningTn = {'H': [np.diag([10.0, 10.0, 0.1, 0.1])], 'q': S['q']}
ctrls['tracking'] = tuner.create_mpc('tracking',N = N, tuning = tuningTn)

# tuned tracking mpc controller
ctrls['tuned'] = tuner.create_mpc('tuned',N = N)

# check feedback policy equivalence
alpha = np.linspace(0.0, 1.0, 10) # sweep factor
dP2 = 1.0 # initial state perturbation
log = clt.check_equivalence(ctrls, objective(x, u, data), sys['h'],
                            wsol['x', 0], ca.vertcat(0.0, dP2), alpha)
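
# hedged quick check (uses the same log indexing as the plotting loop below):
# largest first-stage control deviation from the economic controller over the sweep
for name in ['tracking', 'tuned']:
    dev = max(abs(float(log[j]['u'][name][0][i]) -
                  float(log[j]['u']['economic'][0][i]))
              for j in range(len(alpha)) for i in range(nu))
    print('{}: max |u0 - u0_economic| = {:.3e}'.format(name, dev))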

# plot feedback controls to check equivalence
ctrl_name = list(u.keys())  # control labels (list() so they can be indexed below)
for name in list(ctrls.keys()):
    for i in range(nu):
        plt.figure(i)
        plt.plot(
            [a * dP2 for a in alpha],
            [log[j]['u'][name][0][i] - log[j]['u']['economic'][0][i]
             for j in range(len(alpha))])
        plt.legend(list(ctrls.keys()))
        plt.xlabel('dP2')
        plt.ylabel('u0 - u0_economic: {}'.format(ctrl_name[i]))
        plt.title('Feedback policy deviation')
        plt.grid(True)
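
# hedged interpretation: the 'tuned' tracking controller is constructed to match
# the economic feedback to first order, so its deviation curve should stay flat
# around dP2 = 0, whereas the hand-tuned 'tracking' controller typically departs
# linearly; display the figures to compare
plt.show()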
Example #2
alpha = np.linspace(-1.0, 1.0, alpha_steps + 1)  # deviation sweep grid
dz = 8  # max. deviation
x0 = wsol['x', 0]
tgrid = [1 / user_input['p'] * i for i in range(Nmpc)]
tgridx = tgrid + [tgrid[-1] + 1 / user_input['p']]
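# sanity check: tgrid has one node per control interval, tgridx adds the
# terminal node used for plotting state trajectories
assert len(tgrid) == Nmpc and len(tgridx) == Nmpc + 1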

# open loop simulation
import copy
log = []
for alph in alpha:
    x_init = copy.deepcopy(x0)
    x_init[2] = x_init[2] + alph * dz
    x_init[0] = np.sqrt(-x_init[2]**2 - x_init[1]**2 + (l_t)**2)
    x_init[5] = -(x_init[0] * x_init[3] + x_init[1] * x_init[4]) / x_init[2]
    log.append(
        clt.check_equivalence(ctrls, user_input['l'], user_input['h'], x0,
                              x_init - x0, [1.0])[-1])
    for name in list(ctrls.keys()):
        ctrls[name].reset()
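
# hedged sanity check on the last perturbed state: the assignments above keep the
# position on the sphere of radius l_t and the velocity tangential to it
# (assuming the first six states are [x, y, z, vx, vy, vz])
pos = [float(x_init[k]) for k in range(3)]
vel = [float(x_init[k]) for k in range(3, 6)]
assert abs(sum(p**2 for p in pos) - l_t**2) < 1e-8
assert abs(sum(p * v for p, v in zip(pos, vel))) < 1e-8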

# optimal stage cost and constraints for comparison
lOpt, hOpt = [], []
for k in range(Nmpc):
    lOpt.append(user_input['l'](wsol['x', k % user_input['p']],
                                wsol['u', k % user_input['p']]).full()[0][0])
    hOpt.append(user_input['h'](wsol['x', k % user_input['p']],
                                wsol['u', k % user_input['p']]).full())
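
# hedged illustration: the optimal stage cost can be drawn directly on tgrid
# (one value per control interval) for later comparison with the logged runs
plt.figure()
plt.step(tgrid, lOpt, where='post')
plt.xlabel('t')
plt.ylabel('optimal stage cost')
plt.grid(True)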

# plotting options
alpha_plot = -1
lw = 2
ctrls_colors = {
Example #3
lOpt, hOpt = [], []
for k in range(Nmpc):
    lOpt.append(user_input['l'](sol['wsol']['x', k % user_input['p']],
                                sol['wsol']['u', k % user_input['p']]).full()[0][0])
    hOpt.append(user_input['h'](sol['wsol']['x', k % user_input['p']],
                                sol['wsol']['u', k % user_input['p']]).full())

# open loop simulation
import copy
log = []
log_acados = []
for alph in alpha:
    x_init = copy.deepcopy(x0)
    x_init[2] = x_init[2] + alph * dz
    x_init[0] = np.sqrt(-x_init[2]**2 - x_init[1]**2 + (l_t)**2)
    x_init[5] = -(x_init[0] * x_init[3] + x_init[1] * x_init[4]) / x_init[2]
    if TUNEMPC_SIM:
        log.append(
            clt.check_equivalence(ctrls, user_input['l'], user_input['h'], x0,
                                  x_init - x0, [1.0])[-1])
    if ACADOS_SIM:
        log_acados.append(clt.check_equivalence(
            ctrls_acados,
            user_input['l'],
            user_input['h'],
            x0,
            x_init-x0,
            [1.0],
            flag = 'acados')[-1])
    for name in list(ctrls.keys()):
        ctrls[name].reset()
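
# hedged consistency check: one logged result per sweep value for whichever
# simulation paths were enabled above
if TUNEMPC_SIM:
    assert len(log) == len(alpha)
if ACADOS_SIM:
    assert len(log_acados) == len(alpha)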

# plotting options
alpha_plot = -1
lw = 2
Example #4
if ACADOS_CODEGENERATE:  # enclosing guard inferred from its use further below
    ctrls_acados = {'tuned_acados': ctrls['tuned']}

# check equivalence
alpha = np.linspace(-0.1, 1.0, 10)
x0 = wsol['x', 0]
dx_diehl = np.array([1.0, 0.5, 100.0, 100.0]) - wsol['x', 0]

# compute open-loop prediction for list of deviations in different state directions
log = []
log_acados = []
for dist in [0]:
    dx = np.zeros((nx, 1))
    dx[dist] = dx_diehl[dist]
    log.append(
        clt.check_equivalence(ctrls, cost, tuner.sys['h'], x0, dx, alpha))

    if ACADOS_CODEGENERATE:
        log_acados.append(
            clt.check_equivalence(ctrls_acados,
                                  cost,
                                  tuner.sys['h'],
                                  x0,
                                  dx,
                                  alpha,
                                  flag='acados'))
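
# hedged note on layout: `log` is indexed first by state direction, then by
# sweep value (one check_equivalence result per alpha), matching the
# log[dx_plot][alpha_plot] access used when plotting
assert len(log) == 1 and len(log[0]) == len(alpha)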

fig_num = 1
alpha_plot = -1  # alpha value of interest
dx_plot = 0  # state direction of interest
ctrl_list = list(ctrls.keys())
Example #5
# nmpc horizon length
N = 200

# hessian and gradient
[H, q, _, _, _] = tuner.pocp.get_sensitivities()

# economic mpc controller
ctrls = {}
sys  = tuner.sys
ctrls['economic'] = tuner.create_mpc('economic',N = N)

# normal tracking mpc controller
tuningTn = {'H': [np.diag([10.0, 10.0, 0.1, 0.1])], 'q': q}
ctrls['tracking'] = tuner.create_mpc('tracking',N = N, tuning = tuningTn)

# tuned tracking mpc controller
ctrls['tuned'] = tuner.create_mpc('tuned',N = N)

alpha = [0.1, 0.5, 1.0]
log = clt.check_equivalence(ctrls, objective(x, u, data), sys['h'],
                            wsol['x', 0], ca.vertcat(0.0, 10.0), alpha)

# plot feedback controls to check equivalence
for name in list(ctrls.keys()):
    for i in range(nu):
        plt.figure(i)
        plt.plot(alpha, [log[j]['u'][name][0][i] for j in range(len(alpha))])
        plt.legend(list(ctrls.keys()))

plt.show()
Example #6
# tuned tracking mpc controller
ctrls['tuned'] = tuner.create_mpc('tuned', N, opts=opts)

# check equivalence
alpha = np.linspace(-0.1, 1.0, 2)
x0 = wsol['x', 0]
dx_diehl = np.array([1.0, 0.5, 100.0, 100.0]) - wsol['x', 0]

# compute open-loop prediction for list of deviations in different state directions
log = []
for dist in [0]:
    dx = np.zeros((nx, 1))
    dx[dist] = dx_diehl[dist]
    log.append(
        clt.check_equivalence(ctrls, cost, tuner.sys['h'], x0, dx, alpha))

fig_num = 1
alpha_plot = -1  # alpha value of interest
dx_plot = 0  # state direction of interest
for name in list(ctrls.keys()):

    # plot controls
    plt.figure(fig_num)
    for i in range(nu):
        plt.subplot(nu, 1, i + 1)
        plt.step(tgrid, [
            log[dx_plot][alpha_plot]['u'][name][j][i]
            for j in range(len(tgrid))
        ],
                 where='post')
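        # hedged finishing touches, mirroring the other examples
        plt.ylabel('u[{}]'.format(i))
        plt.grid(True)
        plt.legend(list(ctrls.keys()))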
Example #7
# optimal stage cost and constraints for comparison
lOpt, hOpt = [], []
for k in range(Nmpc):
    lOpt.append(user_input['l'](sol['wsol']['x', k % user_input['p']],
                                sol['wsol']['u', k % user_input['p']]).full()[0][0])
    hOpt.append(user_input['h'](sol['wsol']['x', k % user_input['p']],
                                sol['wsol']['u', k % user_input['p']]).full())

# open loop simulation
import copy
log = []
log_acados = []
for alph in alpha:
    x_init = copy.deepcopy(x0)
    x_init[2] = x_init[2] + alph * dz
    x_init[0] = np.sqrt(-x_init[2]**2 - x_init[1]**2 + (l_t)**2)
    x_init[5] = -(x_init[0] * x_init[3] + x_init[1] * x_init[4]) / x_init[2]
    log.append(
        clt.check_equivalence(ctrls, user_input['l'], user_input['h'], x0,
                              x_init - x0, [1.0])[-1])
    if ACADOS_CODEGENERATE:
        log_acados.append(
            clt.check_equivalence({'TUNEMPC_ACADOS': ctrls['TUNEMPC']},
                                  user_input['l'],
                                  user_input['h'],
                                  x0,
                                  x_init - x0, [1.0],
                                  flag='acados')[-1])
    for name in list(ctrls.keys()):
        ctrls[name].reset()
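
# hedged cross-check (assumes the log layout used in the other examples and nu
# control inputs): the acados-generated solver should reproduce the first-stage
# TuneMPC feedback up to solver tolerances
if ACADOS_CODEGENERATE:
    for i in range(nu):
        diff = abs(float(log[-1]['u']['TUNEMPC'][0][i]) -
                   float(log_acados[-1]['u']['TUNEMPC_ACADOS'][0][i]))
        print('u[{}]: |TuneMPC - acados| = {:.3e}'.format(i, diff))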

# plotting options
alpha_plot = -1
lw = 2
ctrls_colors = {