Example #1
0
def SSP(T, X0, Q, R, Qf, U_guess):
    """Plan a nominal open-loop trajectory in state space.

    Args:
        T: planning horizon (number of control steps).
        X0: initial state (length robot.nx).
        Q, R, Qf: stage-state, control, and terminal cost weights.
        U_guess: (robot.nu, T) warm start for the control sequence.

    Returns:
        (U_opti_ss, X_path_ss): the optimized controls and the
        (robot.nx, T+1) state path obtained by rolling the kinematics
        forward under them.  Xg is the module-global goal state.
    """
    # NOTE(review): algo.SSP is presumably declared as
    # (T, X0, Xg, R, Q, Qf, Xmin, Xmax) -- the R-before-Q order here is
    # kept as-is; verify against algo's definition.
    control = algo.SSP(T, X0, Xg, R, Q, Qf, params.Xmin, params.Xmax)

    # Flatten the warm start column-wise into an (nu*T, 1) vector, the
    # layout the solver expects.
    U_guess = np.reshape(U_guess.T, (robot.nu * T, 1))

    print("Solving state space problem...")
    U_opti_ss = control.solve_SSP(robot, X0, np.zeros(robot.nu), U_guess)
    print("Done.")

    # Roll the kinematics forward under the optimal controls.
    X_path_ss = np.zeros((robot.nx, T + 1))
    X_path_ss[:, 0] = X0
    for step in range(T):
        next_state = robot.kinematics(X_path_ss[:, step], U_opti_ss[:, step])
        # kinematics returns an (nx, 1) column; flatten it to length nx
        # so it can be assigned into the path array.
        X_path_ss[:, step + 1] = np.reshape(next_state, (robot.nx, ))

    return U_opti_ss, X_path_ss
Example #2
0
def TPFC_run_varying_epsilon():
    """Sweep epsilon for the trajectory-following PFC controller.

    Plans a nominal trajectory once (open loop), solves for the PFC gain,
    records the planning time in the module global ``OL_time``, then
    executes TPFC once per epsilon value in the sweep.
    """

    global OL_time
    #PFC gain
    # NOTE(review): Wx, Wxf and Wu are constructed here but never used --
    # algo.PFC below is called with the globals Q, R, Qf instead.  Confirm
    # whether these weights were meant to be passed to the controller.
    Wx = c.DM([[10, 0, 0, 0, 0, 0], [0, 10, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0],
               [0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0,
                                                        1]])  #10
    Wxf = c.DM([[100, 0, 0, 0, 0, 0], [0, 100, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0],
                [0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0,
                                                         1]])  #100
    Wu = c.DM([[1, 0], [0, 1]])  #10

    OL_start = time.time()
    # NOTE(review): the SSP definitions visible in this file take
    # (T, X0, Q, R, Qf, U_guess); this 3-argument call presumably targets an
    # older/other SSP definition -- verify before running.
    U_opti, X_nom = SSP(Q, R, Qf)

    control = algo.PFC(Q, R, Qf, T, Xg)

    print("Solving for PFC gain...")
    K_pfc = control.solve_K(robot, X_nom, U_opti)

    OL_end = time.time()

    # Open-loop (planning) time, reported by callers via the global.
    OL_time = OL_end - OL_start

    epsi_range = np.linspace(.45, 1.60, 24)
    print "Epsilon range:", epsi_range

    # Execute the closed-loop controller once per epsilon value.
    for epsilon in epsi_range:
        TPFC(control, U_opti, X_nom, K_pfc, epsilon)
def TLQR_run_varying_epsilon():
    """Sweep epsilon for the T-LQR controller.

    Plans a nominal trajectory once, solves the time-varying LQR gains
    around it, records planning time in the module global ``OL_time``,
    then executes TLQR once per epsilon value in the sweep.
    """

    global OL_time
    #LQR gain: x/y positions weighted 10 (50 terminally), other states 1.
    Wx = c.DM([[10, 0, 0, 0, 0, 0], [0, 10, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0],
               [0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0,
                                                        1]])  #10
    Wxf = c.DM([[50, 0, 0, 0, 0, 0], [0, 50, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0],
                [0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0,
                                                         1]])  #100
    Wu = c.DM([[1, 0], [0, 1]])  #10

    OL_start = time.time()
    # NOTE(review): the SSP definitions visible in this file take
    # (T, X0, Q, R, Qf, U_guess); this 3-argument call presumably targets an
    # older/other SSP definition -- verify before running.
    U_opti, X_nom = SSP(Q, R, Qf)

    control = algo.LQR(Wx, Wu, Wxf, T)

    print("Solving for LQR gain...")
    K_lqr = control.solve_K(robot, X_nom, U_opti)

    OL_end = time.time()

    # Open-loop (planning) time, reported by callers via the global.
    OL_time = OL_end - OL_start

    # Dense sweep near zero, coarser sweep up to 1.0.
    epsi_range = np.linspace(0., 0.40, 41)
    epsi_range = np.append(epsi_range, np.linspace(0.45, 1.00, 12), axis=0)
    print "Epsilon range:", epsi_range

    # Execute the closed-loop controller once per epsilon value.
    for epsilon in epsi_range:
        TLQR(control, U_opti, X_nom, K_lqr, epsilon)
def TLQR_run_varying_epsilon():

    global OL_time
    #LQR gain
    Wx = c.DM.eye(robot.nx) #10
    Wx[0,0] = 10; Wx[1,1] = 10; Wx[2,2] = 10;
    Wxf = 100*c.DM.eye(robot.nx)  #100
    Wu = c.DM.eye(robot.nu)

    OL_start = time.time()
    U_opti, X_nom = SSP(Q,R,Qf)

    control = algo.LQR(Wx,Wu,Wxf,T)

    print "U_nom:", U_opti
    #print "X_nom:", X_nom

    print("Solving for LQR gain...")
    K_lqr = control.solve_K(robot, X_nom, U_opti)

    OL_end = time.time()

    OL_time = OL_end - OL_start

    epsi_range = np.linspace(0.0,.2,21)
    #epsi_range = np.append(epsi_range, np.linspace(0.45,1.00,12), axis=0)
    print "Epsilon range:", epsi_range

    for epsilon in epsi_range:
        TLQR(control, U_opti, X_nom, K_lqr, epsilon)
def SSP(T, X0, Q, R, Qf, U_guess):
    #State space planning

    control = algo.SSP(T, X0, Xg, R, Q, Qf, params.Xmin, params.Xmax)

    #U_guess = np.zeros((robot.nu,T))

    U_guess = np.reshape(U_guess.T, (robot.nu * T, 1))

    print("Solving state space problem...")
    U_opti_ss = control.solve_SSP(robot, X0, np.zeros(robot.nu), U_guess)
    print("Done.")
    #print("Uss:",U_opti_ss)
    X_path_ss = np.zeros((robot.nx, T + 1))
    X_path_ss[:, 0] = X0
    Cost_OL_vec = np.zeros(T + 1)
    Cost_OL = 0

    for i in range(T):

        X_path_ss[:, i + 1] = np.reshape(
            robot.kinematics(X_path_ss[:, i], U_opti_ss[:, i]),
            (robot.nx,
             ))  #to make it compatible in dimension reshaping from (3,1) to 3

        Cost_OL = Cost_OL + c.mtimes(c.mtimes(c.reshape(X_path_ss[:,i],(1,robot.nx)),Q),c.reshape(X_path_ss[:,i],(robot.nx,1))) + \
                            c.mtimes(c.mtimes(c.reshape(U_opti_ss[:,i],(1,robot.nu)),R),c.reshape(U_opti_ss[:,i],(robot.nu,1)))

        if params.OBSTACLES:
            Obstacle_cost = control.obstacle_cost_func(X_path_ss[0:2, i])
            Cost_OL = Cost_OL + Obstacle_cost

        Cost_OL_vec[i] = Cost_OL

    Cost_OL = Cost_OL + c.mtimes(
        c.mtimes(c.reshape(Xg - X_path_ss[:, T], (1, robot.nx)), Qf),
        c.reshape(Xg - X_path_ss[:, T], (robot.nx, 1)))
    Cost_OL_vec[T] = Cost_OL

    print "U nom:", U_opti_ss

    return U_opti_ss, X_path_ss, Cost_OL_vec
def TLQR2_run_varying_epsilon():

    global OL_time
    T = 35
    X0 = np.array([0, 0, m.pi / 3, 0, 0, 0])

    #LQR gain
    Wx = c.DM([[10, 0, 0, 0, 0, 0], [0, 10, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0],
               [0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0,
                                                        1]])  #10
    Wxf = c.DM([[50, 0, 0, 0, 0, 0], [0, 50, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0],
                [0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0,
                                                         1]])  #100
    Wu = c.DM([[1, 0], [0, 1]])  #10

    U_guess = np.zeros((robot.nu, T))

    OL_start = time.time()
    U_opti, X_nom, Cost_nom = SSP(T, X0, Q, R, Qf, U_guess)

    control = algo.LQR(Wx, Wu, Wxf, T)

    print("Solving for LQR gain...")
    K_lqr = control.solve_K(robot, X_nom, U_opti)

    OL_end = time.time()

    OL_time = OL_end - OL_start

    replan_bound_vec = np.array([.02])
    print "replan bound range:", replan_bound_vec

    epsi_range = np.linspace(0.21, .40, 20)
    print "Epsilon range:", epsi_range

    blockPrint()
    for replan_bound in replan_bound_vec:
        for epsilon in epsi_range:
            TLQR2(control, T, X0, U_opti, X_nom, K_lqr, Cost_nom, replan_bound,
                  epsilon)
def TLQR2_run_varying_epsilon():

    global OL_time
    T = 30
    X0 = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])

    #LQR gain
    Wx = c.DM.eye(robot.nx)  #10
    Wx[0, 0] = 10
    Wx[1, 1] = 10
    Wx[2, 2] = 10
    Wxf = 100 * c.DM.eye(robot.nx)  #100
    Wu = c.DM.eye(robot.nu)

    U_guess = np.zeros((robot.nu, T))

    OL_start = time.time()
    U_opti, X_nom, Cost_nom = SSP(T, X0, Q, R, Qf, U_guess)

    control = algo.LQR(Wx, Wu, Wxf, T)

    print("Solving for LQR gain...")
    K_lqr = control.solve_K(robot, X_nom, U_opti)

    OL_end = time.time()

    OL_time = OL_end - OL_start

    replan_bound_vec = np.array([.05])
    print "replan bound range:", replan_bound_vec

    epsi_range = np.linspace(0.09, .12, 4)
    print "Epsilon range:", epsi_range

    blockPrint()
    for replan_bound in replan_bound_vec:
        for epsilon in epsi_range:
            TLQR2(control, T, X0, U_opti, X_nom, K_lqr, Cost_nom, replan_bound,
                  epsilon)
def TLQR2_hprc(epsilon, replan_bound):

    T = 40
    X0 = np.array([0, 0, m.pi / 3, 0, 0, 0])

    #LQR gain
    Wx = c.DM([[10, 0, 0, 0, 0, 0], [0, 10, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0],
               [0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0,
                                                        1]])  #10
    Wxf = c.DM([[50, 0, 0, 0, 0, 0], [0, 50, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0],
                [0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0,
                                                         1]])  #100
    Wu = c.DM([[1, 0], [0, 1]])  #10

    U_guess = np.zeros((robot.nu, T))

    OL_start = time.time()
    U_opti, X_nom, Cost_nom = SSP(T, X0, Q, R, Qf, U_guess)

    control = algo.LQR(Wx, Wu, Wxf, T)

    print("Solving for LQR gain...")
    K_lqr = control.solve_K(robot, X_nom, U_opti)

    OL_end = time.time()

    OL_time = OL_end - OL_start

    print("Executing Plan...")
    #Execution of plan

    start_exec = time.time()
    COMPLETE = False
    while not COMPLETE:

        try:

            X_act_CL = np.zeros((robot.nx, T + 1))

            X_act_CL[:, 0] = X0  #initial condition

            U_CL = np.zeros((robot.nu, T))
            Cost_CL = 0
            replan_count = 0
            replan = False
            replan_index = []

            #closed loop implementation
            for i in range(T):
                #Applying LQR control

                #print "X nom:", X_nom[:,i], " X act:", X_act_CL[:,i]
                if replan:
                    replan_count += 1

                    replan_index.append(i)

                    U_guess = U_opti[:, i:]
                    print "Replanning at step:", i
                    U_opti[:, i:T], X_nom[:, i:T + 1], _ = SSP(
                        T - i, X_act_CL[:, i], Q, R, Qf,
                        U_guess)  #calculating new nominal

                    #print "Cost_nom:", calc_cost(U_opti,X_nom,T)

                    control.T = T - i  #setting new horizon.
                    K_lqr[:, i:T + 1] = control.solve_K(
                        robot, X_nom[:, i:T + 1],
                        U_opti[:, i:T])  #calculating new LQR gains

                    replan = False

                U_temp = U_opti[:, i] - c.mtimes(
                    c.reshape(K_lqr[:, i], robot.nu, robot.nx),
                    (X_act_CL[:, i] - X_nom[:, i]))

                U_CL[:, i] = np.reshape(control.U_bounds(robot, U_temp),
                                        (robot.nu, ))

                X_act_CL[:, i + 1] = np.reshape(
                    robot.kinematics(X_act_CL[:, i], U_CL[:, i], epsilon),
                    (robot.nx, ))

                Cost_CL = Cost_CL + c.mtimes(c.mtimes(c.reshape(X_act_CL[:,i],(1,robot.nx)),Q),c.reshape(X_act_CL[:,i],(robot.nx,1))) + \
                                    c.mtimes(c.mtimes(c.reshape(U_CL[:,i],(1,robot.nu)),R),c.reshape(U_CL[:,i],(robot.nu,1)))

                if params.OBSTACLES:
                    Obstacle_cost = control.obstacle_cost_func(X_act_CL[0:2,
                                                                        i])
                    Cost_CL = Cost_CL + Obstacle_cost

                #enablePrint()
                print "Cost_CL:", Cost_CL, " Cost_nom:", Cost_nom[
                    i], " deviation:", abs(Cost_CL - Cost_nom[i]) / Cost_nom[i]
                blockPrint()

                if abs(Cost_CL - Cost_nom[i]) / Cost_nom[i] > replan_bound:
                    replan = True

            Cost_CL = Cost_CL + c.mtimes(
                c.mtimes(c.reshape(Xg - X_act_CL[:, T], (1, robot.nx)), Qf),
                c.reshape(Xg - X_act_CL[:, T], (robot.nx, 1)))
            #enablePrint()
            print "Cost_CL:", Cost_CL, " Cost_nom:", Cost_nom[
                T], " deviation:", abs(Cost_CL - Cost_nom[T]) / Cost_nom[T]
            blockPrint()

            COMPLETE = True

        except:
            print "Unexpected error:", sys.exc_info()

    end_exec = time.time()
    time_taken = OL_time + (end_exec - start_exec)

    return (Cost_CL, time_taken, replan_count)