def multi_waypoint_scenario(Ti,Ts,Tf,ctrlType,trajSelect,numTimeStep):
    pos_obs = np.array([[50,0,0]])
    quad0 = Quadcopter(Ti, Tf, ctrlType, trajSelect, numTimeStep, Ts, quad_id = 0, mode='ennemy', id_targ = -1, color = 'blue', pos_ini = [0,0,0], pos_goal= [0,-17,-10], pos_obs = pos_obs)
    quad1 = Quadcopter(Ti, Tf, ctrlType, trajSelect, numTimeStep, Ts, quad_id = 1, mode='ennemy', id_targ = -1, color = 'green', pos_ini = [20,0,0], pos_goal = [-20,-15,-10], pos_obs = pos_obs)
    quad2 = Quadcopter(Ti, Tf, ctrlType, trajSelect, numTimeStep, Ts, quad_id = 2, mode='ennemy', id_targ = -1, color = 'red', pos_ini = [-20,-10,0], pos_goal = [-10,0,-20], pos_obs = pos_obs)
    quads = [quad0, quad1, quad2]
    return pos_obs,quads
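# Usage sketch (hypothetical driver; Quadcopter, the controllers and the sim
# loop come from the host project, and the trajSelect values are illustrative):
#   Ti, Ts, Tf = 0, 0.005, 18
#   numTimeStep = int(Tf / Ts + 1)
#   pos_obs, quads = multi_waypoint_scenario(Ti, Ts, Tf, "xyz_pos",
#                                            np.array([6, 3, 1]), numTimeStep)
#   for quad in quads:
#       ...  # step each quad's controller and dynamics for numTimeStep steps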
def full_scenario(Ti,Ts,Tf,ctrlType,trajSelect,numTimeStep):
    pos_obs = np.array([[1, 5, -2], [8, 2, -8], [5, 8, -9], [0, 0, -2], [3, 3, -1],[3, 9, -17],[5, 7, -18],[0, 0, -10],[5, 10, -16],[10,10,-12],[13,13,-13]])
    quad0 = Quadcopter(Ti, Tf, ctrlType, trajSelect, numTimeStep, Ts, quad_id = 0, mode='ennemy', id_targ = -1, color = 'blue', pos_ini = [0,0,0], pos_goal= [15,15,-15], pos_obs = pos_obs)
    quad1 = Quadcopter(Ti, Ts*90, ctrlType, trajSelect, numTimeStep, Ts, quad_id = 1, mode='guided', id_targ = -1, color = 'green', pos_ini = [0,3,0], pos_goal = [15,10,-15], pos_obs = pos_obs)
    quad2 = Quadcopter(Ti, Ts*100, ctrlType, trajSelect, numTimeStep, Ts, quad_id = 2,  mode='track', id_targ = 0, color = 'pink', pos_ini = [3,0,0], pos_goal = [15,20,-15], pos_obs = pos_obs)
    quads = [quad0, quad1, quad2]
    return pos_obs,quads
def gazebo_track(Ts):
    pos_obs = np.array([[-50, 0, 0]])

    quad0 = Quadcopter(Ts=Ts,
                       quad_id=0,
                       mode='ennemy',
                       id_targ=-1,
                       pos_goal=[120, 0, -10],
                       pos_obs=pos_obs,
                       channel_id='udp:127.0.0.1:14551',
                       global_frame=None)
    # The global frame is anchored by the first drone, which starts at the origin
    global_frame = quad0.global_frame
    quad1 = Quadcopter(Ts=Ts,
                       quad_id=1,
                       mode='track',
                       id_targ=0,
                       pos_goal=[0, 0, -10],
                       pos_obs=pos_obs,
                       channel_id='udp:127.0.0.1:14561',
                       global_frame=global_frame)
    #quad2 = Quadcopter(Ts = Ts, quad_id = 2, mode='ennemy', id_targ = -1, pos_goal= [-100,0,-10], pos_obs = pos_obs, channel_id='udp:127.0.0.1:14571', global_frame=global_frame)
    #quad3 = Quadcopter(Ts = Ts, quad_id = 3, mode='ennemy', id_targ = -1, pos_goal= [-100,0,-10], pos_obs = pos_obs, channel_id='udp:127.0.0.1:14581', global_frame=global_frame)
    #quad4 = Quadcopter(Ts = Ts, quad_id = 4, mode='ennemy', id_targ = -1, pos_goal= [-100,0,-10], pos_obs = pos_obs, channel_id='udp:127.0.0.1:14591', global_frame=None)
    #quad5 = Quadcopter(Ts = Ts, quad_id = 5, mode='ennemy', id_targ = -1, pos_goal= [-100,0,-10], pos_obs = pos_obs, channel_id='udp:127.0.0.1:14601', global_frame=None)
    #quad6 = Quadcopter(Ts = Ts, quad_id = 6, mode='ennemy', id_targ = -1, pos_goal= [-100,0,-10], pos_obs = pos_obs, channel_id='udp:127.0.0.1:14611', global_frame=None)

    quads = [quad0, quad1]  #, quad2, quad3]

    return pos_obs, quads
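# The channel_id endpoints above follow the common ArduPilot SITL convention of
# one UDP port per vehicle instance (14551, 14561, ... stepping by 10); that is
# an observation about these particular values, not a requirement of the code.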
def multi_tracking_scenario(Ti,Ts,Tf,ctrlType,trajSelect,numTimeStep):
    pos_obs = np.array([[-10,-10,0]])
    quad0 = Quadcopter(Ti, Tf, ctrlType, trajSelect, numTimeStep, Ts, quad_id = 0, mode='ennemy', id_targ = -1, color = 'blue',  pos_ini = [0,0,0],  pos_goal = [15,15,-15],  pos_obs = pos_obs)
    quad1 = Quadcopter(Ti, Ts*100, ctrlType, trajSelect, numTimeStep, Ts, quad_id = 1, mode='track', id_targ = 0, color = 'green', pos_ini = [4,0,0], pos_goal = [4,4,-10],  pos_obs = pos_obs)
    quad2 = Quadcopter(Ti, Ts*100, ctrlType, trajSelect, numTimeStep, Ts, quad_id = 2, mode='track', id_targ = 0, color = 'green', pos_ini = [4,4,0], pos_goal = [4,4,-10],  pos_obs = pos_obs)
    quad3 = Quadcopter(Ti, Ts*100, ctrlType, trajSelect, numTimeStep, Ts, quad_id = 3, mode='track', id_targ = 0, color = 'green', pos_ini = [4,-4,0], pos_goal = [4,4,-10],  pos_obs = pos_obs)
    quads = [quad0, quad1, quad2, quad3]
    return pos_obs,quads
def tracking_loop_scenario(x,Ti,Ts,Tf,ctrlType,trajSelect,numTimeStep):
    pos_obs = np.array([[x/2,x/2,-10]])
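    # Four quads chase each other in a closed loop (0 tracks 1, 1 tracks 2,
    # 2 tracks 3, 3 tracks 0) around a single obstacle at the square's centre.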
    quad0 = Quadcopter(Ti, Ts*99, ctrlType, trajSelect, numTimeStep, Ts, quad_id = 0, mode='track', id_targ = 1, color = 'blue',  pos_ini = [0,0,-10],  pos_goal = [0,x,-10],  pos_obs = pos_obs)
    quad1 = Quadcopter(Ti, Ts*100, ctrlType, trajSelect, numTimeStep, Ts, quad_id = 1, mode='track', id_targ = 2, color = 'green', pos_ini = [x,0,-10], pos_goal = [0,0,-10],  pos_obs = pos_obs)
    quad2 = Quadcopter(Ti, Ts*101, ctrlType, trajSelect, numTimeStep, Ts, quad_id = 2, mode='track', id_targ = 3, color = 'orange', pos_ini = [x,x,-10],pos_goal = [x,0,-10], pos_obs = pos_obs)
    quad3 = Quadcopter(Ti, Ts*102, ctrlType, trajSelect, numTimeStep, Ts, quad_id = 3, mode='track', id_targ = 0, color = 'pink', pos_ini = [0,x,-10], pos_goal = [x,x,-10],pos_obs = pos_obs)
    quads = [quad0, quad1,quad2,quad3]
    return pos_obs,quads
def dynamic_CA_scenario(Ti,Ts,Tf,ctrlType,trajSelect,numTimeStep):
    # intended final time: Tf = 8 s
    pos_obs = np.array([[50,0,0]])
    quad0 = Quadcopter(Ti, Ts*100, ctrlType, trajSelect, numTimeStep, Ts, quad_id = 0, mode='guided', id_targ = -1, color = 'blue',  pos_ini = [0,10,-5],pos_goal = [30,10,-5], pos_obs = pos_obs)
    quad1 = Quadcopter(Ti, Tf, ctrlType, trajSelect, numTimeStep, Ts, quad_id = 1, mode='ennemy', id_targ = -1, color = 'green', pos_ini = [3,0,-5], pos_goal = [3,20,-5], pos_obs = pos_obs)
    quad2 = Quadcopter(Ti, Tf, ctrlType, trajSelect, numTimeStep, Ts, quad_id = 2, mode='ennemy', id_targ = -1, color = 'green', pos_ini = [8,0,-5], pos_goal = [8,20,-5], pos_obs = pos_obs)
    quad3 = Quadcopter(Ti, Tf, ctrlType, trajSelect, numTimeStep, Ts, quad_id = 3, mode='ennemy', id_targ = -1, color = 'green', pos_ini = [15,0,-5], pos_goal = [15,20,-5], pos_obs = pos_obs)
    quads = [quad0, quad1,quad2,quad3]
    return pos_obs,quads
def real_map(Ti,Ts,Tf,ctrlType,trajSelect,numTimeStep):

    xs = [-1,0,1]
    ys = [-1,0,1]
    zs = [0,-1,-2,-3,-4,-5,-6,-7,-8,-9,-10]
    tower = [[x,y,z] for x in xs for y in ys for z in zs]
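    # 3 x 3 x 11 = 99 unit-spaced points approximating a tower from z = 0 down
    # to z = -10 (altitudes are negative z throughout these scenarios).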

    xs = [-20,5,10]
    ys = [5,-10,10]
    zs = [0,-1,-2,-3]
    trees = [[x,y,z] for x in xs for y in ys for z in zs]

    xs = [-20,5,10]
    ys = [5,-10,10]
    zs = [-4,-5]

    tops = []
    for i in range(3):
        x, y = xs[i], ys[i]
        for z in zs:
            tops += [[x-1,y,z],[x+1,y,z],[x,y,z],[x,y-1,z],[x,y+1,z]]

    pos_obs = np.array(tower + trees + tops)

    quad0 = Quadcopter(Ti, Tf, ctrlType, trajSelect, numTimeStep, Ts, quad_id = 0, mode='ennemy', id_targ = -1, color = 'blue',  pos_ini = [0,0,-5],  pos_goal = [-10,-10,-10],  pos_obs = pos_obs)
    quads = [quad0]
    return pos_obs,quads
def ROS_simu(Ti,Ts,Tf,ctrlType,trajSelect,numTimeStep):
    fire_station=[]
    fire_truck=[]
    tree_1=[]
    tree_2=[]
    pos_obs=[]
    for i in range(20):
        x = random.sample(range(-10, 10), 1)[0]
        y = random.sample(range(-55, -45), 1)[0]
        z = random.sample(range(-12, 0), 1)[0]
        fire_station.append([x,y,z])
    
    for i in range(5):
        x = random.sample(range(-19, 21), 1)[0]
        y = random.sample(range(-55, -45), 1)[0]
        z = random.sample(range(-3, 0), 1)[0]
        fire_truck.append([x,y,z])

    for i in range(5):
        x = random.sample(range(-12, -8), 1)[0]
        y = random.sample(range(-42,-38), 1)[0]
        z = random.sample(range(-5, 0), 1)[0]
        tree_1.append([x,y,z])
    for i in range(5):
        x = random.sample(range(8, 12), 1)[0]
        y = random.sample(range(-42,-38), 1)[0]
        z = random.sample(range(-5, 0), 1)[0]
        tree_2.append([x,y,z])
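    # random.sample(range(a, b), 1)[0] is just a verbose random.randrange(a, b);
    # each cluster is a loose point cloud around one landmark of the simulated world.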

    pos_obs = fire_station + fire_truck + tree_1 + tree_2
    pos_obs = np.array(pos_obs)
    quad0 = Quadcopter(Ti, Ts*100, ctrlType, trajSelect, numTimeStep, Ts, quad_id = 0, mode='guided', id_targ = -1, color = 'blue',  pos_ini = [0,0,0],  pos_goal = [0,-100,-10],  pos_obs = pos_obs)
    quads = [quad0]
    return pos_obs, quads
def simple_guided_for_PF(Ti,Ts,Tf,ctrlType,trajSelect,numTimeStep):
    pos_obs = []
    for i in range(20):
        pos_obs.append(random.sample(range(-10, 0), 3))
    pos_obs = np.array(pos_obs)
    quad0 = Quadcopter(Ti, Ts*100, ctrlType, trajSelect, numTimeStep, Ts, quad_id = 0, mode='guided', id_targ = -1, color = 'blue',  pos_ini = [0,0,-5],  pos_goal = [-10,-10,-10],  pos_obs = pos_obs)
    quads = [quad0]
    return pos_obs,quads
def static_OA_scenario(Ti,Ts,Tf,ctrlType,trajSelect,numTimeStep):
    pos_obs = []
    for i in range(30):
        pos_obs.append(random.sample(range(-10, 0), 3))
    pos_obs = np.array(pos_obs)
    print(pos_obs)
    quad0 = Quadcopter(Ti, Tf, ctrlType, trajSelect, numTimeStep, Ts, quad_id = 0, mode='ennemy', id_targ = -1, color = 'blue', pos_ini = [0,0,0], pos_goal= [-10,-10,-10], pos_obs = pos_obs)
    quads = [quad0]
    return pos_obs,quads
def simple_wp_cycle(Ts):

    pos_obs = np.array([[1, 1, -20]])

    quad0 = Quadcopter(Ts=Ts,
                       quad_id=0,
                       mode='ennemy',
                       id_targ=-1,
                       pos_goal=[10, -10, -10],
                       pos_obs=pos_obs,
                       channel_id='udp:127.0.0.1:14551',
                       global_frame=None)
    quads = [quad0]
    return pos_obs, quads
def gazebo_oa(Ts):
    pos_obs = []
    for i in range(30):
        pos_obs.append(random.sample(range(-20, 0), 3))
    pos_obs = np.array(pos_obs)

    quad0 = Quadcopter(Ts=Ts,
                       quad_id=0,
                       mode='ennemy',
                       id_targ=-1,
                       pos_goal=[-20, -20, -20],
                       pos_obs=pos_obs,
                       channel_id='udp:127.0.0.1:14551',
                       global_frame=None)
    # The global frame is anchored by the first drone, which starts at the origin
    global_frame = quad0.global_frame
    #quad1 = Quadcopter(Ts = Ts, quad_id = 1, mode='ennemy', id_targ = -1, pos_goal= [5,-100,-15], pos_obs = pos_obs, channel_id='udp:127.0.0.1:14561', global_frame=global_frame)
    #quad2 = Quadcopter(Ts = Ts, quad_id = 2, mode='ennemy', id_targ = -1, pos_goal= [-5,-100,-8], pos_obs = pos_obs, channel_id='udp:127.0.0.1:14571', global_frame=global_frame)

    quads = [quad0]  #, quad1, quad2]

    return pos_obs, quads
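# Note: random.sample(range(-20, 0), 3) draws three *distinct* values per
# obstacle, so no obstacle ever has two equal coordinates; the same applies to
# simple_guided_for_PF and static_OA_scenario above.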
# Example 13
def main():
    start_time = time.time()

    # Simulation Setup
    # ---------------------------
    Ti = 0
    Ts = 0.005
    Tf = 18
    ifsave = 1

    # Choose trajectory settings
    # ---------------------------
    ctrlOptions = ["xyz_pos", "xy_vel_z_pos", "xyz_vel"]
    trajSelect = np.zeros(3)

    # Select Control Type             (0: xyz_pos,                  1: xy_vel_z_pos,            2: xyz_vel)
    ctrlType = ctrlOptions[0]
    # Select Position Trajectory Type (0: hover,                    1: pos_waypoint_timed,      2: pos_waypoint_interp,
    #                                  3: minimum velocity          4: minimum accel,           5: minimum jerk,           6: minimum snap
    #                                  7: minimum accel_stop        8: minimum jerk_stop        9: minimum snap_stop
    #                                 10: minimum jerk_full_stop   11: minimum snap_full_stop
    #                                 12: pos_waypoint_arrived
    trajSelect[0] = 6
    # Select Yaw Trajectory Type      (0: none                      1: yaw_waypoint_timed,      2: yaw_waypoint_interp     3: follow          4: zero)
    trajSelect[1] = 6
    # Select if waypoint time is used, or if average speed is used to calculate waypoint time   (0: waypoint time,   1: average speed)
    trajSelect[2] = 1
    print("Control type: {}".format(ctrlType))

    # Initialize Quadcopter, Controller, Wind, Result Matrices
    # ---------------------------
    quad = Quadcopter(Ti)
    traj = Trajectory(quad, ctrlType, trajSelect)
    ctrl = Control(quad, traj.yawType)
    wind = Wind("None", 2.0, 90, -15)

    # Trajectory for First Desired States
    # ---------------------------
    sDes = traj.desiredState(0, Ts, quad)  # np.zeros(19)

    # Generate First Commands
    # ---------------------------
    ctrl.controller(traj, quad, sDes, Ts)

    # Initialize Result Matrices
    # ---------------------------
    numTimeStep = int(Tf / Ts + 1)
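    # one row per simulation step, plus one for the initial state at t = Ti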
    (
        t_all,
        s_all,
        pos_all,
        vel_all,
        quat_all,
        omega_all,
        euler_all,
        sDes_traj_all,
        sDes_calc_all,
        w_cmd_all,
        wMotor_all,
        thr_all,
        tor_all,
    ) = init_data(quad, traj, ctrl, numTimeStep)

    t_all[0] = Ti
    s_all[0, :] = quad.state
    pos_all[0, :] = quad.pos
    vel_all[0, :] = quad.vel
    quat_all[0, :] = quad.quat
    omega_all[0, :] = quad.omega
    euler_all[0, :] = quad.euler
    sDes_traj_all[0, :] = traj.sDes
    sDes_calc_all[0, :] = ctrl.sDesCalc
    w_cmd_all[0, :] = ctrl.w_cmd
    wMotor_all[0, :] = quad.wMotor
    thr_all[0, :] = quad.thr
    tor_all[0, :] = quad.tor

    # Run Simulation
    # ---------------------------
    t = Ti
    i = 1
    while round(t, 3) < Tf:

        t = quad_sim(t, Ts, quad, ctrl, wind, traj)

        # print("{:.3f}".format(t))
        t_all[i] = t
        s_all[i, :] = quad.state
        pos_all[i, :] = quad.pos
        vel_all[i, :] = quad.vel
        quat_all[i, :] = quad.quat
        omega_all[i, :] = quad.omega
        euler_all[i, :] = quad.euler
        sDes_traj_all[i, :] = traj.sDes
        sDes_calc_all[i, :] = ctrl.sDesCalc
        w_cmd_all[i, :] = ctrl.w_cmd
        wMotor_all[i, :] = quad.wMotor
        thr_all[i, :] = quad.thr
        tor_all[i, :] = quad.tor

        i += 1

    end_time = time.time()
    print("Simulated {:.2f}s in {:.6f}s.".format(t, end_time - start_time))

    # View Results
    # ---------------------------

    # utils.fullprint(sDes_traj_all[:,3:6])
    utils.makeFigures(
        quad.params,
        t_all,
        pos_all,
        vel_all,
        quat_all,
        omega_all,
        euler_all,
        w_cmd_all,
        wMotor_all,
        thr_all,
        tor_all,
        sDes_traj_all,
        sDes_calc_all,
    )
    ani = utils.sameAxisAnimation(
        t_all, traj.wps, pos_all, quat_all, sDes_traj_all, Ts, quad.params, traj.xyzType, traj.yawType, ifsave
    )
    plt.show()
def tracking_and_kill_scenario(Ti,Ts,Tf,ctrlType,trajSelect,numTimeStep):
    pos_obs = np.array([[-10,-10,0]])
    quad0 = Quadcopter(Ti, Tf, ctrlType, trajSelect, numTimeStep, Ts, quad_id = 0, mode='ennemy', id_targ = -1, color = 'blue',  pos_ini = [0,0,-5],  pos_goal = [20,15,-20],  pos_obs = pos_obs)
    quad1 = Quadcopter(Ti, Ts*100, ctrlType, trajSelect, numTimeStep, Ts, quad_id = 1, mode='track', id_targ = 0, color = 'green', pos_ini = [5,0,0], pos_goal = [4,4,-10],  pos_obs = pos_obs)
    quads = [quad0, quad1]
    return pos_obs,quads
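# The main() below logs by np.vstack-ing each step onto growing arrays rather
# than preallocating as above; simpler, but the repeated copying is O(n^2) in
# the number of steps.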
def main():
    start_time = time.time()

    # Simulation Setup
    # ---------------------------
    Ti = 0
    Ts = 0.005
    Tf = 18
    ifsave = 0

    # Choose trajectory settings
    # ---------------------------
    ctrlOptions = ["xyz_pos", "xy_vel_z_pos", "xyz_vel"]
    trajSelect = np.zeros(3)

    # Select Control Type             (0: xyz_pos,                  1: xy_vel_z_pos,            2: xyz_vel)
    ctrlType = ctrlOptions[0]
    # Select Position Trajectory Type (0: hover,                    1: pos_waypoint_timed,      2: pos_waypoint_interp,
    #                                  3: minimum velocity          4: minimum accel,           5: minimum jerk,           6: minimum snap
    #                                  7: minimum accel_stop        8: minimum jerk_stop        9: minimum snap_stop
    #                                 10: minimum jerk_full_stop   11: minimum snap_full_stop
    trajSelect[0] = 6
    # Select Yaw Trajectory Type      (0: none                      1: yaw_waypoint_timed,      2: yaw_waypoint_interp     3: follow          4: zero)
    trajSelect[1] = 3
    # Select if waypoint time is used, or if average speed is used to calculate waypoint time   (0: waypoint time,   1: average speed)
    trajSelect[2] = 0
    print("Control type: {}".format(ctrlType))

    # Initialize Quadcopter, Controller, Wind, Result Matrices
    # ---------------------------
    quad = Quadcopter(Ti)
    traj = Trajectory(quad, ctrlType, trajSelect)
    ctrl = Control(quad, traj.yawType)
    wind = Wind('None', 2.0, 90, -15)

    # Trajectory for First Desired States
    # ---------------------------
    sDes = traj.desiredState(0, Ts, quad)

    # Generate First Commands
    # ---------------------------
    ctrl.controller(traj, quad, sDes, Ts)

    # Initialize Result Matrices
    # ---------------------------
    t_all = Ti
    s_all = quad.state.T
    pos_all = quad.pos.T
    vel_all = quad.vel.T
    quat_all = quad.quat.T
    omega_all = quad.omega.T
    euler_all = quad.euler.T
    sDes_traj_all = traj.sDes.T
    sDes_calc_all = ctrl.sDesCalc.T
    w_cmd_all = ctrl.w_cmd.T
    wMotor_all = quad.wMotor.T
    thr_all = quad.thr.T
    tor_all = quad.tor.T

    # Run Simulation
    # ---------------------------
    t = Ti
    i = 0
    while round(t, 3) < Tf:

        t = quad_sim(t, Ts, quad, ctrl, wind, traj)

        # print("{:.3f}".format(t))
        t_all = np.vstack((t_all, t))
        s_all = np.vstack((s_all, quad.state.T))
        pos_all = np.vstack((pos_all, quad.pos.T))
        vel_all = np.vstack((vel_all, quad.vel.T))
        quat_all = np.vstack((quat_all, quad.quat.T))
        omega_all = np.vstack((omega_all, quad.omega.T))
        euler_all = np.vstack((euler_all, quad.euler.T))
        sDes_traj_all = np.vstack((sDes_traj_all, traj.sDes.T))
        sDes_calc_all = np.vstack((sDes_calc_all, ctrl.sDesCalc.T))
        w_cmd_all = np.vstack((w_cmd_all, ctrl.w_cmd.T))
        wMotor_all = np.vstack((wMotor_all, quad.wMotor.T))
        thr_all = np.vstack((thr_all, quad.thr.T))
        tor_all = np.vstack((tor_all, quad.tor.T))
        i += 1

    end_time = time.time()
    print("Simulated {:.2f}s in {:.6f}s.".format(t, end_time - start_time))

    # View Results
    # ---------------------------

    # utils.fullprint(sDes_traj_all[:,3:6])
    utils.makeFigures(quad.params, t_all, pos_all, vel_all, quat_all,
                      omega_all, euler_all, w_cmd_all, wMotor_all, thr_all,
                      tor_all, sDes_traj_all, sDes_calc_all)
    ani = utils.sameAxisAnimation(t_all, traj.wps, pos_all, quat_all,
                                  sDes_traj_all, Ts, quad.params, traj.xyzType,
                                  traj.yawType, ifsave)
    plt.show()
ctrlType = ctrlOptions[1]
# Select Position Trajectory Type (0: hover,                    1: pos_waypoint_timed,      2: pos_waypoint_interp,
#                                  3: minimum velocity          4: minimum accel,           5: minimum jerk,           6: minimum snap
#                                  7: minimum accel_stop        8: minimum jerk_stop        9: minimum snap_stop
#                                 10: minimum jerk_full_stop   11: minimum snap_full_stop
#                                 12: pos_waypoint_arrived
trajSelect[0] = 6
# Select Yaw Trajectory Type      (0: none                      1: yaw_waypoint_timed,      2: yaw_waypoint_interp     3: follow          4: zero)
trajSelect[1] = 6
# Select if waypoint time is used, or if average speed is used to calculate waypoint time   (0: waypoint time,   1: average speed)
trajSelect[2] = 0
print("Control type: {}".format(ctrlType))

# Initialize Quadcopter, Controller, Wind, Result Matrices
# ---------------------------
quad = Quadcopter(Ti)
traj = Trajectory(quad, ctrlType, trajSelect)
ctrl = Control(quad, traj.yawType)
wind = Wind("None", 2.0, 90, -15)

# Trajectory for First Desired States
# ---------------------------
sDes = traj.desiredState(0, Ts, quad)  # np.zeros(19)

# Generate First Commands
# ---------------------------
ctrl.controller(traj, quad, sDes, Ts)

# Initialize Result Matrices
# ---------------------------
numTimeStep = int(Tf / Ts + 1)
# Example 18
class CustomEnv(gym.Env, ABC):
    def __init__(self):
        self.default_range = default_range = (-10, 10)
        self.velocity_range = velocity_range = (-10, 10)
        self.ang_velocity_range = ang_velocity_range = (-1, 1)
        self.q_range = qrange = (-0.3, 0.3)
        self.motor_range = motor_range = (400, 700)
        self.motor_d_range = motor_d_range = (-2000, 2000)
        self.observation_space_domain = {
            "x": default_range,
            "y": default_range,
            "z": default_range,
            "q0": (0.9, 1),
            "q1": qrange,
            "q2": qrange,
            "q3": qrange,
            "u": velocity_range,
            "v": velocity_range,
            "w": velocity_range,
            "p": ang_velocity_range,
            "q": ang_velocity_range,
            "r": ang_velocity_range,
            "wM1": motor_range,
            "wdotM1": motor_d_range,
            "wM2": motor_range,
            "wdotM2": motor_d_range,
            "wM3": motor_range,
            "wdotM3": motor_d_range,
            "wM4": motor_range,
            "wdotM4": motor_d_range,
            "deltacol_p": motor_range,
            "deltalat_p": motor_range,
            "deltalon_p": motor_range,
            "deltaped_p": motor_range,
        }
        self.states_str = list(self.observation_space_domain.keys())
        #  observation space to [-1,1]
        self.low_obs_space = np.array(tuple(
            zip(*self.observation_space_domain.values()))[0],
                                      dtype=float) * 0 - 1
        self.high_obs_space = np.array(tuple(
            zip(*self.observation_space_domain.values()))[1],
                                       dtype=float) * 0 + 1
        self.observation_space = spaces.Box(low=self.low_obs_space,
                                            high=self.high_obs_space,
                                            dtype=float)
        self.default_act_range = default_act_range = motor_range
        self.action_space_domain = {
            "deltacol": default_act_range,
            "deltalat": default_act_range,
            "deltalon": default_act_range,
            "deltaped": default_act_range,
            # "f1": (0.1, 5), "f2": (0.5, 20), "f3": (0.5, 20), "f4": (0.5, 10),
            # "lambda1": (0.5, 10), "lambda2": (0.1, 5), "lambda3": (0.1, 5), "lambda4": (0.1, 5),
            # "eta1": (0.2, 5), "eta2": (0.1, 5), "eta3": (0.1, 5), "eta4": (0.1, 5),
        }
        self.low_action = np.array(tuple(
            zip(*self.action_space_domain.values()))[0],
                                   dtype=float)
        self.high_action = np.array(tuple(
            zip(*self.action_space_domain.values()))[1],
                                    dtype=float)
        self.low_action_space = self.low_action * 0 - 1
        self.high_action_space = self.high_action * 0 + 1
        self.action_space = spaces.Box(low=self.low_action_space,
                                       high=self.high_action_space,
                                       dtype=float)
        self.min_reward = -11
        self.high_action_diff = 0.2
        obs_header = str(list(self.observation_space_domain.keys()))[1:-1]
        act_header = str(list(self.action_space_domain.keys()))[1:-1]
        self.header = "time, " + act_header + ", " + obs_header + ", reward" + ", control reward"
        self.saver = save_files()
        self.reward_array = np.array((0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0),
                                     dtype=float)
        self.constant_dict = {
            "u": 0.0,
            "v": 0.0,
            "w": 0.0,
            "p": 0.0,
            "q": 0.0,
            "r": 0.0,
            "fi": 0.0,
            "theta": 0.0,
            "si": 0.0,
            "x": 0.0,
            "y": 0.0,
            "z": 0.0,
            "a": 0.0,
            "b": 0.0,
            "c": 0.0,
            "d": 0.0,
        }
        self.start_time = time.time()
        self.Ti = 0
        self.Ts = 0.005
        self.Tf = 5
        self.reward_check_time = 0.7 * self.Tf
        self.ifsave = 0
        self.ctrlOptions = ["xyz_pos", "xy_vel_z_pos", "xyz_vel"]
        self.trajSelect = np.zeros(3)
        self.ctrlType = self.ctrlOptions[0]
        self.trajSelect[0] = 0
        self.trajSelect[1] = 4
        self.trajSelect[2] = 0
        self.quad = Quadcopter(self.Ti)
        self.traj = Trajectory(self.quad, self.ctrlType, self.trajSelect)
        self.ctrl = Control(self.quad, self.traj.yawType)
        self.wind = Wind("None", 2.0, 90, -15)
        self.numTimeStep = int(self.Tf / self.Ts + 1)
        self.longest_num_step = 0
        self.best_reward = float("-inf")
        self.diverge_counter = 0
        self.diverge_list = []

    def reset(self):
        #  initialization
        self.t = 0
        self.sDes = self.traj.desiredState(self.t, self.Ts, self.quad)
        self.ctrl.controller(self.traj, self.quad, self.sDes, self.Ts)
        self.control_input = self.ctrl.w_cmd.copy()
        (
            self.t_all,
            self.s_all,
            self.pos_all,
            self.vel_all,
            self.quat_all,
            self.omega_all,
            self.euler_all,
            self.sDes_traj_all,
            self.sDes_calc_all,
            self.w_cmd_all,
            self.wMotor_all,
            self.thr_all,
            self.tor_all,
        ) = init_data(self.quad, self.traj, self.ctrl, self.numTimeStep)
        self.all_actions = np.zeros(
            (self.numTimeStep, len(self.high_action_space)))
        self.control_rewards = np.zeros((self.numTimeStep, 1))
        self.all_rewards = np.zeros((self.numTimeStep, 1))
        self.t_all[0] = self.Ti
        observation = self.quad.state.copy()
        observation = np.concatenate((observation, self.control_input), axis=0)
        self.pos_all[0, :] = self.quad.pos
        self.vel_all[0, :] = self.quad.vel
        self.quat_all[0, :] = self.quad.quat
        self.omega_all[0, :] = self.quad.omega
        self.euler_all[0, :] = self.quad.euler
        self.sDes_traj_all[0, :] = self.traj.sDes
        self.sDes_calc_all[0, :] = self.ctrl.sDesCalc
        self.w_cmd_all[0, :] = self.control_input
        self.wMotor_all[0, :] = self.quad.wMotor
        self.thr_all[0, :] = self.quad.thr
        self.tor_all[0, :] = self.quad.tor
        self.counter = 1
        self.save_counter = 0
        self.find_next_state()
        self.jj = 0
        # self.quad.state[0:12] = self.initial_states = list((np.random.rand(12) * 0.02 - 0.01))
        self.quad.state[0:12] = [0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0]
        self.done = False
        for iii in range(len(self.high_obs_space)):
            current_range = self.observation_space_domain[self.states_str[iii]]
            observation[iii] = 2 * (observation[iii] - current_range[0]) / (
                current_range[1] - current_range[0]) - 1
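        # affine map per state: x_norm = 2*(x - lo)/(hi - lo) - 1, so e.g. a
        # motor speed of 550 with motor_range (400, 700) normalizes to 0.0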
        self.s_all[0, :] = observation
        self.integral_error = 0
        return list(observation)

    def action_wrapper(self, current_action: list) -> None:
        current_action = np.clip(current_action, -1, 1)
        self.normalized_actions = current_action
        self.control_input = ((current_action + 1) *
                              (self.high_action - self.low_action) / 2 +
                              self.low_action)
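        # inverse of the observation scaling: an action of 0.0 maps back to the
        # middle of default_act_range, e.g. (0 + 1) * (700 - 400) / 2 + 400 = 550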

    def find_next_state(self) -> list:
        self.quad.update(self.t, self.Ts, self.control_input,
                         self.wind)  # self.control_input = self.ctrl.w_cmd
        # self.quad.update(self.t, self.Ts, self.ctrl.w_cmd, self.wind)
        self.t += self.Ts
        # self.control_input = self.ctrl.w_cmd.copy()
        self.sDes = self.traj.desiredState(self.t, self.Ts, self.quad)
        # print('state', self.quad.state)
        # print(' ctrl', self.ctrl.w_cmd)
        self.ctrl.controller(self.traj, self.quad, self.sDes, self.Ts)

    def observation_function(self) -> list:
        i = self.counter
        quad = self.quad
        self.t_all[i] = self.t
        self.s_all[i, :] = np.concatenate((quad.state, self.control_input),
                                          axis=0)
        self.pos_all[i, :] = quad.pos
        self.vel_all[i, :] = quad.vel
        self.quat_all[i, :] = quad.quat
        self.omega_all[i, :] = quad.omega
        self.euler_all[i, :] = quad.euler
        self.sDes_traj_all[i, :] = self.traj.sDes
        self.sDes_calc_all[i, :] = self.ctrl.sDesCalc
        self.w_cmd_all[i, :] = self.control_input
        self.wMotor_all[i, :] = quad.wMotor
        self.thr_all[i, :] = quad.thr
        self.tor_all[i, :] = quad.tor
        observation = quad.state.copy()
        observation = np.concatenate((observation, self.control_input), axis=0)
        for iii in range(len(self.high_obs_space)):
            current_range = self.observation_space_domain[self.states_str[iii]]
            observation[iii] = 2 * (observation[iii] - current_range[0]) / (
                current_range[1] - current_range[0]) - 1
        self.s_all[i, :] = observation
        return list(observation)

    def reward_function(self, observation) -> float:
        # add reward slope to the reward
        # TODO: normalizing reward
        # TODO: adding reward gap
        # error_x = np.linalg.norm((abs(self.quad.state[0:3]).reshape(12)), 1)
        # error_ang = np.linalg.norm((abs(self.quad.state[3:7]).reshape(12)), 1)
        # error_v = np.linalg.norm((abs(self.quad.state[7:10]).reshape(12)), 1)
        # error_vang = np.linalg.norm((abs(self.quad.state[10:12]).reshape(12)), 1)
        # error = -np.linalg.norm((abs(self.s_all[self.counter, 0:12]).reshape(12)), 1) + 0.05
        error = 10 * (-np.linalg.norm(observation[0:12], 1) + 1)
        self.control_rewards[self.counter] = error
        self.integral_error = 0.1 * (
            0.99 * self.control_rewards[self.counter - 1] +
            0.99 * self.integral_error)
        reward = error.copy()
        reward += self.integral_error
        reward += -self.min_reward / self.numTimeStep
        reward -= 0.000001 * sum(
            abs(self.control_input - self.w_cmd_all[self.counter - 1, :]))
        reward += -0.0001 * np.linalg.norm(self.normalized_actions, 2)
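        # reward so far: scaled L1 tracking term, a leaky integral of the
        # previous reward, a per-step survival bonus (-min_reward/numTimeStep),
        # and small penalties on command changes and action magnitude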
        self.all_rewards[self.counter] = reward
        # for i in range(12):
        #     self.reward_array[i] = abs(self.current_states[i]) / self.default_range[1]
        # reward = self.all_rewards[self.counter] = -sum(self.reward_array) + 0.17 / self.default_range[1]
        # # control reward
        # reward += 0.05 * float(
        #     self.control_rewards[self.counter] - self.control_rewards[self.counter - 1]
        # )  # control slope
        # reward += -0.005 * sum(abs(self.all_actions[self.counter]))  # input reward
        # for i in (self.high_action_diff - self.all_actions[self.counter] - self.all_actions[self.counter - 1]) ** 2:
        #     reward += -min(0, i)
        if self.counter > 100:
            print(reward)
        return reward

    def check_diverge(self) -> bool:
        for i in range(len(self.high_obs_space)):
            if (abs(self.s_all[self.counter, i])) > self.high_obs_space[i]:
                self.diverge_list.append(
                    (tuple(self.observation_space_domain.keys())[i],
                     self.quad.state[i]))
                self.saver.diverge_save(
                    tuple(self.observation_space_domain.keys())[i],
                    self.quad.state[i])
                self.diverge_counter += 1
                if self.diverge_counter == 2000:
                    self.diverge_counter = 0
                    print((tuple(self.observation_space_domain.keys())[i],
                           self.quad.state[i]))
                self.jj = 1
                return True
        if self.counter >= self.numTimeStep - 1:  # number of timesteps
            print("successful")
            return True
        # after self.reward_check_time it checks whether or not the reward is decreasing
        # if self.counter > self.reward_check_time / self.Ts:
        #     prev_time = int(self.counter - self.reward_check_time / self.Ts)
        #     diverge_criteria = (
        #         self.all_rewards[self.counter] - sum(self.all_rewards[prev_time:prev_time - 10]) / 10
        #     )
        #     if diverge_criteria < -1:
        #         print("reward_diverge")
        #         self.jj = 1
        #         return True
        bool_1 = any(np.isnan(self.quad.state))
        bool_2 = any(np.isinf(self.quad.state))
        if bool_1 or bool_2:
            self.jj = 1
            print("state_inf_nan_diverge")
        return False

    def done_jobs(self) -> None:
        counter = self.counter
        self.save_counter += 1
        current_total_reward = sum(self.all_rewards)
        if self.save_counter >= 100:
            print("current_total_reward: ", current_total_reward)
            self.save_counter = 0
            self.saver.reward_step_save(self.best_reward,
                                        self.longest_num_step,
                                        current_total_reward, counter)
        if counter >= self.longest_num_step:
            self.longest_num_step = counter
        if current_total_reward >= self.best_reward:
            self.best_reward = sum(self.all_rewards)
            ii = self.counter + 1
            self.saver.best_reward_save(
                self.t_all[0:ii],
                self.w_cmd_all[0:ii],
                self.s_all[0:ii],
                self.all_rewards[0:ii],
                self.control_rewards[0:ii],
                self.header,
            )

    def step(self, current_action):
        self.action_wrapper(current_action)
        try:
            self.find_next_state()
        except (OverflowError, ValueError, IndexError):
            self.jj = 1
        observation = self.observation_function()
        reward = self.reward_function(observation)
        self.done = self.check_diverge()
        if self.jj == 1:
            reward = self.min_reward
        if self.done:
            self.done_jobs()

        self.counter += 1
        # self.make_constant(list(self.constant_dict.values()))
        return list(observation), float(reward), self.done, {}

    def make_constant(self, true_list):
        for i in range(len(true_list)):
            if i == 1:
                self.quad.state[i] = self.initial_states[i]
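# Standalone round-trip check of the scaling used by CustomEnv (a sketch with
# illustrative values only; motor_range = (400, 700) as in __init__):
if __name__ == "__main__":
    lo, hi = 400.0, 700.0
    raw = 550.0
    norm = 2 * (raw - lo) / (hi - lo) - 1      # observation scaling -> 0.0
    back = (norm + 1) * (hi - lo) / 2 + lo     # action_wrapper inverse -> 550.0
    assert abs(back - raw) < 1e-9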
def main():
    start_time = time.time()

    # Simulation Setup
    # ---------------------------
    Ti = 0
    Ts = 0.005
    Tf = 95
    ifsave = 0

    # Choose trajectory settings
    # ---------------------------
    ctrlOptions = ["xyz_pos", "xy_vel_z_pos", "xyz_vel"]
    trajSelect = np.zeros(3)

    # Select Control Type             (0: xyz_pos,       1: xy_vel_z_pos,            2: xyz_vel)
    ctrlType = ctrlOptions[0]
    # Select Position Trajectory Type (0: hover,         1: pos_waypoint_timed,      2: pos_waypoint_arrived
    trajSelect[0] = 2
    # Select Yaw Trajectory Type      (0: none,          1: yaw_waypoint_timed,      2: yaw_waypoint_interp,       3: zero,       4: Follow)
    trajSelect[1] = 4
    # Select if waypoint time is used, or if average speed is used to calculate waypoint time   (0: waypoint time,   1: average speed)
    trajSelect[2] = 0
    print("Control type: {}".format(ctrlType))

    # Initialize Quadcopter, Controller, Wind, Result Matrices
    # ---------------------------
    quad = Quadcopter(Ti)
    traj = Trajectory(quad, ctrlType, trajSelect)
    potfld = PotField(1)
    ctrl = Control(quad, traj.yawType)
    wind = Wind('None', 2.0, 90, -15)

    # Trajectory for First Desired States
    # ---------------------------
    traj.desiredState(0, Ts, quad)

    # First Potential Field Calculation
    # ---------------------------
    potfld.isWithinRange(quad)
    potfld.isWithinField(quad)
    potfld.rep_force(quad, traj)

    # Generate First Commands
    # ---------------------------
    ctrl.controller(traj, quad, potfld, Ts)

    # Initialize Result Matrices
    # ---------------------------
    numTimeStep = int(Tf / Ts + 1)

    t_all = np.zeros(numTimeStep)
    s_all = np.zeros([numTimeStep, len(quad.state)])
    pos_all = np.zeros([numTimeStep, len(quad.pos)])
    vel_all = np.zeros([numTimeStep, len(quad.vel)])
    quat_all = np.zeros([numTimeStep, len(quad.quat)])
    omega_all = np.zeros([numTimeStep, len(quad.omega)])
    euler_all = np.zeros([numTimeStep, len(quad.euler)])
    sDes_traj_all = np.zeros([numTimeStep, len(traj.sDes)])
    sDes_calc_all = np.zeros([numTimeStep, len(ctrl.sDesCalc)])
    w_cmd_all = np.zeros([numTimeStep, len(ctrl.w_cmd)])
    wMotor_all = np.zeros([numTimeStep, len(quad.wMotor)])
    thr_all = np.zeros([numTimeStep, len(quad.thr)])
    tor_all = np.zeros([numTimeStep, len(quad.tor)])
    notInRange_all = np.zeros([numTimeStep, potfld.num_points], dtype=bool)
    inRange_all = np.zeros([numTimeStep, potfld.num_points], dtype=bool)
    inField_all = np.zeros([numTimeStep, potfld.num_points], dtype=bool)
    minDist_all = np.zeros(numTimeStep)
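    # per-step boolean masks over the potfld.num_points obstacle points: out of
    # sensing range, in range but outside the repulsive field, or inside the
    # field; minDist_all logs the closest obstacle distance each step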

    t_all[0] = Ti
    s_all[0, :] = quad.state
    pos_all[0, :] = quad.pos
    vel_all[0, :] = quad.vel
    quat_all[0, :] = quad.quat
    omega_all[0, :] = quad.omega
    euler_all[0, :] = quad.euler
    sDes_traj_all[0, :] = traj.sDes
    sDes_calc_all[0, :] = ctrl.sDesCalc
    w_cmd_all[0, :] = ctrl.w_cmd
    wMotor_all[0, :] = quad.wMotor
    thr_all[0, :] = quad.thr
    tor_all[0, :] = quad.tor
    notInRange_all[0, :] = potfld.notWithinRange
    inRange_all[0, :] = potfld.inRangeNotField
    inField_all[0, :] = potfld.withinField
    minDist_all[0] = potfld.distanceMin

    # Run Simulation
    # ---------------------------
    t = Ti
    i = 1
    while round(t, 3) < Tf:

        t = quad_sim(t, Ts, quad, ctrl, wind, traj, potfld)

        # print("{:.3f}".format(t))
        t_all[i] = t
        s_all[i, :] = quad.state
        pos_all[i, :] = quad.pos
        vel_all[i, :] = quad.vel
        quat_all[i, :] = quad.quat
        omega_all[i, :] = quad.omega
        euler_all[i, :] = quad.euler
        sDes_traj_all[i, :] = traj.sDes
        sDes_calc_all[i, :] = ctrl.sDesCalc
        w_cmd_all[i, :] = ctrl.w_cmd
        wMotor_all[i, :] = quad.wMotor
        thr_all[i, :] = quad.thr
        tor_all[i, :] = quad.tor
        notInRange_all[i, :] = potfld.notWithinRange
        inRange_all[i, :] = potfld.inRangeNotField
        inField_all[i, :] = potfld.withinField
        minDist_all[i] = potfld.distanceMin

        i += 1

    end_time = time.time()
    print("Simulated {:.2f}s in {:.6f}s.".format(t, end_time - start_time))

    # View Results
    # ---------------------------

    def figures():
        utils.makeFigures(quad.params, t_all, pos_all, vel_all, quat_all,
                          omega_all, euler_all, w_cmd_all, wMotor_all, thr_all,
                          tor_all, sDes_traj_all, sDes_calc_all, potfld,
                          minDist_all)
        plt.show()

    utils.third_PV_animation(t_all, traj.wps, pos_all, quat_all, euler_all,
                             sDes_traj_all, Ts, quad.params, traj.xyzType,
                             traj.yawType, potfld, notInRange_all, inRange_all,
                             inField_all, ifsave, figures)
# Example 20
def main():

    start_time = time.time()

    # Import the simulation setup
    # ---------------------------
    config = simConfig.config()

    # Initialize wind (untested)
    # --------------------------
    wind = Wind('None', 2.0, 90, -15)

    # Initialize list for objects
    # ---------------------------
    numTimeStep = int(config.Tf / config.Ts + 1)
    quadList = []
    trajList = []
    ctrlList = []
    sDesList = []
    obsPFList = []
    myDataList = []

    # For the number of vehicles
    # --------------------------
    for objectIndex in range(0, config.nVeh):

        quadList.append(Quadcopter(config))
        trajList.append(
            Trajectory(quadList[objectIndex], config.ctrlType,
                       config.trajSelect, config))
        ctrlList.append(
            Control(quadList[objectIndex], trajList[objectIndex].yawType))
        sDesList.append(trajList[objectIndex].desiredState(
            0, config.Ts, quadList[objectIndex]))
        obsPFList.append(
            pf(trajList[objectIndex],
               np.vstack((config.o1, config.o2, config.o3,
                          quadList[objectIndex - 1].state[0:3])).transpose(),
               gamma=config.gamma,
               eta=config.eta,
               obsRad=config.obsRad,
               obsRange=config.obsRange))
        # generate first command
        ctrlList[objectIndex].controller(trajList[objectIndex],
                                         quadList[objectIndex],
                                         sDesList[objectIndex], config)

    # Create learning object
    # ---------------------------
    fala = falaObj(config)

    # Initialize Result Matrices
    # ---------------------------
    for objectIndex in range(0, config.nVeh):
        # initialize result matrices
        myDataList.append(
            collect.quadata(quadList[objectIndex], trajList[objectIndex],
                            ctrlList[objectIndex], fala, config.Ti,
                            numTimeStep))

    # Run Simulation
    # ---------------------------
    t = config.Ti * np.ones(config.nVeh)

    i = 1
    while round(t[0], 3) < config.Tf:

        # Update the obstacle positions
        # -----------------------------
        if t[0] > 0.1:

            # recall these, as they could move with time
            o1 = config.o1  # np.array([-2.1, 0, -3],)           # obstacle 1 (x,y,z)
            o2 = config.o2  # np.array([2, -1.2, 0.9])           # obstacle 2 (x,y,z)
            o3 = config.o3  # np.array([0, 2.5, -2.5])           # obstacle 2 (x,y,z)

            # if just one vehicle
            if config.nVeh == 1:
                obsPFList[0].Po = np.vstack((o1, o2, o3)).transpose()

            # if two vehicles, make sure to avoid other dudeBot
            #if config.nVeh == 2:
            #    obsPFList[0].Po = np.vstack((o1,o2,o3,quadList[1].state[0:3])).transpose()
            #    obsPFList[1].Po = np.vstack((o1,o2,o3,quadList[0].state[0:3])).transpose()

            # if more than one vehicle, stack every other dudeBot's position so
            # each vehicle avoids all of the others
            for io in range(0, config.nVeh):
                others = [quadList[jo].state[0:3]
                          for jo in range(config.nVeh) if jo != io]
                obsPFList[io].Po = np.vstack([o1, o2, o3] + others).transpose()

        # Integrate through the dynamics
        # ------------------------------
        for objectIndex in range(0, config.nVeh):

            t[objectIndex] = quad_sim(t[objectIndex], config.Ts,
                                      quadList[objectIndex],
                                      ctrlList[objectIndex], wind,
                                      trajList[objectIndex], fala,
                                      obsPFList[objectIndex], config)
            myDataList[objectIndex].collect(t[objectIndex],
                                            quadList[objectIndex],
                                            trajList[objectIndex],
                                            ctrlList[objectIndex], fala, i)

        i += 1

    end_time = time.time()
    print("Simulated {:.2f}s in {:.6f}s.".format(t[0], end_time - start_time))

    # View Results
    # ---------------------------
    if config.ifsave:

        # make figures
        utils.makeFigures(quadList[0].params, myDataList[0])

        # make animation (this should be generalized later)
        if config.ifsaveplots:
            if config.nVeh == 1:
                ani = sameAxisAnimation(config,
                                        myDataList[0],
                                        trajList[0],
                                        quadList[0].params,
                                        obsPFList[0],
                                        myColour='blue')
            #if config.nVeh == 2:
            #    ani = sameAxisAnimation2(config, myDataList[0], trajList[0], quadList[0].params, myDataList[1], trajList[1], quadList[1].params, obsPFList[0], 'blue', 'green')
            if config.nVeh > 1:
                ani = sameAxisAnimationN(config, myDataList, trajList,
                                         quadList, obsPFList[0], 'blue')

        plt.show()

        # dump the learned parameters
        if config.doLearn:
            np.savetxt("Data/Qtable.csv",
                       fala.Qtable,
                       delimiter=",",
                       header=" ")
            np.savetxt("Data/errors.csv",
                       myDataList[0].falaError_all,
                       delimiter=",",
                       header=" ")

        # save energy draw
        torque0 = myDataList[0].tor_all
        eDraw0 = np.cumsum(np.cumsum(torque0, axis=0), axis=1)[:, 3]
        np.save('Data/energyDepletion_veh0', eDraw0)
        if config.nVeh == 2:
            torque1 = myDataList[1].tor_all
            eDraw1 = np.cumsum(np.cumsum(torque1, axis=0), axis=1)[:, 3]
            np.save('Data/energyDepletion_veh1', eDraw1)

        # save the data
        if config.ifsavedata:
            #save states
            x = myDataList[0].pos_all[:, 0]
            y = myDataList[0].pos_all[:, 1]
            z = myDataList[0].pos_all[:, 2]
            x_sp = myDataList[0].sDes_calc_all[:, 0]
            y_sp = myDataList[0].sDes_calc_all[:, 1]
            z_sp = myDataList[0].sDes_calc_all[:, 2]
            states0 = np.vstack([x, y, z, x_sp, y_sp, z_sp]).transpose()
            np.save('Data/states0', states0)
            if config.nVeh == 2:
                x = myDataList[1].pos_all[:, 0]
                y = myDataList[1].pos_all[:, 1]
                z = myDataList[1].pos_all[:, 2]
                x_sp = myDataList[1].sDes_calc_all[:, 0]
                y_sp = myDataList[1].sDes_calc_all[:, 1]
                z_sp = myDataList[1].sDes_calc_all[:, 2]
                states1 = np.vstack([x, y, z, x_sp, y_sp, z_sp]).transpose()
                np.save('Data/states1', states1)