gui=True,
            record=True)
    elif ARGS.exp.split("-")[1] == 'payloadcoop':
        test_env = PayloadCoop(
            num_drones=NUM_DRONES,
            aggregate_phy_steps=shared_constants.AGGR_PHY_STEPS,
            obs=OBS,
            act=ACT,
            gui=True,
            record=True)
    else:
        print("[ERROR] environment not yet implemented")
        exit()

    #### Show, record a video, and log the model's performance #
    obs = test_env.reset()
    logger = Logger(logging_freq_hz=int(test_env.SIM_FREQ /
                                        test_env.AGGR_PHY_STEPS),
                    num_drones=NUM_DRONES)
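    #### Initialize a zero action of the right dimensionality #
    #### per drone: 1 value for the one-dimensional action types,
    #### 4 for RPM/DYN/VEL/XYZ_YAW, 3 for PID/XY_YAW, and a
    #### single integer for JOYSTICK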
    if ACT in [
            ActionType.ONE_D_RPM, ActionType.ONE_D_DYN, ActionType.ONE_D_PID
    ]:
        action = {i: np.array([0]) for i in range(NUM_DRONES)}
    elif ACT in [
            ActionType.RPM, ActionType.DYN, ActionType.VEL, ActionType.XYZ_YAW
    ]:
        action = {i: np.array([0, 0, 0, 0]) for i in range(NUM_DRONES)}
    elif ACT in [ActionType.PID, ActionType.XY_YAW]:
        action = {i: np.array([0, 0, 0]) for i in range(NUM_DRONES)}
    elif ACT in [ActionType.JOYSTICK]:
        action = {i: 0 for i in range(NUM_DRONES)}
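    # NOTE: fallback guard, not part of the original excerpt; it mirrors
    # the environment-dispatch error handling above
    else:
        print("[ERROR] unknown ActionType")
        exit()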
Example #2
                                    record=False
                                    )
elif ARGS.exp.split("-")[1] == 'meetup':
    test_env = MeetupAviary(num_drones=NUM_DRONES,
                            aggregate_phy_steps=shared_constants.AGGR_PHY_STEPS,
                            obs=OBS,
                            act=ACT,
                            gui=True,
                            record=True
                            )
else:
    print("[ERROR] environment not yet implemented")
    exit()

#### Show, record a video, and log the model's performance #
obs = test_env.reset()
logger = Logger(logging_freq_hz=int(test_env.SIM_FREQ/test_env.AGGR_PHY_STEPS),
                num_drones=NUM_DRONES
                )
if ACT in [ActionType.ONE_D_RPM, ActionType.ONE_D_DYN, ActionType.ONE_D_PID]:
    action = {i: np.array([0]) for i in range(NUM_DRONES)}
elif ACT in [ActionType.RPM, ActionType.DYN, ActionType.VEL]:
    action = {i: np.array([0, 0, 0, 0]) for i in range(NUM_DRONES)}
elif ACT == ActionType.PID:
    action = {i: np.array([0, 0, 0]) for i in range(NUM_DRONES)}
else:
    print("[ERROR] unknown ActionType")
    exit()
start = time.time()
for i in range(6*int(test_env.SIM_FREQ/test_env.AGGR_PHY_STEPS)): # Up to 6 seconds
    #### Deploy the policies ###################################
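    # (The excerpt is truncated here; a typical body, mirroring Example #3
    # below and assuming a restored RLlib agent named `agent` with one
    # policy per drone ("pol0", "pol1", ...), would be:)
    # for j in range(NUM_DRONES):
    #     action[j] = agent.get_policy('pol' + str(j)).compute_single_action(obs[j])[0]
    # obs, reward, done, info = test_env.step(action)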
Example #3
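    #### Restore the trained agent from the checkpoint file ###########################################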
    agent.restore(ARGS.file)

    #### Extract and print policies ####################################################################
    policy0 = agent.get_policy("pol0")
    policy1 = agent.get_policy("pol1")
    print(policy0.model.action_model)
    print(policy0.model.value_model)
    print(policy1.model.action_model)
    print(policy1.model.value_model)

    #### Create test environment ########################################################################
    env = FlockAviary(num_drones=ARGS.num_drones,
                      gui=True,
                      record=True,
                      obstacles=True)
    obs = env.reset()
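    #### Start both drones from zero actions (4 RPM values each) ######################################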
    action = {i: np.array([0, 0, 0, 0]) for i in range(ARGS.num_drones)}
    start = time.time()
    for i in range(10 * env.SIM_FREQ):

        #### Deploy the policies ###########################################################################
        # print("Debug Obs", obs)
        temp = {}
        temp[0] = policy0.compute_single_action(
            np.hstack([obs[0], obs[1], action[1]]))
        temp[1] = policy1.compute_single_action(
            np.hstack([obs[1], obs[0], action[0]]))
        # print("Debug Act", temp)
        action = {0: temp[0][0], 1: temp[1][0]}
        obs, reward, done, info = env.step(action)
        env.render()
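        # (Excerpt ends here; the full script would typically throttle the loop
        # to real time and close the environment, e.g. with the repo's sync()
        # utility:)
        # sync(i, start, env.TIMESTEP)
    # env.close()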