Example 1
def test_action_encode():
    # Round-trip check: decoding the encoded joint action recovers both players' actions.
    env = SoccerEnv()
    action1, action2 = 1, 2
    x = env.encode_action(action1, action2)
    assert (action1, action2) == env.decode_action(x)
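
The test only relies on encode_action and decode_action being inverses of each other. A minimal stand-alone sketch of one way such a pairing could work (the helper names and the count of five single-agent actions are assumptions for illustration; SoccerEnv's own implementation is not shown here):

# Hypothetical stand-alone pairing; SoccerEnv's actual methods may differ.
NUM_SINGLE_AGENT_ACTIONS = 5  # assumption: e.g. N, S, E, W, stick

def encode_action(a1, a2, n=NUM_SINGLE_AGENT_ACTIONS):
    # Flatten the two players' actions into one joint index in [0, n * n).
    return a1 * n + a2

def decode_action(x, n=NUM_SINGLE_AGENT_ACTIONS):
    # Inverse of encode_action: recover (a1, a2) from the joint index.
    return divmod(x, n)

With such a pairing, the joint action space sampled in the training loop below would have n * n entries.
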
# Absolute Q-value errors at the reference state-action pair, collected for plotting
Foe_Q_P1_Q_errors = []

for i_episode in range(n_episodes_MAX):
    state = env.reset()

    # Reference Q-value before this episode's updates, used to measure the error below.
    P1_Q_ref = Foe_Q_agent.Q[ref_state, ref_P1_action, ref_P2_action]

    for t in range(steps_MAX):
        # Off-policy exploration: sample a joint action uniformly at random.
        joint_action = np.random.randint(num_actions)

        # Take action A, observe R, S'
        state_new, reward, done, info = env.step(joint_action)

        # Update Q: decode the joint action and reward, then let P1's Foe-Q agent learn.
        P1_action, P2_action = env.decode_action(joint_action)
        P1_reward, P2_reward = env.decode_reward(state, reward)
        Foe_Q_agent.learn(P1_reward, state, state_new, P1_action, P2_action)

        state = state_new

        if done:
            # if verbose:
            #     print("Episode finished after {} timesteps".format(t + 1))
            break

    # Record the absolute change in the reference Q-value over this episode.
    Foe_Q_P1_Q_errors.append(
        np.abs(Foe_Q_agent.Q[ref_state, ref_P1_action, ref_P2_action] -
               P1_Q_ref))
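
The Foe_Q_agent.learn call above is where the Foe-Q (minimax-Q) update happens; its internals are not shown in this cell. As a rough, self-contained sketch of such an update, assuming Q is indexed as Q[state, my_action, opponent_action] and using scipy's linear-programming solver for the maximin value (the helper names, learning rate, and discount below are illustrative assumptions, not the notebook's implementation):

# Hypothetical sketch of a Foe-Q style update; not the notebook's Foe_Q_agent internals.
import numpy as np
from scipy.optimize import linprog

def maximin_value(Q_s):
    """Maximin value of the one-stage game Q_s[my_action, opponent_action]."""
    n_me, n_opp = Q_s.shape
    # Decision variables: my mixed strategy pi (n_me entries) followed by the value v.
    c = np.zeros(n_me + 1)
    c[-1] = -1.0                                  # maximize v  <=>  minimize -v
    # For every opponent action o:  v - sum_a pi[a] * Q_s[a, o] <= 0
    A_ub = np.hstack([-Q_s.T, np.ones((n_opp, 1))])
    b_ub = np.zeros(n_opp)
    A_eq = np.append(np.ones(n_me), 0.0).reshape(1, -1)   # pi sums to 1
    b_eq = np.array([1.0])
    bounds = [(0.0, 1.0)] * n_me + [(None, None)]          # v is unbounded
    res = linprog(c, A_ub=A_ub, b_ub=b_ub, A_eq=A_eq, b_eq=b_eq, bounds=bounds)
    return res.x[-1]

def foe_q_update(Q, state, state_new, a, o, reward, alpha=0.1, gamma=0.9):
    """One TD step toward reward + gamma * (maximin value of the next state)."""
    target = reward + gamma * maximin_value(Q[state_new])
    Q[state, a, o] += alpha * (target - Q[state, a, o])

The LP maximizes the game value v of P1's mixed strategy subject to v being no better than the expected payoff against every opponent action, which is the maximin value Foe-Q bootstraps from.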