Example #1
def collect_stats(agent: DQNAgent, n_games=1000):
    MAX_STEPS = 1000
    lengths = []
    looped = 0
    for i in range(1, n_games+1):
        env = gym.make('snake-v0')
        # env.__init__(human_mode=False)
        observation = env.reset()
        done = False
        steps = 0
        agent.epsilon = 0.0  # evaluate the greedy policy (no exploration)
        state = agent.get_last_observations(observation)
        while not done and steps < MAX_STEPS:
            action = agent.act(state)
            next_observation, _, done, _ = env.step(action)
            state = agent.get_last_observations(next_observation)
            steps += 1

        if steps == MAX_STEPS:
            # hitting the step cap is treated as the agent being stuck in a loop
            looped += 1
        else:
            lengths.append(len(env.game.snake.body))

        if i % (n_games // 10) == 0:
            avg_len = sum(lengths) / max(len(lengths), 1)  # guard against an empty list
            print(f"Avg len: {avg_len:.2f}, looped {looped}/{i}")
Example #2
def watch_agent(agent: DQNAgent):
    env = gym.make('snake-v0')
    env.__init__(human_mode=True)
    observation = env.reset()
    renderer = Renderer(env.game)
    try:
        done = False
        steps = 0
        agent.epsilon = 0.0  # watch the greedy policy, no exploration
        state = agent.get_last_observations(observation)
        while not done:
            # time.sleep(0.001)
            renderer.render_frame()
            action = agent.act(state)
            next_observation, _, done, _ = env.step(action)
            state = agent.get_last_observations(next_observation)
            steps += 1
    finally:
        renderer.close_window()
    print(f"Snake length: {len(env.game.snake.body)}")
    print(f"Simulation ended after {steps} steps.")
Example #3
    target_network = DQNAgent(state_shape, n_actions).to(device)
    target_network.load_state_dict(agent.state_dict())  # start as an exact copy of the online network
    opt = torch.optim.Adam(agent.parameters(), lr=1e-4)
    exp_replay = ReplayBuffer(buffer_size)

    # Pre-fill the replay buffer before training; `state` is assumed to hold an
    # initial observation from an env.reset() earlier in the omitted code.
    print('test_buffer')
    for i in range(100):
        play_and_record(state, agent, env, exp_replay, n_steps=10**2)
        if len(exp_replay) == buffer_size:
            break
    print(len(exp_replay))

    state = env.reset()
    # `step` is assumed to be initialised earlier in the omitted code (0 for a fresh run)
    for step in trange(step, total_steps + 1):

        agent.epsilon = linear_decay(init_epsilon, final_epsilon, step,
                                     decay_steps)
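        # `linear_decay` is not shown in this excerpt; a common implementation
        # (an assumption, not necessarily the one used here) anneals epsilon
        # linearly from init_epsilon to final_epsilon over decay_steps:
        #
        #     def linear_decay(init_val, final_val, cur_step, total_steps):
        #         if cur_step >= total_steps:
        #             return final_val
        #         return (init_val * (total_steps - cur_step)
        #                 + final_val * cur_step) / total_steps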

        # play
        _, state = play_and_record(state, agent, env, exp_replay,
                                   timesteps_per_epoch)

        # train
        obs_batch, act_batch, reward_batch, next_obs_batch, is_done_batch = exp_replay.sample(
            batch_size)

        loss = compute_td_loss(obs_batch,
                               act_batch,
                               reward_batch,
                               next_obs_batch,
                               is_done_batch,
                               agent,