-
Notifications
You must be signed in to change notification settings - Fork 0
/
test.py
65 lines (55 loc) · 1.77 KB
/
test.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
import parl
from parl.utils import logger
import cv2
import numpy as np
from flappy_bird_model import FlappyBirdModel
from flappy_bird_agent import FlappyBirdAgent
from ple.games.flappybird import FlappyBird
from ple import PLE
# Hyperparameters — must match the values the checkpoint was trained with.
LEARNING_RATE = 0.001
GAMMA = 0.99
# Build the PLE FlappyBird environment. display_screen=False because frames
# are rendered manually via OpenCV inside evaluate(), not by PLE itself.
game = FlappyBird()
env = PLE(game, fps=30, display_screen=False)
# Action space is PLE's discrete action set; observation is the flat game
# state dict (presumably bird/pipe positions and velocities — TODO confirm).
action_dim = len(env.getActionSet())
obs_shape = len(env.getGameState())
# DQN: Q-network model wrapped by the PARL DQN algorithm, wrapped by the agent.
model = FlappyBirdModel(act_dim=action_dim)
algorithm = parl.algorithms.DQN(model, act_dim=action_dim, gamma=GAMMA, lr=LEARNING_RATE)
agent = FlappyBirdAgent(
    algorithm,
    obs_dim=obs_shape,
    act_dim=action_dim,
    e_greed=0.2,           # exploration settings are unused at test time (predict is greedy)
    e_greed_decrement=1e-6
)
# load model
# Restore the trained weights; agent.restore raises if the checkpoint is missing.
save_path = './fb_dqn_model.ckpt'
agent.restore(save_path)
# evaluate agent, total reward is the mean of 5 episodes
# evaluate agent, total reward is the mean of 5 episodes
def evaluate(agent):
    """Run 5 evaluation episodes and return the mean episode reward.

    Uses the module-level `env` (PLE FlappyBird). Each frame is displayed
    in an OpenCV window with the current score drawn in the top-left corner.

    Args:
        agent: trained agent exposing predict(obs) -> action index.

    Returns:
        numpy float: mean of the 5 episode rewards.
    """
    eval_reward = []
    for _ in range(5):
        env.init()
        obs = list(env.getGameState().values())
        episode_reward = 0
        while True:
            # display the score
            score = int(env.score())
            picture = env.getScreenRGB()
            # PLE frames come out transposed relative to OpenCV's row/col layout.
            picture = cv2.transpose(picture)
            font = cv2.FONT_HERSHEY_TRIPLEX
            picture = cv2.putText(picture, str(score), (0, 25), font, 1, (255, 0, 0), 2)
            cv2.imshow("flappy_bird", picture)
            # BUG FIX: cv2.imshow never paints the window unless cv2.waitKey
            # pumps the HighGUI event loop; 1 ms keeps playback near real-time.
            cv2.waitKey(1)
            # NOTE(review): frame is RGB but OpenCV assumes BGR, so the score
            # color channel order is swapped on screen — cosmetic only.
            action = agent.predict(obs)
            reward = env.act(env.getActionSet()[action])
            obs = list(env.getGameState().values())
            done = env.game_over()
            episode_reward += reward
            if done:
                env.reset_game()
                break
        eval_reward.append(episode_reward)
    cv2.destroyAllWindows()
    return np.mean(eval_reward)
# It's show time!
# Run the 5-episode evaluation with the restored agent and log the mean reward.
eval_reward = evaluate(agent)
logger.info(f'test_reward:{eval_reward}')