def test_grid_value_iteration():
    """Solve a 6x5 grid world with value iteration, print V and Pi, then
    animate one greedy rollout of the resulting policy with pygame.
    """
    pygame.init()
    w = 6
    h = 5
    rewards = ((24, 1), (2, -1), (11, -1), (27, -1))
    terminal = [2, 11, 24]
    # Maps a move direction to its action index in the action space A.
    new_pos = {"top": 2, "bot": 3, "left": 0, "right": 1}
    S, A, T, P = create_grid_world(w, h, rewards, terminal)
    start_time = time()
    V, Pi = value_iteration(S, A, P, T)
    print("--- %s seconds ---" % (time() - start_time))
    win = pygame.display.set_mode((w * 100, h * 100))
    # Dump the state values as a w-column grid.
    for i in range(w * h):
        if i % w == 0 and i != 0:
            print("")
        print(round(V[i], 7), end=" ")
    print("")
    # Dump the policy as a w-column grid.
    for i in range(w * h):
        if i % w == 0 and i != 0:
            print("")
        print(Pi[i], end=" ")
    st = reset_grid(w, h)
    while not is_terminal(st, T):
        display_grid(win, w, h)
        event_loop()
        display_reward_grid(win, rewards, w, h)
        display_mouse_grid(win, st, w, h)
        sleep(1)
        # Candidate neighbour state for each direction.
        positions = {"top": st - w,
                     "bot": st + w,
                     "left": st - 1,
                     "right": st + 1}

        def _valid_neighbour(key, value):
            # BUG FIX: the original only checked 0 <= value < w*h, which lets
            # "left"/"right" wrap across rows (st - 1 on the left edge lands on
            # the previous row's last cell). Exclude those row-wrapping moves.
            if not 0 <= value < w * h:
                return False
            if key == "left" and st % w == 0:
                return False
            if key == "right" and st % w == w - 1:
                return False
            return True

        # Keep only in-grid directions the policy assigns positive weight to.
        positions_bis = {key: Pi[st][new_pos[key]]
                         for key, value in positions.items()
                         if _valid_neighbour(key, value)
                         and Pi[st][new_pos[key]] > 0}
        action = max(positions_bis, key=positions_bis.get)
        a = new_pos[action]
        st, r, term = step(st, a, T, S, P)
    # Final frame once the terminal state is reached.
    display_grid(win, w, h)
    display_reward_grid(win, rewards, w, h)
    display_mouse_grid(win, st, w, h)
def test_grid_q_learning():
    """Learn a 6x5 grid world with tabular Q-learning, print the policy, then
    animate one greedy rollout from the learned Q-table with pygame.
    """
    pygame.init()
    w = 6
    h = 5
    rewards = ((24, 1), (2, -1), (11, -1), (27, -1))
    terminal = [2, 11, 24]
    S, A, T, P = create_grid_world(w, h, rewards, terminal)
    start_time = time()
    # NOTE(review): reset_line is passed as the reset callback even though this
    # is the grid world — presumably it just draws a random state index in
    # [0, len(S)); confirm against tabular_q_learning_control's contract.
    Q, Pi = tabular_q_learning_control(T, S, P, len(S), len(A), reset_line,
                                       is_terminal, step,
                                       episodes_count=10000,
                                       max_steps_per_episode=100)
    print("--- %s seconds ---" % (time() - start_time))
    win = pygame.display.set_mode((w * 100, h * 100))
    # Dump the policy as a w-column grid.
    for i in range(w * h):
        if i % w == 0 and i != 0:
            print("")
        print(Pi[i], end=" ")
    st = reset_grid(w, h)
    while not is_terminal(st, T):
        display_grid(win, w, h)
        event_loop()
        display_reward_grid(win, rewards, w, h)
        display_mouse_grid(win, st, w, h)
        sleep(1)
        # BUG FIX (dead code): the original also built a `positions` dict and a
        # `new_pos` direction mapping that were never used — the action comes
        # straight from the Q-table. Both removed.
        a = np.argmax(Q[st])
        st, r, term = step(st, a, T, S, P)
    # Final frame once the terminal state is reached.
    display_grid(win, w, h)
    display_reward_grid(win, rewards, w, h)
    display_mouse_grid(win, st, w, h)
def test_line_iterative_policy_evaluation():
    """Evaluate a uniform random policy on a 15-state line world, print V,
    then animate a walk that greedily follows the larger-valued neighbour.
    """
    pygame.init()
    num_states = 15
    rewards = ((0, -1), (14, 1))
    terminal = [0, 14]
    S, A, T, P = create_line_world(num_states, rewards, terminal)
    Pi = tabular_uniform_random_policy(S.shape[0], A.shape[0])
    start_time = time()
    V = iterative_policy_evaluation(S, A, P, T, Pi)
    print("--- %s seconds ---" % (time() - start_time))
    print(V)
    win = pygame.display.set_mode((num_states * 100, 100))
    st = reset_line(num_states)
    while not is_terminal(st, T):
        display_line(win, num_states)
        event_loop()
        display_reward_line(win, rewards, num_states)
        display_mouse_line(win, st, num_states)
        sleep(1)
        # Move toward the neighbour with the larger value; a == 1 is "right",
        # a == 0 is "left". The `== 0` clauses favour a zero-valued neighbour.
        if V[st + 1] > V[st - 1] or V[st + 1] == 0:
            a = 1
        elif V[st + 1] < V[st - 1] or V[st - 1] == 0:
            a = 0
        else:
            # BUG FIX: the original had no else branch, so equal non-zero
            # neighbour values left `a` unbound and raised NameError.
            # Break the tie by moving right.
            a = 1
        st, r, term = step(st, a, T, S, P)
    # Final frame once the terminal state is reached.
    display_line(win, num_states)
    display_reward_line(win, rewards, num_states)
    display_mouse_line(win, st, num_states)
    sleep(1)
def test_line_sarsa():
    """Train SARSA on a 15-state line world, print the Q-table, then animate
    one greedy rollout of the learned action values with pygame.
    """
    pygame.init()
    num_states = 15
    rewards = ((0, -1), (14, 1))
    terminal = [0, 14]
    S, A, T, P = create_line_world(num_states, rewards, terminal)
    t0 = time()
    Q, Pi = tabular_sarsa_control(T, S, P, len(S), len(A), reset_line,
                                  is_terminal, step,
                                  episodes_count=10000,
                                  max_steps_per_episode=100)
    print("--- %s seconds ---" % (time() - t0))
    # Dump the per-state action values on one line.
    for state in range(num_states):
        print(Q[state], end=" ")
    win = pygame.display.set_mode((num_states * 100, 100))
    st = reset_line(num_states)
    while not is_terminal(st, T):
        display_line(win, num_states)
        event_loop()
        display_reward_line(win, rewards, num_states)
        display_mouse_line(win, st, num_states)
        sleep(1)
        # Follow the greedy action from the learned Q-table.
        st, r, term = step(st, np.argmax(Q[st]), T, S, P)
    # Final frame once the terminal state is reached.
    display_line(win, num_states)
    display_reward_line(win, rewards, num_states)
    display_mouse_line(win, st, num_states)
    sleep(1)