import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import torch
from PIL import Image, ImageTk
from sklearn.neighbors import KNeighborsClassifier

import draw  # project-local pitch-drawing helpers (draw.pitch)


def restart(event):
    global games_played
    global done
    global currently_passing

    # Increment game counter
    games_played += 1
    done = False

    # Generate new initial coordinates and reset agent
    env.reset()

    # Clear figure
    plt.clf()

    # Plot initial point
    draw.pitch()
    plt.scatter(env.x * 100, env.y * 100, color='C1')

    # Additional information and stats for the player
    plt.text(0, 105, rules_text, color='black')
    reward_text = ('Games Played: ' + str(games_played)
                   + '\nTotal Rewards: ' + str(round(rewards_cumulative, 3))
                   + '\nRewards pg: ' + str(round(rewards_cumulative / games_played, 3)))
    plt.text(100, 105, reward_text, color='black', horizontalalignment='right')

    # Save frame for update
    plt.savefig(img_dir)

    # Update frame
    update_screen()
def update_screen():
    global team_1_players
    global team_2_players

    plt.figure()
    draw.pitch()
    for coordinates in team_1_players:
        plt.scatter(coordinates[0], coordinates[1], s=100, color='navy',
                    edgecolors='navy', alpha=0.7, linewidths=1, zorder=100)
    for coordinates in team_2_players:
        plt.scatter(coordinates[0], coordinates[1], s=100, color='firebrick',
                    edgecolors='firebrick', alpha=0.7, linewidths=1, zorder=100)
    plt.savefig('_tmp/b.png')
    plt.clf()

    # Push the rendered frame into the Tkinter panel
    img = ImageTk.PhotoImage(Image.open('_tmp/b.png'))
    panel.configure(image=img)
    panel.image = img  # keep a reference so Tk does not garbage-collect the image
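# update_screen() and restart() rely on module-level Tkinter state that is
# not shown in this listing. Below is a minimal sketch of that wiring,
# assuming hypothetical names (`root`, `panel`, `img_dir`); the real project
# may set this up differently.
def _demo_tk_setup():
    import os
    import tkinter as tk

    global panel, img_dir
    root = tk.Tk()
    os.makedirs('_tmp', exist_ok=True)
    img_dir = '_tmp/b.png'         # frame buffer written by restart()

    panel = tk.Label(root)         # image panel updated by update_screen()
    panel.pack()

    root.bind('<space>', restart)  # e.g. restart the game on spacebar
    root.mainloop()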
# NOTE: an extended variant of draw_play further below also marks the
# agent's current position.
def draw_play(agent, dpi=144, directory='__default', plot_now=False):
    draw.pitch(dpi=dpi)
    for action in range(len(agent.actions)):
        draw_actions(agent, action)
    if plot_now:
        return
    plt.savefig('img/' + directory + '.png')
    plt.clf()
# NOTE: a parameterized variant of draw_play_from_agent_set follows below.
def draw_play_from_agent_set(agent_set):
    draw.pitch()
    # Longest action sequence across the set
    max_len = 0
    for agent in agent_set:
        if len(agent.actions) > max_len:
            max_len = len(agent.actions)
    for action in range(max_len):
        draw_actions_for_set(agent_set, action)
    plt.show()
def draw_play_from_agent_set(agent_set, dpi=144, plot_now=True, directory='a.png'):
    draw.pitch(dpi=dpi)
    # Longest action sequence across the set
    max_len = 0
    for agent in agent_set:
        if len(agent.actions) > max_len:
            max_len = len(agent.actions)
    for action in range(max_len):
        draw_actions_for_set(agent_set, action)
    if plot_now:
        return
    plt.savefig(directory, format='png')  # `directory` is a full file path
    plt.clf()
def draw_play(agent, dpi=144, directory='__default', plot_now=False,
              current_action_color='C0'):
    draw.pitch(dpi=dpi)
    for action in range(len(agent.actions)):
        draw_actions(agent, action)
    if not agent.end:
        # Mark the agent's current position while the play is still running
        plt.scatter(agent.x * 100, agent.y * 100, color=current_action_color,
                    zorder=100)
    if plot_now:
        return
    plt.savefig('img/' + directory + '.png')
    plt.clf()
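# Hypothetical usage of draw_play; `Agent` and `run_episode` are assumed
# project pieces and are not defined in this listing:
def _demo_draw_play(env):
    agent = Agent(env)       # assumed agent class
    run_episode(agent)       # assumed episode driver
    draw_play(agent, dpi=144, directory='example_play')  # writes img/example_play.png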
def plot_team(self, dpi=120):
    draw.pitch(dpi=dpi)
    attacking_team = [(player.x, player.y, 0) for player in self.players]
    for i, player in enumerate(attacking_team):
        plt.scatter(player[0], player[1], s=75, color='blue', edgecolors='navy',
                    linewidths=1, zorder=10)
        plt.text(player[0], player[1], i, zorder=11, fontsize='xx-small',
                 color='white', ha='center', va='center')
    # Layered ball marker next to player 0 (assumed ball carrier)
    plt.scatter(attacking_team[0][0] + 1, attacking_team[0][1], s=48,
                color='black', zorder=11)
    plt.scatter(attacking_team[0][0] + 1, attacking_team[0][1], s=48,
                marker='*', color='white', zorder=11)
    plt.scatter(attacking_team[0][0] + 1, attacking_team[0][1], s=32,
                marker='+', color='black', zorder=11)
def plot_result(self, proposed_team, save_fig_dir='', dpi=120):
    attacking_team = [(player.x, player.y, 0) for player in self.players]
    defensive_team = [(player.x, player.y, 1) for player in proposed_team.players]

    draw.pitch(dpi=dpi)

    # Draw initial state
    for i, player in enumerate(attacking_team):
        plt.scatter(player[0], player[1], s=75, color='blue', edgecolors='navy',
                    linewidths=1, zorder=10)
        plt.text(player[0], player[1], i, zorder=11, fontsize='xx-small',
                 color='white', ha='center', va='center')
    for i, player in enumerate(defensive_team):
        plt.scatter(player[0], player[1], s=75, color='orange', edgecolors='C1',
                    linewidths=1, zorder=10)
        plt.text(player[0], player[1], i, zorder=11, fontsize='xx-small',
                 color='white', ha='center', va='center')

    # Draw all passing lines
    for passline in self.passlines:
        passline.plot(color='tomato')

    # Draw all goal-defense lines
    for goalline in self.goallines:
        goalline.plot(color='green')

    # Distances between specific defender/attacker pairs can be overlaid
    # here, e.g.:
    # plt.plot([defensive_team[3][0], attacking_team[8][0]],
    #          [defensive_team[3][1], attacking_team[8][1]], color='C0')

    # Pitch control: label every grid cell with the team of its nearest
    # player. y is compressed by 120/75 before fitting so that Euclidean
    # distances respect the pitch proportions, then expanded for plotting.
    print_df = pd.DataFrame([[i, j / (120 / 75)] for i in range(101) for j in range(101)],
                            columns=['x', 'y'])
    df = pd.DataFrame(attacking_team + defensive_team, columns=['x', 'y', 'team'])
    df.y /= 120 / 75
    clf = KNeighborsClassifier(n_neighbors=1)
    clf.fit(df[['x', 'y']], df.team)
    print_df['team'] = clf.predict(print_df[['x', 'y']])
    print_df.y *= 120 / 75

    print_df_t1 = print_df[print_df.team == 0]
    plt.scatter(print_df_t1['x'], print_df_t1['y'], 3, c='C0', marker='s', alpha=0.5)
    print_df_t2 = print_df[print_df.team == 1]
    plt.scatter(print_df_t2['x'], print_df_t2['y'], 3, c='C1', marker='s', alpha=0.5)

    if save_fig_dir != '':
        plt.savefig(save_fig_dir, bbox_inches='tight', dpi=dpi)
        plt.clf()
    else:
        plt.show()
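# The pitch-control block above is a 1-nearest-neighbour (Voronoi) partition
# of the pitch. A self-contained sketch with dummy coordinates, assuming the
# same 0-100 coordinates and 120 x 75 pitch proportions:
def _demo_pitch_control():
    import pandas as pd
    from sklearn.neighbors import KNeighborsClassifier

    aspect = 120 / 75  # rescale y so Euclidean distances are true to the pitch

    # Dummy players: (x, y, team)
    players = pd.DataFrame([(20, 50, 0), (60, 30, 0), (40, 55, 1), (80, 70, 1)],
                           columns=['x', 'y', 'team'])
    grid = pd.DataFrame([(x, y) for x in range(101) for y in range(101)],
                        columns=['x', 'y'])

    clf = KNeighborsClassifier(n_neighbors=1)
    clf.fit(players[['x', 'y']].assign(y=players.y / aspect), players.team)
    grid['team'] = clf.predict(grid.assign(y=grid.y / aspect)[['x', 'y']])
    print(grid.team.value_counts())  # grid cells controlled by each team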
# Record progress every TEST_ITERS iterations
if iteration % TEST_ITERS == 0:
    rewards, steps = test_net(act_net, device=device)
    print("%d, reward %.5f, steps %d" % (iteration, rewards, steps))
    iteration_stats.append([iteration, rewards, steps])
    pd.DataFrame(iteration_stats).to_csv('tmp/log' + ATTEMPT + '.csv')

    if (best_reward is None or best_reward < rewards) and ENABLE_VIZ:
        # Progress visualizer: query the policy on a grid of pitch positions
        viz_df = pd.DataFrame([[i, j] for i in np.arange(0, 1.01, 0.01)
                               for j in np.arange(0, 1, 0.015)],
                              columns=['x', 'y'])
        viz_df[['Shot', 'Pass', 'r', 'a']] = act_net(
            torch.FloatTensor(viz_df[['x', 'y']].to_numpy())).cpu().detach().numpy()
        viz_df['x'] *= 100
        viz_df['y'] *= 100

        # Discrete action decisions: shot vs. pass at every position
        draw.pitch()
        shot_action = viz_df[viz_df.Shot > viz_df.Pass]
        plt.scatter(shot_action.x, shot_action.y, s=15, c='C0', alpha=0.5,
                    marker='s', linewidth=0, zorder=10)
        pass_action = viz_df[viz_df.Shot < viz_df.Pass]
        plt.scatter(pass_action.x, pass_action.y, s=15, c='C1', alpha=0.5,
                    marker='s', linewidth=0, zorder=10)
        plt.savefig('img/decision_maps_' + ATTEMPT + '/passshotmap_'
                    + str(iteration) + '.png', dpi=900)
        plt.clf()

        # Continuous action decisions: direction arrows on a thinned grid
        draw.pitch()
        for i, row in viz_df.iterrows():
            if i % 3 == 0:
                # Unit arrow along angle a; the sign of r flips the direction
                plt.arrow(row['x'], row['y'],
                          row.r / abs(row.r) * np.cos((row['a'] - 0.5) * 2 * np.pi),
                          row.r / abs(row.r) * np.sin((row['a'] - 0.5) * 2 * np.pi),
                          length_includes_head=True, head_width=.5,
                          head_length=.5, color='white')
        viz_df['i'] = range(len(viz_df))
        plt.scatter(viz_df[(viz_df.i % 3) == 0].x, viz_df[(viz_df.i % 3) == 0].y,
                    s=(viz_df[(viz_df.i % 3) == 0].r + 1) ** 2, zorder=99)
        plt.savefig('img/decision_maps_' + ATTEMPT + '/orientationmap_'
                    + str(iteration) + '.png', dpi=900)
        plt.clf()
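# The orientation map assumes the policy head outputs (Shot, Pass, r, a) per
# position, with `a` in [0, 1] mapped to an angle in [-pi, pi] and only the
# sign of `r` flipping the arrow. A hypothetical decoder mirroring that logic:
def _demo_decode_action(shot, pass_, r, a):
    import numpy as np

    kind = 'shot' if shot > pass_ else 'pass'
    angle = (a - 0.5) * 2 * np.pi  # a in [0, 1] -> angle in [-pi, pi]
    direction = np.sign(r)         # matches r / abs(r) for nonzero r
    return kind, (direction * np.cos(angle), direction * np.sin(angle))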