# Setup agents  #
#################
# Player 0: DQN agent in evaluation mode — training disabled and no
# network checkpoints are loaded or saved.
# NOTE(review): a second, conflicting copy of this setup (train=TRAIN,
# with a save path) appears further down the file — looks like a bad
# merge; confirm which configuration is intended.
dqn_player = DQNAgent(
    player_num=0,
    map_name=map_name,
    train=False,
    network_save_name=None,
    network_load_name=None,
)
players[0] = dqn_player
names[0] = "DQN Agent"

# Create an array of all agents that could be used during training
# Each entry pairs a human-readable display name with an instantiated
# opponent agent (all constructed for a single action budget of
# env.num_actions_per_turn as player 1).
# NOTE(review): this list is TRUNCATED — the 'Base Rush v1' entry is cut
# off mid-dict and the list is never closed; the lines that follow are a
# second, overlapping copy of the setup script. This looks like a bad
# merge/paste and must be reconciled before the file will parse.
opposing_agents = [
    {
        'name': 'Random Agent Delay',
        'agent': random_actions_delay(env.num_actions_per_turn, 1, map_name)
    },
    {
        'name': 'Random Agent',
        'agent': random_actions(env.num_actions_per_turn, 1, map_name)
    },
    {
        'name': 'Bull Rush',
        'agent': bull_rush(env.num_actions_per_turn, 1)
    },
    {
        'name': 'All Cycle',
        'agent': all_cycle(env.num_actions_per_turn, 1)
    },
    {
        'name': 'Base Rush v1',
env = gym.make('everglades-v0')

#################
# Setup agents  #
#################
# players maps player slot -> agent instance; names maps slot -> label.
# Player 0 is the DQN learner (training toggled by TRAIN; checkpoints
# its local network under saved_models). Player 1 is a scripted
# random-with-delay baseline opponent.
dqn_agent = DQNAgent(
    player_num=0,
    map_name=map_name,
    train=TRAIN,
    network_save_name='./agents/Smart_State/saved_models/local',
    network_load_name=None,
)
baseline_agent = random_actions_delay(env.num_actions_per_turn, 1, map_name)

players = {0: dqn_agent, 1: baseline_agent}
names = {0: "DQN Agent", 1: 'Random Agent Delay'}
#################

# Buffer for the actions chosen each turn.
# NOTE(review): presumably keyed by player number — confirm against the
# episode loop.
actions = dict()

# Deliberately high episode count to test convergence; drop back to a
# reasonable value for other testing.
n_episodes = 2500

#########################
# Statistic variables   #
#########################
# Window size for rolling statistics — presumably averages over the last
# k episodes; TODO confirm against AgentStatistics usage.
k = 100
stats = AgentStatistics(names[0],
                        n_episodes,