Example 1
    def test_tournament(self):
        env = rlcard.make('leduc-holdem')
        env.set_agents(
            [RandomAgent(env.action_num),
             RandomAgent(env.action_num)])
        payoffs = tournament(env, 1000)
        self.assertEqual(len(payoffs), 2)
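
This method assumes the surrounding unittest scaffolding and imports, which the excerpt does not show. A minimal, self-contained sketch follows; the class name TestTournament and the unittest wrapper are assumptions, not the original test file, while the import paths are the ones used elsewhere in these examples.

import unittest

import rlcard
from rlcard.agents.random_agent import RandomAgent
from rlcard.utils.utils import tournament


class TestTournament(unittest.TestCase):  # hypothetical class name

    def test_tournament(self):
        env = rlcard.make('leduc-holdem')
        env.set_agents(
            [RandomAgent(env.action_num),
             RandomAgent(env.action_num)])
        payoffs = tournament(env, 1000)
        # Leduc Hold'em has two players, so tournament returns two average payoffs
        self.assertEqual(len(payoffs), 2)


if __name__ == '__main__':
    unittest.main()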
Example 2
def eval_agents(agent1_name, agent1, agent2_name, agent2):
    print('\n' + agent1_name + ' vs ' + agent2_name)
    env = rlcard.make('leduc-holdem')
    env.set_agents([agent1, agent2])
    reward_1, reward_2 = tournament(env, evaluate_num)
    print('Reward ' + agent1_name + ': ', reward_1)
    print('Reward ' + agent2_name + ': ', reward_2)
    return reward_1, reward_2
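
Note that eval_agents reads evaluate_num as a module-level global. A short usage sketch, assuming two random agents; the names and numeric values here are illustrative only.

import rlcard
from rlcard.agents.random_agent import RandomAgent

# eval_agents reads this as a global
evaluate_num = 1000

# Query the action space once to build the agents
action_num = rlcard.make('leduc-holdem').action_num

reward_a, reward_b = eval_agents('Random-A', RandomAgent(action_num),
                                 'Random-B', RandomAgent(action_num))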
Example 3
# The paths for saving the logs and learning curves
log_dir = './experiments/leduc_holdem_cfr_result/'

# Set a global seed
set_global_seed(0)

# Initialize the CFR agent
agent = CFRAgent(env)
agent.load()  # If a saved model exists, load it first

# Evaluate CFR against pre-trained NFSP
eval_env.set_agents([agent, models.load('leduc-holdem-nfsp').agents[0]])

# Init a Logger to plot the learning curve
logger = Logger(log_dir)

for episode in range(episode_num):
    agent.train()
    print('\rIteration {}'.format(episode), end='')
    # Evaluate the performance. Play with NFSP agents.
    if episode % evaluate_every == 0:
        agent.save()  # Save model
        logger.log_performance(env.timestep,
                               tournament(eval_env, evaluate_num)[0])

# Close files in the logger
logger.close_files()

# Plot the learning curve
logger.plot('CFR')
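
The loop above relies on env, eval_env and a few hyperparameters created earlier in the script, which the excerpt does not show. A plausible setup looks roughly like this; the CFRAgent/Logger import paths, the allow_step_back flag, and the numeric values are assumptions.

import rlcard
from rlcard import models
from rlcard.agents.cfr_agent import CFRAgent             # assumed module path
from rlcard.utils.utils import set_global_seed, tournament
from rlcard.utils.logger import Logger                   # assumed module path

# CFR traverses the game tree, so the training env is assumed to allow step_back
env = rlcard.make('leduc-holdem', config={'allow_step_back': True})
eval_env = rlcard.make('leduc-holdem')

# Illustrative values, not the ones from the original script
episode_num = 10000
evaluate_every = 100
evaluate_num = 1000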
Example 4
    # Initialize a Logger to plot the learning curve
    logger = Logger(log_dir)

    for episode in range(episode_num):

        # Generate data from the environment
        trajectories, _ = env.run(is_training=True)

        # Feed transitions into agent memory, and train the agent
        for ts in trajectories[0]:
            agent.feed(ts)

        # Evaluate the performance. Play with random agents.
        if episode % evaluate_every == 0:
            logger.log_performance(env.timestep, tournament(eval_env, evaluate_num)[0])

    # Close files in the logger
    logger.close_files()

    # Plot the learning curve
    logger.plot('DQN')
    
    # Save model
    save_dir = 'models/blackjack_dqn'
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
    saver = tf.train.Saver()
    saver.save(sess, os.path.join(save_dir, 'model'))
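
Example 7 below shows the matching restore path for a checkpoint saved this way. For completeness, here is a sketch of reloading this blackjack model into a freshly built graph; the layer sizes and other constructor arguments are placeholders and would need to match whatever the training script actually used.

# Re-create the agent with the same scope, then restore the saved weights
with tf.Session() as sess:
    agent = DQNAgent(sess,
                     scope='dqn',
                     action_num=env.action_num,
                     state_shape=env.state_shape,
                     replay_memory_init_size=memory_init_size,
                     train_every=train_every,
                     mlp_layers=[10, 10])   # placeholder; must match training
    saver = tf.train.Saver()
    sess.run(tf.global_variables_initializer())
    saver.restore(sess, os.path.join(save_dir, 'model'))

    env.set_agents([agent])   # blackjack is a single-agent environment in RLCard
    print(tournament(env, 10000))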
    
Example 5

''' Another example of loading a pre-trained NFSP model on Leduc Hold'em
    Here, we directly load the model from the model zoo
'''
import rlcard
from rlcard.agents.random_agent import RandomAgent
from rlcard.utils.utils import set_global_seed, tournament
from rlcard import models

# Make environment
env = rlcard.make('leduc-holdem')

# Set a global seed
set_global_seed(0)

# Here we directly load NFSP models from the /models module
nfsp_agents = models.load('leduc-holdem-nfsp').agents

# Evaluate the performance. Play with random agents.
evaluate_num = 10000
random_agent = RandomAgent(env.action_num)
env.set_agents([nfsp_agents[0], random_agent])
reward = tournament(env, evaluate_num)[0]
print('Average reward against random agent: ', reward)
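
Because the model zoo bundle ships one agent per seat, the same environment can also be used to let the NFSP policy play against itself. A small follow-up sketch under the same setup:

# Leduc Hold'em has two seats, so the pre-trained bundle contains two agents
env.set_agents([nfsp_agents[0], nfsp_agents[1]])
reward_selfplay = tournament(env, evaluate_num)[0]
print('Average reward in NFSP self-play: ', reward_selfplay)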

Example 6
def train_mahjong():

    # Make environment
    env = rlcard.make('mahjong', config={'seed': 0})
    eval_env = rlcard.make('mahjong', config={'seed': 0})

    # Set the number of iterations and how frequently we evaluate the performance
    evaluate_every = 1000
    evaluate_num = 1000
    episode_num = 10000

    # The initial memory size
    memory_init_size = 1000

    # Train the agent every X steps
    train_every = 64

    # The paths for saving the logs and learning curves
    log_dir = './experiments/mahjong_nfsp_result/'

    # Set a global seed
    set_global_seed(0)

    with tf.Session() as sess:

        # Initialize a global step
        global_step = tf.Variable(0, name='global_step', trainable=False)

        # Set up the agents
        agents = []
        for i in range(env.player_num):
            agent = NFSPAgent(sess,
                              scope='nfsp' + str(i),
                              action_num=env.action_num,
                              state_shape=env.state_shape,
                              hidden_layers_sizes=[512, 512],
                              anticipatory_param=0.5,
                              batch_size=256,
                              rl_learning_rate=0.00005,
                              sl_learning_rate=0.00001,
                              min_buffer_size_to_learn=memory_init_size,
                              q_replay_memory_size=int(1e5),
                              q_replay_memory_init_size=memory_init_size,
                              train_every=train_every,
                              q_train_every=train_every,
                              q_batch_size=256,
                              q_mlp_layers=[512, 512])
            agents.append(agent)
        random_agent = RandomAgent(action_num=eval_env.action_num)

        env.set_agents(agents)
        eval_env.set_agents(
            [agents[0], random_agent, random_agent, random_agent])

        # Initialize global variables
        sess.run(tf.global_variables_initializer())

        # Init a Logger to plot the learning curve

        logger = Logger(log_dir)

        for episode in tqdm(range(episode_num)):

            # First sample a policy for the episode
            for agent in agents:
                agent.sample_episode_policy()

            # Generate data from the environment
            trajectories, _ = env.run(is_training=True)

            # Feed transitions into agent memory, and train the agent
            for i in range(env.player_num):
                for ts in trajectories[i]:
                    agents[i].feed(ts)

            # Evaluate the performance. Play with random agents.
            if episode % evaluate_every == 0:
                logger.log_performance(env.timestep,
                                       tournament(eval_env, evaluate_num)[0])

        # Close files in the logger
        logger.close_files()

        # Plot the learning curve
        logger.plot('NFSP')

        # Save model
        save_dir = 'models/mahjong_nfsp'
        if not os.path.exists(save_dir):
            os.makedirs(save_dir)
        saver = tf.train.Saver()
        saver.save(sess, os.path.join(save_dir, 'model'))
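
The function above also assumes a set of imports that the excerpt does not show (tensorflow, os, tqdm, and the RLCard agents and utilities). A likely header follows, with the agent and logger module paths flagged as assumptions.

import os

import tensorflow as tf
from tqdm import tqdm

import rlcard
from rlcard.agents.nfsp_agent import NFSPAgent            # assumed module path
from rlcard.agents.random_agent import RandomAgent
from rlcard.utils.utils import set_global_seed, tournament
from rlcard.utils.logger import Logger                    # assumed module path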
Example 7
with tf.Session() as sess:

    # Initialize a global step
    global_step = tf.Variable(0, name='global_step', trainable=False)

    # Set up the agents
    agent = DQNAgent(sess,
                     scope='dqn',
                     action_num=env.action_num,
                     replay_memory_size=20000,
                     replay_memory_init_size=memory_init_size,
                     train_every=train_every,
                     state_shape=env.state_shape,
                     mlp_layers=[60, 60, 60, 60, 60],
                     batch_size=512)

    saver = tf.train.Saver()
    random_agent = RandomAgent(action_num=eval_env.action_num)
    env.set_agents([agent, random_agent])
    eval_env.set_agents([agent, random_agent])

    # Initialize global variables
    sess.run(tf.global_variables_initializer())

    saver.restore(sess, "models/uno_dqn5/model")


    print(tournament(eval_env, 10000))
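
The session block above presupposes an environment and two hyperparameters created earlier. A plausible setup is sketched below; the 'uno' game id is inferred from the checkpoint path, and the import paths and numeric values are placeholders.

import tensorflow as tf

import rlcard
from rlcard.agents.dqn_agent import DQNAgent              # assumed module path
from rlcard.agents.random_agent import RandomAgent
from rlcard.utils.utils import set_global_seed, tournament

env = rlcard.make('uno', config={'seed': 0})
eval_env = rlcard.make('uno', config={'seed': 0})

memory_init_size = 1000   # placeholder values, not from the original script
train_every = 1

set_global_seed(0)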
    
    
Example 8
        # Init a Logger to plot the learning curve
        logger = Logger(log_dir)

        for episode in range(episode_num):
            print('Episode: ' + str(episode))

            # Generate data from the environment
            trajectories, _ = env.run(is_training=True)

            # Feed transitions into agent memory, and train the agent
            for ts in trajectories[0]:
                agent.feed(ts)

            # Evaluate the performance. Play with random agents.
            if episode % evaluate_every == 0:
                a, b = env.timestep, tournament(eval_env, evaluate_num)[0]
                logger.log_performance(a, b)
                csvw.writerow([a,b])
                f.flush()
                
        # Close files in the logger
        logger.close_files()

        # Plot the learning curve
        logger.plot('DQN')
        saver.save(sess, os.path.join(save_dir, 'model_FINAL'))
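
Besides the Logger, the loop writes raw (timestep, reward) pairs through csvw and f, which the excerpt never defines. One plausible setup, opened before the training loop; the file name is a placeholder, and log_dir is assumed to be defined as in the other examples.

import csv
import os

f = open(os.path.join(log_dir, 'performance_raw.csv'), 'w', newline='')
csvw = csv.writer(f)
csvw.writerow(['timestep', 'reward'])
# ... the training loop above runs here, flushing after each evaluation ...
f.close()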

    

    
    
Example 9
                 state_shape=env.state_shape,
                 batch_size=64,
                 mlp_layers=[64])

env.set_agents([
    agent,
    RandomAgent(action_num=env.action_num),
    RandomAgent(action_num=env.action_num),
    RandomAgent(action_num=env.action_num)
])

eval_env.set_agents([
    agent,
    RandomAgent(action_num=env.action_num),
    RandomAgent(action_num=env.action_num),
    RandomAgent(action_num=env.action_num)
])

logger = Logger('.')
for episode in range(100000):
    # Generate data from the environment
    trajectories, _ = env.run(is_training=True)
    # Feed transitions into agent memory, and train the agent
    for ts in trajectories[0]:
        agent.feed(ts)
    # Evaluate the performance. Play with random agents.
    if episode % 5000 == 0:
        logger.log_performance(env.timestep, tournament(eval_env, 10000)[0])

logger.close_files()
logger.plot('DQN')
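
This last excerpt opens midway through the agent constructor, so everything before state_shape is missing. A self-contained variant could start roughly as follows; the choice of 'mahjong' (a four-seat RLCard game, matching the four agents registered above) and the module paths are assumptions, not recovered from the original.

import tensorflow as tf

import rlcard
from rlcard.agents.dqn_agent import DQNAgent              # assumed module path
from rlcard.agents.random_agent import RandomAgent
from rlcard.utils.utils import tournament
from rlcard.utils.logger import Logger                    # assumed module path

# Any four-player RLCard game would fit the agent lists used above
env = rlcard.make('mahjong', config={'seed': 0})
eval_env = rlcard.make('mahjong', config={'seed': 0})

sess = tf.Session()
agent = DQNAgent(sess,
                 scope='dqn',
                 action_num=env.action_num,
                 state_shape=env.state_shape,
                 batch_size=64,
                 mlp_layers=[64])
sess.run(tf.global_variables_initializer())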