Example #1
    def test_train(self):

        memory_init_size = 20
        num_steps = 1000

        agent = NFSPAgent(num_actions=2,
                          state_shape=[2],
                          hidden_layers_sizes=[10, 10],
                          reservoir_buffer_capacity=50,
                          batch_size=4,
                          min_buffer_size_to_learn=memory_init_size,
                          q_replay_memory_size=50,
                          q_replay_memory_init_size=memory_init_size,
                          q_batch_size=4,
                          q_mlp_layers=[10, 10],
                          device=torch.device('cpu'))

        # The action chosen on a random state must be one of the legal indices (0 or 1).
        predicted_action, _ = agent.eval_step({
            'obs': np.random.random_sample((2, )),
            'legal_actions': {0: None, 1: None},
            'raw_legal_actions': ['call', 'raise']
        })
        self.assertGreaterEqual(predicted_action, 0)
        self.assertLessEqual(predicted_action, 1)

        for _ in range(num_steps):
            agent.sample_episode_policy()
            predicted_action = agent.step({
                'obs': np.random.random_sample((2, )),
                'legal_actions': {
                    0: None,
                    1: None
                }
            })
            self.assertGreaterEqual(predicted_action, 0)
            self.assertLessEqual(predicted_action, 1)

            ts = [
                {'obs': np.random.random_sample((2, )),
                 'legal_actions': {0: None, 1: None}},
                np.random.randint(2),
                0,
                {'obs': np.random.random_sample((2, )),
                 'legal_actions': {0: None, 1: None},
                 'raw_legal_actions': ['call', 'raise']},
                True
            ]
            agent.feed(ts)
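
Note on the list fed to agent.feed above: an RLCard transition is a five-element list in the order (state, action, reward, next_state, done). A minimal sketch with the positions named (the variable names are illustrative, not part of the original test):

import numpy as np

state = {'obs': np.random.random_sample((2, )),
         'legal_actions': {0: None, 1: None},
         'raw_legal_actions': ['call', 'raise']}
action = np.random.randint(2)   # index of the action chosen in `state`
reward = 0                      # immediate reward for that action
next_state = state              # observation reached after the action
done = True                     # whether the episode has terminated
agent.feed([state, action, reward, next_state, done])
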
Example #2
File: test_nfsp.py  Project: qq-ship/NV-Dou
    def test_train(self):

        norm_step = 100
        memory_init_size = 20
        step_num = 1000

        sess = tf.InteractiveSession()
        tf.Variable(0, name='global_step', trainable=False)
        agent = NFSPAgent(sess=sess,
                          scope='nfsp',
                          action_num=2,
                          state_shape=[2],
                          hidden_layers_sizes=[10, 10],
                          reservoir_buffer_capacity=50,
                          batch_size=4,
                          min_buffer_size_to_learn=memory_init_size,
                          q_replay_memory_size=50,
                          q_replay_memory_init_size=memory_init_size,
                          q_batch_size=4,
                          q_norm_step=norm_step,
                          q_mlp_layers=[10, 10])
        sess.run(tf.global_variables_initializer())

        predicted_action = agent.eval_step({
            'obs': np.random.random_sample((2, )),
            'legal_actions': [0, 1]
        })
        self.assertGreaterEqual(predicted_action, 0)
        self.assertLessEqual(predicted_action, 1)

        for step in range(step_num):
            agent.sample_episode_policy()
            predicted_action = agent.step({
                'obs': np.random.random_sample((2, )),
                'legal_actions': [0, 1]
            })
            self.assertGreaterEqual(predicted_action, 0)
            self.assertLessEqual(predicted_action, 1)

            ts = [
                {'obs': np.random.random_sample((2, )),
                 'legal_actions': [0, 1]},
                np.random.randint(2),
                0,
                {'obs': np.random.random_sample((2, )),
                 'legal_actions': [0, 1]},
                True
            ]
            agent.feed(ts)
            if step > norm_step + memory_init_size:
                agent.train_rl()

            agent.train_sl()
        sess.close()
        tf.reset_default_graph()
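
Because this version uses the TF1 graph API, the test ends with sess.close() and tf.reset_default_graph() so that later tests start from a clean graph. An alternative, shown here only as a sketch (not from the original test), is to scope the whole run to its own graph:

# Illustrative only: build everything inside a dedicated graph so no global
# reset is needed afterwards.
graph = tf.Graph()
with graph.as_default(), tf.Session(graph=graph) as sess:
    tf.Variable(0, name='global_step', trainable=False)
    # ... construct the NFSPAgent and run the same steps as above ...
    sess.run(tf.global_variables_initializer())
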
Example #3
    # Count the number of steps
    step_counters = [0 for _ in range(env.player_num)]

    # Init a Logger to plot the learning curve
    logger = Logger(xlabel='timestep',
                    ylabel='reward',
                    legend='NFSP on Limit Texas Holdem',
                    log_path=log_path,
                    csv_path=csv_path)

    for episode in range(episode_num):

        # First sample a policy for the episode
        for agent in agents:
            agent.sample_episode_policy()

        # Generate data from the environment
        trajectories, _ = env.run(is_training=True)

        # Feed transitions into agent memory, and train the agent
        for i in range(env.player_num):
            for ts in trajectories[i]:
                agents[i].feed(ts)
                step_counters[i] += 1

                # Train the agent
                train_count = step_counters[i] - (memory_init_size + norm_step)
                if train_count > 0 and train_count % 64 == 0:
                    rl_loss = agents[i].train_rl()
                    sl_loss = agents[i].train_sl()
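
This fragment assumes that env, agents, memory_init_size, norm_step, episode_num, log_path, and csv_path were defined earlier in the script. Illustrative values only (not taken from the original project):

# Hypothetical setup the loop above relies on; adjust to your experiment.
memory_init_size = 1000                          # transitions collected before learning starts
norm_step = 100                                  # extra warm-up steps before RL updates
episode_num = 100000                             # number of self-play episodes
log_path = './experiments/nfsp_result/log.txt'   # text log written by Logger
csv_path = './experiments/nfsp_result/performance.csv'
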
Example #4
# Imports assumed by this example (module paths follow rlcard 0.x / TF1; adjust to your version)
import os

import tensorflow as tf
from tqdm import tqdm

import rlcard
from rlcard.agents import NFSPAgent, RandomAgent
from rlcard.utils import set_global_seed, tournament, Logger


def train_mahjong():

    # Make environment
    env = rlcard.make('mahjong', config={'seed': 0})
    eval_env = rlcard.make('mahjong', config={'seed': 0})

    # Set the number of episodes and how frequently we evaluate the performance
    evaluate_every = 1000
    evaluate_num = 1000
    episode_num = 10000

    # The initial memory size
    memory_init_size = 1000

    # Train the agent every X steps
    train_every = 64

    # The paths for saving the logs and learning curves
    log_dir = './experiments/mahjong_nfsp_result/'

    # Set a global seed
    set_global_seed(0)

    with tf.Session() as sess:

        # Initialize a global step
        global_step = tf.Variable(0, name='global_step', trainable=False)

        # Set up the agents
        agents = []
        for i in range(env.player_num):
            agent = NFSPAgent(sess,
                              scope='nfsp' + str(i),
                              action_num=env.action_num,
                              state_shape=env.state_shape,
                              hidden_layers_sizes=[512, 512],
                              anticipatory_param=0.5,
                              batch_size=256,
                              rl_learning_rate=0.00005,
                              sl_learning_rate=0.00001,
                              min_buffer_size_to_learn=memory_init_size,
                              q_replay_memory_size=int(1e5),
                              q_replay_memory_init_size=memory_init_size,
                              train_every=train_every,
                              q_train_every=train_every,
                              q_batch_size=256,
                              q_mlp_layers=[512, 512])
            agents.append(agent)
        random_agent = RandomAgent(action_num=eval_env.action_num)

        env.set_agents(agents)
        eval_env.set_agents(
            [agents[0], random_agent, random_agent, random_agent])

        # Initialize global variables
        sess.run(tf.global_variables_initializer())

        # Init a Logger to plot the learning curve

        logger = Logger(log_dir)

        for episode in tqdm(range(episode_num)):

            # First sample a policy for the episode
            for agent in agents:
                agent.sample_episode_policy()

            # Generate data from the environment
            trajectories, _ = env.run(is_training=True)

            # Feed transitions into agent memory, and train the agent
            for i in range(env.player_num):
                for ts in trajectories[i]:
                    agents[i].feed(ts)

            # Evaluate the performance. Play with random agents.
            if episode % evaluate_every == 0:
                logger.log_performance(env.timestep,
                                       tournament(eval_env, evaluate_num)[0])

        # Close files in the logger
        logger.close_files()

        # Plot the learning curve
        logger.plot('NFSP')

        # Save model
        save_dir = 'models/mahjong_nfsp'
        if not os.path.exists(save_dir):
            os.makedirs(save_dir)
        saver = tf.train.Saver()
        saver.save(sess, os.path.join(save_dir, 'model'))
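
To reuse the checkpoint saved at the end of train_mahjong, the same graph has to be rebuilt before restoring it. A sketch under the same rlcard/TF1 assumptions; the helper below is hypothetical and not part of the original script:

def load_mahjong_nfsp(checkpoint_dir='models/mahjong_nfsp'):
    # Hypothetical helper: rebuild the NFSP graph with the same scopes and
    # layer sizes as in train_mahjong(), then restore the saved weights.
    env = rlcard.make('mahjong', config={'seed': 0})
    sess = tf.Session()
    agents = []
    for i in range(env.player_num):
        agents.append(NFSPAgent(sess,
                                scope='nfsp' + str(i),
                                action_num=env.action_num,
                                state_shape=env.state_shape,
                                hidden_layers_sizes=[512, 512],
                                q_mlp_layers=[512, 512]))
    saver = tf.train.Saver()
    saver.restore(sess, os.path.join(checkpoint_dir, 'model'))
    return sess, env, agents
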