def __init__(self):
    ''' Instantiate the agent. '''
    super().__init__()
    self.wins = 0
    self.losses = 0

    # Set up the RL NFSP agent
    # Set the iteration numbers and how frequently we evaluate/save the plot
    evaluate_every = 10000
    evaluate_num = 10000
    episode_num = 100000

    # The initial memory size
    memory_init_size = 1000

    # Train the agent every X steps
    train_every = 64

    # The paths for saving the logs and learning curves
    log_dir = './training/nfsp/'

    # Set a global seed
    set_global_seed(0)

    # Set agent - TODO - determine PPE parameters
    self.agent = NFSPAgent(scope='nfsp',
                           action_num=3,
                           state_shape=54,
                           hidden_layers_sizes=[512, 512],
                           min_buffer_size_to_learn=memory_init_size,
                           q_replay_memory_init_size=memory_init_size,
                           train_every=train_every,
                           q_train_every=train_every,
                           q_mlp_layers=[512, 512],
                           device=torch.device('cpu'))

    # Init a Logger to plot the learning curve
    self.logger = Logger(log_dir)
def main():
    parser = createParser()
    namespace = parser.parse_args(sys.argv[1:])

    # Random seed
    random_seed = namespace.random_seed

    # Environment name
    env_name = namespace.env_name

    # How many games to play during evaluation
    evaluate_num = namespace.evaluate_num

    # Make environment
    eval_env = rlcard.make(env_name, config={'seed': random_seed})

    # Set a global seed
    set_global_seed(random_seed)

    # Initialize a global step
    global_step = tf.Variable(0, name='global_step', trainable=False)

    # Set up the agents
    agent0 = getAgent(namespace.agent_type0, eval_env)
    agent1 = getAgent(namespace.agent_type1, eval_env)
    if namespace.load_model0 is not None:
        agent0.load_model(namespace.load_model0)
    if namespace.load_model1 is not None:
        agent1.load_model(namespace.load_model1)

    eval_env.set_agents([agent0, agent1])

    # Evaluate the performance of agent0 against agent1
    rewards = tournament(eval_env, evaluate_num)
    print('Average reward for agent0 against agent1: ', rewards[0])
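# A minimal sketch of the createParser() helper assumed by the evaluation
# script above. The argument names mirror the namespace attributes it reads;
# the defaults and help text are illustrative assumptions, not the original
# definition.
def createParser():
    import argparse
    parser = argparse.ArgumentParser(description='Evaluate two agents against each other.')
    parser.add_argument('--random_seed', type=int, default=0)
    parser.add_argument('--env_name', type=str, default='no-limit-holdem')
    parser.add_argument('--evaluate_num', type=int, default=10000)
    parser.add_argument('--agent_type0', type=str, default='random')
    parser.add_argument('--agent_type1', type=str, default='random')
    parser.add_argument('--load_model0', type=str, default=None)
    parser.add_argument('--load_model1', type=str, default=None)
    return parser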
def main():
    warnings.simplefilter(action='ignore', category=FutureWarning)
    set_global_seed(0)

    env = rlcard.make('limit-holdem', config={'record_action': True})

    human_agent = HumanAgent(env.action_num)
    dqn_agent = DQNAgent(env.action_num,
                         env.state_shape[0],
                         hidden_neurons=[1024, 512, 1024, 512])
    dqn_agent.load(sys.argv[1])

    env.set_agents([human_agent, dqn_agent])
    play(env)
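# play(env) is not defined in this snippet. A minimal sketch under the
# assumption that it simply loops hands with env.run(), modeled on the
# interactive no-limit hold'em loop later in this file; the function body
# here is illustrative, not the original implementation.
def play(env):
    while True:
        print(">> Start a new game")
        trajectories, payoffs = env.run(is_training=False)
        # payoffs[0] is the human player's result for the hand
        if payoffs[0] > 0:
            print('You win {} chips!'.format(payoffs[0]))
        elif payoffs[0] == 0:
            print('It is a tie.')
        else:
            print('You lose {} chips!'.format(-payoffs[0]))
        input("Press any key to continue...")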
def train_leduc():
    # Make environment and enable step back for CFR traversal
    env = rlcard.make('leduc-holdem', config={'seed': 0, 'allow_step_back': True})
    eval_env = rlcard.make('leduc-holdem', config={'seed': 0})

    # Set the iteration numbers and how frequently we evaluate the performance and save the model
    evaluate_every = 100
    save_plot_every = 1000
    evaluate_num = 10000
    episode_num = 10000

    # The paths for saving the logs and learning curves
    log_dir = './experiments/leduc_holdem_oscfr_result/'

    # Set a global seed
    set_global_seed(0)

    # Initialize the Outcome Sampling CFR agent
    model_path = 'models/leduc_holdem_oscfr'
    agent = OutcomeSampling_CFR(env, model_path=model_path)
    agent.load()  # If we have a saved model, load it first

    # Evaluate OSCFR against pre-trained NFSP
    eval_env.set_agents([agent, models.load('leduc-holdem-nfsp').agents[0]])

    # Init a Logger to plot the learning curve
    logger = Logger(log_dir)

    for episode in range(episode_num):
        agent.train()
        print('\rIteration {}'.format(episode), end='')

        # Evaluate the performance against the pre-trained NFSP agent
        if episode % evaluate_every == 0:
            agent.save()  # Save model
            logger.log_performance(env.timestep, tournament(eval_env, evaluate_num)[0])

    # Close files in the logger
    logger.close_files()

    # Plot the learning curve
    logger.plot('OSCFR')
def run(path: str, num: int, position: int, opponent: str):
    # Set a global seed
    set_global_seed(123)

    env = make('thousand-schnapsen', config={'seed': 0, 'force_zero_sum': True})

    # Default to random agents at every seat
    agents = []
    for _ in range(env.player_num):
        agent = RandomAgent(action_num=env.action_num)
        agents.append(agent)

    graph = tf.Graph()
    sess = tf.Session(graph=graph)
    with graph.as_default():
        agent = DeepCFR(sess,
                        scope=f'deep_cfr{position}',
                        env=env,
                        policy_network_layers=(8 * 24, 4 * 24, 2 * 24, 24),
                        advantage_network_layers=(8 * 24, 4 * 24, 2 * 24, 24))

    # Either let Deep CFR play every seat, or place it only at the requested seat
    if opponent == 'deep_cfr':
        agents[0] = agent
        agents[1] = agent
        agents[2] = agent
    else:
        agents[position] = agent

    # Restore the trained Deep CFR model from the given checkpoint directory
    with sess.as_default():
        with graph.as_default():
            saver = tf.train.Saver()
            saver.restore(sess, tf.train.latest_checkpoint(path))

    env.set_agents(agents)
    _, wins = tournament(env, num)
    print(wins)
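# Example invocation of run(). The checkpoint path and seat index are
# illustrative assumptions, not values taken from the original script.
if __name__ == '__main__':
    run(path='models/thousand_schnapsen_deep_cfr', num=1000, position=0, opponent='random')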
def main():
    # Make environment
    env = rlcard.make('leduc-holdem', config={'seed': 0, 'env_num': 4})
    iterations = 1

    # Set a global seed
    set_global_seed(0)

    # Set up agents
    agent = RandomAgent(action_num=env.action_num)
    env.set_agents([agent, agent])

    for it in range(iterations):
        # Generate data from the environment
        trajectories, payoffs = env.run(is_training=False)

        # Print out the trajectories
        print('\nIteration {}'.format(it))
        for ts in trajectories[0]:
            print('State: {}, Action: {}, Reward: {}, Next State: {}, Done: {}'.format(
                ts[0], ts[1], ts[2], ts[3], ts[4]))
# Set the iteration numbers and how frequently we evaluate the performance
evaluate_every = 100
evaluate_num = 1000
episode_num = 100000

# The initial memory size
memory_init_size = 1000

# Train the agent every X steps
train_every = 1

# The paths for saving the logs and learning curves
log_dir = './experiments/doudizhu_dqn_result/'

# Set a global seed
set_global_seed(0)

# Mitigation for GPU memory issues
# config = tf.ConfigProto()
# config.gpu_options.allow_growth = True
# config.gpu_options.per_process_gpu_memory_fraction = 0.9
# with tf.Session(config=config) as sess:
with tf.Session() as sess:
    # Initialize a global step
    global_step = tf.Variable(0, name='global_step', trainable=False)

    # Set up the agents
    agent = DQNAgent(sess,
                     scope='dqn',
eval_env2 = rlcard.make('no-limit-holdem', config={'seed': 43, 'game_player_num': 2})
# eval_env3 = rlcard.make('no-limit-holdem', config={'seed': 43, 'game_player_num': 2})

# Set the iteration numbers and how frequently we evaluate the performance
# The initial memory size
memory_init_size = 300

# The paths for saving the logs and learning curves
log_dir = './experiments/nolimit_holdem_nfsp_result/ivvan'

# Set a global seed
set_global_seed(577)

evaluate_every = 512
evaluate_num = 64
episode_num = 20480

# The initial memory size (overrides the value set above)
memory_init_size = 256

# Train the agent every X steps
train_every = 256

agents = []
agents.append(
    NFSPAgent(scope='nfsp' + str(0),
              action_num=env.action_num,
def main():
    parser = createParser()
    namespace = parser.parse_args(sys.argv[1:])

    # Random seed
    random_seed = namespace.random_seed

    # Names
    env_name = namespace.env_name
    env_num = 1
    test_name = namespace.test_name
    dir_name = str(env_name) + '_a2c_' + str(test_name) + str(random_seed)

    # Set the iteration numbers and how frequently we evaluate/save the plot
    evaluate_every = namespace.evaluate_every
    evaluate_num = namespace.evaluate_num
    episode_num = namespace.episode_num

    # Train the agent every X steps
    train_every = namespace.train_every
    save_every = namespace.save_every

    # Make environments
    env_rand = rlcard.make(env_name, config={'seed': random_seed})
    eval_env = rlcard.make(env_name, config={'seed': random_seed})

    # The paths for saving the logs and learning curves
    log_dir = './experiments/rl/' + dir_name + '_result'

    # The path for saving the model
    save_dir = 'models/rl/' + dir_name + '_result'

    # Set a global seed
    set_global_seed(random_seed)

    # Initialize a global step
    global_step = tf.Variable(0, name='global_step', trainable=False)

    # Set up the agents
    agent_rand = RandomAgent(action_num=eval_env.action_num)
    agent_test = A2CLSTMQPGAgent(
        action_num=eval_env.action_num,
        state_shape=eval_env.state_shape,
        discount_factor=0.95,
        critic_lstm_layers=[1, 512],
        critic_mlp_layers=[3, 512],
        critic_activation_func='tanh',
        critic_kernel_initializer='glorot_uniform',
        critic_learning_rate=0.001,
        critic_bacth_size=128,
        actor_lstm_layers=[1, 512],
        actor_mlp_layers=[3, 512],
        actor_activation_func='tanh',
        actor_kernel_initializer='glorot_uniform',
        actor_learning_rate=0.0001,
        actor_bacth_size=512,
        entropy_coef=0.5,
        entropy_decoy=math.pow(0.1 / 0.5, 1.0 / (episode_num // train_every)),
        max_grad_norm=1,
    )
    if namespace.load_model is not None:
        agent_test.load_model(namespace.load_model)

    env_rand.set_agents([agent_test, agent_rand])
    eval_env.set_agents([agent_test, agent_rand])

    # Init a Logger to plot the learning curve
    logger = Logger(log_dir + '/' + test_name)

    envs = [env_rand, ]
    env_num = len(envs)

    for episode in range(episode_num // env_num):

        # Generate data from the environments
        for env in envs:
            trajectories, _ = env.run(is_training=True)

            # Feed transitions into agent memory, and train the agent
            for ts in trajectories[0]:
                agent_test.feed(ts)

        if episode % (train_every // env_num) == 0:
            agent_test.train()

        if episode % (save_every // env_num) == 0:
            # Save model
            if not os.path.exists(save_dir + '/' + test_name + str(episode * env_num)):
                os.makedirs(save_dir + '/' + test_name + str(episode * env_num))
            agent_test.save_model(save_dir + '/' + test_name + str(episode * env_num))

        # Evaluate the performance. Play with random agents.
        if episode % (evaluate_every // env_num) == 0:
            print('episode: ', episode * env_num)
            logger.log_performance(episode * env_num, tournament(eval_env, evaluate_num)[0])

    # Close files in the logger
    logger.close_files()

    # Plot the learning curve
    logger.plot(dir_name)

    # Save the final model
    if not os.path.exists(save_dir + '/' + test_name + str(episode_num)):
        os.makedirs(save_dir + '/' + test_name + str(episode_num))
    agent_test.save_model(save_dir + '/' + test_name + str(episode_num))
def main():
    # Make environment
    env = rlcard.make('blackjack', config={'env_num': 4, 'seed': 0})
    eval_env = rlcard.make('blackjack', config={'env_num': 4, 'seed': 0})

    # Set the iteration numbers and how frequently we evaluate performance
    evaluate_every = 100
    evaluate_num = 10000
    iteration_num = 100000

    # The initial memory size
    memory_init_size = 100

    # Train the agent every X steps
    train_every = 1

    # The paths for saving the logs and learning curves
    log_dir = './experiments/blackjack_dqn_result/'

    # Set a global seed
    set_global_seed(0)

    with tf.compat.v1.Session() as sess:
        # Initialize a global step
        global_step = tf.Variable(0, name='global_step', trainable=False)

        # Set up the agents
        agent = DQNAgent(sess,
                         scope='dqn',
                         action_num=env.action_num,
                         replay_memory_init_size=memory_init_size,
                         train_every=train_every,
                         state_shape=env.state_shape,
                         mlp_layers=[10, 10])
        env.set_agents([agent])
        eval_env.set_agents([agent])

        # Initialize global variables
        sess.run(tf.compat.v1.global_variables_initializer())

        # Initialize a Logger to plot the learning curve
        logger = Logger(log_dir)

        for iteration in range(iteration_num):
            # Generate data from the environment
            trajectories, _ = env.run(is_training=True)

            # Feed transitions into agent memory, and train the agent
            for ts in trajectories[0]:
                agent.feed(ts)

            # Evaluate the performance
            if iteration % evaluate_every == 0:
                logger.log_performance(env.timestep, tournament(eval_env, evaluate_num)[0])

        # Close files in the logger
        logger.close_files()

        # Plot the learning curve
        logger.plot('DQN')

        # Save model
        save_dir = 'models/blackjack_dqn'
        if not os.path.exists(save_dir):
            os.makedirs(save_dir)
        saver = tf.compat.v1.train.Saver()
        saver.save(sess, os.path.join(save_dir, 'model'))
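# To reload the checkpoint saved above in a later session, the TF1-style
# restore mirrors the save call. A minimal sketch, assuming the DQN graph has
# already been rebuilt in the session; the helper name is hypothetical.
def load_blackjack_dqn(sess, save_dir='models/blackjack_dqn'):
    saver = tf.compat.v1.train.Saver()
    saver.restore(sess, os.path.join(save_dir, 'model'))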
def play():
    import tensorflow as tf

    # We use a pre-trained model here. Change the path for your own model.
    if tf.test.gpu_device_name():
        print('GPU found')
    else:
        print("No GPU found")
    # os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

    # Make environment
    env = rlcard.make('no-limit-holdem', config={'record_action': True, 'game_player_num': 2})

    # Set a global seed
    set_global_seed(0)

    evaluate_every = 2048
    evaluate_num = 32
    episode_num = 262144

    # The initial memory size
    memory_init_size = 256

    # Train the agent every X steps
    train_every = 256

    graph = tf.Graph()
    sess = tf.Session(graph=graph)

    with graph.as_default():
        agents = []
        agents.append(
            NFSPAgent(sess,
                      scope='nfsp' + str(0),
                      action_num=env.action_num,
                      state_shape=env.state_shape,
                      hidden_layers_sizes=[512, 512],
                      anticipatory_param=0.1,
                      rl_learning_rate=0.01,
                      sl_learning_rate=0.005,
                      q_epsilon_start=.6,
                      min_buffer_size_to_learn=memory_init_size,
                      q_replay_memory_size=80000,
                      q_replay_memory_init_size=memory_init_size,
                      train_every=train_every + 44,
                      q_train_every=train_every,
                      q_mlp_layers=[512, 512],
                      evaluate_with='best_response'))
        agents.append(
            NFSPAgent(sess,
                      scope='nfsp' + str(1),
                      action_num=env.action_num,
                      state_shape=env.state_shape,
                      hidden_layers_sizes=[512, 512],
                      anticipatory_param=0.1,
                      rl_learning_rate=0.01,
                      sl_learning_rate=0.005,
                      q_epsilon_start=.6,
                      q_replay_memory_size=80000,
                      min_buffer_size_to_learn=memory_init_size,
                      q_replay_memory_init_size=memory_init_size,
                      train_every=train_every + 44,
                      q_train_every=train_every,
                      q_mlp_layers=[512, 512],
                      evaluate_with='best_response'))

    check_point_path = os.path.join('models/nolimit_holdem_nfsp/no_all_in/cp/9/')
    print('-------------------------------------------------------------------------------------')
    print(check_point_path)

    # Restore the pre-trained NFSP weights
    with sess.as_default():
        with graph.as_default():
            saver = tf.train.Saver()
            saver.restore(sess, tf.train.latest_checkpoint(check_point_path))

    human = nolimit_holdem_human_agent.HumanAgent(env.action_num)
    env.set_agents([human, agents[1]])

    while True:
        print(">> Start a new game")

        trajectories, payoffs = env.run(is_training=False)

        if len(trajectories[0]) == 0:
            # The bot folded immediately
            continue

        # If the human does not take the final action, we need to
        # print the other players' actions
        final_state = trajectories[0][-1][-2]
        action_record = final_state['action_record']
        state = final_state['raw_obs']
        _action_list = []
        for i in range(1, len(action_record) + 1):
            if action_record[-i][0] == state['current_player']:
                break
            _action_list.insert(0, action_record[-i])
        for pair in _action_list:
            print('>> Player', pair[0], 'chooses', pair[1])

        # Take a look at the agent's hand
        print('=============== NFSP Agent ===============')
        print_card(env.get_perfect_information()['hand_cards'][1])

        print('=============== Result ===============')
        if payoffs[0] > 0:
            print('You win {} chips!'.format(payoffs[0]))
        elif payoffs[0] == 0:
            print('It is a tie.')
        else:
            print('You lose {} chips!'.format(-payoffs[0]))
        print('')

        input("Press any key to continue...")
def nfsp():
    import tensorflow as tf

    if tf.test.gpu_device_name():
        print('GPU found')
    else:
        print("No GPU found")
    # os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

    # Make environment
    env = rlcard.make('no-limit-holdem', config={'game_player_num': 2, 'seed': 477})
    eval_env = rlcard.make('no-limit-holdem', config={'seed': 12, 'game_player_num': 2})
    eval_env2 = rlcard.make('no-limit-holdem', config={'seed': 43, 'game_player_num': 2})
    # eval_env3 = rlcard.make('no-limit-holdem', config={'seed': 43, 'game_player_num': 2})

    # Set the iteration numbers and how frequently we evaluate the performance
    # The initial memory size
    memory_init_size = 1000

    # The paths for saving the logs and learning curves
    log_dir = './experiments/nolimit_holdem_nfsp_result/no_all_in'

    # Set a global seed
    set_global_seed(477)

    graph = tf.Graph()
    tf.ConfigProto()
    sess = tf.Session(graph=graph)

    evaluate_every = 2048
    evaluate_num = 32
    episode_num = 24576

    # The initial memory size (overrides the value set above)
    memory_init_size = 256

    # Train the agent every X steps
    train_every = 256

    agents = []
    with graph.as_default():
        """
        NFSPAgent signature, kept here for reference:
        def __init__(self,
                     sess,
                     scope,
                     action_num=4,
                     state_shape=None,
                     hidden_layers_sizes=None,
                     reservoir_buffer_capacity=int(1e6),
                     anticipatory_param=0.1,
                     batch_size=256,
                     train_every=1,
                     rl_learning_rate=0.1,
                     sl_learning_rate=0.005,
                     min_buffer_size_to_learn=1000,
                     q_replay_memory_size=30000,
                     q_replay_memory_init_size=1000,
                     q_update_target_estimator_every=1000,
                     q_discount_factor=0.99,
                     q_epsilon_start=0.06,
                     q_epsilon_end=0,
                     q_epsilon_decay_steps=int(1e6),
                     q_batch_size=256,
                     q_train_every=1,
                     q_mlp_layers=None,
                     evaluate_with='average_policy'):
        """
        # Model1v1V3cp10good
        agents.append(
            NFSPAgent(sess,
                      scope='nfsp' + str(0),
                      action_num=env.action_num,
                      state_shape=env.state_shape,
                      hidden_layers_sizes=[512, 512],
                      anticipatory_param=0.1,
                      rl_learning_rate=0.01,
                      sl_learning_rate=0.005,
                      q_epsilon_start=.7,
                      min_buffer_size_to_learn=memory_init_size,
                      q_replay_memory_size=80000,
                      q_replay_memory_init_size=memory_init_size,
                      train_every=train_every + 44,
                      q_train_every=train_every,
                      q_mlp_layers=[512, 512]))
        agents.append(
            NFSPAgent(sess,
                      scope='nfsp' + str(1),
                      action_num=env.action_num,
                      state_shape=env.state_shape,
                      hidden_layers_sizes=[512, 512],
                      anticipatory_param=0.1,
                      rl_learning_rate=0.01,
                      sl_learning_rate=0.005,
                      q_epsilon_start=.7,
                      q_replay_memory_size=80000,
                      min_buffer_size_to_learn=memory_init_size,
                      q_replay_memory_init_size=memory_init_size,
                      train_every=train_every + 44,
                      q_train_every=train_every,
                      q_mlp_layers=[512, 512]))

    # check_point_path = os.path.join('models\\nolimit_holdem_nfsp\\iivan')
    print('-------------------------------------------------------------------------------------')
    # print(check_point_path)

    # Running multiple TensorFlow sessions concurrently:
    # https://stackoverflow.com/questions/33758669/running-multiple-tensorflow-sessions-concurrently
    with sess.as_default():
        with graph.as_default():
            # saver = tf.train.Saver()
            # saver.restore(sess, tf.train.latest_checkpoint(check_point_path))
            global_step = tf.Variable(0, name='global_step', trainable=False)

            random_agent = RandomAgent(action_num=eval_env2.action_num)

            env.set_agents(agents)
            eval_env.set_agents([agents[0], random_agent])
            eval_env2.set_agents([random_agent, agents[1]])
            # eval_env3.set_agents([agents[1], random_agent])

            # Initialize global variables
            sess.run(tf.global_variables_initializer())

            # Init a Logger to plot the learning curve
            logger = Logger(log_dir)

            for episode in range(episode_num):
                print(episode, end='\r')

                # First sample a policy for the episode
                for agent in agents:
                    agent.sample_episode_policy()

                # Generate data from the environment
                trajectories, _ = env.run(is_training=True)

                # Feed transitions into agent memory, and train the agents
                for i in range(env.player_num):
                    for ts in trajectories[i]:
                        agents[i].feed(ts)

                # Evaluate the performance in self-play and against random agents
                if episode % evaluate_every == 0:
                    logger.log('\n\n\n---------------------------------------------------------------\nTournament ' + str(episode / evaluate_every))
                    # tournament(eval_env2, 6)
                    # exploitability.exploitability(eval_env, agents[0], 500)
                    res = tournament(env, evaluate_num)
                    logger.log_performance(env.timestep, res[0])
                    res2 = tournament(eval_env, evaluate_num // 3)
                    logger.log_performance(env.timestep, res2[0])
                    res3 = tournament(eval_env2, evaluate_num // 3)
                    logger.log_performance(env.timestep, res3[0])
                    logger.log('' + str(episode_num) + " - " + str(episode) + '\n')
                    logger.log('\n\n----------------------------------------------------------------')

                # Save a checkpoint after each evaluation round
                if episode % evaluate_every == 0 and episode != 0:
                    save_dir = 'models/nolimit_holdem_nfsp/no_all_in/cp/' + str(episode // evaluate_every)
                    if not os.path.exists(save_dir):
                        os.makedirs(save_dir)
                    saver = tf.train.Saver()
                    saver.save(sess, os.path.join(save_dir, 'model'))

            # Final evaluation
            logger.log('\n\n\n---------------------------------------------------------------\nTournament ' + str(episode / evaluate_every))
            res = tournament(eval_env, evaluate_num)
            logger.log_performance(env.timestep, res[0])
            logger.log('' + str(episode_num) + " - " + str(episode))

            # Close files in the logger
            logger.close_files()

            # Plot the learning curve
            logger.plot('NFSP')

            # Save model
            save_dir = 'models/nolimit_holdem_nfsp/no_all_in'
            if not os.path.exists(save_dir):
                os.makedirs(save_dir)
            saver = tf.train.Saver()
            saver.save(sess, os.path.join(save_dir, 'model'))
def nfsp():
    import tensorflow as tf

    if tf.test.gpu_device_name():
        print('GPU found')
    else:
        print("No GPU found")
    # os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

    # Make environment
    env = rlcard.make('no-limit-holdem', config={'record_action': False, 'game_player_num': 2})
    eval_env = rlcard.make('no-limit-holdem', config={'seed': 12, 'game_player_num': 2})
    eval_env2 = rlcard.make('no-limit-holdem', config={'seed': 43, 'game_player_num': 2})

    # Set the iteration numbers and how frequently we evaluate the performance
    # The initial memory size
    memory_init_size = 1000

    # The paths for saving the logs and learning curves
    log_dir = './experiments/nolimit_holdem_nfsp_result/1v1MCNFSPv3'

    # Set a global seed
    set_global_seed(0)

    graph = tf.Graph()
    sess = tf.Session(graph=graph)

    evaluate_every = 1000
    evaluate_num = 250
    episode_num = 5000

    # The initial memory size (overrides the value set above)
    memory_init_size = 1500

    # Train the agent every X steps
    train_every = 256

    agents = []
    with graph.as_default():
        # Model1v1V3cp10good
        agents.append(
            NFSPAgent(sess,
                      scope='nfsp' + str(0),
                      action_num=env.action_num,
                      state_shape=env.state_shape,
                      hidden_layers_sizes=[512, 512],
                      anticipatory_param=0.1,
                      rl_learning_rate=.1,
                      min_buffer_size_to_learn=memory_init_size,
                      q_replay_memory_init_size=memory_init_size,
                      train_every=train_every,
                      q_train_every=train_every,
                      q_mlp_layers=[512, 512]))
        agents.append(
            NFSPAgent(sess,
                      scope='nfsp' + str(1),
                      action_num=env.action_num,
                      state_shape=env.state_shape,
                      hidden_layers_sizes=[512, 512],
                      anticipatory_param=0.075,
                      rl_learning_rate=0.075,
                      min_buffer_size_to_learn=memory_init_size,
                      q_replay_memory_init_size=memory_init_size,
                      train_every=train_every // 2,
                      q_train_every=train_every // 2,
                      q_mlp_layers=[512, 512]))

    # check_point_path = os.path.join('models\\nolimit_holdem_nfsp\\1v1MCNFSPv3\\cp\\10')
    print('-------------------------------------------------------------------------------------')
    # print(check_point_path)

    with sess.as_default():
        with graph.as_default():
            saver = tf.train.Saver()
            # saver.restore(sess, tf.train.latest_checkpoint(check_point_path))
            global_step = tf.Variable(0, name='global_step', trainable=False)

            random_agent = RandomAgent(action_num=eval_env2.action_num)
            # easy_agent = nfsp_agents[0]
            print(agents)
            # print(nfsp_agents)

            env.set_agents(agents)
            eval_env.set_agents(agents)
            eval_env2.set_agents([agents[0], random_agent])

            # Initialize global variables
            sess.run(tf.global_variables_initializer())

            # Init a Logger to plot the learning curve
            logger = Logger(log_dir)

            for episode in range(episode_num):

                # First sample a policy for the episode
                for agent in agents:
                    agent.sample_episode_policy()

                table = []

                # Generate data from the environment
                trajectories, _ = env.run(is_training=True)

                # Feed transitions into agent memory, and train the agents
                for i in range(env.player_num):
                    for ts in trajectories[i]:
                        agents[i].feed(ts, table)

                # Evaluate the performance in self-play and against a random agent
                if episode % evaluate_every == 0:
                    logger.log('\n\n\n---------------------------------------------------------------\nTournament ' + str(episode / evaluate_every))
                    res = tournament(eval_env, evaluate_num)
                    res2 = tournament(eval_env2, evaluate_num // 4)
                    logger.log_performance(env.timestep, res[0])
                    logger.log_performance(env.timestep, res2[0])
                    logger.log('' + str(episode_num) + " - " + str(episode) + '\n')
                    logger.log('\n\n----------------------------------------------------------------')

                # Save a checkpoint after each evaluation round
                if episode % evaluate_every == 0 and episode != 0:
                    save_dir = 'models/nolimit_holdem_nfsp/1v1MCNFSPv3/cp/10/good' + str(episode // evaluate_every)
                    if not os.path.exists(save_dir):
                        os.makedirs(save_dir)
                    saver = tf.train.Saver()
                    saver.save(sess, os.path.join(save_dir, 'model'))

            # Final evaluation
            logger.log('\n\n\n---------------------------------------------------------------\nTournament ' + str(episode / evaluate_every))
            res = tournament(eval_env, evaluate_num)
            logger.log_performance(env.timestep, res[0])
            logger.log('' + str(episode_num) + " - " + str(episode))

            # Close files in the logger
            logger.close_files()

            # Plot the learning curve
            logger.plot('NFSP')

            # Save model
            save_dir = 'models/nolimit_holdem_nfsp/1v1MCNFSPv3/cp/10/good'
            if not os.path.exists(save_dir):
                os.makedirs(save_dir)
            saver = tf.train.Saver()
            saver.save(sess, os.path.join(save_dir, 'model'))
def main():
    # Make environment
    env = rlcard.make('no-limit-holdem', config={'seed': 0, 'env_num': 16, 'game_player_num': 4})
    eval_env = rlcard.make('no-limit-holdem', config={'seed': 0, 'env_num': 16})

    # Set the iteration numbers and how frequently we evaluate the performance
    evaluate_every = 100
    evaluate_num = 1000
    episode_num = 200000

    # The initial memory size
    memory_init_size = 1000

    # Train the agent every X steps
    train_every = 1

    # Only save the model when the evaluation reward exceeds this threshold
    _reward_max = -0.8

    # The paths for saving the logs and learning curves
    log_dir = './experiments/nolimit_holdem_dqn_result/'

    # Set a global seed
    set_global_seed(0)

    with tf.Session() as sess:
        # Initialize a global step
        global_step = tf.Variable(0, name='global_step', trainable=False)

        # Set up the agents
        agent = DQNAgent(sess,
                         scope='dqn',
                         action_num=env.action_num,
                         replay_memory_init_size=memory_init_size,
                         train_every=train_every,
                         state_shape=env.state_shape,
                         mlp_layers=[512, 512])
        agent2 = NFSPAgent(sess,
                           scope='nfsp',
                           action_num=env.action_num,
                           state_shape=env.state_shape,
                           hidden_layers_sizes=[512, 512],
                           anticipatory_param=0.1,
                           min_buffer_size_to_learn=memory_init_size,
                           q_replay_memory_init_size=memory_init_size,
                           train_every=64,
                           q_train_every=64,
                           q_mlp_layers=[512, 512])

        # Initialize global variables
        sess.run(tf.global_variables_initializer())

        save_dir = 'models/nolimit_holdem_dqn'
        saver = tf.train.Saver()
        # saver.restore(sess, os.path.join(save_dir, 'model'))

        random_agent = RandomAgent(action_num=eval_env.action_num)
        env.set_agents([agent, agent, agent2, random_agent])
        eval_env.set_agents([agent, agent2])

        # Init a Logger to plot the learning curve
        logger = Logger(log_dir)

        for episode in range(episode_num):
            agent2.sample_episode_policy()

            # Generate data from the environment
            trajectories, _ = env.run(is_training=True)

            # Feed transitions into agent memory, and train the agents
            for ts in trajectories[0]:
                agent.feed(ts)
            for ts in trajectories[2]:
                agent2.feed(ts)

            # Evaluate the DQN agent against the NFSP agent
            if episode % evaluate_every == 0:
                _reward = tournament(eval_env, evaluate_num)[0]
                logger.log_performance(episode, _reward)
                if _reward > _reward_max:
                    # Save the best model so far
                    if not os.path.exists(save_dir):
                        os.makedirs(save_dir)
                    saver.save(sess, os.path.join(save_dir, 'model'))
                    _reward_max = _reward

        # Close files in the logger
        logger.close_files()

        # Save the final model
        if not os.path.exists(save_dir):
            os.makedirs(save_dir)
        saver.save(sess, os.path.join(save_dir, 'model_final'))
def main():
    # Make environment
    env = rlcard.make('leduc-holdem', config={'seed': 0, 'env_num': 4})
    eval_env = rlcard.make('leduc-holdem', config={'seed': 0, 'env_num': 4})

    # Set the iteration numbers and how frequently we evaluate the performance
    evaluate_every = 100
    evaluate_num = 10000
    episode_num = 800000

    # The initial memory size
    memory_init_size = 1000

    # Train the agent every X steps
    train_every = 1

    # Only save the model when the evaluation reward exceeds this threshold
    _reward_max = -0.5

    # The paths for saving the logs and learning curves
    log_dir = './experiments/leduc_holdem_dqn_result/'

    # Set a global seed
    set_global_seed(0)

    with tf.Session() as sess:
        # Initialize a global step
        global_step = tf.Variable(0, name='global_step', trainable=False)

        # Set up the agents
        agent = DQNAgent(sess,
                         scope='dqn',
                         action_num=env.action_num,
                         replay_memory_init_size=memory_init_size,
                         train_every=train_every,
                         state_shape=env.state_shape,
                         mlp_layers=[128, 128])
        # random_agent = RandomAgent(action_num=eval_env.action_num)
        cfr_agent = models.load('leduc-holdem-cfr').agents[0]
        env.set_agents([agent, agent])
        eval_env.set_agents([agent, cfr_agent])

        # Initialize global variables
        sess.run(tf.global_variables_initializer())

        # Init a Logger to plot the learning curve
        logger = Logger(log_dir)

        saver = tf.train.Saver()
        save_dir = 'models/leduc_holdem_dqn'
        saver.restore(sess, os.path.join(save_dir, 'model'))

        for episode in range(episode_num):
            # Generate data from the environment
            trajectories, _ = env.run(is_training=True)

            # Feed transitions into agent memory, and train the agent
            for ts in trajectories[0]:
                agent.feed(ts)

            # Evaluate the DQN agent against the pre-trained CFR agent
            if episode % evaluate_every == 0:
                _reward = tournament(eval_env, evaluate_num)[0]
                logger.log_performance(episode, _reward)
                if _reward > _reward_max:
                    # Save the best model so far
                    if not os.path.exists(save_dir):
                        os.makedirs(save_dir)
                    saver.save(sess, os.path.join(save_dir, 'model'))
                    _reward_max = _reward

        # Close files in the logger
        logger.close_files()

        # Plot the learning curve
        logger.plot('DQN')
def train_uno():
    # Make environment
    env = rlcard.make("uno", config={"seed": 0})
    eval_env = rlcard.make("uno", config={"seed": 0})

    # Set the iteration numbers and how frequently we evaluate the performance
    evaluate_every = 100
    evaluate_num = 1000
    episode_num = 3000

    # The initial memory size
    memory_init_size = 1000

    # Train the agent every X steps
    train_every = 100

    # The paths for saving the logs and learning curves
    log_dir = "./experiments/uno_results_dqn/"

    # Set a global seed
    set_global_seed(0)

    # DQN agent configuration
    params = {
        "scope": "DQN-Agent",
        "num_actions": env.action_num,
        "replay_memory_size": memory_init_size,
        "num_states": env.state_shape,
        "discount_factor": 0.99,
        "epsilon_start": 1.0,
        "epsilon_end": 0.1,
        "epsilon_decay_steps": 20000,
        "batch_size": 32,
        "train_every": 1,
        "mlp_layers": [512, 512],
        "lr": 0.0005,
    }
    agent_conf = DQN_conf(**params)
    agent = DQN_agent(agent_conf)

    random_agent = RandomAgent(action_num=eval_env.action_num)
    env.set_agents([agent, random_agent])
    eval_env.set_agents([agent, random_agent])

    logger = Logger(log_dir)

    for episode in range(episode_num):
        # Generate data from the environment
        trajectories, _ = env.run(is_training=True)

        # Feed transitions into agent memory, and train the agent
        for ts in trajectories[0]:
            agent.feed(ts)

        # Evaluate the performance against a random agent
        if episode % evaluate_every == 0:
            logger.log_performance(env.timestep, tournament(eval_env, evaluate_num)[0])

    # Close files in the logger
    logger.close_files()

    # Plot the learning curve
    logger.plot("DQN UNO")

    # Save model
    save_dir = "models/uno_dqn_pytorch"
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
    state_dict = agent.get_state_dict()
    print(state_dict.keys())
    torch.save(state_dict, os.path.join(save_dir, "model.pth"))
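# To reuse the saved UNO model, load the state dict back into a freshly built
# agent. A minimal sketch: torch.load is standard, but the load_state_dict()
# call on this custom DQN_agent class is an assumption about its interface,
# and the helper name below is hypothetical.
def load_uno_dqn(agent, save_dir="models/uno_dqn_pytorch"):
    state_dict = torch.load(os.path.join(save_dir, "model.pth"))
    agent.load_state_dict(state_dict)  # assumed loading method on DQN_agent
    return agent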