def train(args):
    # Make environments. CFR only supports Leduc Hold'em.
    env = rlcard.make('leduc-holdem', config={'seed': 0, 'allow_step_back': True})
    eval_env = rlcard.make('leduc-holdem', config={'seed': 0})

    # Seed numpy, torch, random
    set_seed(args.seed)

    # Initialize the CFR agent
    agent = CFRAgent(env, os.path.join(args.log_dir, 'cfr_model'))
    agent.load()  # If we have a saved model, load it first

    # Evaluate CFR against a random agent
    eval_env.set_agents([agent, RandomAgent(num_actions=env.num_actions)])

    # Start training
    with Logger(args.log_dir) as logger:
        for episode in range(args.num_episodes):
            agent.train()
            print('\rIteration {}'.format(episode), end='')

            # Evaluate the performance. Play against the random agent.
            if episode % args.evaluate_every == 0:
                agent.save()  # Save model
                logger.log_performance(env.timestep, tournament(eval_env, args.num_eval_games)[0])

        # Get the paths
        csv_path, fig_path = logger.csv_path, logger.fig_path

    # Plot the learning curve
    plot_curve(csv_path, fig_path, 'cfr')
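# A minimal entry-point sketch for train() above. The flag names are an
# assumption inferred from the attributes the function reads off `args`
# (seed, num_episodes, num_eval_games, evaluate_every, log_dir); the default
# values are illustrative.
if __name__ == '__main__':
    import argparse

    parser = argparse.ArgumentParser("CFR example in RLCard")
    parser.add_argument('--seed', type=int, default=42)
    parser.add_argument('--num_episodes', type=int, default=5000)
    parser.add_argument('--num_eval_games', type=int, default=2000)
    parser.add_argument('--evaluate_every', type=int, default=100)
    parser.add_argument('--log_dir', type=str, default='experiments/leduc_holdem_cfr_result/')

    args = parser.parse_args()
    train(args)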
def __init__(self):
    ''' Instantiate the agent and its training configuration. '''
    super().__init__()
    self.wins = 0
    self.losses = 0

    # Set the iteration numbers and how frequently we evaluate/save the plot
    evaluate_every = 10000
    evaluate_num = 10000
    episode_num = 100000

    # The initial memory size
    memory_init_size = 1000

    # Train the agent every X steps
    train_every = 64

    # The paths for saving the logs and learning curves
    log_dir = './training/nfsp/'

    # Set a global seed
    set_global_seed(0)

    # Set up the RL NFSP agent - TODO: determine PPE parameters
    self.agent = NFSPAgent(scope='nfsp',
                           action_num=3,
                           state_shape=54,
                           hidden_layers_sizes=[512, 512],
                           min_buffer_size_to_learn=memory_init_size,
                           q_replay_memory_init_size=memory_init_size,
                           train_every=train_every,
                           q_train_every=train_every,
                           q_mlp_layers=[512, 512],
                           device=torch.device('cpu'))

    # Init a Logger to plot the learning curve
    self.logger = Logger(log_dir)
def train_leduc():
    # Make environment and enable stepping back
    env = rlcard.make('leduc-holdem', config={'seed': 0, 'allow_step_back': True})
    eval_env = rlcard.make('leduc-holdem', config={'seed': 0})

    # Set the iteration numbers and how frequently we evaluate performance and save the model
    evaluate_every = 100
    save_plot_every = 1000
    evaluate_num = 10000
    episode_num = 10000

    # The paths for saving the logs and learning curves
    log_dir = './experiments/leduc_holdem_oscfr_result/'

    # Set a global seed
    set_global_seed(0)

    # Initialize the Outcome-Sampling CFR agent
    model_path = 'models/leduc_holdem_oscfr'
    agent = OutcomeSampling_CFR(env, model_path=model_path)
    agent.load()  # If we have a saved model, load it first

    # Evaluate CFR against a pre-trained NFSP model
    eval_env.set_agents([agent, models.load('leduc-holdem-nfsp').agents[0]])

    # Init a Logger to plot the learning curve
    logger = Logger(log_dir)

    for episode in range(episode_num):
        agent.train()
        print('\rIteration {}'.format(episode), end='')

        # Evaluate the performance. Play against the NFSP agent.
        if episode % evaluate_every == 0:
            agent.save()  # Save model
            logger.log_performance(env.timestep, tournament(eval_env, evaluate_num)[0])

    # Close files in the logger
    logger.close_files()

    # Plot the learning curve
    logger.plot('OSCFR')
                      anticipatory_param=0.1,
                      min_buffer_size_to_learn=memory_init_size,
                      q_replay_memory_init_size=memory_init_size,
                      train_every=train_every,
                      q_train_every=train_every,
                      q_mlp_layers=[512, 512])

mcts_agent = MCTS_Agent(env.action_num, duration, explore, model_action, model_hand_rank)

env.set_agents([mcts_agent, nfsp_agent])
eval_env.set_agents([mcts_agent, nfsp_agent])

# Initialize global variables
sess.run(tf.global_variables_initializer())

# Init Loggers to plot the learning curves
logger_mcts = Logger(log_dir_mcts)
logger_nfsp = Logger(log_dir_nfsp)

for episode in range(episode_num):
    # Generate data from the environment
    trajectories, _ = env.run(is_training=True)

    # Feed transitions into agent memory, and train the agent.
    # NFSP plays position 1, so feed it its own trajectories
    # (the original fed trajectories[0], the MCTS agent's transitions).
    for ts in trajectories[1]:
        nfsp_agent.feed(ts)

    # Evaluate the performance
    if episode % evaluate_every == 0:
        logger_mcts.log_performance(env.timestep, tournament(eval_env, evaluate_num)[0])
                  train_every=train_every + 44,
                  q_train_every=train_every,
                  q_mlp_layers=[512, 512],
                  evaluate_with='average_policy'))

random_agent = RandomAgent(action_num=eval_env2.action_num)

env.set_agents(agents)
eval_env.set_agents([agents[0], random_agent])
eval_env2.set_agents([random_agent, agents[1]])
# eval_env3.set_agents([agents[1], random_agent])

# Initialize global variables
sess.run(tf.global_variables_initializer())

# Init a Logger to plot the learning curve
logger = Logger(log_dir)

for episode in range(episode_num):
    print(episode, end='\r')

    # First sample a policy for the episode
    for agent in agents:
        agent.sample_episode_policy()

    # Generate data from the environment
    trajectories, _ = env.run(is_training=True)

    # Feed transitions into agent memory, and train the agent
    for i in range(env.player_num):
        # update ray rl model
def main():
    parser = createParser()
    namespace = parser.parse_args(sys.argv[1:])

    # Random seed
    random_seed = namespace.random_seed

    # Names
    env_name = namespace.env_name
    env_num = 1
    test_name = namespace.test_name
    dir_name = str(env_name) + '_a2c_' + str(test_name) + str(random_seed)

    # Set the iteration numbers and how frequently we evaluate/save the plot
    evaluate_every = namespace.evaluate_every
    evaluate_num = namespace.evaluate_num
    episode_num = namespace.episode_num

    # Train the agent every X steps
    train_every = namespace.train_every
    save_every = namespace.save_every

    # Make environments
    env_rand = rlcard.make(env_name, config={'seed': random_seed})
    eval_env = rlcard.make(env_name, config={'seed': random_seed})

    # The paths for saving the logs and learning curves
    log_dir = './experiments/rl/' + dir_name + '_result'

    # The path for saving the model
    save_dir = 'models/rl/' + dir_name + '_result'

    # Set a global seed
    set_global_seed(random_seed)

    # Initialize a global step
    global_step = tf.Variable(0, name='global_step', trainable=False)

    # Set up the agents
    agent_rand = RandomAgent(action_num=eval_env.action_num)
    agent_test = A2CLSTMQPGAgent(
        action_num=eval_env.action_num,
        state_shape=eval_env.state_shape,
        discount_factor=0.95,
        critic_lstm_layers=[1, 512],
        critic_mlp_layers=[3, 512],
        critic_activation_func='tanh',
        critic_kernel_initializer='glorot_uniform',
        critic_learning_rate=0.001,
        critic_bacth_size=128,  # (sic: parameter name as defined by the agent)
        actor_lstm_layers=[1, 512],
        actor_mlp_layers=[3, 512],
        actor_activation_func='tanh',
        actor_kernel_initializer='glorot_uniform',
        actor_learning_rate=0.0001,
        actor_bacth_size=512,  # (sic)
        entropy_coef=0.5,
        entropy_decoy=math.pow(0.1 / 0.5, 1.0 / (episode_num // train_every)),  # (sic)
        max_grad_norm=1,
    )

    if namespace.load_model is not None:
        agent_test.load_model(namespace.load_model)

    env_rand.set_agents([agent_test, agent_rand])
    eval_env.set_agents([agent_test, agent_rand])

    # Init a Logger to plot the learning curve
    logger = Logger(log_dir + '/' + test_name)

    envs = [env_rand, ]
    env_num = len(envs)

    for episode in range(episode_num // env_num):
        # Generate data from the environment
        for env in envs:
            trajectories, _ = env.run(is_training=True)

            # Feed transitions into agent memory, and train the agent
            for ts in trajectories[0]:
                agent_test.feed(ts)

        if episode % (train_every // env_num) == 0:
            agent_test.train()

        # Save the model periodically
        if episode % (save_every // env_num) == 0:
            if not os.path.exists(save_dir + '/' + test_name + str(episode * env_num)):
                os.makedirs(save_dir + '/' + test_name + str(episode * env_num))
            agent_test.save_model(save_dir + '/' + test_name + str(episode * env_num))

        # Evaluate the performance. Play with random agents.
        if episode % (evaluate_every // env_num) == 0:
            print('episode: ', episode * env_num)
            logger.log_performance(episode * env_num, tournament(eval_env, evaluate_num)[0])

    # Close files in the logger
    logger.close_files()

    # Plot the learning curve
    logger.plot(dir_name)

    # Save the final model
    if not os.path.exists(save_dir + '/' + test_name + str(episode_num)):
        os.makedirs(save_dir + '/' + test_name + str(episode_num))
    agent_test.save_model(save_dir + '/' + test_name + str(episode_num))
def main():
    # Make environments
    env = rlcard.make('blackjack', config={'env_num': 4, 'seed': 0})
    eval_env = rlcard.make('blackjack', config={'env_num': 4, 'seed': 0})

    # Set the iteration numbers and how frequently we evaluate performance
    evaluate_every = 100
    evaluate_num = 10000
    iteration_num = 100000

    # The initial memory size
    memory_init_size = 100

    # Train the agent every X steps
    train_every = 1

    # The paths for saving the logs and learning curves
    log_dir = './experiments/blackjack_dqn_result/'

    # Set a global seed
    set_global_seed(0)

    with tf.compat.v1.Session() as sess:
        # Initialize a global step
        global_step = tf.Variable(0, name='global_step', trainable=False)

        # Set up the agents
        agent = DQNAgent(sess,
                         scope='dqn',
                         action_num=env.action_num,
                         replay_memory_init_size=memory_init_size,
                         train_every=train_every,
                         state_shape=env.state_shape,
                         mlp_layers=[10, 10])
        env.set_agents([agent])
        eval_env.set_agents([agent])

        # Initialize global variables
        sess.run(tf.compat.v1.global_variables_initializer())

        # Initialize a Logger to plot the learning curve
        logger = Logger(log_dir)

        for iteration in range(iteration_num):
            # Generate data from the environment
            trajectories, _ = env.run(is_training=True)

            # Feed transitions into agent memory, and train the agent
            for ts in trajectories[0]:
                agent.feed(ts)

            # Evaluate the performance
            if iteration % evaluate_every == 0:
                logger.log_performance(env.timestep, tournament(eval_env, evaluate_num)[0])

        # Close files in the logger
        logger.close_files()

        # Plot the learning curve
        logger.plot('DQN')

        # Save model
        save_dir = 'models/blackjack_dqn'
        if not os.path.exists(save_dir):
            os.makedirs(save_dir)
        saver = tf.compat.v1.train.Saver()
        saver.save(sess, os.path.join(save_dir, 'model'))
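# A restore sketch (an assumption mirroring main() above, not part of the
# original script): rebuild the same graph, then load the checkpoint written
# by saver.save() before evaluating.
def evaluate_saved_model():
    eval_env = rlcard.make('blackjack', config={'env_num': 4, 'seed': 0})
    with tf.compat.v1.Session() as sess:
        # Construct the agent exactly as in main() so the variables match
        agent = DQNAgent(sess,
                         scope='dqn',
                         action_num=eval_env.action_num,
                         replay_memory_init_size=100,
                         train_every=1,
                         state_shape=eval_env.state_shape,
                         mlp_layers=[10, 10])
        eval_env.set_agents([agent])
        saver = tf.compat.v1.train.Saver()
        saver.restore(sess, os.path.join('models/blackjack_dqn', 'model'))
        print(tournament(eval_env, 10000)[0])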
def train(args):
    # Check whether gpu is available
    device = get_device()

    # Seed numpy, torch, random
    set_seed(args.seed)

    # Make the environment with seed
    env_func = env_name_to_env_func[args.env]
    env = env_func.env()
    env.seed(args.seed)
    env.reset()

    # Initialize the agent and use random agents as opponents
    learning_agent_name = env.agents[0]
    if args.algorithm == 'dqn':
        from rlcard.agents.pettingzoo_agents import DQNAgentPettingZoo
        agent = DQNAgentPettingZoo(
            num_actions=env.action_space(learning_agent_name).n,
            state_shape=env.observation_space(learning_agent_name)["observation"].shape,
            mlp_layers=[64, 64],
            device=device,
        )
    elif args.algorithm == 'nfsp':
        from rlcard.agents.pettingzoo_agents import NFSPAgentPettingZoo
        agent = NFSPAgentPettingZoo(
            num_actions=env.action_space(learning_agent_name).n,
            state_shape=env.observation_space(learning_agent_name)["observation"].shape,
            hidden_layers_sizes=[64, 64],
            q_mlp_layers=[64, 64],
            device=device,
        )
    agents = {learning_agent_name: agent}
    for i in range(1, env.num_agents):
        agents[env.agents[i]] = RandomAgentPettingZoo(num_actions=env.action_space(env.agents[i]).n)

    # Start training
    num_timesteps = 0
    with Logger(args.log_dir) as logger:
        for episode in range(args.num_episodes):
            if args.algorithm == 'nfsp':
                agent.sample_episode_policy()

            # Generate data from the environment
            trajectories = run_game_pettingzoo(env, agents, is_training=True)
            trajectories = reorganize_pettingzoo(trajectories)
            num_timesteps += sum([len(t) for t in trajectories.values()])
            for ts in trajectories[learning_agent_name]:
                agent.feed(ts)

            # Evaluate the performance. Play with random agents.
            if episode % args.evaluate_every == 0:
                average_rewards = tournament_pettingzoo(env, agents, args.num_eval_games)
                logger.log_performance(num_timesteps, average_rewards[learning_agent_name])

        # Get the paths
        csv_path, fig_path = logger.csv_path, logger.fig_path

    # Plot the learning curve
    plot_curve(csv_path, fig_path, args.algorithm)

    # Save model
    save_path = os.path.join(args.log_dir, 'model.pth')
    torch.save(agent, save_path)
    print('Model saved in', save_path)
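# A sketch of the `env_name_to_env_func` mapping that train() above assumes:
# RLCard-style names mapped to PettingZoo classic environment modules. The
# exact entries and version suffixes are assumptions that depend on the
# installed PettingZoo release.
from pettingzoo.classic import leduc_holdem_v4, texas_holdem_v4

env_name_to_env_func = {
    'leduc-holdem': leduc_holdem_v4,
    'limit-holdem': texas_holdem_v4,
}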
agents = []
for i in range(env.player_num):
    agent = DeepCFR(sess, scope='deepcfr' + str(i), env=env)
    agents.append(agent)
random_agent = RandomAgent(action_num=eval_env.action_num)

env.set_agents(agents)
eval_env.set_agents([agents[0], random_agent])

# Initialize global variables
sess.run(tf.global_variables_initializer())

# Restore checkpoint
saver = tf.train.Saver()
save_dir = 'models/nolimit_holdem_deepcfr'

# Init a Logger to plot the learning curve
logger = Logger(log_dir)

# Track the best evaluation reward seen so far
# (the original fragment used _reward_max without initializing it)
_reward_max = float('-inf')

for episode in range(episode_num):
    for agent in agents:
        agent.train()

    # Evaluate the performance. Play with random agents.
    if episode % evaluate_every == 0:
        _reward = tournament(eval_env, evaluate_num)[0]
        logger.log_performance(episode, _reward)

        # Save the model whenever the evaluation reward improves
        if _reward > _reward_max:
            if not os.path.exists(save_dir):
                os.makedirs(save_dir)
            saver.save(sess, os.path.join(save_dir, 'model'))
            _reward_max = _reward
def nfsp():
    import tensorflow as tf

    if tf.test.gpu_device_name():
        print('GPU found')
    else:
        print("No GPU found")
    # os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

    # Make environments
    env = rlcard.make('no-limit-holdem', config={'record_action': False, 'game_player_num': 2})
    eval_env = rlcard.make('no-limit-holdem', config={'seed': 12, 'game_player_num': 2})
    eval_env2 = rlcard.make('no-limit-holdem', config={'seed': 43, 'game_player_num': 2})

    # The paths for saving the logs and learning curves
    log_dir = './experiments/nolimit_holdem_nfsp_result/1v1MCNFSPv3'

    # Set a global seed
    set_global_seed(0)

    graph = tf.Graph()
    sess = tf.Session(graph=graph)

    # Set the iteration numbers and how frequently we evaluate the performance
    evaluate_every = 1000
    evaluate_num = 250
    episode_num = 5000

    # The initial memory size
    memory_init_size = 1500

    # Train the agents every X steps
    train_every = 256

    agents = []
    with graph.as_default():
        # Model1v1V3cp10good
        agents.append(
            NFSPAgent(sess,
                      scope='nfsp' + str(0),
                      action_num=env.action_num,
                      state_shape=env.state_shape,
                      hidden_layers_sizes=[512, 512],
                      anticipatory_param=0.1,
                      rl_learning_rate=.1,
                      min_buffer_size_to_learn=memory_init_size,
                      q_replay_memory_init_size=memory_init_size,
                      train_every=train_every,
                      q_train_every=train_every,
                      q_mlp_layers=[512, 512]))
        agents.append(
            NFSPAgent(sess,
                      scope='nfsp' + str(1),
                      action_num=env.action_num,
                      state_shape=env.state_shape,
                      hidden_layers_sizes=[512, 512],
                      anticipatory_param=0.075,
                      rl_learning_rate=0.075,
                      min_buffer_size_to_learn=memory_init_size,
                      q_replay_memory_init_size=memory_init_size,
                      train_every=train_every // 2,
                      q_train_every=train_every // 2,
                      q_mlp_layers=[512, 512]))

    # check_point_path = os.path.join('models\\nolimit_holdem_nfsp\\1v1MCNFSPv3\\cp\\10')
    print('-------------------------------------------------------------------------------------')
    # print(check_point_path)

    with sess.as_default():
        with graph.as_default():
            saver = tf.train.Saver()
            # saver.restore(sess, tf.train.latest_checkpoint(check_point_path))
            global_step = tf.Variable(0, name='global_step', trainable=False)

            random_agent = RandomAgent(action_num=eval_env2.action_num)
            # easy_agent = nfsp_agents[0]
            print(agents)
            # print(nfsp_agents)

            env.set_agents(agents)
            eval_env.set_agents(agents)
            eval_env2.set_agents([agents[0], random_agent])

            # Initialize global variables
            sess.run(tf.global_variables_initializer())

            # Init a Logger to plot the learning curve
            logger = Logger(log_dir)

            for episode in range(episode_num):
                # First sample a policy for the episode
                for agent in agents:
                    agent.sample_episode_policy()

                table = []

                # Generate data from the environment
                trajectories, _ = env.run(is_training=True)

                # Feed transitions into agent memory, and train the agents
                for i in range(env.player_num):
                    for ts in trajectories[i]:
                        agents[i].feed(ts, table)

                # Evaluate the performance
                if episode % evaluate_every == 0:
                    logger.log('\n\n\n---------------------------------------------------------------\nTournament ' + str(episode / evaluate_every))
                    res = tournament(eval_env, evaluate_num)
                    res2 = tournament(eval_env2, evaluate_num // 4)
                    logger.log_performance(env.timestep, res[0])
                    logger.log_performance(env.timestep, res2[0])
                    logger.log('' + str(episode_num) + " - " + str(episode) + '\n')
                    logger.log('\n\n----------------------------------------------------------------')

                # Periodically checkpoint the model (skip episode 0)
                if episode % evaluate_every == 0 and not episode == 0:
                    save_dir = 'models/nolimit_holdem_nfsp/1v1MCNFSPv3/cp/10/good' + str(episode // evaluate_every)
                    if not os.path.exists(save_dir):
                        os.makedirs(save_dir)
                    saver = tf.train.Saver()
                    saver.save(sess, os.path.join(save_dir, 'model'))

            # Final evaluation after training
            logger.log('\n\n\n---------------------------------------------------------------\nTournament ' + str(episode / evaluate_every))
            res = tournament(eval_env, evaluate_num)
            logger.log_performance(env.timestep, res[0])
            logger.log('' + str(episode_num) + " - " + str(episode))

            # Close files in the logger
            logger.close_files()

            # Plot the learning curve
            logger.plot('NFSP')

            # Save the final model
            save_dir = 'models/nolimit_holdem_nfsp/1v1MCNFSPv3/cp/10/good'
            if not os.path.exists(save_dir):
                os.makedirs(save_dir)
            saver = tf.train.Saver()
            saver.save(sess, os.path.join(save_dir, 'model'))
def main():
    # Make environments
    env = rlcard.make('no-limit-holdem', config={'seed': 0, 'env_num': 16, 'game_player_num': 4})
    eval_env = rlcard.make('no-limit-holdem', config={'seed': 0, 'env_num': 16})

    # Set the iteration numbers and how frequently we evaluate the performance
    evaluate_every = 100
    evaluate_num = 1000
    episode_num = 200000

    # The initial memory size
    memory_init_size = 1000

    # Train the agent every X steps
    train_every = 1
    _reward_max = -0.8

    # The paths for saving the logs and learning curves
    log_dir = './experiments/nolimit_holdem_dqn_result/'

    # Set a global seed
    set_global_seed(0)

    with tf.Session() as sess:
        # Initialize a global step
        global_step = tf.Variable(0, name='global_step', trainable=False)

        # Set up the agents
        agent = DQNAgent(sess,
                         scope='dqn',
                         action_num=env.action_num,
                         replay_memory_init_size=memory_init_size,
                         train_every=train_every,
                         state_shape=env.state_shape,
                         mlp_layers=[512, 512])
        agent2 = NFSPAgent(sess,
                           scope='nfsp',
                           action_num=env.action_num,
                           state_shape=env.state_shape,
                           hidden_layers_sizes=[512, 512],
                           anticipatory_param=0.1,
                           min_buffer_size_to_learn=memory_init_size,
                           q_replay_memory_init_size=memory_init_size,
                           train_every=64,
                           q_train_every=64,
                           q_mlp_layers=[512, 512])

        # Initialize global variables
        sess.run(tf.global_variables_initializer())

        save_dir = 'models/nolimit_holdem_dqn'
        saver = tf.train.Saver()
        # saver.restore(sess, os.path.join(save_dir, 'model'))

        random_agent = RandomAgent(action_num=eval_env.action_num)
        env.set_agents([agent, agent, agent2, random_agent])
        eval_env.set_agents([agent, agent2])

        # Init a Logger to plot the learning curve
        logger = Logger(log_dir)

        for episode in range(episode_num):
            agent2.sample_episode_policy()

            # Generate data from the environment
            trajectories, _ = env.run(is_training=True)

            # Feed transitions into agent memory, and train the agents
            for ts in trajectories[0]:
                agent.feed(ts)
            for ts in trajectories[2]:
                agent2.feed(ts)

            # Evaluate the performance
            if episode % evaluate_every == 0:
                _reward = tournament(eval_env, evaluate_num)[0]
                logger.log_performance(episode, _reward)

                # Save the model whenever the evaluation reward improves
                if _reward > _reward_max:
                    if not os.path.exists(save_dir):
                        os.makedirs(save_dir)
                    saver.save(sess, os.path.join(save_dir, 'model'))
                    _reward_max = _reward

        # Close files in the logger
        logger.close_files()

        if not os.path.exists(save_dir):
            os.makedirs(save_dir)
        saver.save(sess, os.path.join(save_dir, 'model_final'))
def main():
    # Make environments
    env = rlcard.make('leduc-holdem', config={'seed': 0, 'env_num': 4})
    eval_env = rlcard.make('leduc-holdem', config={'seed': 0, 'env_num': 4})

    # Set the iteration numbers and how frequently we evaluate the performance
    evaluate_every = 100
    evaluate_num = 10000
    episode_num = 800000

    # The initial memory size
    memory_init_size = 1000

    # Train the agent every X steps
    train_every = 1
    _reward_max = -0.5

    # The paths for saving the logs and learning curves
    log_dir = './experiments/leduc_holdem_dqn_result/'

    # Set a global seed
    set_global_seed(0)

    with tf.Session() as sess:
        # Initialize a global step
        global_step = tf.Variable(0, name='global_step', trainable=False)

        # Set up the agents
        agent = DQNAgent(sess,
                         scope='dqn',
                         action_num=env.action_num,
                         replay_memory_init_size=memory_init_size,
                         train_every=train_every,
                         state_shape=env.state_shape,
                         mlp_layers=[128, 128])
        # random_agent = RandomAgent(action_num=eval_env.action_num)
        cfr_agent = models.load('leduc-holdem-cfr').agents[0]

        env.set_agents([agent, agent])
        eval_env.set_agents([agent, cfr_agent])

        # Initialize global variables
        sess.run(tf.global_variables_initializer())

        # Init a Logger to plot the learning curve
        logger = Logger(log_dir)

        saver = tf.train.Saver()
        save_dir = 'models/leduc_holdem_dqn'
        saver.restore(sess, os.path.join(save_dir, 'model'))

        for episode in range(episode_num):
            # Generate data from the environment
            trajectories, _ = env.run(is_training=True)

            # Feed transitions into agent memory, and train the agent
            for ts in trajectories[0]:
                agent.feed(ts)

            # Evaluate the performance. Play against the pre-trained CFR agent.
            if episode % evaluate_every == 0:
                _reward = tournament(eval_env, evaluate_num)[0]
                logger.log_performance(episode, _reward)

                # Save the model whenever the evaluation reward improves
                if _reward > _reward_max:
                    if not os.path.exists(save_dir):
                        os.makedirs(save_dir)
                    saver.save(sess, os.path.join(save_dir, 'model'))
                    _reward_max = _reward

        # Close files in the logger
        logger.close_files()

        # Plot the learning curve
        logger.plot('DQN')
                learning_rate=1e-5,
                strategy_memory_capacity=2 * int(1e6))
agents.append(agent)

for _ in range(env.player_num - 1):
    agent = RandomAgent(action_num=eval_env.action_num)
    random_agents.append(agent)

env.set_agents(agents)
eval_env.set_agents([agents[0], *random_agents])

# Initialize global variables
sess.run(tf.global_variables_initializer())

# Init a Logger to plot the learning curve
logger = Logger(log_dir)

# Create a dir for results
save_dir = 'models/thousand_schnapsen_deep_cfr3'
if not os.path.exists(save_dir):
    os.makedirs(save_dir)
saver = tf.train.Saver()

best_win_rate = 0
for episode in range(episode_num):
    agents[0].train()

    # Evaluate the performance. Play with random agents.
    if episode % evaluate_every == 0:
        payoffs, wins = tournament(eval_env, evaluate_num)
        logger.log_performance(env.timestep, payoffs[0])
duration = args.d
explore = args.e
model_action = args.ma
model_hand_rank = args.mh

# Make environments
env = rlcard.make('limit-holdem', config={'seed': 0})
eval_env = rlcard.make('limit-holdem', config={'seed': 10})

# episode_num = 5
num_tournaments = 25
# episode_num = 100
# evaluate_every = 10
evaluate_num = 1000

log_dir = name
logger = Logger(log_dir)

# Set a global seed
set_global_seed(0)

# Set up the agents
agent1 = limitholdem_rule_models.LimitholdemRuleAgentV1()
agent2 = MCTS_Agent(action_num=env.action_num,
                    duration=duration,
                    exploration=explore,
                    model_action=model_action,
                    model_hand_rank=model_hand_rank)

env.set_agents([agent2, agent1])
eval_env.set_agents([agent2, agent1])

for i in range(num_tournaments):
def train_uno():
    # Make environments
    env = rlcard.make("uno", config={"seed": 0})
    eval_env = rlcard.make("uno", config={"seed": 0})

    # Set the iteration numbers and how frequently we evaluate the performance
    evaluate_every = 100
    evaluate_num = 1000
    episode_num = 3000

    # The initial memory size
    memory_init_size = 1000

    # Train the agent every X steps
    train_every = 100

    # The paths for saving the logs and learning curves
    log_dir = "./experiments/uno_results_dqn/"

    # Set a global seed
    set_global_seed(0)

    params = {
        "scope": "DQN-Agent",
        "num_actions": env.action_num,
        "replay_memory_size": memory_init_size,
        "num_states": env.state_shape,
        "discount_factor": 0.99,
        "epsilon_start": 1.0,
        "epsilon_end": 0.1,
        "epsilon_decay_steps": 20000,
        "batch_size": 32,
        "train_every": 1,
        "mlp_layers": [512, 512],
        "lr": 0.0005,
    }
    agent_conf = DQN_conf(**params)
    agent = DQN_agent(agent_conf)
    random_agent = RandomAgent(action_num=eval_env.action_num)

    env.set_agents([agent, random_agent])
    eval_env.set_agents([agent, random_agent])

    logger = Logger(log_dir)

    for episode in range(episode_num):
        # Generate data from the environment
        trajectories, _ = env.run(is_training=True)

        # Feed transitions into agent memory, and train the agent
        for ts in trajectories[0]:
            agent.feed(ts)

        # Evaluate the performance. Play with random agents.
        if episode % evaluate_every == 0:
            logger.log_performance(env.timestep, tournament(eval_env, evaluate_num)[0])

    # Close files in the logger
    logger.close_files()

    # Plot the learning curve
    logger.plot("DQN UNO")

    # Save model
    save_dir = "models/uno_dqn_pytorch"
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
    state_dict = agent.get_state_dict()
    print(state_dict.keys())
    torch.save(state_dict, os.path.join(save_dir, "model.pth"))
def train(args):
    # Check whether gpu is available
    device = get_device()

    # Seed numpy, torch, random
    set_seed(args.seed)

    # Make the environment with seed
    env = rlcard.make(args.env, config={'seed': args.seed})

    # Initialize the agent and use random agents as opponents
    if args.algorithm == 'dqn':
        from rlcard.agents import DQNAgent
        agent = DQNAgent(
            num_actions=env.num_actions,
            state_shape=env.state_shape[0],
            mlp_layers=[64, 64],
            device=device,
        )
    elif args.algorithm == 'nfsp':
        from rlcard.agents import NFSPAgent
        agent = NFSPAgent(
            num_actions=env.num_actions,
            state_shape=env.state_shape[0],
            hidden_layers_sizes=[64, 64],
            q_mlp_layers=[64, 64],
            device=device,
        )
    agents = [agent]
    for _ in range(1, env.num_players):
        agents.append(RandomAgent(num_actions=env.num_actions))
    env.set_agents(agents)

    # Start training
    with Logger(args.log_dir) as logger:
        for episode in range(args.num_episodes):
            if args.algorithm == 'nfsp':
                agents[0].sample_episode_policy()

            # Generate data from the environment
            trajectories, payoffs = env.run(is_training=True)

            # Reorganize the data to be state, action, reward, next_state, done
            trajectories = reorganize(trajectories, payoffs)

            # Feed transitions into agent memory, and train the agent.
            # Here, we assume that DQN always plays the first position
            # and the other players play randomly (if any)
            for ts in trajectories[0]:
                agent.feed(ts)

            # Evaluate the performance. Play with random agents.
            if episode % args.evaluate_every == 0:
                logger.log_performance(env.timestep, tournament(env, args.num_eval_games)[0])

        # Get the paths
        csv_path, fig_path = logger.csv_path, logger.fig_path

    # Plot the learning curve
    plot_curve(csv_path, fig_path, args.algorithm)

    # Save model
    save_path = os.path.join(args.log_dir, 'model.pth')
    torch.save(agent, save_path)
    print('Model saved in', save_path)
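# A small evaluation sketch (an assumption, not part of the original script):
# since the trained agent is saved above with torch.save(agent, ...), the
# whole object can be reloaded and pitted against a random agent. The
# environment name and model path below are hypothetical.
import torch
import rlcard
from rlcard.agents import RandomAgent
from rlcard.utils import tournament

env = rlcard.make('leduc-holdem')  # hypothetical: use whatever args.env was
agent = torch.load('experiments/leduc_holdem_dqn_result/model.pth', map_location='cpu')
env.set_agents([agent, RandomAgent(num_actions=env.num_actions)])
print(tournament(env, 1000)[0])  # average payoff of the loaded agent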
                 scope='dqn',
                 action_num=env.action_num,
                 replay_memory_init_size=memory_init_size,
                 train_every=train_every,
                 state_shape=env.state_shape,
                 mlp_layers=[512, 512])
mcts_agent = MCTS_Agent(env.action_num, duration, explore, model_action, model_hand_rank)

env.set_agents([mcts_agent, dqn_agent])
eval_env.set_agents([mcts_agent, dqn_agent])

# Initialize global variables
sess.run(tf.global_variables_initializer())

# Init Loggers to plot the learning curves
logger_mcts = Logger(log_dir_mcts)
logger_dqn = Logger(log_dir_dqn)

for episode in range(episode_num):
    # Generate data from the environment
    trajectories, _ = env.run(is_training=True)

    # Feed transitions into agent memory, and train the agent.
    # DQN plays position 1, so feed it its own trajectories
    # (the original fed trajectories[0], the MCTS agent's transitions).
    for ts in trajectories[1]:
        dqn_agent.feed(ts)

    # Evaluate the performance
    if episode % evaluate_every == 0:
        logger_mcts.log_performance(env.timestep, tournament(eval_env, evaluate_num)[0])
def main():
    # Make environments
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    env = rlcard.make('no-limit-holdem', config={'seed': 0, 'env_num': 4})
    eval_env = rlcard.make('no-limit-holdem', config={'seed': 0, 'env_num': 4})

    # Set the iteration numbers and how frequently we evaluate performance
    evaluate_every = 5000
    selfplay_every = 25000
    evaluate_num = 10000
    iteration_num = 8000000

    # The initial memory size
    memory_init_size = 100

    # Train the agent every X steps
    train_every = 1

    agent = DQNAgent(num_actions=env.num_actions,
                     state_shape=env.state_shape[0],
                     mlp_layers=[64, 64, 64, 64],
                     device=device)
    agents = [agent, load_model("model.pth")]
    env.set_agents(agents)

    with Logger('./') as logger:
        for episode in range(iteration_num):
            # Generate data from the environment
            trajectories, payoffs = env.run(is_training=True)

            # Reorganize the data to be state, action, reward, next_state, done
            trajectories = reorganize(trajectories, payoffs)

            # Feed transitions into agent memory, and train the agent.
            # Here, we assume that DQN always plays the first position
            # and the other players play randomly (if any)
            for ts in trajectories[0]:
                agent.feed(ts)

            # Evaluate the performance
            if episode % evaluate_every == 0:
                logger.log_performance(env.timestep, tournament(env, evaluate_num)[0])

            # Periodically refresh the self-play opponent with the latest model
            if episode % selfplay_every == 0:
                save_path = os.path.join('./', str(episode) + "model.pth")
                torch.save(agent, save_path)
                print('Model saved in', save_path)
                agents = [agent, load_model(str(episode) + "model.pth")]
                env.set_agents(agents)

        # Get the paths
        csv_path, fig_path = logger.csv_path, logger.fig_path

    # Plot the learning curve
    # plot_curve(csv_path, fig_path, args.algorithm)

    # Save model
    save_path = os.path.join('./', 'model.pth')
    torch.save(agent, save_path)
    print('Model saved in', save_path)


# The paths for saving the logs and learning curves
log_dir = './experiments/nlh_cfr_result/'

# Set a global seed
set_seed(0)
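# The script above calls a load_model() helper that is not defined in this
# fragment. A minimal sketch, assuming the signature from the call sites
# (a single path argument) and that opponents were saved whole with
# torch.save(agent, path):
def load_model(model_path, device=None):
    agent = torch.load(model_path, map_location=device)
    # Move the agent's networks to the target device when supported
    if device is not None and hasattr(agent, 'set_device'):
        agent.set_device(device)
    return agent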
# Initialize a global step
global_step = tf.Variable(0, name='global_step', trainable=False)

# Set up the agents
agent = DQNAgent(sess,
                 scope='dqn',
                 action_num=env.action_num,
                 replay_memory_init_size=memory_init_size,
                 train_every=train_every,
                 state_shape=env.state_shape,
                 mlp_layers=[128, 128])

# Initialize global variables
sess.run(tf.compat.v1.global_variables_initializer())

# Init a Logger to plot the learning curve
logger = Logger(log_dir)

state = env.reset()
for timestep in range(timesteps):
    action = agent.step(state)
    next_state, reward, done = env.step(action)
    ts = (state, action, reward, next_state, done)
    agent.feed(ts)
    # Advance the state (missing in the original fragment)
    state = env.reset() if done else next_state

    if timestep % evaluate_every == 0:
        rewards = []
        state = eval_env.reset()
        for _ in range(evaluate_num):
            action, _ = agent.eval_step(state)
            # Step the evaluation environment (the original stepped `env` here)
            _, reward, done = eval_env.step(action)
# Set up the agents
agent = RandomAgent(action_num=env.action_num)
random_agent = RandomAgent(action_num=eval_env.action_num)

env.set_agents([agent, random_agent, random_agent])
eval_env.set_agents([random_agent, random_agent, random_agent])
env.set_landlord_score(landlord_score)
eval_env.set_landlord_score(landlord_score)

# Initialize global variables
sess.run(tf.global_variables_initializer())

# Init a Logger to plot the learning curve
log_dir = './experiments/doudizhu_random_result/'
logger = Logger(log_dir)
logger.log_parameters(parameter_dict)

for episode in range(episode_num):
    # We don't need these steps for a random agent:
    # Generate data from the environment
    # trajectories, _ = env.run(is_training=True)
    # Feed transitions into agent memory, and train the agent
    # for ts in trajectories[0]:
    #     agent.feed(ts)

    # Evaluate the performance. Play with random agents.
    if episode % evaluate_every == 0:
        payoffs, peasant_wins, landlord_wins = tournament(
                  q_replay_memory_size=int(1e5),
                  q_replay_memory_init_size=memory_init_size,
                  train_every=train_every,
                  q_train_every=train_every,
                  q_batch_size=256,
                  q_mlp_layers=[512, 1024, 2048, 1024, 512])
agents.append(agent)
random_agent = RandomAgent(action_num=eval_env.action_num)

env.set_agents(agents)
eval_env.set_agents([agents[0], random_agent])

# Initialize global variables
sess.run(tf.compat.v1.global_variables_initializer())

# Init a Logger to plot the learning curve
logger = Logger(log_dir)

for episode in range(episode_num):
    # First sample a policy for the episode
    for agent in agents:
        agent.sample_episode_policy()

    # Generate data from the environment
    trajectories, _ = env.run(is_training=True)

    # Feed transitions into agent memory, and train the agents
    for i in range(env.player_num):
        for ts in trajectories[i]:
            agents[i].feed(ts)
                 actor_layers=[64, 64],
                 critic_layers=[64, 64],
                 )
random_agent = RandomAgent(action_num=eval_env.action_num)

env.set_agents([agent, random_agent])
eval_env.set_agents([agent, random_agent])

# Initialize global variables
sess.run(tf.global_variables_initializer())

# Include this line to verify the graph is not being updated in each
# iteration; this helps identify memory leaks. Leave it commented out here,
# since tf.train.Saver() below is a graph operation.
# sess.graph.finalize()

# Init a Logger to plot the learning curve
logger = Logger(log_dir)

start_time = time.time()
for episode in range(episode_num):
    # Generate data from the environment
    trajectories, _ = env.run(is_training=True)

    # Feed transitions into agent memory, and train the agent
    for ts in trajectories[0]:
        agent.feed(ts)

    # Evaluate the performance. Play with random agents.
    if episode % evaluate_every == 0:
        if episode > 0:
            current_time = time.time()
            episodes_per_sec = episode / (current_time - start_time)
            remaining_mins = (episode_num -
def nfsp():
    import tensorflow as tf

    if tf.test.gpu_device_name():
        print('GPU found')
    else:
        print("No GPU found")
    # os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

    # Make environments
    env = rlcard.make('no-limit-holdem', config={'game_player_num': 2, 'seed': 477})
    eval_env = rlcard.make('no-limit-holdem', config={'seed': 12, 'game_player_num': 2})
    eval_env2 = rlcard.make('no-limit-holdem', config={'seed': 43, 'game_player_num': 2})
    # eval_env3 = rlcard.make('no-limit-holdem', config={'seed': 43, 'game_player_num': 2})

    # The paths for saving the logs and learning curves
    log_dir = './experiments/nolimit_holdem_nfsp_result/no_all_in'

    # Set a global seed
    set_global_seed(477)

    graph = tf.Graph()
    tf.ConfigProto()
    sess = tf.Session(graph=graph)

    # Set the iteration numbers and how frequently we evaluate the performance
    evaluate_every = 2048
    evaluate_num = 32
    episode_num = 24576

    # The initial memory size
    memory_init_size = 256

    # Train the agents every X steps
    train_every = 256

    agents = []
    with graph.as_default():
        """ The NFSPAgent signature, for reference:
        def __init__(self, sess, scope, action_num=4, state_shape=None,
                     hidden_layers_sizes=None, reservoir_buffer_capacity=int(1e6),
                     anticipatory_param=0.1, batch_size=256, train_every=1,
                     rl_learning_rate=0.1, sl_learning_rate=0.005,
                     min_buffer_size_to_learn=1000, q_replay_memory_size=30000,
                     q_replay_memory_init_size=1000,
                     q_update_target_estimator_every=1000, q_discount_factor=0.99,
                     q_epsilon_start=0.06, q_epsilon_end=0,
                     q_epsilon_decay_steps=int(1e6), q_batch_size=256,
                     q_train_every=1, q_mlp_layers=None,
                     evaluate_with='average_policy'):
        """
        # Model1v1V3cp10good
        agents.append(
            NFSPAgent(sess,
                      scope='nfsp' + str(0),
                      action_num=env.action_num,
                      state_shape=env.state_shape,
                      hidden_layers_sizes=[512, 512],
                      anticipatory_param=0.1,
                      rl_learning_rate=0.01,
                      sl_learning_rate=0.005,
                      q_epsilon_start=.7,
                      min_buffer_size_to_learn=memory_init_size,
                      q_replay_memory_size=80000,
                      q_replay_memory_init_size=memory_init_size,
                      train_every=train_every + 44,
                      q_train_every=train_every,
                      q_mlp_layers=[512, 512]))
        agents.append(
            NFSPAgent(sess,
                      scope='nfsp' + str(1),
                      action_num=env.action_num,
                      state_shape=env.state_shape,
                      hidden_layers_sizes=[512, 512],
                      anticipatory_param=0.1,
                      rl_learning_rate=0.01,
                      sl_learning_rate=0.005,
                      q_epsilon_start=.7,
                      q_replay_memory_size=80000,
                      min_buffer_size_to_learn=memory_init_size,
                      q_replay_memory_init_size=memory_init_size,
                      train_every=train_every + 44,
                      q_train_every=train_every,
                      q_mlp_layers=[512, 512]))

    # check_point_path = os.path.join('models\\nolimit_holdem_nfsp\\iivan')
    print('-------------------------------------------------------------------------------------')
    # print(check_point_path)

    # Running multiple TensorFlow sessions concurrently:
    # https://stackoverflow.com/questions/33758669/running-multiple-tensorflow-sessions-concurrently
    with sess.as_default():
        with graph.as_default():
            # saver = tf.train.Saver()
            # saver.restore(sess, tf.train.latest_checkpoint(check_point_path))
            global_step = tf.Variable(0, name='global_step', trainable=False)

            random_agent = RandomAgent(action_num=eval_env2.action_num)

            env.set_agents(agents)
            eval_env.set_agents([agents[0], random_agent])
            eval_env2.set_agents([random_agent, agents[1]])
            # eval_env3.set_agents([agents[1], random_agent])

            # Initialize global variables
            sess.run(tf.global_variables_initializer())

            # Init a Logger to plot the learning curve
            logger = Logger(log_dir)

            for episode in range(episode_num):
                print(episode, end='\r')

                # First sample a policy for the episode
                for agent in agents:
                    agent.sample_episode_policy()

                # Generate data from the environment
                trajectories, _ = env.run(is_training=True)

                # Feed transitions into agent memory, and train the agents
                for i in range(env.player_num):
                    for ts in trajectories[i]:
                        agents[i].feed(ts)

                # Evaluate the performance
                if episode % evaluate_every == 0:
                    logger.log('\n\n\n---------------------------------------------------------------\nTournament ' + str(episode / evaluate_every))
                    # tournament(eval_env2, 6)
                    # exploitability.exploitability(eval_env, agents[0], 500)
                    res = tournament(env, evaluate_num)
                    logger.log_performance(env.timestep, res[0])
                    res2 = tournament(eval_env, evaluate_num // 3)
                    logger.log_performance(env.timestep, res2[0])
                    res3 = tournament(eval_env2, evaluate_num // 3)
                    logger.log_performance(env.timestep, res3[0])
                    logger.log('' + str(episode_num) + " - " + str(episode) + '\n')
                    logger.log('\n\n----------------------------------------------------------------')

                # Periodically checkpoint the model (skip episode 0)
                if episode % evaluate_every == 0 and not episode == 0:
                    save_dir = 'models/nolimit_holdem_nfsp/no_all_in/cp/' + str(episode // evaluate_every)
                    if not os.path.exists(save_dir):
                        os.makedirs(save_dir)
                    saver = tf.train.Saver()
                    saver.save(sess, os.path.join(save_dir, 'model'))

            # Final evaluation after training
            logger.log('\n\n\n---------------------------------------------------------------\nTournament ' + str(episode / evaluate_every))
            res = tournament(eval_env, evaluate_num)
            logger.log_performance(env.timestep, res[0])
            logger.log('' + str(episode_num) + " - " + str(episode))

            # Close files in the logger
            logger.close_files()

            # Plot the learning curve
            logger.plot('NFSP')

            # Save the final model
            save_dir = 'models/nolimit_holdem_nfsp/no_all_in'
            if not os.path.exists(save_dir):
                os.makedirs(save_dir)
            saver = tf.train.Saver()
            saver.save(sess, os.path.join(save_dir, 'model'))
                  train_every=train_every,
                  q_train_every=train_every,
                  q_batch_size=256,
                  q_mlp_layers=[512, 1024, 2048, 1024, 512])
agents.append(agent)
random_agent = RandomAgent(action_num=eval_env.action_num)

env.set_agents(agents)
eval_env.set_agents([agents[0], random_agent])

# Initialize global variables
sess.run(tf.global_variables_initializer())

# Init a Logger to plot the learning curve
logger = Logger(log_dir)

for episode in range(episode_num):
    print("Episode: " + str(episode))

    # First sample a policy for the episode
    for agent in agents:
        agent.sample_episode_policy()

    # Generate data from the environment
    trajectories, _ = env.run(is_training=True)

    # Feed transitions into agent memory, and train the agents
    for i in range(env.player_num):
        for ts in trajectories[i]:
            agents[i].feed(ts)
from rlcard.utils import set_global_seed, tournament
from SeedRanomAgent import SRandomAgent
from rlcard.utils import Logger
from eval_util import *

# Set the iteration numbers and how frequently we evaluate/save the plot
evaluate_num = 100
emu_num = 50
log_dir = './experiments/doudizhu_mcts_vs_drqn_result/'
best_model_path = './models/doudizhu_train_drqn_as_L_vs_random_and_eval_vs_random_best.npy'

# Set a global seed
set_global_seed(0)

# Init a Logger to plot the learning curve
logger = Logger(log_dir)
logger.log("MCTS-UCT VS DRQN")

env = rlcard.make('doudizhu', config={'seed': 0, 'allow_step_back': True})

config = tf.ConfigProto()
config.gpu_options.allow_growth = True
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
sess = tf.Session(config=config)

drqn_agent = DRQNAgent(sess,
                       scope='doudizhu_drqn',
                       action_num=env.action_num,
                       memory_init_size=3000,
                       memory_size=6000,
    elif role == 2:
        agent_list = [random_agent, random_agent, agent]
        parameter_dict['always_peasant_2'] = True
'''

# Set agents in the environments
env.set_agents(agent_list)
eval_env.set_agents(agent_list)
env.set_landlord_score(landlord_score)
eval_env.set_landlord_score(landlord_score)
eval_env.set_eval_agent(role)

# Initialize global variables
sess.run(tf.global_variables_initializer())

# Init a Logger to plot the learning curve
log_dir = './results/doudizhu_dqn_result/'
logger = Logger(log_dir)
logger.log_parameters(parameter_dict)

role_counter = role

for episode in range(episode_num):
    # Generate data from the environment
    trajectories, _ = env.run(is_training=True)

    # Feed transitions into agent memory, and train the agent
    for ts in trajectories[0]:
        agent.feed(ts)

    # Evaluate the performance. Play with random agents.
    if episode % evaluate_every == 0:
                  train_every=train_every,
                  q_train_every=train_every,
                  q_batch_size=256,
                  q_mlp_layers=[512, 1024, 2048, 1024, 512])
agents.append(agent)
random_agent = RandomAgent(action_num=eval_env.action_num)

env.set_agents(agents)
eval_env.set_agents([agents[0], random_agent, random_agent])

# Initialize global variables
sess.run(tf.global_variables_initializer())

# Init a Logger to plot the learning curve
logger = Logger(log_dir)

for episode in range(episode_num):
    # First sample a policy for the episode
    for agent in agents:
        agent.sample_episode_policy()

    # Generate data from the environment
    trajectories, _ = env.run(is_training=True)

    # Feed transitions into agent memory, and train the agents
    for i in range(env.player_num):
        for ts in trajectories[i]:
            agents[i].feed(ts)
episode_num = 100000

# The initial memory size
memory_init_size = 100
memory_size = 6000

# Train the agent every X steps
train_every = 1

# The paths for saving the logs and learning curves
log_dir = './experiments/doudizhu_train_dqn_as_L_vs_random_and_eval_vs_random'
loss_log = os.path.join(log_dir, 'loss')
L_WR_log = os.path.join(log_dir, 'L_WR')
P_WR_log = os.path.join(log_dir, 'P_WR')

logger = Logger(log_dir)
loss_logger = Logger(loss_log)
L_WR_logger = Logger(L_WR_log)
P_WR_logger = Logger(P_WR_log)

best_model_path = './models/doudizhu_train_dqn_as_L_vs_random_and_eval_vs_random_best'
max_P_WR = 0.0
max_L_WR = 0.0

# Set a global seed
set_global_seed(0)

config = tf.ConfigProto()
config.gpu_options.allow_growth = True
os.environ["CUDA_VISIBLE_DEVICES"] = "2"
agent = DQNAgent(sess,
                 scope='dqn',
                 action_num=env.action_num,
                 replay_memory_init_size=memory_init_size,
                 train_every=train_every,
                 state_shape=env.state_shape,
                 mlp_layers=[512, 512])
random_agent = RandomAgent(action_num=eval_env.action_num)

env.set_agents([agent, random_agent, random_agent])
eval_env.set_agents([agent, random_agent, random_agent])

# Initialize global variables
sess.run(tf.global_variables_initializer())

# Init a Logger to plot the learning curve
logger = Logger(log_dir)

for episode in range(episode_num):
    # Generate data from the environment
    trajectories, _ = env.run(is_training=True)

    # Feed transitions into agent memory, and train the agent
    for ts in trajectories[0]:
        agent.feed(ts)

    # Evaluate the performance. Play with random agents.
    if episode % evaluate_every == 0:
        logger.log_performance(env.timestep, tournament(eval_env, evaluate_num)[0])
# env.set_agents([agents[0], rule_agent, agents[0], rule_agent])
# eval_env.set_agents([agents[0], rule_agent, agents[0], rule_agent])

# 4 DQN agents with a single brain
env.set_agents([agents[0], agents[0], agents[0], agents[0]])
eval_env.set_agents([agents[0], rule_agent, agents[0], rule_agent])

# 4 DQN agents with two brains
# env.set_agents([agents[0], agents[1], agents[0], agents[1]])
# eval_env.set_agents([agents[0], rule_agent, agents[0], rule_agent])

# Initialize global variables
sess.run(tf.global_variables_initializer())

# Init a Logger to plot the learning curve
logger = Logger(save_dir)

# Init moving average calculators
m_avg = MovingAvg(100)
payoff_avg = MovingAvg(100)

# Load the pre-trained model
check_point_path = os.path.join(TRACTOR_PATH, 'tractor_dqn_430k')
saver = tf.train.Saver()
saver.restore(sess, tf.train.latest_checkpoint(check_point_path))
graph = tf.get_default_graph()
print('INFO: Loaded model from {}'.format(check_point_path))

t = trange(episode_num, desc='rl-loss:', leave=True)
for episode in t:
    # Generate data from the environment