def dqn_train_keras_rl(self, num_par_agents, model_name):
    """Implementation of keras-rl deep Q-learning."""
    from agents.agent_consider_equity import Player as EquityPlayer
    from agents.agent_keras_rl_dqn import Player as DQNPlayer
    from agents.agent_random import Player as RandomPlayer
    env_name = 'neuron_poker-v0'
    env = gym.make(env_name, initial_stacks=self.stack, funds_plot=self.funds_plot, render=self.render,
                   use_cpp_montecarlo=self.use_cpp_montecarlo)

    np.random.seed(123)
    env.seed(123)
    env.add_player(EquityPlayer(name='equity/50/70', min_call_equity=.5, min_bet_equity=.7))
    env.add_player(EquityPlayer(name='equity/20/30', min_call_equity=.2, min_bet_equity=.3))
    env.add_player(RandomPlayer())
    env.add_player(RandomPlayer())
    env.add_player(RandomPlayer())
    env.add_player(PlayerShell(name='keras-rl', stack_size=self.stack))  # shell is used for callback to keras rl
    env.reset()

    env_names = np.full((1, num_par_agents), model_name)  # one model name per parallel agent (currently unused)
    dqn = DQNPlayer()
    with multiprocessing.Pool(num_par_agents) as pool:
        # Pass the callable and its arguments separately: calling parallel_dqn_train
        # inline would run the training in the parent process and hand its return
        # value to apply_async instead of dispatching the work to a pool worker.
        result = pool.apply_async(parallel_dqn_train, (dqn, env, env_name))
        result.get()  # block until the worker finishes so the pool is not torn down early

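# `parallel_dqn_train` is referenced above but not defined in this section. Below is a
# minimal sketch of what such a worker could look like, assuming the DQNPlayer API used
# elsewhere in this file (initiate_agent / train); the body is an illustration, not the
# project's actual helper.
def parallel_dqn_train(dqn, env, env_name):
    """Hypothetical worker: set up one DQN agent on its environment and train it."""
    dqn.initiate_agent(env)
    dqn.train(env_name=env_name)
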
def dqn_train_custom_q1(self):
    """Create 4 players: one equity based, two random, and a custom deep-Q agent"""
    from agents.agent_consider_equity import Player as EquityPlayer
    from agents.agent_custom_q1 import Player as Custom_Q1
    from agents.agent_random import Player as RandomPlayer
    env_name = 'neuron_poker-v0'
    self.env = gym.make(env_name, initial_stacks=self.stack, render=self.render)
    # self.env.add_player(EquityPlayer(name='equity/50/50', min_call_equity=.5, min_bet_equity=-.5))
    # self.env.add_player(EquityPlayer(name='equity/50/80', min_call_equity=.8, min_bet_equity=-.8))
    # self.env.add_player(EquityPlayer(name='equity/70/70', min_call_equity=.7, min_bet_equity=-.7))
    self.env.add_player(EquityPlayer(name='equity/20/30', min_call_equity=.2, min_bet_equity=-.3))
    # self.env.add_player(RandomPlayer())
    self.env.add_player(RandomPlayer())
    self.env.add_player(RandomPlayer())
    self.env.add_player(Custom_Q1(name='Deep_Q1'))

    for _ in range(self.num_episodes):
        self.env.reset()
        self.winner_in_episodes.append(self.env.winner_ix)

    league_table = pd.Series(self.winner_in_episodes).value_counts()
    best_player = league_table.index[0]
    print("League Table")
    print("============")
    print(league_table)
    print(f"Best Player: {best_player}")

def dqn_train_keras_rl(self, model_name):
    """Implementation of keras-rl deep Q-learning."""
    env_name = 'neuron_poker-v0'
    stack = 100
    env = gym.make(env_name, initial_stacks=stack, funds_plot=self.funds_plot, render=self.render,
                   use_cpp_montecarlo=self.use_cpp_montecarlo)

    np.random.seed(123)
    env.seed(123)
    env.add_player(EquityPlayer(name='equity/50/70', min_call_equity=.5, min_bet_equity=.7))
    env.add_player(EquityPlayer(name='equity/20/30', min_call_equity=.2, min_bet_equity=.3))
    env.add_player(RandomPlayer())
    env.add_player(RandomPlayer())
    env.add_player(RandomPlayer())
    env.add_player(PlayerShell(name='keras-rl', stack_size=stack))  # shell is used for callback to keras rl
    env.reset()

    dqn = DQNPlayer()
    dqn.initiate_agent(env)
    dqn.train(env_name=model_name)

def equity_vs_random(self):
    """Create 6 players, 4 of them equity based, 2 of them random"""
    env_name = 'neuron_poker-v0'
    stack = 500
    self.env = gym.make(env_name, initial_stacks=stack, render=self.render)
    self.env.add_player(EquityPlayer(name='equity/50/50', min_call_equity=.5, min_bet_equity=-.5))
    self.env.add_player(EquityPlayer(name='equity/50/80', min_call_equity=.8, min_bet_equity=-.8))
    self.env.add_player(EquityPlayer(name='equity/70/70', min_call_equity=.7, min_bet_equity=-.7))
    self.env.add_player(EquityPlayer(name='equity/20/30', min_call_equity=.2, min_bet_equity=-.3))
    self.env.add_player(RandomPlayer())
    self.env.add_player(RandomPlayer())

    for _ in range(self.num_episodes):
        self.env.reset()
        self.winner_in_episodes.append(self.env.winner_ix)

    league_table = pd.Series(self.winner_in_episodes).value_counts()
    best_player = league_table.index[0]
    print("League Table")
    print("============")
    print(league_table)
    print(f"Best Player: {best_player}")

def dqn_play_keras_rl(self):
    """Create 6 players, one of them a trained DQN"""
    env_name = 'neuron_poker-v0'
    stack = 500
    self.env = gym.make(env_name, initial_stacks=stack, render=self.render)
    self.env.add_player(EquityPlayer(name='equity/50/50', min_call_equity=.5, min_bet_equity=.5))
    self.env.add_player(EquityPlayer(name='equity/50/80', min_call_equity=.8, min_bet_equity=.8))
    self.env.add_player(EquityPlayer(name='equity/70/70', min_call_equity=.7, min_bet_equity=.7))
    self.env.add_player(EquityPlayer(name='equity/20/30', min_call_equity=.2, min_bet_equity=.3))
    self.env.add_player(RandomPlayer())
    self.env.add_player(PlayerShell(name='keras-rl', stack_size=stack))
    self.env.reset()

    dqn = DQNPlayer(load_model='dqn1', env=self.env)
    dqn.play(nb_episodes=self.num_episodes, render=self.render)

def dqn_play(self):
    """Create 6 players, one of them a trained DQN"""
    env_name = 'neuron_poker-v0'
    stack = 500
    num_of_plrs = 6
    self.env = gym.make(env_name, num_of_players=num_of_plrs, initial_stacks=stack, render=self.render)
    self.env.add_player(EquityPlayer(name='equity/50/50', min_call_equity=.5, min_bet_equity=-.5))
    self.env.add_player(EquityPlayer(name='equity/50/80', min_call_equity=.8, min_bet_equity=-.8))
    self.env.add_player(EquityPlayer(name='equity/70/70', min_call_equity=.7, min_bet_equity=-.7))
    self.env.add_player(EquityPlayer(name='equity/20/30', min_call_equity=.2, min_bet_equity=-.3))
    self.env.add_player(RandomPlayer())
    self.env.add_player(DQNPlayer(load_model='neuron_poker-v0'))

    for _ in range(self.num_episodes):
        self.env.reset()

def dqn_play_keras_rl(self, model_name):
    """Create 6 players, one of them a trained DQN"""
    from agents.agent_consider_equity import Player as EquityPlayer
    from agents.agent_keras_rl_dqn import Player as DQNPlayer
    from agents.agent_random import Player as RandomPlayer
    env_name = 'neuron_poker-v0'
    self.env = gym.make(env_name, initial_stacks=self.stack, render=self.render)
    self.env.add_player(EquityPlayer(name='equity/50/50', min_call_equity=.5, min_bet_equity=.5))
    self.env.add_player(EquityPlayer(name='equity/50/80', min_call_equity=.8, min_bet_equity=.8))
    self.env.add_player(EquityPlayer(name='equity/70/70', min_call_equity=.7, min_bet_equity=.7))
    self.env.add_player(EquityPlayer(name='equity/20/30', min_call_equity=.2, min_bet_equity=.3))
    self.env.add_player(RandomPlayer())
    self.env.add_player(PlayerShell(name='keras-rl', stack_size=self.stack))
    self.env.reset()

    dqn = DQNPlayer(load_model=model_name, env=self.env)
    dqn.play(nb_episodes=self.num_episodes, render=self.render)

def random_agents(self):
    """Create an environment with 6 random players"""
    num_of_plrs = 6
    self.env = HoldemTable(num_of_players=num_of_plrs, initial_stacks=500)
    for _ in range(num_of_plrs):
        player = RandomPlayer(500)
        self.env.add_player(player)

    self.run_episode()

def equity_vs_random(self):
    """Create 6 players, 4 of them equity based, 2 of them random"""
    self.env = HoldemTable(num_of_players=6, initial_stacks=500)  # 6 seats for the 6 players added below
    self.env.add_player(EquityPlayer(name='equity/50/50', min_call_equity=.5, min_bet_equity=-.5))
    self.env.add_player(EquityPlayer(name='equity/50/80', min_call_equity=.8, min_bet_equity=-.8))
    self.env.add_player(EquityPlayer(name='equity/70/70', min_call_equity=.7, min_bet_equity=-.7))
    self.env.add_player(EquityPlayer(name='equity/20/30', min_call_equity=.2, min_bet_equity=-.3))
    self.env.add_player(RandomPlayer())
    self.env.add_player(RandomPlayer())

    for _ in range(self.num_episodes):
        self.run_episode()
        self.winner_in_episodes.append(self.env.winner_ix)

    league_table = pd.Series(self.winner_in_episodes).value_counts()
    best_player = league_table.index[0]
    print(league_table)
    print(f"Best Player: {best_player}")

def random_agents(self):
    """Create an environment with 2 random players"""
    from agents.agent_random import Player as RandomPlayer
    env_name = 'neuron_poker-v0'
    num_of_plrs = 2
    self.env = gym.make(env_name, initial_stacks=self.stack, render=self.render)
    for _ in range(num_of_plrs):
        player = RandomPlayer()
        self.env.add_player(player)

    self.env.reset()

def random_agents(self):
    """Create an environment with 6 random players"""
    env_name = 'neuron_poker-v0'
    stack = 500
    num_of_plrs = 6
    self.env = gym.make(env_name, num_of_players=num_of_plrs, initial_stacks=stack, render=self.render)
    for _ in range(num_of_plrs):
        player = RandomPlayer()
        self.env.add_player(player)

    self.env.reset()

def dqn_train():
    """Implementation of keras-rl deep Q-learning."""
    env_name = 'neuron_poker-v0'
    stack = 100
    env = gym.make(env_name, num_of_players=2, initial_stacks=stack, funds_plot=False)
    np.random.seed(123)
    env.seed(123)
    env.add_player(EquityPlayer(name='equity/50/70', min_call_equity=.5, min_bet_equity=.7))
    env.add_player(EquityPlayer(name='equity/20/30', min_call_equity=.2, min_bet_equity=.3))
    env.add_player(RandomPlayer())
    env.add_player(RandomPlayer())
    env.add_player(RandomPlayer())
    env.add_player(PlayerShell(name='keras-rl', stack_size=stack))  # shell is used for callback to keras rl
    env.reset()

    dqn = DQNPlayer()
    dqn.initiate_agent(env)
    dqn.train(env_name='dqn1')

def dqn_train_keras_rl(self):
    """Implementation of keras-rl deep Q-learning."""
    env_name = 'neuron_poker-v0'
    stack = 2000
    env = gym.make(env_name, initial_stacks=stack, funds_plot=self.funds_plot, render=self.render,
                   use_cpp_montecarlo=self.use_cpp_montecarlo)

    np.random.seed(123)
    env.seed(123)
    # env.add_player(EquityPlayer(name='equity/50/70', min_call_equity=.5, min_bet_equity=.7))
    # env.add_player(RandomPlayer())
    # env.add_player(RandomPlayer())
    # env.add_player(RandomPlayer())
    # env.add_player(PlayerShell(name='keras-rl-1', stack_size=stack), range=0.9)  # shell is used for callback to keras rl
    # env.add_player(PlayerShell(name='keras-rl-2', stack_size=stack), range=0.9)  # shell is used for callback to keras rl
    # env.add_player(PlayerShell(name='keras-rl-3', stack_size=stack), range=0.9)  # shell is used for callback to keras rl
    # env.add_player(PlayerShell(name='keras-rl-4', stack_size=stack), range=0.9)  # shell is used for callback to keras rl
    # env.add_player(PlayerShell(name='keras-rl-5', stack_size=stack), range=0.9)  # shell is used for callback to keras rl
    # env.add_player(PlayerShell(name='keras-rl-6', stack_size=stack), range=0.9)  # shell is used for callback to keras rl
    # env.add_player(PlayerShell(name='keras-rl-7', stack_size=stack), range=0.9)  # shell is used for callback to keras rl
    env.add_player(PlayerShell(name='LJY', stack_size=stack, range=0.33))  # shell is used for callback to keras rl
    # dqn = DQNPlayer(name='DQN-1', stack_size=2000, range=0.9, env=env, load_model=None)
    # env.add_player(dqn)
    env.add_player(RandomPlayer(name='Random-1', range=1))
    # env.add_player(RandomPlayer(name='Random-2', range=1))
    # env.add_player(RandomPlayer(name='Random-3', range=1))
    # env.add_player(RandomPlayer(name='Random-4', range=1))
    # env.add_player(RandomPlayer(name='Random-5', range=1))
    # env.add_player(RandomPlayer(name='Random-6', range=1))
    # env.add_player(RandomPlayer(name='Random-7', range=1))
    # env.add_player(DQNPlayer(name='DQN-2', stack_size=2000, range=0.9, env=env, load_model=None))
    # env.add_player(DQNPlayer(name='DQN-3', stack_size=2000, range=0.9, env=env, load_model=None))
    # env.add_player(DQNPlayer(name='DQN-4', stack_size=2000, range=0.9, env=env, load_model=None))
    # env.add_player(DQNPlayer(name='DQN-5', stack_size=2000, range=0.9, env=env, load_model=None))
    # env.add_player(DQNPlayer(name='DQN-6', stack_size=2000, range=0.9, env=env, load_model=None))
    # env.add_player(DQNPlayer(name='DQN-7', stack_size=2000, range=0.9, env=env, load_model=None))
    # env.add_player(DQNPlayer(name='DQN-8', stack_size=2000, range=0.9, env=env, load_model=None))
    env.reset()

    # print(env.players[0].range)
    # print(env.players[1].range)
    # print(env.players[2].range)
    # print(env.players[3].range)
    # print(env.players[4].range)
    # print(env.players[5].range)
    dqn = DQNPlayer()
    # dqn.initiate_agent(env, load_model='3dqn_vs_3rd')
    dqn.initiate_agent(env)
    dqn.train(ckpt_name='LJY')

def key_press_agents(self):
    """Create an environment with a key-press agent and a random player"""
    env_name = 'neuron_poker-v0'
    stack = 2000
    # num_of_plrs = 6
    env = gym.make(env_name, initial_stacks=stack, render=self.render)
    player = KeyPressAgent(name="LJY", range=0.3)
    env.add_player(player)
    # self.env.add_player(EquityPlayer(name='equity/50/50', min_call_equity=.5, min_bet_equity=-.5))
    env.add_player(RandomPlayer(name='Random-1', range=1))
    # env.add_player(RandomPlayer(name='Random-2', range=1))
    # env.add_player(RandomPlayer(name='Random-3', range=1))
    # env.add_player(RandomPlayer(name='Random-4', range=1))
    # env.add_player(RandomPlayer(name='Random-5', range=1))
    # self.env.add_player(PlayerShell(name='dqn001', stack_size=stack))
    # self.env.add_player(PlayerShell(name='dqn002', stack_size=stack))
    # self.env.add_player(PlayerShell(name='dqn003', stack_size=stack))
    # self.env.add_player(PlayerShell(name='dqn004', stack_size=stack))
    # self.env.add_player(PlayerShell(name='dqn005', stack_size=stack))
    env.reset()

def dqn_agent(self, mode):
    """Train or play a DQN agent, resolving the agent module and environment version dynamically."""
    my_import = __import__('agents.' + self.agent, fromlist=['Player'])
    player = getattr(my_import, 'Player')

    env_path = 'env'
    if self.env_name != 'v0':
        env_path += '_' + self.env_name
    shell_import = __import__('gym_env.' + env_path, fromlist=['PlayerShell'])
    PlayerShell_import = getattr(shell_import, 'PlayerShell')

    env_name = 'neuron_poker-' + self.env_name
    self.env = gym.make(env_name, initial_stacks=self.stack, funds_plot=self.funds_plot, render=self.render,
                        use_cpp_montecarlo=self.use_cpp_montecarlo)

    np.random.seed(42)
    self.env.seed(42)

    count = 1
    for player_type in self.players:
        if player_type == 0:
            self.env.add_player(RandomPlayer(env_path))
        elif isinstance(player_type, tuple) and len(player_type) == 2:
            self.env.add_player(EquityPlayer(name='equity_' + str(count), env=env_path,
                                             min_call_equity=player_type[0],
                                             min_bet_equity=player_type[1]))
            count += 1

    self.env.add_player(PlayerShell_import(name='keras-rl', stack_size=self.stack))
    self.env.reset()

    if mode == 'train':
        dqn = player()
        dqn.initiate_agent(self.env)
        dqn.train(env_name=self.model_name)
    elif mode == 'play':
        dqn = player(load_model=self.model_name, env=self.env)
        dqn.play(nb_episodes=self.num_episodes, render=self.render)

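# Hypothetical usage of `dqn_agent`, inferred from the parsing loop above: 0 adds a
# RandomPlayer, a (min_call_equity, min_bet_equity) tuple adds an EquityPlayer. The
# runner object and the attribute values below are assumptions for illustration; the
# runner is assumed to already carry stack, render, funds_plot, etc.
def example_dqn_agent_usage(runner):
    """Train a DQN shell against two random players and one equity player."""
    runner.players = [0, 0, (.5, .7)]    # 0 -> RandomPlayer, 2-tuple -> EquityPlayer
    runner.agent = 'agent_keras_rl_dqn'  # module under agents/ exposing Player
    runner.env_name = 'v0'               # resolves to gym id 'neuron_poker-v0'
    runner.model_name = 'dqn1'           # checkpoint name passed to dqn.train
    runner.dqn_agent(mode='train')
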
def ai_vs_random(self, ai_num):
    """
    Created by Xue Hongyan
    Create an environment with the provided number of AI players; the remaining seats are filled with random players.
    """
    env_name = 'neuron_poker-v0'
    stack = 500
    num_of_plrs = 6
    self.env = gym.make(env_name, initial_stacks=stack, render=self.render)

    player_pool = []
    for _ in range(ai_num):
        player = Custom_AI(env=self.env)
        player_pool.append(player)
    for _ in range(num_of_plrs - ai_num):
        player = RandomPlayer()
        player_pool.append(player)

    random.shuffle(player_pool)  # randomize seating order
    for player in player_pool:
        self.env.add_player(player)

    self.env.reset()

def uto_plays(self):
    """Create 6 players: one random, three equity based and two Uto players"""
    env_name = 'neuron_poker-v0'
    stack = 500
    num_of_plrs = 6
    self.env = gym.make(env_name, num_of_players=num_of_plrs, initial_stacks=stack, render=self.render)
    self.env.add_player(RandomPlayer())
    self.env.add_player(EquityPlayer(name='equity/50/80', min_call_equity=.8, min_bet_equity=.4))
    self.env.add_player(EquityPlayer(name='equity/70/70', min_call_equity=.7, min_bet_equity=.5))
    self.env.add_player(EquityPlayer(name='equity/20/30', min_call_equity=.2, min_bet_equity=.6))
    self.env.add_player(UtoPlayer(name='Uto1 1'))
    self.env.add_player(UtoPlayer(name='Uto1 2', min_call_equity=0.46, min_bet_equity=0.56,
                                  min_call_equity_allin=0.7))

    for _ in range(self.num_episodes):
        self.env.reset()
        self.winner_in_episodes.append(self.env.winner_ix)

    league_table = pd.Series(self.winner_in_episodes).value_counts()
    best_player = league_table.index[0]
    print(league_table)
    print(f"Best Player: {best_player}")

def deep_q_learning():
    """Implementation of keras-rl deep Q-learning."""
    env_name = 'neuron_poker-v0'
    stack = 100
    env = gym.make(env_name, num_of_players=5, initial_stacks=stack)
    np.random.seed(123)
    env.seed(123)
    env.add_player(EquityPlayer(name='equity/50/50', min_call_equity=.5, min_bet_equity=-.5))
    env.add_player(EquityPlayer(name='equity/50/80', min_call_equity=.8, min_bet_equity=-.8))
    env.add_player(EquityPlayer(name='equity/70/70', min_call_equity=.7, min_bet_equity=-.7))
    env.add_player(EquityPlayer(name='equity/20/30', min_call_equity=.2, min_bet_equity=-.3))
    env.add_player(RandomPlayer())
    env.add_player(PlayerShell(name='keras-rl', stack_size=stack))  # shell is used for callback to keras rl
    env.reset()

    nb_actions = len(env.action_space)

    # Next, we build a very simple model.
    from keras import Sequential
    from keras.optimizers import Adam
    from keras.layers import Dense, Dropout
    from rl.memory import SequentialMemory
    from rl.agents import DQNAgent
    from rl.policy import BoltzmannQPolicy

    model = Sequential()
    model.add(Dense(64, activation='relu', input_shape=env.observation_space))
    model.add(Dropout(0.2))
    model.add(Dense(64, activation='relu'))
    model.add(Dropout(0.2))
    model.add(Dense(nb_actions, activation='linear'))
    print(model.summary())

    # Finally, we configure and compile our agent. You can use every built-in Keras optimizer and
    # even the metrics!
    memory = SequentialMemory(limit=50000, window_length=1)
    policy = BoltzmannQPolicy()
    dqn = DQNAgent(model=model, nb_actions=nb_actions, memory=memory, nb_steps_warmup=10,
                   target_model_update=1e-2, policy=policy)
    dqn.compile(Adam(lr=1e-3), metrics=['mae'])

    # Okay, now it's time to learn something! We visualize the training here for show, but this
    # slows down training quite a lot. You can always safely abort the training prematurely using
    # Ctrl + C.
    dqn.fit(env, nb_steps=50000, visualize=True, verbose=2)

    # After training is done, we save the final weights.
    dqn.save_weights('dqn_{}_weights.h5f'.format(env_name), overwrite=True)

    # Finally, evaluate our algorithm for 5 episodes.
    dqn.test(env, nb_episodes=5, visualize=True)
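
# The weights saved by deep_q_learning() can be restored onto an identically built
# agent for later evaluation; load_weights is the standard keras-rl counterpart of
# save_weights. A minimal sketch, assuming a DQNAgent and env constructed exactly as above:
def evaluate_saved_dqn(dqn, env, env_name='neuron_poker-v0'):
    """Reload previously saved weights and run a short evaluation."""
    dqn.load_weights('dqn_{}_weights.h5f'.format(env_name))
    dqn.test(env, nb_episodes=5, visualize=True)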