# ===== Code example 1 =====
from deuces import Evaluator
from Poker_Bot import Game_State
from Poker_Bot import Policy
from Poker_Bot import State_Action_Value_Function
from predetermined_policy import predetermined_policy
import os
import pickle

#######################
# LIST BOT NAMES HERE #
#######################
sh_offset = 0  # Offset snapshot numbering
bot_name_1 = "bot_predet_1"
bot_name_2 = "bot_predet_2"

# Seed both bots with the fixed, hand-written policy from
# predetermined_policy() instead of a learned/uniform one.
p1 = Policy(predetermined_policy())
print("New predetermined policy function created for " + bot_name_1)
p2 = Policy(predetermined_policy())
print("New predetermined policy function created for " + bot_name_2)
# NOTE(review): the messages below say "zero SAVF" but create_uniform() is
# called with no arguments — confirm its default init_value is 0.0 (a later
# snippet in this file calls create_uniform(init_value=0.0) explicitly).
q1 = State_Action_Value_Function.create_uniform()
print("New zero SAVF created for " + bot_name_1)
q2 = State_Action_Value_Function.create_uniform()
print("New zero SAVF created for " + bot_name_2)

# Main training loop
# The program will conduct no_rounds training rounds, and then check if the file "quit" is present in
# the same folder as this script
# Note that bot 1 is treated as "player" and bot 2 is treated as the opponent
gs = Game_State()
learning_rate = 0.0001
#epsilon = 0.20
# ===== Code example 2 =====
# Parameters
bot_name = "bot_46_ep2_lr001"  # The bot that you want to view
obj_path = "snaps/"  # The folder that your bot is in
tot_snap = 36  # The total number of bot snapshots

# Initialize matrices to store information
# One row per snapshot, one column per tracked (state, action) pair.
# NOTE(review): assumes np, policy_suffix and savf_suffix are defined earlier
# in this file — confirm before running this snippet standalone.
prob_bet = np.zeros((tot_snap, 10))
prob_fold = np.zeros((tot_snap, 10))
savf_bet = np.zeros((tot_snap, 10))
savf_fold = np.zeros((tot_snap, 10))

count = 0
for i in range(0, tot_snap):

    # Load the policies
    # Snapshots are saved every 100000 hands, so snapshot i+1 is at
    # <obj_path><bot_name>_sh<100000*(i+1)><suffix>.
    p = Policy.create_from_file(obj_path + bot_name + '_sh' +
                                str(100000 * (i + 1)) + policy_suffix)
    q = State_Action_Value_Function.create_from_file(obj_path + bot_name +
                                                     '_sh' + str(100000 *
                                                                 (i + 1)) +
                                                     savf_suffix)

    # Convert the policies to dictionaries
    policy = p.policy_function
    savf = q.state_action_value_function

    # The cards that we are interested in tracking
    # Ace, Ace
    # Keys look like (street, rank1, rank2, action_history, action);
    # rank 12 is an Ace — presumably 0 = pre-flop — TODO confirm.
    prob_bet[i][0] = policy[(0, 12, 12, (), 'B')]
    prob_fold[i][0] = policy[(0, 12, 12, ('B', ), 'F')]
    savf_bet[i][0] = savf[(0, 12, 12, (), 'B')]
    savf_fold[i][0] = savf[(0, 12, 12, ('B', ), 'F')]
# ===== Code example 3 =====
from deuces import Card
from deuces import Deck
import numpy as np

# Smoke-test the Game_State class: deal two hole cards to each player plus a
# flop, record two bets, then print every piece of derived state.
# NOTE(review): Game_State is not imported in this snippet — assumes it is in
# scope from an earlier example's import.
gs = Game_State()
deck = Deck()
gs.set_player_cards(deck.draw(2))
gs.set_opponent_cards(deck.draw(2))
gs.set_flop(deck.draw(3))
gs.append_action('B')
gs.append_action('B')

gs.print_player_cards()
gs.print_opponent_cards()
gs.print_player_hand()
gs.print_opponent_hand()
print(gs.is_winner())
# BUG FIX: this was a Python 2 print statement ("print gs....") — a
# SyntaxError under Python 3, while the rest of this script uses print().
print(gs.get_current_state_id())

# Used to test Policy class

from Poker_Bot import Policy
p = Policy.create_uniform()
opt_state_action = (1, 2, 4, ('Ch', 'B'), 'B')
p.update(opt_state_action)

# Used to test State_Action_Value_Function class

from Poker_Bot import State_Action_Value_Function
savf = State_Action_Value_Function.create_uniform(init_value=0.0)
# ===== Code example 4 =====
sh_offset = 0  # Offset snapshot numbering

bot_name_1 = "bot_10"  # The bot that we are training
bot_name_2 = "bot_11"  # The bot we are not training

#############################################################
# Decide if we want to train bot_name_1
trainbot1 = True
#############################################################

# Load/create first bot
# NOTE(review): obj_path, policy_suffix and savf_suffix are assumed to be
# defined earlier in this file — confirm.
if os.path.isfile(obj_path + bot_name_1 +
                  policy_suffix) and os.path.isfile(obj_path + bot_name_1 +
                                                    savf_suffix):
    print("Loading existing data for " + bot_name_1 + "...")
    # BUG FIX: the existence check above includes obj_path, but the load
    # calls omitted it, so the files were opened relative to the working
    # directory instead (the snapshot-loading example in this file passes the
    # obj_path-prefixed name to create_from_file).
    p1 = Policy.create_from_file(obj_path + bot_name_1 + policy_suffix)
    print("Loaded " + bot_name_1 + " policy file")
    q1 = State_Action_Value_Function.create_from_file(obj_path + bot_name_1 +
                                                      savf_suffix)
    print("Loaded " + bot_name_1 + " state action value function file")
else:
    print("Policy and state action value function files don't exist for " +
          bot_name_1 + ". Create new ones...")
    p1 = Policy.create_uniform()
    print("New policy function created for " + bot_name_1)
    q1 = State_Action_Value_Function.create_uniform()
    # BUG FIX: this message previously repeated "New policy function created"
    # (copy/paste error); q1 is the state action value function.
    print("New state action value function created for " + bot_name_1)

# Load/create second bot
if os.path.isfile(obj_path + bot_name_2 +
                  policy_suffix) and os.path.isfile(obj_path + bot_name_2 +
                                                    savf_suffix):
# ===== Code example 5 =====
learning_rate = 0.05
epsilon = 0.05
is_button = 1  # If AI is button

policy_suffix = ".policy"
savf_suffix = ".savf"
obj_path = "obj/"

bot_name = "bot_1"  # Name of bot to play against. Will load existing bot or create new one.

# Load/create bot
if os.path.isfile(obj_path + bot_name +
                  policy_suffix) and os.path.isfile(obj_path + bot_name +
                                                    savf_suffix):
    print("Loading existing data for " + bot_name + "...")
    # BUG FIX: the existence check above includes obj_path, but the load
    # calls omitted it, so the files were opened relative to the working
    # directory instead of obj/ (the snapshot-loading example in this file
    # passes the path-prefixed name to create_from_file).
    p = Policy.create_from_file(obj_path + bot_name + policy_suffix)
    print("Loaded " + bot_name + " policy file")
    q = State_Action_Value_Function.create_from_file(obj_path + bot_name +
                                                     savf_suffix)
    print("Loaded " + bot_name + " state action value function file")
else:
    print("Policy and state action value function files don't exist for " +
          bot_name + ". Create new ones...")
    p = Policy.create_uniform()
    print("New policy function created for " + bot_name)
    q = State_Action_Value_Function.create_uniform()
    # BUG FIX: this message previously repeated "New policy function created"
    # (copy/paste error); q is the state action value function.
    print("New state action value function created for " + bot_name)

while True:
    # Start new round and deal cards
    print("Starting round " + str(round_count) + "...")
    gs.clear_state()