Example #1
0
def main():
    """Run a four-player Hearts game between random players A-D."""
    # One RandomPlayer per seat, labelled by letter.
    seats = [RandomPlayer(label) for label in 'ABCD']
    game = HeartsGame(seats)
    game.playGame()
Example #2
0
 def make_player(name, num):
     """Build a player of the requested kind ('ai', 'random' or 'human').

     Unknown names fall through and return None, matching the original
     if/elif chain with no else branch.
     """
     factories = {'ai': AIPlayer, 'random': RandomPlayer, 'human': HumanPlayer}
     factory = factories.get(name)
     if factory is not None:
         return factory(num)
Example #3
0
 def make_player(name, num):
     """Instantiate the player class matching *name* ('ai', 'random', 'human')."""
     if name == 'ai':
         # TODO Change this back to hand in just board
         player_cls = AIPlayer
     elif name == 'random':
         player_cls = RandomPlayer
     elif name == 'human':
         player_cls = HumanPlayer
     else:
         # Unknown kind: same implicit-None outcome as the original chain.
         return None
     return player_cls(num)
Example #4
0
def PruebaJuego():
    """Smoke-test: one human and one random player play a full game."""
    from Game import Game
    from Models.Player import RandomPlayer, HumanPlayer

    partida = Game()
    # Register both participants, human first.
    partida.newPlayer(HumanPlayer("Salvador"))
    partida.newPlayer(RandomPlayer())
    partida.StartGame()
Example #5
0
    def test_black_move_out(self):
        """Set up a sparse position and run a game where black moves with a 4-6 roll."""
        black_player = RandomPlayer(Checker.BLACK)
        white_player = RandomPlayer(Checker.WHITE)

        game = Game(player_1=white_player,
                    player_2=black_player,
                    create_protocol=False)
        # Start from an empty board; hand the turn to black with dice 4 and 6.
        game.board.board = game.board.clear_board()
        game.current_player = black_player
        game.current_dice = Die(4, 6)
        # Two lone black checkers on points 19 and 22.
        game.board.place_at(19, Checker.BLACK, 1)
        game.board.place_at(22, Checker.BLACK, 1)
        # A stack of four white checkers on point 18.
        game.board.place_at(18, Checker.WHITE, 4)

        game.run()

        # Inspect the moves black could generate for the same roll.
        moves = generate_moves_serial(self.black, Die(4, 6),
                                      game.board.get_view(True))
        print(moves)
Example #6
0
def minmax_vs_radom(cycles, min_max_depth, state_eval_factory, log_file):
    """Play *cycles* games of MinMax (white) vs. a random player (black) and print win rates."""
    outcomes = []
    for round_no in range(cycles):
        white = MinMaxPlayer("Biały", True, 6, 3,
                             min_max_depth, state_eval_factory, log_file, round_no)
        black = RandomPlayer("Czarny", False, 0, 3)
        outcomes.append(new_game(Board(white, black)))
    # Non-zero results are counted toward black — presumably new_game's
    # win convention; TODO confirm against new_game's return value.
    black_wins = np.count_nonzero(outcomes) / cycles * 100
    white_wins = 100 - black_wins
    print("Bialy wygral: " + str(white_wins) + "%")
    print("Czarny wygral: " + str(black_wins) + "%")
Example #7
0
def __main__():
    """Train (or load) a q-learning Coup agent and report its win rates.

    Fix: the two 2000-game evaluation loops were duplicated verbatim;
    they are extracted into the local ``_win_rate`` helper.
    """

    def _win_rate(game, agent, opponent, num_games):
        # Fraction of games the agent (seat 0) wins against the opponent.
        wins = 0.0
        for _ in range(num_games):
            game.setup(agent, opponent)
            if game.play() == 0:
                wins += 1.0
        return wins / num_games

    print(
        "\nThe test script should be run in an environment with pickle installed. \nCurrently this test script tests the pre-trained q-table saved in \"picklefinal\". \nI\'m using q-learning to play a simplified version of Coup. \nThe instructions to the full game are here in video format: \nhttps://youtu.be/a8bY3zI9FL4 \n\nThe simplified version removes the Ambassador card and limits the game to two players. All other state transitions and game phases are in play. \n\nMy code implements the game and encodes private state information with the public state information in the form of \"certainty bins\" for each player based on the history of actions played since the start of the game, resetting every time a new game is called. It then uses q-learning to train about one hour (about 13000 games to adequately explore the state space) against a vaguely optimal-policy player. Here are my promising results at just an hour of training:\n"
    )

    print("\n...starting test run")
    game = TwoSimpCoup()

    qbuilder = BasicQPolicy(game)

    # Train for one hour of wall-clock time, then persist the q-table.
    qpolicy = qbuilder.q_learn(3600)
    qbuilder.save_table('picklefinal')

    #qpolicy = qbuilder.q_learn(300)
    #qbuilder.save_table('pickletest2')

    #qpolicy = qbuilder.q_learn(10000)

    #qbuilder.load_table("picklefinal")
    #qpolicy = qbuilder.get_policy()

    print(
        "...successfully loaded pre-trained table or trained a table from scratch. Check qcoup.py for information on training a new agent."
    )

    p1 = PolicyPlayer(game, 0, qpolicy)
    p2 = PolicyPlayer(game, 1, optimal_policy)
    p3 = RandomPlayer(game, 1)
    num_games = 2000

    print("success rate against optimal in 2000 games: " +
          str(_win_rate(game, p1, p2, num_games)))
    print("success rate against random in 2000 games: " +
          str(_win_rate(game, p1, p3, num_games)) + "\n")

    print(
        "The results currently converge around 0.55 against the optimal policy and 0.58 against the random policy. The rate against the random policy is expected given the challenge mechanic and the likelihood of the agent developing a policy that includes a multitude of strategic bluffs."
    )
    print("\n For more documentation please refer to info.txt")
Example #8
0
def PruebaRandomPlayers():
    """Smoke-test RandomPlayer score-card logic against five dice rolls."""
    from Player import RandomPlayer
    from Dice import Dice

    rolls = [Dice().roll() for _ in range(5)]

    jugador = RandomPlayer()
    # Pre-fill a few boxes so the value map reflects a mid-game card.
    for box, points in (("Ones", 6), ("Chance", 21),
                        ("Fours", 16), ("Full House", 25)):
        jugador.setScore(box, points)
    vm_lower, vm_upper = jugador.getScoreCardVm(rolls)
    vm_all = dict(vm_lower, **vm_upper)
    print("Dices:", rolls, sep=" ")
    print("ExtendedVm:", jugador.selectBox(vm_all), sep=" ")
Example #9
0
    def test(self, enemyPlayer=None, games=100, debug=False):
        """Play *games* games of ModelPlayer (black) vs. *enemyPlayer* (white).

        Fix: the original default ``enemyPlayer=RandomPlayer('white')`` was
        evaluated once at function-definition time, so every call shared one
        player instance. A ``None`` sentinel now builds a fresh opponent per
        call; explicit callers are unaffected.

        Prints a running score line after each game.
        """
        if enemyPlayer is None:
            enemyPlayer = RandomPlayer('white')
        players = [ModelPlayer('black', self), enemyPlayer]

        winners = {'black': 0, 'white': 0}
        for i in range(games):
            game = Game()

            winner = game.play(players, debug=debug)
            winners[winner] += 1

            winners_total = sum(winners.values())
            print("[Game %d] %s (%s) vs %s (%s) %d:%d of %d games (%.2f%%)" % (i, \
                players[0].get_name(), players[0].player, \
                players[1].get_name(), players[1].player, \
                winners['black'], winners['white'], winners_total, \
                (winners['black'] / winners_total) * 100.0))
Example #10
0
    def load_random(self, a_map, n_colonies, min_size, max_size, human_players, n_enemies, possible_races):
        """Populate *a_map*: starting colonies on an inset ring plus random neutral colonies.

        Fix: the ring-position computation was duplicated for humans and
        enemies — extracted into ``_ring_position``; the dead trailing
        ``pass`` was removed.
        """
        a_map.empty()
        n_players = len(human_players) + n_enemies
        # Random starting angle; players are then spaced evenly around the ring.
        theta = np.random.rand(1)[0] * 2 * np.pi
        center = Point(a_map.width / 2, a_map.height / 2)
        step = (2 * np.pi) / n_players

        def _ring_position(angle):
            # Starting colonies sit on an ellipse inset by the colony radius.
            return Point(
                np.cos(angle) * (a_map.width / 2 - INITIAL_COLONIES_RADIUS),
                np.sin(angle) * (a_map.height / 2 - INITIAL_COLONIES_RADIUS),
            ) + center

        for human in human_players:
            GraphicColony(a_map, RegularColony(human.race, INITIAL_PLAYER_COLONY_SIZE),
                          _ring_position(theta), INITIAL_COLONIES_RADIUS)
            theta += step
        other_players = human_players.copy()
        for i in range(n_enemies):
            enemy = RandomPlayer(other_players)
            enemy.random_race(other_players, possible_races)
            other_players.append(enemy)
            GraphicColony(a_map, RegularColony(enemy.race, INITIAL_PLAYER_COLONY_SIZE),
                          _ring_position(theta), INITIAL_COLONIES_RADIUS)
            theta += step
        self.players = other_players
        for i in range(n_colonies):
            # Rejection-sample a neutral colony until it fits on the map.
            while True:
                radius = np.random.randint(min_size, max_size)
                position = Point(
                    np.random.randint(radius, a_map.width - radius),
                    np.random.randint(radius, a_map.height - radius)
                )
                if a_map.can_place_colony(radius, position):
                    GraphicColony(a_map, RegularColony(NullRace(), INITIAL_NULL_COLONY_SIZE), position, radius)
                    break
                print("Can't place random colony. Generating a new one.")
Example #11
0
    def load_in_map(self, map_data, a_map, human_players):
        """Rebuild *a_map* from saved *map_data*, generating random enemy players.

        Fix: the colony-restoration loop body was triplicated (player /
        enemy / empty colonies) — extracted into the local ``_place`` helper.
        """
        print("LOADING MAP")
        a_map.empty()
        players = human_players.copy()
        enemies = []
        # Saved coordinates assume a 1600-wide map; rescale to the live map.
        scale_ratio = a_map.width / 1600

        def _place(entry, label, size, race):
            # Restore one saved colony entry: (colony, owner_idx, position, radius).
            print(label)
            colony = entry[0]
            colony.empty()
            colony.set_size(size)
            colony.set_race(race)
            GraphicColony(a_map, colony, entry[2].copy().scale(scale_ratio),
                          entry[3] * scale_ratio)

        for i in range(map_data[N_ENEMIES_FIELD]):
            new = RandomPlayer()
            print("ADDING " + str(new.name))
            new.random_race(players, self.driver.get_races_array())
            players.append(new)
            enemies.append(new)
        for i in map_data[PLAYER_COL_FIELD]:
            _place(i, "ADDING PLAYER COLONY", INITIAL_PLAYER_COLONY_SIZE, players[i[1]].race)
        for i in map_data[ENEMY_COL_FIELD]:
            _place(i, "ADDING ENEMY COLONY", INITIAL_PLAYER_COLONY_SIZE, enemies[i[1]].race)
        for i in map_data[EMPTY_COL_FIELD]:
            _place(i, "ADDING EMPTY COLONY", INITIAL_NULL_COLONY_SIZE, NullRace())

        self.players = players
        print("LOADED")
Example #12
0
 def setUp(self):
     """Create the RandomPlayer under test using the mocked player number."""
     mock_num = PlayerTest.MOCK_PLAYER_NUM
     self.player = RandomPlayer(player_num=mock_num)
Example #13
0
from Moves import *
from Game import *
from Player import HumanPlayer, RandomPlayer
from AIPlayer import AIPlayer
import random
import sys

#player1 = HumanPlayer()
#player2 = AIPlayer()
#
#game = Game(player1, player2)
#winner = game.play()
#print "Winner: %s" % (winner,)

# Default match-up: the AI player against a random opponent.
player1 = AIPlayer()
player2 = RandomPlayer()


def tournament(n, player1, player2):
    """Play *n* quiet games between player1 and player2 and return win counts.

    Returns a 3-slot list of wins indexed by the winning colour's
    ``flavourNr``. A progress dot is written to stderr every 10 games.

    Fix: ``xrange`` is Python 2 only and raises NameError under Python 3,
    which this file otherwise targets (``print(...)`` calls); use ``range``.
    """
    wons = [0, 0, 0]
    for i in range(n):
        game = Game(player1, player2, quiet=True)
        (winner, winColor) = game.play()
        wons[winColor.flavourNr] += 1
        if i % 10 == 9:
            sys.stderr.write('.')
    print(wons)
    return wons


#ai = AIPlayer()
#tournament(400, ai, RandomPlayer())
Example #14
0
from FancyDisplay import FancyDisplay
from Move import Move
import sys
import argparse

parser = argparse.ArgumentParser(description='Play a game of chess!')
parser.add_argument('playertypes', nargs=2, choices=['h', 'r', 'g', 'n', 'np', 'id', 'skirmish'], help='playertype of white and black')
args = parser.parse_args()

# Map each player-type flag to its player class; argparse's `choices`
# guarantees every parsed value has an entry here.
PLAYER_FACTORIES = {
    'h': HumanPlayer,
    'r': RandomPlayer,
    'g': GreedyPlayer,
    'n': NegamaxPlayer,
    'np': NegamaxPruningPlayer,
    'id': IterativeDeepeningPlayer,
    'skirmish': SkirmishPlayer,
}

# Instantiate one player per colour from the command-line flags.
players = {
    color: PLAYER_FACTORIES[args.playertypes[i]]()
    for i, color in enumerate(('white', 'black'))
}

game = Board()
fancy = FancyDisplay()
Example #15
0
File: Main.py Project: batherk/hex
# NOTE(review): this excerpt continues an if/elif chain over RUNS[RUN] whose
# opening `if` lies above this chunk; it is documented in place only.
elif RUNS[RUN] == "Match with loaded nets":
    print(
        "Mode: Match. Two players play against each other after loading saved nets."
    )
    game = HexGame()
    # A match needs exactly two saved nets to pit against each other.
    if len(NETS_MATCH) != 2:
        raise ValueError("There must be two nets to play a match")
    player1 = NetBotFromLoading(NETS_MATCH[0])
    player2 = NetBotFromLoading(NETS_MATCH[1])
    match = Match(game, player1, player2)
    match.play_games()
elif RUNS[RUN] == "Match vs random":
    print("Mode: Match. Loaded default against random player")
    game = HexGame()
    # Benchmark the default saved net against a purely random opponent.
    player1 = NetBotFromLoading(DEFAULT_NET)
    player2 = RandomPlayer()

    match = Match(game, player1, player2)
    match.play_games()
elif RUNS[RUN] == "Tournament - different net structures using replay buffer":
    print("Mode: Tournament. Several players play against each other.")
    game = HexGame()
    replay_buffer = ReplayBuffer()
    # Four net configurations: {Adam, SGD} x {relu, sigmoid}, sharing one replay buffer.
    net1 = Dense(hidden_layers=[(100, relu)], optimizer=Adam)
    net2 = Dense(hidden_layers=[(100, sigmoid)], optimizer=Adam)
    net3 = Dense(hidden_layers=[(100, relu)], optimizer=SGD)
    net4 = Dense(hidden_layers=[(100, sigmoid)], optimizer=SGD)
    player1 = NetBotFromTraining("Adam relu", net1, replay_buffer)
    player2 = NetBotFromTraining("Adam sig", net2, replay_buffer)
    player3 = NetBotFromTraining("SGD relu", net3, replay_buffer)
    player4 = NetBotFromTraining("SGD sig", net4, replay_buffer)
Example #16
0
 def __init__(self):
     """Set up a white-vs-black random-player game without a protocol file."""
     white = RandomPlayer(Checker.WHITE)
     black = RandomPlayer(Checker.BLACK)
     self.player1 = white
     self.player2 = black
     self.game = Game(white, black, create_protocol=False)
Example #17
0
 def setUp(self):
     """Create a board backed by four random players numbered 0-3."""
     self.players = []
     for num in range(4):
         self.players.append(RandomPlayer(num))
     self.board = Board(self.players)
Example #18
0
def play_against_ai():
    """Run one recorded game: AI plays white against a random black player."""
    ai_white = AiPlayer(Checker.WHITE)
    random_black = RandomPlayer(Checker.BLACK)
    # create_protocol=True so the game writes its move protocol.
    match = Game(player_1=ai_white, player_2=random_black, create_protocol=True)
    match.run()
Example #19
0
 def setUp(self):
     """Build the board under test, populated by four random-type players."""
     # Random players suffice here — only board functionality is exercised.
     self.players = list(map(RandomPlayer, range(4)))
     self.board = Board(self.players)