Example #1
# Assumed imports for this snippet: HexGame from the surrounding project and
# rate_1vs1 from the trueskill package, whose API the rating update below uses.
from hexgame import HexGame
from trueskill import rate_1vs1


def match(player1, player2, n_games, size):
    """Play n_games of Hex and update both players' TrueSkill ratings."""
    for _ in range(n_games):
        game = HexGame(size, player1, player2)
        game.play([''])
        if game.win[1] == 1:
            # Player 1 won: rate_1vs1 expects the winner's rating first.
            player1.rating, player2.rating = rate_1vs1(player1.rating,
                                                       player2.rating,
                                                       drawn=False)
        elif game.win[1] == 2:
            # Player 2 won.
            player2.rating, player1.rating = rate_1vs1(player2.rating,
                                                       player1.rating,
                                                       drawn=False)
    return player1.rating, player2.rating
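
The function assumes each player object carries a mutable rating attribute. A minimal usage sketch, assuming trueskill's Rating objects and the HexPlayerRandom class from Example #5; the wiring shown here is illustrative, not project code:

from hexplayer import HexPlayerRandom
from trueskill import Rating

# Purely illustrative: attach default TrueSkill ratings to two random players
# and let match() update them over ten games on a 5x5 board.
p1, p2 = HexPlayerRandom(3), HexPlayerRandom(3)
p1.rating, p2.rating = Rating(), Rating()
r1, r2 = match(p1, p2, n_games=10, size=5)
print(r1.mu, r2.mu)  # updated skill estimates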
Example #2

# Assumed imports for this snippet: pandas for the result table and HexGame
# from the surrounding project.
import pandas as pd

from hexgame import HexGame


def simulate_game(sizes, timeouts, TT_on, n_iter, player1, player2):
    """Record the search depth player1 reaches for every (board size, timeout) pair."""
    combinations = [(s, t) for s in sizes for t in timeouts]
    df = pd.DataFrame(columns=['Board size', 'Timeout', 'Depth'])
    for comb in combinations:
        print(comb)
        # sys.stdout = open(os.devnull, 'w')
        s = comb[0]
        t = comb[1]
        for i in range(n_iter):
            p1 = player1(t, TT_on)
            game = HexGame(s, p1, player2)
            game.step([])
            df.loc[len(df)] = (s, t, p1.reached)
        # sys.stdout = sys.__stdout__
    return df
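
A hedged usage sketch for the helper above: player1 is passed as a class taking (timeout, TT_on), matching the HexPlayerEnhanced constructor seen in Example #7, while player2 is a ready player instance. The exact values are illustrative.

from hexplayer import HexPlayerEnhanced, HexPlayerRandom

# Illustrative call: record the depth HexPlayerEnhanced reaches for two timeout
# settings on 3x3 to 5x5 boards, ten games per configuration.
depth_df = simulate_game(sizes=[3, 4, 5],
                         timeouts=[1, 5],
                         TT_on=True,
                         n_iter=10,
                         player1=HexPlayerEnhanced,
                         player2=HexPlayerRandom(3))
print(depth_df.groupby(['Board size', 'Timeout'])['Depth'].mean())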
Example #3

# Assumed imports for this snippet: pandas, sys/os to silence the game's own
# printing, time for the measurement, and HexGame from the surrounding project.
import os
import sys
import time

import pandas as pd

from hexgame import HexGame


def simulate_game(sizes, depths, n_iter, player1, player2):
    """Time one full game for every (board size, search depth) pair."""
    combinations = [(s, d) for s in sizes for d in depths]
    df = pd.DataFrame(columns=['Board size', 'Depth', 'Time'])
    for comb in combinations:
        print(comb)
        # Suppress the game's output while the timed runs are in progress.
        sys.stdout = open(os.devnull, 'w')
        s = comb[0]
        d = comb[1]
        for i in range(n_iter):
            game = HexGame(s, player1(d), player2)
            start = time.time()
            game.step([])
            df.loc[len(df)] = (s, d, time.time() - start)
        sys.stdout = sys.__stdout__

    return df
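
Either helper returns a tidy DataFrame, so the results can be summarized directly with pandas; a small illustrative aggregation, using the column names from the snippet above:

# Illustrative only: mean wall-clock time per board size and search depth.
summary = df.pivot_table(index='Board size', columns='Depth',
                         values='Time', aggfunc='mean')
print(summary)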
Example #4
    # TOPP (Tournament of Progressive Policies) settings
    TOPP_ONLY = True
    ON_POLICY_DISPLAY = False
    TOPP_DELAY = 0.2

    if not TOPP_ONLY:

        player = 1

        RBUF = ReplayBuffer()
        ANET = NeuralNetActor(SIZE**2, NN_HIDDEN_LAYERS, NN_LEARNING_RATE,
                              NN_ACTIVATION, NN_OPTIMIZER, NN_LOSS_FUNCTION,
                              SAVE_FOLDER)

        # Stateful game used for actual game
        actual_game = HexGame(SIZE, player)
        visualizer = HexMapVisualizer(actual_game.board.cells.values(),
                                      True,
                                      SIZE,
                                      game_type="hex")

        mc_game = HexGame(SIZE, player)
        mc = MCTS(mc_game,
                  MC_EXPLORATION_CONSTANT,
                  a_net=ANET,
                  epsilon=EPSILON)

        for i in tqdm(range(EPISODES + 1)):

            # No action needed to reach initial state
            action = None
Example #5
from hexgame import HexGame
from hexplayer import HexPlayerHuman, HexPlayerRandom

if __name__ == '__main__':
    # test functionality
    game = HexGame(2, HexPlayerRandom(3), None)
    game.step(['tree'])

    # play a game
    game = HexGame(5, HexPlayerRandom(4), HexPlayerHuman())
    game.play()
Example #6
from hexgame import HexGame
from hexplayer import HexPlayerHuman, HexPlayerDijkstra
from hexboard import HexBoard
from hexcolour import HexColour

if __name__ == '__main__':
    # test functionality
    board = HexBoard(3)
    board.set_colour([(0, 1), (1, 1), (2, 1)], HexColour.RED)
    board.dijkstra(HexColour.RED, True)
    board.dijkstra(HexColour.BLUE, True)

    board = HexBoard(5)
    board.set_colour([(0, 3), (2, 2), (4, 1)], HexColour.RED)
    board.dijkstra(HexColour.RED, True)

    # play a game
    game = HexGame(5, HexPlayerDijkstra(4), HexPlayerHuman())
    game.play()
Example #7
from hexgame import HexGame
from hexplayer import HexPlayerHuman, HexPlayerEnhanced
from hexboard import HexBoard

if __name__ == '__main__':
    # test functionality
    board = HexBoard(2)
    for child, _ in board.children():
        print(child.board, hash(child))

    game = HexGame(2, HexPlayerEnhanced(10, True), None)
    game.step(['tree'])
    game = HexGame(2, HexPlayerEnhanced(10, False), None)
    game.step(['tree'])

    # play a game
    game = HexGame(5, HexPlayerEnhanced(10, True), HexPlayerHuman())
    game.play()
Example #8

    def tournament(self):
        """Play self.number_of_games games for every model pairing in self.vs and count wins."""

        self.init_AI()

        for players in self.vs:
            print(
                f"Model {self.model_postfixes[players[0]]} vs. model {self.model_postfixes[players[1]]}"
            )
            starter = 1
            game = HexGame(self.size, starter)
            visualizer = HexMapVisualizer(game.board.cells.values(),
                                          True,
                                          self.size,
                                          game_type="hex")

            smarter_wins = 0

            for i in range(self.number_of_games):

                player = self.AI[players[i % 2]]  # alternate which model starts
                strtr = player.name  # remember which model started this game
                #print(f"Player {game.playing} (model {player.name}) is starting!")

                while not game.is_terminal_state():
                    #print(f"Player {player.name} moving")
                    if self.display:
                        visualizer.draw(game.get_state(), self.delay)

                    state = game.get_simple_state()
                    legal_moves = game.get_reversed_binary()

                    possible_states = game.generate_possible_child_states()

                    pred, idx = player.model.get_move(state, legal_moves)

                    # Usually play the network's best move; when not displaying,
                    # sample from the policy distribution 20% of the time.
                    if random() > (0 if self.display else 0.2):
                        best_index = idx
                    else:
                        best_index = np.random.choice(np.arange(len(pred)),
                                                      p=pred)

                    data = possible_states[best_index]

                    game.do_action(data["action"])

                    prev_player = player.name
                    player = self.change_turn(players, player)

                if self.display:
                    visualizer.draw(game.get_state(), self.delay * 3)
                #print(f"Model {strtr} started, and model {prev_player} (player {game.playing}) won!")

                # The second model of each pairing is counted as the "smarter"
                # one; tally its wins across the series.
                smarter = self.model_postfixes[players[1]]
                if prev_player == smarter:
                    smarter_wins += 1

                #starter = 2 if starter == 1 else 1

                game.reset(starter, hard=True)

            print(
                f"Model {smarter} won {smarter_wins} out of {self.number_of_games} games ({smarter_wins/self.number_of_games*100}%)"
            )
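
The change_turn helper used in the loop above is not part of this excerpt; one plausible sketch, assuming self.AI maps model indices to player objects and players holds the two indices of the current pairing:

    # Hypothetical helper, not from the original source: hand the turn to the
    # other model in the current pairing.
    def change_turn(self, players, player):
        if player is self.AI[players[0]]:
            return self.AI[players[1]]
        return self.AI[players[0]]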