def main(filename=None):
    """Sweep the alpha parameter of the moves_combined heuristic.

    For each alpha in {0.0, 0.2, ..., 1.0} (hundredths built from ints to
    avoid float drift), an iterative alpha-beta agent using
    ``moves_combined(alpha)`` plays a round against fixed-depth alpha-beta
    opponents.

    Parameters
    ----------
    filename : str, optional
        When given, one tab-separated "alpha<TAB>result" line per tested
        alpha is written to this file.
    """
    MATCHES = 20
    HEURISTICS = [("Null", null_score),
                  ("Open", open_move_score),
                  ("Improved", improved_score)]
    MM_ARGS = {"search_depth": 3, "method": 'minimax', "iterative": False}
    AB_ARGS = {"search_depth": 5, "method": 'alphabeta', "iterative": False}
    CUSTOM_ARGS = {"method": 'alphabeta', 'iterative': True}

    # mm_agents / random_agents are kept for the fuller opponent field
    # preserved in the commented-out line inside the loop below.
    mm_agents = [Agent(CustomPlayer(score_fn=h, **MM_ARGS),
                       "MM_" + name) for name, h in HEURISTICS]
    ab_agents = [Agent(CustomPlayer(score_fn=h, **AB_ARGS),
                       "AB_" + name) for name, h in HEURISTICS]
    random_agents = [Agent(RandomPlayer(), "Random")]
    alphas = [x * 0.01 for x in range(0, 120, 20)]
    test_agents = [Agent(CustomPlayer(score_fn=moves_combined(a), **CUSTOM_ARGS),
                         "Alpha" + str(a))
                   for a in alphas]
    results = []
    print(datetime.now())  # timestamps bracket the (long) tournament run
    for agentUT in test_agents:
        print(agentUT.name)
        #agents = mm_agents + random_agents + ab_agents + [agentUT]
        agents = ab_agents + [agentUT]
        results.append(play_round_2(agents, MATCHES))
    print(datetime.now())
    if filename:  # save the results into a text file
        with open(filename, 'w') as f:
            # Pair each alpha with its result directly rather than indexing
            # two parallel lists by position.
            for alpha, result in zip(alphas, results):
                print('{}\t{}'.format(alpha, result), file=f)
Exemple #2
0
def main():
    """Run the calibration round: ID_Improved and Student (both iterative
    alpha-beta) play, and the Student win ratio is printed."""
    HEURISTICS = [("Null", null_score),
                  ("Open", open_move_score),
                  ("Improved", improved_score)]
    AB_ARGS = {"search_depth": 5, "method": 'alphabeta', "iterative": False}
    MM_ARGS = {"search_depth": 3, "method": 'minimax', "iterative": False}
    CUSTOM_ARGS = {"method": 'alphabeta', 'iterative': True}

    # Fixed-depth CPU opponents plus a random mover.  Agent names encode the
    # search method (MM = minimax, AB = alpha-beta) and the heuristic
    # (Null / Open / Improved), e.g. "MM_Open" is minimax with
    # open_move_score.  (Built here even though only test_agents is played.)
    mm_agents = []
    ab_agents = []
    for label, heuristic in HEURISTICS:
        mm_agents.append(Agent(CustomPlayer(score_fn=heuristic, **MM_ARGS),
                               "MM_" + label))
        ab_agents.append(Agent(CustomPlayer(score_fn=heuristic, **AB_ARGS),
                               "AB_" + label))
    random_agents = [Agent(RandomPlayer(), "Random")]

    # ID_Improved is the cross-machine calibration baseline: the Student
    # agent's performance is judged relative to it.
    baseline = Agent(CustomPlayer(score_fn=improved_score, **CUSTOM_ARGS),
                     "ID_Improved")
    student = Agent(CustomPlayer(score_fn=custom_score, **CUSTOM_ARGS),
                    "Student")
    test_agents = [baseline, student]

    print(DESCRIPTION)

    win_ratio = play_round(test_agents, NUM_MATCHES)
    print("\n\nResults:")
    print("----------")
    print("{!s:<15}{:>10.2f}%".format(test_agents[1].name, win_ratio))
Exemple #3
0
def main():
    """Round-robin evaluation of the four custom-score agents.

    Each agent is in turn evaluated against the other three, and its win
    ratio is printed.
    """
    CUSTOM_ARGS = {"method": 'alphabeta', 'iterative': True}

    score_1_agent = Agent(CustomPlayer(score_fn=custom_score_1, **CUSTOM_ARGS), "Student1")
    score_2_agent = Agent(CustomPlayer(score_fn=custom_score_2, **CUSTOM_ARGS), "Student2")
    score_3_agent = Agent(CustomPlayer(score_fn=custom_score_3, **CUSTOM_ARGS), "Student3")
    score_4_agent = Agent(CustomPlayer(score_fn=custom_score_4, **CUSTOM_ARGS), "Student4")

    # NOTE(review): the original line here was `[for _ in range(10)]`, which
    # is a SyntaxError.  The four agents built above are the only plausible
    # participants, so they form the field — confirm the intended roster.
    test_agents = [score_1_agent, score_2_agent, score_3_agent, score_4_agent]

    print(DESCRIPTION)
    for agentUT in test_agents:
        print("")
        print("*************************")
        print("{:^25}".format("Evaluating: " + agentUT.name))
        print("*************************")

        # Move the agent under test to the end of the list — presumably
        # play_round treats the last agent as the one under test; confirm.
        agents = test_agents.copy()
        agents.remove(agentUT)
        agents = agents + [agentUT]
        win_ratio = play_round(agents, NUM_MATCHES)

        print("\n\nResults:")
        print("----------")
        print("{!s:<15}{:>10.2f}%".format(agentUT.name, win_ratio))
Exemple #4
0
def mymain():
    """Play five pairs of mirrored games between a net-mobility agent and an
    improved-score agent, printing the winner of each game; optionally
    replay each finished game in the Visualizer.
    """
    # Set True to replay each finished game in the Visualizer window.
    visualizing = False
    #     mm_null_reg_agent = Agent(CustomPlayer(score_fn=null_score, method='minimax', search_depth=3, iterative=False), "mm_null_reg_agent")
    #     mm_open_reg_agent = Agent(CustomPlayer(score_fn=open_move_score, method='minimax', search_depth=3, iterative=False), "mm_open_reg_agent")
    #     mm_impr_reg_agent = Agent(CustomPlayer(score_fn=improved_score, method='minimax', search_depth=3, iterative=False), "mm_impr_reg_agent")
    #     mm_cstm_reg_agent = Agent(CustomPlayer(score_fn=custom_score, method='minimax', search_depth=3, iterative=False), "mm_cstm_reg_agent")
    #     ab_null_reg_agent = Agent(CustomPlayer(score_fn=null_score, method='alphabeta', search_depth=5, iterative=False), "ab_null_reg_agent")
    #     ab_open_reg_agent = Agent(CustomPlayer(score_fn=open_move_score, method='alphabeta', search_depth=3, iterative=False), "ab_open_reg_agent")
    #     ab_impr_reg_agent = Agent(CustomPlayer(score_fn=improved_score, method='alphabeta', search_depth=3, iterative=False), "ab_impr_reg_agent")
    #     ab_cstm_reg_agent = Agent(CustomPlayer(score_fn=custom_score, method='alphabeta', search_depth=3, iterative=False), "ab_cstm_reg_agent")
    #     ab_null_id_agent = Agent(CustomPlayer(score_fn=null_score, method='alphabeta', search_depth=3, iterative=True), "ab_null_id_agent")
    #     ab_open_id_agent = Agent(CustomPlayer(score_fn=open_move_score, method='alphabeta', search_depth=3, iterative=True), "ab_open_id_agent")
    #     ab_impr_id_agent = Agent(CustomPlayer(score_fn=improved_score, method='alphabeta', search_depth=3, iterative=True), "ab_impr_id_agent")
    #     ab_cstm_id_agent = Agent(CustomPlayer(score_fn=custom_score, method='alphabeta', search_depth=3, iterative=True), "ab_cstm_id_agent")

    # Both players use iterative-deepening alpha-beta; only the heuristic
    # differs (net_mobility_score vs improved_score).
    player1 = Agent(
        CustomPlayer(score_fn=net_mobility_score,
                     method='alphabeta',
                     search_depth=3,
                     iterative=True), "Custom")
    player2 = Agent(
        CustomPlayer(score_fn=improved_score,
                     method='alphabeta',
                     search_depth=3,
                     iterative=True), "Improved")

    # Play a few games:
    for i in range(0, 5):
        # game2 swaps the seats so each agent plays both first and second
        # from the same opening position.
        game1 = Board(player1.player, player2.player)
        game2 = Board(player2.player, player1.player)

        # Initial location: apply the same two random opening moves to both
        # boards so the mirrored games start from identical positions.
        move = random.choice(game1.get_legal_moves())
        game1.apply_move(move)
        game2.apply_move(move)
        move = random.choice(game1.get_legal_moves())
        game1.apply_move(move)
        game2.apply_move(move)

        # Map the winning player object to 1/2 from player1's perspective.
        winner1, moves1, reason1 = game1.play()
        winner1 = 1 if player1.player == winner1 else 2
        winner2, moves2, reason2 = game2.play()
        winner2 = 1 if player1.player == winner2 else 2
        print("Player {} won game 1. Reason: {}".format(winner1, reason1))
        print("Player {} won game 2. Reason: {}".format(winner2, reason2))

        if visualizing:
            print("Replaying moves for game 1...")
            print(moves1)
            visualizer = Visualizer(player1.name, player2.name, moves1)
            visualizer.play()
            visualizer.quit()
            print("Replaying moves for game 2...")
            print(moves2)
            visualizer = Visualizer(player2.name, player1.name, moves2)
            visualizer.play()
            visualizer.quit()

        print("Done")
Exemple #5
0
def main():
    """Run the calibration tournament: ID_improved_score and ID_custom_score
    each play a field of random, fixed-depth minimax, and fixed-depth
    alpha-beta opponents; the win ratio of each is printed."""
    HEURISTICS = [("Null", null_score), ("Open", open_move_score),
                  ("Improved", improved_score)]

    MM_ARGS = {"search_depth": 3, "method": 'minimax', "iterative": False}
    AB_ARGS = {"search_depth": 5, "method": 'alphabeta', "iterative": False}
    CUSTOM_ARGS = {"search_depth": 3, "method": 'alphabeta', 'iterative': True}

    # CPU opponents.  The name prefix encodes search method and depth, the
    # suffix the heuristic — e.g. "MM/3_Open" is depth-3 minimax with
    # open_move_score.
    mm_agents = []
    ab_agents = []
    for label, heuristic in HEURISTICS:
        mm_agents.append(
            Agent(CustomPlayer(score_fn=heuristic, **MM_ARGS), "MM/3_" + label))
        ab_agents.append(
            Agent(CustomPlayer(score_fn=heuristic, **AB_ARGS), "AB/5_" + label))
    random_agents = [Agent(RandomPlayer(), "Random")]

    # ID_improved_score is the cross-machine calibration baseline against
    # which ID_custom_score is judged.
    test_agents = [
        Agent(CustomPlayer(score_fn=improved_score, **CUSTOM_ARGS),
              "ID_improved_score"),
        Agent(CustomPlayer(score_fn=custom_score, **CUSTOM_ARGS),
              "ID_custom_score"),
    ]

    print(DESCRIPTION)
    for agent_under_test in test_agents:
        print("")
        print("*************************")
        print("{:^25}".format("Evaluating: " + agent_under_test.name))
        print("*************************")

        field = random_agents + mm_agents + ab_agents + [agent_under_test]
        win_ratio = play_round(field, NUM_MATCHES)

        print("\n\nResults:")
        print("----------")
        print("{!s:<15}{:>10.2f}%".format(agent_under_test.name, win_ratio))
def main():
    """Run a rated round: ID_Improved and Student (iterative alpha-beta)
    each play a field of minimax, alpha-beta, and random opponents, and the
    resulting ratings are printed as a table sorted ascending by rating.

    Fix: the original body used Python 2 ``print`` statements, which are
    syntax errors in Python 3 and inconsistent with the rest of this file;
    they are now ``print()`` calls.
    """
    EVAL_FUNCS = [("Null", NullEval), ("Open", OpenMoveEval),
                  ("Improved", ImprovedEval)]
    AB_ARGS = {"search_depth": 5, "method": 'alphabeta', "iterative": False}
    MM_ARGS = {"search_depth": 3, "method": 'minimax', "iterative": False}
    CUSTOM_ARGS = {"method": 'alphabeta', 'iterative': True}
    # Starting rating for every named agent in the field.
    RATINGS = {
        "MM_Null": 1350,
        "MM_Open": 1575,
        "MM_Improved": 1620,
        "AB_Null": 1510,
        "AB_Open": 1640,
        "AB_Improved": 1660,
        "Random": 1150,
        "ID_Improved": 1500,
        "Student": 1500
    }

    mm_agents = [
        Agent(CustomPlayer(eval_fn=fn(), **MM_ARGS), "MM_" + name)
        for name, fn in EVAL_FUNCS
    ]
    ab_agents = [
        Agent(CustomPlayer(eval_fn=fn(), **AB_ARGS), "AB_" + name)
        for name, fn in EVAL_FUNCS
    ]
    random_agents = [Agent(RandomPlayer(), "Random")]
    test_agents = [
        Agent(CustomPlayer(eval_fn=ImprovedEval(), **CUSTOM_ARGS),
              "ID_Improved"),
        Agent(CustomPlayer(eval_fn=CustomEval(), **CUSTOM_ARGS), "Student")
    ]

    for agentUT in test_agents:
        print("")
        print("*************************")
        print("{:^25}".format("Evaluating " + agentUT.name))
        print("*************************")

        agents = mm_agents + ab_agents + random_agents + [agentUT]
        # Seed each player's rating from the table above.
        ratings = play_round(
            agents, {a.player: RATINGS[a.name] for a in agents},
            NUM_MATCHES)

        ranking = sorted([(a, ratings[a.player]) for a in agents],
                         key=lambda x: x[1])
        print("\n\nResults:")
        print("----------")
        print("{!s:<15}{!s:>10}".format("Name", "Rating"))
        print("{!s:<15}{!s:>10}".format("---", "---"))
        print("\n".join(
            ["{!s:<15}{:>10.2f}".format(a.name, r) for a, r in ranking]))
def test_end_game():
    """Play out a nearly finished 3x3 game (Random vs Student) and print the
    winning player."""
    CUSTOM_ARGS = {"method": 'alphabeta', 'iterative': True}
    random_agent = Agent(RandomPlayer(), "Random")
    student_agent = Agent(CustomPlayer(score_fn=custom_score, **CUSTOM_ARGS),
                          "Student")

    # Small board with three scripted moves already applied, so the game is
    # close to its end when play() takes over.
    game = Board(random_agent.player, student_agent.player, width=3, height=3)
    for scripted_move in ((0, 0), (2, 0), (1, 2)):
        game.apply_move(scripted_move)
    winner, _, termination = game.play(time_limit=TIME_LIMIT)
    print(winner)
def main():
    """Repeat the calibration tournament NUM_TOURNAMENTS times, printing the
    win ratio of ID_Improved and Student after each round."""
    for _ in range(NUM_TOURNAMENTS):
        heuristics = [("Null", null_score),
                      ("Open", open_move_score),
                      ("Improved", improved_score)]
        ab_args = {"search_depth": 5, "method": 'alphabeta', "iterative": False}
        mm_args = {"search_depth": 3, "method": 'minimax', "iterative": False}
        custom_args = {"method": 'alphabeta', 'iterative': True}

        # Build fresh opponent instances for every tournament.
        mm_agents = []
        ab_agents = []
        for label, heuristic in heuristics:
            mm_agents.append(
                Agent(CustomPlayer(score_fn=heuristic, **mm_args), "MM_" + label))
            ab_agents.append(
                Agent(CustomPlayer(score_fn=heuristic, **ab_args), "AB_" + label))
        random_agents = [Agent(RandomPlayer(), "Random")]

        test_agents = [Agent(CustomPlayer(score_fn=improved_score, **custom_args), "ID_Improved"),
                       Agent(CustomPlayer(score_fn=custom_score, **custom_args), "Student")]

        for agent_under_test in test_agents:
            field = random_agents + mm_agents + ab_agents + [agent_under_test]
            win_ratio = play_round(field, NUM_MATCHES)
            print("{:>10.2f}".format(win_ratio))

        print("-----")
Exemple #9
0
 def test_opening_move(self):
     """ Test the opening moves. It appears that none of the tests nor the tournament tests this, so
     I added some tests here"""
     # Two default-configured players on a 5x5 board.
     player1 = CustomPlayer()
     player2 = CustomPlayer()
     board = isolation.Board(player1, player2, 5, 5)
     # The third argument (1000) is presumably a generous time allowance
     # for get_move -- confirm against CustomPlayer.get_move's signature.
     # First move is expected to be (3, 3) -- presumably CustomPlayer's
     # preferred opening square; confirm against its opening logic.
     move1 = player1.get_move(board, board.get_legal_moves(player1), 1000)
     self.assertTrue(move1 == (3,3))
     board.apply_move(move1)
     # Second player's expected reply after (3, 3) is (3, 4).
     move2 = player2.get_move(board, board.get_legal_moves(player2), 1000)
     self.assertTrue(move2 == (3,4))
def test_minimax():
    """Play one full game, depth-3 minimax CustomPlayer vs GreedyPlayer, and
    print the winner, outcome, final board, and move history."""
    from isolation import Board
    from game_agent import CustomPlayer

    minimax_player = CustomPlayer(method='minimax', search_depth=3)
    greedy_player = GreedyPlayer()
    game = Board(minimax_player, greedy_player)

    # Very large value passed to play() -- presumably a time limit, so the
    # game always runs to completion; confirm against Board.play.
    winner, history, outcome = game.play(10000000000)
    print("\nWinner: {}\nOutcome: {}".format(winner, outcome))
    print(game.to_string())
    print("Move history:\n{!s}".format(history))

    print("-----------------")
Exemple #11
0
# Scripted exhibition game: a depth-3 minimax CustomPlayer using null_score
# (iterative deepening on) plays a GreedyPlayer from a fixed opening.
from isolation import Board
from sample_players import GreedyPlayer
from sample_players import RandomPlayer
from game_agent import CustomPlayer
from sample_players import null_score

# Positional arguments appear to be (search_depth, score_fn, iterative,
# method) -- confirm against CustomPlayer.__init__.
player1 = CustomPlayer(3, null_score, True, 'minimax')
player2 = GreedyPlayer()
game = Board(player1, player2)

# Fixed opening placements for both players.
game.apply_move((2, 3))
game.apply_move((0, 5))

winner, history, outcome = game.play()

print(
    'student agent with 3 depths, null_score, iterative and minimax VS GreedyPlayer'
)
print("\nWinner: {}\nOutcome: {}".format(winner, outcome))
print(game.to_string())
print("Move history:\n{!s}".format(history))
import time

from isolation import Board

from sample_players import GreedyPlayer

from game_agent import CustomPlayer

# Timed exhibition game: default-configured CustomPlayer vs GreedyPlayer.
player_1 = CustomPlayer()

player_2 = GreedyPlayer()
#player_2 = RandomPlayer()

print(player_1, player_2)

# Wall-clock the whole game so the summary line can report the duration.
test_game = Board(player_1, player_2)
start = time.time()
winner, moves, reason = test_game.play()
end = time.time()
#print (winner)
if reason == "timeout":
    print("Forfeit due to timeout.")
# Print the move history one entry per line.
for move in moves:
    print(move)

print(
    'Play Summary : Time taken = {0}, number of move = {1}, winner= {2}, Reason ={3}'
    .format(end - start, len(moves), winner, reason))
Exemple #13
0
                    print('Illegal move! Try again.')

            except ValueError:
                print('Invalid index! Try again.')

        return legal_moves[index]


if __name__ == "__main__":
    from isolation import Board
    from game_agent import CustomPlayer

    # create an isolation board (by default 7x7)
    # NOTE(review): player1 is constructed but never seated in the game
    # below -- the match is player2 (greedy) vs player3 (custom).
    player1 = RandomPlayer()
    player2 = GreedyPlayer()
    player3 = CustomPlayer()
    game = Board(player2, player3)

    # place player 1 on the board at row 2, column 3, then place player 2 on
    # the board at row 0, column 5; display the resulting board state.  Note
    # that .apply_move() changes the calling object
    game.apply_move((2, 3))
    game.apply_move((0, 5))
    print(game.to_string())

    # both seats have now moved once, so the first-seated player (player2)
    # is active again
    assert (player2 == game.active_player)

    # get a list of the legal moves available to the active player
    print(game.get_legal_moves())
Exemple #14
0
#  | - |   | - | - | 1 |   |   |
#  |   |   | - | - | - |   |   |
#  |   | - | - | - |   | - |   |
#  |   | - | - | - |   | - |   |
#  | - |   | - |   | - |   |   |
#  |   |   | - |   | 2 |   | - |
#
# [[(0, 4), (4, 5)], [(2, 3), (6, 6)], [(0, 2), (5, 4)], [(1, 0), (4, 2)], [(2, 2), (5, 0)], [(0, 1), (6, 2)], [(1, 3), (4, 1)], [(3, 2), (3, 3)], [(2, 4), (1, 2)], [(4, 3), (3, 1)], [(3, 5), (5, 2)], [(1, 4), (6, 4)], [(-1, -1)]]

# Knight-move offsets (the eight L-shaped displacements).
DIR = [(1, 2), (2, 1), (-1, 2), (-2, 1), (1, -2), (2, -1), (-1, -2), (-2, -1)]

CUSTOM_ARGS = {"method": 'alphabeta', 'iterative': True}

# Millisecond wall-clock timer.
curr_time_millis = lambda: 1000 * timeit.default_timer()

# p1 plays the baseline improved heuristic, p2 the custom heuristic.
p2 = CustomPlayer(score_fn=custom_score, **CUSTOM_ARGS)
p1 = CustomPlayer(score_fn=improved_score, **CUSTOM_ARGS)
#p2 = HumanPlayer()

w = 7
g = Board(p1, p2, w, w)

# Scripted history: each pair looks like (player-1 move, player-2 move).
moves = [[(5, 1), (2, 6)], [(3, 0), (0, 5)], [(1, 1), (1, 3)], [(0, 3),
                                                                (2, 1)],
         [(1, 5), (3, 3)], [(2, 3), (5, 2)], [(3, 1), (4, 0)], [(1, 0),
                                                                (3, 2)],
         [(0, 2), (2, 4)], [(1, 4), (4, 3)], [(2, 2), (3, 5)], [(0, 1),
                                                                (5, 4)],
         [(2, 0), (4, 6)], [(4, 1), (2, 5)]]
for (move1, move2) in moves:
    g.apply_move(move1)
    # NOTE(review): move2 is unpacked but never applied -- the replay only
    # advances player 1's moves.  Likely a truncation or bug; confirm intent.
Exemple #15
0
def main_mine():
    """Evaluate nine experimental heuristics (each labelled "Student") plus
    the ID_Improved baseline against the standard opponent field, printing
    the win ratio of each."""
    HEURISTICS = [("Null", null_score),
                  ("Open", open_move_score),
                  ("Improved", improved_score)]
    AB_ARGS = {"search_depth": 5, "method": 'alphabeta', "iterative": False}
    MM_ARGS = {"search_depth": 3, "method": 'minimax', "iterative": False}
    CUSTOM_ARGS = {"method": 'alphabeta', 'iterative': True}

    # Standard CPU opponents: fixed-depth minimax and alpha-beta over the
    # three reference heuristics, plus a random mover.  Names encode search
    # method and heuristic (e.g. "AB_Open").
    mm_agents = []
    ab_agents = []
    for label, heuristic in HEURISTICS:
        mm_agents.append(
            Agent(CustomPlayer(score_fn=heuristic, **MM_ARGS), "MM_" + label))
        ab_agents.append(
            Agent(CustomPlayer(score_fn=heuristic, **AB_ARGS), "AB_" + label))
    random_agents = [Agent(RandomPlayer(), "Random")]

    print("NEW RUN ******************************************************************")

    from game_agent import custom_score_diff_in_free_percent_of_board
    from game_agent import custom_score_diff_in_mine_and_double_opponent
    from game_agent import custom_score_diff_in_mine_and_double_opponent_chase_incase_of_tie
    from game_agent import custom_score_diff_in_mine_and_double_opponent_run_away_incase_of_tie
    from game_agent import custom_score_divide_own_by_opponent
    from game_agent import custom_score_my_open_moves
    from game_agent import custom_score_simple
    from game_agent import custom_score_diff_in_mine_and_double_opponent_closest_to_center_tie
    from game_agent import custom_score_diff_in_opp_and_double_mine

    # ID_Improved first (the cross-machine calibration baseline), followed
    # by each experimental heuristic under the common "Student" label.
    student_heuristics = [
        custom_score_my_open_moves,
        custom_score_simple,
        custom_score_diff_in_mine_and_double_opponent,
        custom_score_diff_in_opp_and_double_mine,
        custom_score_diff_in_mine_and_double_opponent_chase_incase_of_tie,
        custom_score_diff_in_mine_and_double_opponent_run_away_incase_of_tie,
        custom_score_diff_in_mine_and_double_opponent_closest_to_center_tie,
        custom_score_divide_own_by_opponent,
        custom_score_diff_in_free_percent_of_board,
    ]
    test_agents = [Agent(CustomPlayer(score_fn=improved_score, **CUSTOM_ARGS),
                         "ID_Improved")]
    for heuristic in student_heuristics:
        test_agents.append(
            Agent(CustomPlayer(score_fn=heuristic, **CUSTOM_ARGS), "Student"))

    print(DESCRIPTION)
    for agent_under_test in test_agents:
        print("")
        print("*************************")
        print("{:^25}".format("Evaluating: " + agent_under_test.name))
        print("*************************")

        field = random_agents + mm_agents + ab_agents + [agent_under_test]
        win_ratio = play_round(field, NUM_MATCHES)

        print("\n\nResults:")
        print("----------")
        print("{!s:<15}{:>10.2f}%".format(agent_under_test.name, win_ratio))
from sample_players import null_score
from sample_players import open_move_score
from sample_players import improved_score
from game_agent import CustomPlayer
from game_agent import custom_score
from game_agent import partition
from game_agent import partition_blanks

if __name__ == '__main__':
    # (Kept for reference: manual construction of a raw board matrix.)
    # BLANK = 0
    # height = 7
    # width = 7
    # board = [[BLANK for i in range(width)] for j in range(height)]
    # for i in range(7):
    #     board[i][3] = "x"
    #     board[i][4] = "x"
    # print(board)
    player1 = CustomPlayer()
    player2 = CustomPlayer()
    # NOTE(review): Board is not imported in the visible import block above
    # -- presumably imported elsewhere in this file; confirm.
    board = Board(player1, player2)
    # Mark rows 3-4 and columns 2-3 as occupied to set up a partitioned
    # position for the helpers exercised below.
    for i in range(7):
        # board.__board_state__[2][i] = "x"
        board.__board_state__[3][i] = "x"
        board.__board_state__[4][i] = "x"
        board.__board_state__[i][2] = "x"
        board.__board_state__[i][3] = "x"
        # board.__board_state__[i][4] = "x"
    board.apply_move((2, 5))
    print(board.to_string())
    # Exercise the partition helpers on the doctored position.
    print(partition(board, player1))
    print(partition_blanks(board, (0, 3)))
Exemple #17
0
def main():
    """Tournament over five candidate heuristics against the standard
    opponent field; per-agent detailed results are collected into a dict and
    dumped to out.json."""
    HEURISTICS = [("Null", null_score),
                  ("Open", open_move_score),
                  ("Improved", improved_score)]
    AB_ARGS = {"search_depth": 5, "method": 'alphabeta', "iterative": False}
    MM_ARGS = {"search_depth": 3, "method": 'minimax', "iterative": False}
    CUSTOM_ARGS = {"method": 'alphabeta', 'iterative': True}
    # For just comparing the correctness of a heuristic, a short fixed-depth
    # search can help:
    #CUSTOM_ARGS = {"method": 'minimax', 'iterative': False, 'search_depth': 3}

    # Standard CPU opponents: fixed-depth minimax and alpha-beta over the
    # reference heuristics, plus a random mover.
    mm_agents = []
    ab_agents = []
    for label, heuristic in HEURISTICS:
        mm_agents.append(
            Agent(CustomPlayer(score_fn=heuristic, **MM_ARGS), "MM_" + label))
        ab_agents.append(
            Agent(CustomPlayer(score_fn=heuristic, **AB_ARGS), "AB_" + label))
    random_agents = [Agent(RandomPlayer(), "Random")]

    # Candidate heuristics under test, with the ID_Improved baseline last.
    candidates = [
        (combined_improved_density_at_end, "Combined_improved_and_density"),
        (diff_density, "Diff_density"),
        (improved_with_sleep, "ID_Improved_slow"),
        (distance_to_center, "Distance_to_center"),
        (improved_score, "ID_Improved"),
    ]
    test_agents = [Agent(CustomPlayer(score_fn=heuristic, **CUSTOM_ARGS), label)
                   for heuristic, label in candidates]

    print(DESCRIPTION)
    full_res = {}
    for agent_under_test in test_agents:
        print("")
        print("*************************")
        print("{:^25}".format("Evaluating: " + agent_under_test.name))
        print("*************************")
        field = random_agents + mm_agents + ab_agents + [agent_under_test]

        # This play_round variant returns detailed per-round results as well
        # as the overall win ratio.
        win_ratio, res = play_round(field, NUM_MATCHES)

        print("\n\nResults:")
        print("----------")
        print("{!s:<15}{:>10.2f}%".format(agent_under_test.name, win_ratio))
        full_res[agent_under_test.name] = res

    with open('out.json', 'w') as f:
        json.dump(full_res, f)
def main(argv):
    """Run a multiprocess tournament of parameterized evaluation functions.

    Parses command-line options (-m/--matches, -p/--poolsize, -o/--ofile),
    builds the standard opponent field plus one test agent per parameter
    tuple, fans the rounds out over a process pool, and appends results to
    the output file as they complete.

    argv -- the command-line arguments, excluding the program name.
    """
    USAGE = """usage: tournament_mp.py [-m <number of matches>] [-p <pool size>] [-o <outputfile>]
            -m number of matches: optional number of matches (each match has 4 games) - default is 5
            -p pool size: optional pool size - default is 3
            -o output file: optional output file name - default is results.txt"""

    # Assumes 2 x dual-core CPUs able to run 3 processes relatively
    # uninterrupted (interruptions cause get_move to timeout)
    pool_size = 3
    outputfilename = 'results.txt'
    num_matches = NUM_MATCHES
    # Parse the command line; any option overrides the default above.
    try:
        opts, args = getopt.getopt(argv, "hm:p:o:",
                                   ["matches=", "poolsize=", "ofile="])
    except getopt.GetoptError as err:
        print(err)
        print(USAGE)
        sys.exit(2)
    for opt, arg in opts:
        if opt in ["-h", "--help"]:
            print(USAGE)
            sys.exit()
        elif opt in ("-m", "--matches"):
            num_matches = int(arg)
        elif opt in ("-p", "--poolsize"):
            pool_size = int(arg)
        elif opt in ("-o", "--ofile"):
            outputfilename = arg

    HEURISTICS = [("Null", null_score), ("Open", open_move_score),
                  ("Improved", improved_score)]
    AB_ARGS = {"search_depth": 5, "method": 'alphabeta', "iterative": False}
    MM_ARGS = {"search_depth": 3, "method": 'minimax', "iterative": False}
    CUSTOM_ARGS = {"method": 'alphabeta', 'iterative': True}

    # Create a collection of CPU agents using fixed-depth minimax or alpha beta
    # search, or random selection.  The agent names encode the search method
    # (MM=minimax, AB=alpha-beta) and the heuristic function (Null=null_score,
    # Open=open_move_score, Improved=improved_score). For example, MM_Open is
    # an agent using minimax search with the open moves heuristic.
    mm_opponents = [
        Agent(CustomPlayer(score_fn=h, **MM_ARGS), "MM_" + name)
        for name, h in HEURISTICS
    ]
    ab_opponents = [
        Agent(CustomPlayer(score_fn=h, **AB_ARGS), "AB_" + name)
        for name, h in HEURISTICS
    ]
    random_opponents = [Agent(RandomPlayer(), "Random")]
    all_opponents = random_opponents + mm_opponents + ab_opponents

    test_agents = []
    # ID_Improved agent is used for comparison to the performance of the
    # submitted agent for calibration on the performance across different
    # systems; i.e., the performance of the student agent is considered
    # relative to the performance of the ID_Improved agent to account for
    # faster or slower computers.
    test_agents.append(
        Agent(CustomPlayer(score_fn=improved_score, **CUSTOM_ARGS),
              "ID_Improved"))

    # Create all the parameterized evaluation function objects
    # Then create all the test agents using those eval functions
    # (the 6-tuple parameter grid is the Cartesian product of the ranges
    # below; its semantics belong to ParameterizedEvaluationFunction).
    params = [(a, b, c, d, e, f) for a in range(1, 3) for b in range(0, 3)
              for c in range(-1, 2) for d in range(1, 3) for e in range(0, 3)
              for f in range(0, 1)]
    #params = [(0,0,0,0,0,0)]
    #params = [(1, 2, -1, 1, 2, 0)]

    for param in params:
        eval_obj = ParameterizedEvaluationFunction(param)
        test_agents.append(
            Agent(CustomPlayer(score_fn=eval_obj.eval_func, **CUSTOM_ARGS),
                  "Student " + str(param)))

    # Put the start time in the output file
    with open(outputfilename, mode='a') as ofile:
        ofile.write(
            '*******************************************************************************************\n'
        )
        ofile.write(
            'Starting Isolation tournament with %d test agents, %d games per round, and %d sub-processes\n'
            % (len(test_agents), num_matches * 4, pool_size))
        ofile.write('Tournament started at %s\n' %
                    (datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')))

    # Run the tournament!
    # Each agent's round is submitted asynchronously; result.get() below
    # blocks until that round finishes.
    with Pool(processes=pool_size) as pool:
        results = []
        for agentUT in test_agents:
            results.append(
                pool.apply_async(play_round,
                                 args=(all_opponents, agentUT, num_matches)))

        # Write the output... flush each time as it takes a long time to run
        with open(outputfilename, mode='a') as ofile:
            for result in results:
                agent, res = result.get()
                ofile.write('%s got %2.2f\n' % (agent, res))
                ofile.flush()
            ofile.write(
                'Tournament complete at: %s\n' %
                (datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')))
            ofile.write(
                '*******************************************************************************************\n\n'
            )
Exemple #19
0
def main():
    """Run an Isolation tournament: each test agent plays the full CPU
    opponent pool, and the win ratios are accumulated in a pandas Series
    that is checkpointed to an HDF5 file after every agent finishes.
    """
    HEURISTICS = [("Null", null_score), ("Open", open_move_score),
                  ("Improved", improved_score)]
    AB_ARGS = {"search_depth": 5, "method": 'alphabeta', "iterative": False}
    MM_ARGS = {"search_depth": 3, "method": 'minimax', "iterative": False}
    CUSTOM_ARGS = {"method": 'alphabeta', 'iterative': True}

    # CPU opponents using fixed-depth minimax (MM_*) or alpha-beta (AB_*)
    # over each sample heuristic, plus one random mover.  The agent name
    # encodes search method and heuristic, e.g. MM_Open.
    mm_agents = [
        Agent(CustomPlayer(score_fn=h, **MM_ARGS), "MM_" + name)
        for name, h in HEURISTICS
    ]
    ab_agents = [
        Agent(CustomPlayer(score_fn=h, **AB_ARGS), "AB_" + name)
        for name, h in HEURISTICS
    ]
    random_agents = [Agent(RandomPlayer(), "Random")]

    # Agents under test.  ID_Improved (iterative deepening + improved_score)
    # is the calibration baseline so the other heuristics can be compared
    # relative to it across machines of different speed.
    # NOTE(review): the original code built two earlier test_agents lists
    # and immediately overwrote them; only this final list was ever used,
    # so the dead assignments were removed.
    test_agents = [
        Agent(
            CustomPlayer(score_fn=relaxed_move_aggressive_distance,
                         **CUSTOM_ARGS), "relaxed_move_aggressive_distance"),
        Agent(
            CustomPlayer(score_fn=relaxed_move_relaxed_distance,
                         **CUSTOM_ARGS), "relaxed_move_relaxed_distance"),
        Agent(
            CustomPlayer(score_fn=relaxed_move_relaxed_distance_norm,
                         **CUSTOM_ARGS), "relaxed_move_relaxed_distance_norm"),
        Agent(
            CustomPlayer(score_fn=relaxed_move_aggressive_distance_norm,
                         **CUSTOM_ARGS),
            "relaxed_move_aggressive_distance_norm"),
        Agent(CustomPlayer(score_fn=aggressive_move_heuristic, **CUSTOM_ARGS),
              "aggressive_move_heuristic"),
        Agent(CustomPlayer(score_fn=relaxed_move_heuristic, **CUSTOM_ARGS),
              "relaxed_move_heuristic"),
        Agent(CustomPlayer(score_fn=improved_score, **CUSTOM_ARGS),
              "ID_Improved")
    ]

    # Win ratio per agent, keyed by agent name.  dtype=float avoids the
    # pandas FutureWarning for an empty, object-dtype Series.
    res = pd.Series(name='player', dtype=float)
    agent_names = []
    print(DESCRIPTION)
    for agentUT in test_agents:
        print("")
        print("*************************")
        print("{:^25}".format("Evaluating: " + agentUT.name))
        print("*************************")

        agents = random_agents + mm_agents + ab_agents + [agentUT]
        win_ratio = play_round(agents, NUM_MATCHES)

        print("\n\nResults:")
        print("----------")
        print("{!s:<15}{:>10.2f}%".format(agentUT.name, win_ratio))
        res[agentUT.name] = win_ratio
        agent_names.append(agentUT.name)
        # Checkpoint after every agent; mode='w' rewrites the file so it
        # always holds the full accumulated Series (runs take a long time).
        res.reset_index().to_hdf('data/full_run_{}.h5'.format(
            "_".join(agent_names)),
                                 'test',
                                 mode='w')
Exemple #20
0
                if not valid_choice:
                    print('Illegal move! Try again.')

            except ValueError:
                print('Invalid index! Try again.')

        return legal_moves[index]


if __name__ == "__main__":
    from isolation import Board

    # Quick smoke test: an iterative-deepening alpha-beta agent versus a
    # random mover on the default 7x7 isolation board.
    agent = CustomPlayer(
        score_fn=custom_score, method='alphabeta', iterative=True)
    opponent = RandomPlayer()
    game = Board(agent, opponent)

    # No opening moves have been applied yet, so the first player handed
    # to Board must be the one to move next.
    assert agent == game.active_player
Exemple #21
0
 def __init__(self):
     """Set up a human-vs-AI game.

     The AI seat is a fixed-depth alpha-beta CustomPlayer scored with the
     knight-tour heuristic; the human seat starts as None.
     """
     self.human_player = None  # human seat; presumably assigned later when a player joins -- TODO confirm
     self.ia_player = CustomPlayer(method='alphabeta', iterative=False, score_fn=custom_score_knight_tour)  # AI opponent
     self.board = Board(self.human_player, self.ia_player)  # shared game board for both seats
Exemple #22
0
def main():
    """Evaluate every candidate heuristic against the full CPU opponent
    pool and print each one's win percentage.
    """
    HEURISTICS = [("Null", null_score),
                  ("Open", open_move_score),
                  ("Improved", improved_score)]
    AB_ARGS = {"search_depth": 5, "method": 'alphabeta', "iterative": False}
    MM_ARGS = {"search_depth": 3, "method": 'minimax', "iterative": False}
    CUSTOM_ARGS = {"method": 'alphabeta', 'iterative': True}

    # CPU opponents: fixed-depth minimax (MM_*) and fixed-depth alpha-beta
    # (AB_*) over each sample heuristic, plus one random mover.  The name
    # encodes search method and heuristic, e.g. MM_Open.
    mm_agents = []
    ab_agents = []
    for label, heuristic in HEURISTICS:
        mm_agents.append(
            Agent(CustomPlayer(score_fn=heuristic, **MM_ARGS), "MM_" + label))
        ab_agents.append(
            Agent(CustomPlayer(score_fn=heuristic, **AB_ARGS), "AB_" + label))
    random_agents = [Agent(RandomPlayer(), "Random")]

    # Agents under test; ID_Improved is the cross-machine calibration
    # baseline against which the student heuristics are judged.
    candidates = [
        (improved_score, "ID_Improved"),
        (aggressive_heuristic, "Student1"),
        (defensive_heuristic, "Student2"),
        (maximizing_win_chances_heuristic, "Student3"),
        (minimizing_losing_chances_heuristic, "Student4"),
        (chances_heuristic, "Student5"),
        (weighted_chances_heuristic, "Student6"),
        (weighted_chances_heuristic_2, "Student7"),
        (point_of_view_heuristic, "WONG HAO SHAN - 17122789"),
        (scoring_heuristic, "LIM JIA QI - 17134267"),
        (min_max_heuristic, "CHEAH JO YEN - 17059391"),
        (weight_factor_heuristic, "CHONG SIN MEI - 17103500"),
        (offensive_to_defensive_heuristic, "Offensive to Defensive"),
        (blocking_opponent_heuristic, "Blocking the Opponent"),
    ]
    test_agents = [Agent(CustomPlayer(score_fn=fn, **CUSTOM_ARGS), label)
                   for fn, label in candidates]

    print(DESCRIPTION)
    for challenger in test_agents:
        print("")
        print("*************************")
        print("{:^25}".format("Evaluating: " + challenger.name))
        print("*************************")

        agents = random_agents + mm_agents + ab_agents + [challenger]
        win_ratio = play_round(agents, NUM_MATCHES)

        print("\n\nResults:")
        print("----------")
        print("{!s:<15}{:>10.2f}%".format(challenger.name, win_ratio))
Exemple #23
0
from HtmlBoard import HtmlGame
from Sample_players import null_score, open_move_score, improved_score, HumanPlayer
from game_agent import CustomPlayer, custom_score

if __name__ == '__main__':
    from flask import Flask
    app = Flask(
        __name__)  # http://flask.pocoo.org/docs/0.10/quickstart/#quickstart

    # One global game shared by all routes: iterative alpha-beta CPU
    # (player1) versus a human driven through the browser (player2).
    AB_ARGS = {"search_depth": 5, "method": 'alphabeta', "iterative": True}
    player1 = CustomPlayer(score_fn=improved_score, **AB_ARGS)
    player2 = HumanPlayer()
    board_dimensions = (7, 7)
    game = HtmlGame(player1, player2, board_dimensions)

    @app.route("/")
    def root_url():
        """Render the current board state as HTML."""
        return game.get_html()

    @app.route("/move_player_to/<int:x>,<int:y>")
    def move_player_to(x, y):
        """Apply the human's move to (x, y), then re-render the board."""
        game.player_moves_player(x, y)
        return game.get_html()

    @app.route("/robot_takes_turn/")
    def robot_takes_turn():
        """Let the CPU agent take its turn, then re-render the board."""
        game.robot_takes_turn()
        return game.get_html()

    app.run(debug=False
            )  # run with debug=True to allow interaction & feedback when
def mainMod():
    """Repeatedly (50 rounds) pit each test agent against the ID_Improved
    baseline, collect the per-round win ratios keyed by
    "<opponent>_<agent>", and dump every ratio to HeuristicsResults.csv.
    """
    HEURISTICS = [("Null", null_score), ("Open", open_move_score),
                  ("Improved", improved_score)]
    AB_ARGS = {"search_depth": 5, "method": 'alphabeta', "iterative": False}
    MM_ARGS = {"search_depth": 3, "method": 'minimax', "iterative": False}
    CUSTOM_ARGS = {"method": 'alphabeta', 'iterative': True}

    # CPU agent pools (not used in the ID_Improved-only matchup below,
    # but kept for parity with main() and easy re-enabling).
    mm_agents = [
        Agent(CustomPlayer(score_fn=h, **MM_ARGS), "MM_" + name)
        for name, h in HEURISTICS
    ]
    ab_agents = [
        Agent(CustomPlayer(score_fn=h, **AB_ARGS), "AB_" + name)
        for name, h in HEURISTICS
    ]
    random_agents = [Agent(RandomPlayer(), "Random")]

    # Single agent under test; ID_Improved is the calibration baseline.
    test_agents = [
        Agent(
            CustomPlayer(score_fn=custom_score_distaceWeightedPositions,
                         **CUSTOM_ARGS), "StudentDistWeighted")
    ]
    idImprovedAgent = [
        Agent(CustomPlayer(score_fn=improved_score, **CUSTOM_ARGS),
              "ID_Improved")
    ]
    print(DESCRIPTION)
    results = dict()
    for agentUT in test_agents:
        for i in range(50):
            print("")
            print("*************************")
            print("{:^25}".format("Evaluating: " + agentUT.name))
            print("*************************")
            agents = idImprovedAgent + [agentUT]
            key = idImprovedAgent[0].name + "_" + agentUT.name
            win_ratio = play_round(agents, NUM_MATCHES)
            # setdefault replaces the original get()/append + else branch.
            results.setdefault(key, []).append(win_ratio)
            print("\n\nResults:")
            print("----------")
            print("{!s:<15}{:>10.2f}%".format(agentUT.name, win_ratio))
    # Plain nested loop instead of a list comprehension used only for its
    # file-write side effects; output rows are identical.
    with open('HeuristicsResults.csv', 'w') as f:
        for key, ratios in results.items():
            for ratio in ratios:
                f.write('{0},{1}\n'.format(key, ratio))
Exemple #25
0
# Per-move time budget handed to each agent (presumably milliseconds --
# TODO confirm against the tournament runner).
TIME_LIMIT = 150

HEURISTICS = [("Null", null_score), ("Open", open_move_score),
              ("Improved", improved_score)]
AB_ARGS = {"search_depth": 5, "method": 'alphabeta', "iterative": False}
MM_ARGS = {"search_depth": 3, "method": 'minimax', "iterative": False}
CUSTOM_ARGS = {"method": 'alphabeta', 'iterative': True}

# CPU opponent pools: one fixed-depth minimax (MM_*) and one fixed-depth
# alpha-beta (AB_*) agent per sample heuristic, plus a random mover.  The
# agent name encodes search method and heuristic, e.g. MM_Open.
mm_agents = [Agent(CustomPlayer(score_fn=fn, **MM_ARGS), "MM_" + label)
             for label, fn in HEURISTICS]
ab_agents = [Agent(CustomPlayer(score_fn=fn, **AB_ARGS), "AB_" + label)
             for label, fn in HEURISTICS]
random_agents = [Agent(RandomPlayer(), "Random")]

# ID_Improved would be the calibration baseline for comparing a student
# agent across machines of different speed; the test_agents list is left
# disabled here:
# test_agents = [Agent(CustomPlayer(score_fn=improved_score, **CUSTOM_ARGS), "ID_Improved"),
#               Agent(CustomPlayer(score_fn=custom_score, **CUSTOM_ARGS), "Student")]
Exemple #26
0
def main():
    """Round-robin tournament among the candidate heuristics themselves:
    each test agent plays all the OTHER test agents and its win
    percentage is printed.
    """
    HEURISTICS = [("Null", null_score), ("Open", open_move_score),
                  ("Improved", improved_score)]
    AB_ARGS = {"search_depth": 5, "method": 'alphabeta', "iterative": False}
    MM_ARGS = {"search_depth": 3, "method": 'minimax', "iterative": False}
    CUSTOM_ARGS = {"method": 'alphabeta', 'iterative': True}

    # Fixed-depth CPU pools (unused in the round-robin below, but kept so
    # the commented opponent line can be re-enabled).  Names encode search
    # method (MM/AB) and heuristic, e.g. MM_Open.
    mm_agents = [
        Agent(CustomPlayer(score_fn=h, **MM_ARGS), "MM_" + name)
        for name, h in HEURISTICS
    ]
    ab_agents = [
        Agent(CustomPlayer(score_fn=h, **AB_ARGS), "AB_" + name)
        for name, h in HEURISTICS
    ]
    random_agents = [Agent(RandomPlayer(), "Random")]

    # Agents under test; ID_Improved is the cross-machine calibration
    # baseline.
    test_agents = [
        Agent(CustomPlayer(score_fn=improved_score, **CUSTOM_ARGS),
              "ID_Improved"),
        # Agent(CustomPlayer(score_fn=more_improved_score, **CUSTOM_ARGS), "More_Improved"),
        Agent(
            CustomPlayer(score_fn=linear_ratio_improved_score, **CUSTOM_ARGS),
            "Linear_Improved"),
        Agent(
            CustomPlayer(score_fn=nonlinear_ratio_improved_score,
                         **CUSTOM_ARGS), "Non_Linear_Improved"),
        Agent(CustomPlayer(score_fn=second_moves_score, **CUSTOM_ARGS),
              "Second_Moves"),
        Agent(
            CustomPlayer(score_fn=second_moves_in_middle_game_score,
                         **CUSTOM_ARGS), "Second_Moves_In_Middle_Game"),
        Agent(CustomPlayer(score_fn=all_boxes_can_move_score, **CUSTOM_ARGS),
              "All_Boxes_Can_Move"),
    ]

    print(DESCRIPTION)
    for agentUT in test_agents:
        print("")
        print("*************************")
        print("{:^25}".format("Evaluating: " + agentUT.name))
        print("*************************")

        # agents = random_agents + mm_agents + ab_agents + [agentUT]
        # Opponents are every OTHER test agent, with the agent under test
        # appended last.  Equivalent to the original
        # "agents = test_agents + [agentUT]; agents.remove(agentUT)"
        # (remove() dropped the first, earlier occurrence), but explicit.
        agents = [a for a in test_agents if a is not agentUT] + [agentUT]
        win_ratio = play_round(agents, NUM_MATCHES)

        print("\n\nResults:")
        print("----------")
        print("{!s:<15}{:>10.2f}%".format(agentUT.name, win_ratio))
def main():
    """Benchmark the ID_Improved baseline against the CPU pool, then
    grid-search the (w2, w3) weights of CustomPlayerTest over the same
    opponents, printing each configuration's win ratio and its
    likelihood of superiority.
    """
    HEURISTICS = [("Null", null_score),
                  ("Open", open_move_score),
                  ("Improved", improved_score)]
    AB_ARGS = {"search_depth": 5, "method": 'alphabeta', "iterative": False}
    MM_ARGS = {"search_depth": 3, "method": 'minimax', "iterative": False}
    CUSTOM_ARGS = {"method": 'alphabeta', 'iterative': True}

    # CPU opponent pool: fixed-depth minimax (MM_*) and alpha-beta (AB_*)
    # per sample heuristic, plus a random mover.
    mm_agents = [Agent(CustomPlayer(score_fn=fn, **MM_ARGS), "MM_" + label)
                 for label, fn in HEURISTICS]
    ab_agents = [Agent(CustomPlayer(score_fn=fn, **AB_ARGS), "AB_" + label)
                 for label, fn in HEURISTICS]
    random_agents = [Agent(RandomPlayer(), "Random")]
    cpu_pool = random_agents + mm_agents + ab_agents

    # ID_Improved is the cross-machine calibration baseline; Student is
    # the submitted agent (only its name is printed in this variant).
    ID_Improved_agent = [Agent(CustomPlayer(score_fn=improved_score, **CUSTOM_ARGS), "ID_Improved")]
    Student_agent = [Agent(CustomPlayer(score_fn=custom_score, **CUSTOM_ARGS), "Student")]

    print("")
    print("*************************")
    print("{:^25}".format("Evaluating: " + ID_Improved_agent[0].name))
    print("*************************")

    win_ratio = play_round(cpu_pool + ID_Improved_agent, NUM_MATCHES)

    print("\n\nResults:")
    print("----------")
    print("{!s:<15}{:>10.2f}%".format(ID_Improved_agent[0].name, win_ratio))


    print(DESCRIPTION)
    print("")
    print("*************************")
    print("{:^25}".format("Evaluating: " + Student_agent[0].name))
    print("*************************")

    # Exhaustive grid search over the two heuristic weights (same value
    # range for both axes).
    weight_values = [0, 0.5, 1, 1.5, 2, 2.5, 3, 3.5, 4]
    for w2 in weight_values:
        for w3 in weight_values:
            label = "custom_" + str(w2) + "_" + str(w3)
            challenger = [Agent(CustomPlayerTest(w2=w2, w3=w3, **CUSTOM_ARGS), label)]
            win_ratio = play_round(cpu_pool + challenger, NUM_MATCHES)
            print("\n\nResults:")
            print("----------")
            print("{!s:<15}{:>10.2f}%".format(challenger[0].name, win_ratio))
            print("{!s:<15}{:>10.2f}%".format('likelihood_of_superiority : ',
                                              likelihood_of_superiority(win_ratio, 2 * NUM_MATCHES)))
Exemple #28
0
from isolation import Board

from sample_players import improved_score
# from game_agent import moving_area_score
from game_agent import *

from game_agent import CustomPlayer

# Demo script: create an isolation board (by default 7x7) with two
# CustomPlayer agents using different heuristics.
player1 = CustomPlayer(score_fn=smart_score)
player2 = CustomPlayer(score_fn=improved_score)

game = Board(player1, player2)

# Place player 1 on the board at row 2, column 3, then place player 2 on
# the board at row 0, column 5; display the resulting board state.  Note
# that .apply_move() changes the calling object.
game.apply_move((2, 3))
game.apply_move((0, 5))
print(game.to_string())

# Players take turns moving on the board, so after two applied moves
# player1 should be next to move.
assert (player1 == game.active_player)

# Show the legal moves available to the active player.
print(game.get_legal_moves())

# Get a successor of the current state by making a copy of the board and
# applying a move.  Unlike .apply_move(), forecast_move does NOT change
# the calling object.
new_game = game.forecast_move((1, 1))
                print('Invalid index! Try again.')

        return legal_moves[index]


if __name__ == "__main__":
    from isolation import Board

    # Iterative-deepening alpha-beta vs. fixed-depth-5 alpha-beta, both
    # scored with open_move_score, on the default 7x7 board.
    CUSTOM_ARGS = {"method": 'alphabeta', 'iterative': True}
    CUSTOM_ARGS2 = {"search_depth": 5, "method": 'alphabeta', 'iterative': False}
    player1 = CustomPlayer(score_fn=open_move_score, **CUSTOM_ARGS)
    player2 = CustomPlayer(score_fn=open_move_score, **CUSTOM_ARGS2)
    print('ya qq?')
    game = Board(player1, player2)

    # Give each player one random opening move instead of fixed positions.
    for _ in range(2):
        game.apply_move(random.choice(game.get_legal_moves()))
'''
Created on Mar 7, 2017

@author: richard
'''
import random

from isolation import Board
from sample_players import HumanPlayer
from sample_players import improved_score
from game_agent import CustomPlayer

if __name__ == '__main__':
    human = HumanPlayer()
    computer = CustomPlayer(score_fn=improved_score, method='alphabeta')
    # Randomize who goes first
    if (random.randint(0, 1)):
        print("You are player 'O'")
        game = Board(human, computer)
    else:
        print("You are player 'X'")
        game = Board(computer, human)

    # Randominze first moves for each player
    for _ in range(2):
        game.apply_move(random.choice(game.get_legal_moves()))

    # Start playing!
    winner, _, reason = game.play()

    if winner == human: