def test_aggresive():
    """Build the agent line-up for the aggressiveness experiment.

    Returns a list of AlphaBeta agents: the ``improved_score`` baseline plus
    three agents using ``custom_score_2`` with progressively larger
    aggressiveness weights (0.5, 1.5, 2.0).
    """
    def weighted_score(weight):
        # Bind `weight` in a closure so each agent gets its own heuristic.
        return lambda game, player: custom_score_2(game, player, weight)

    return [
        Agent(AlphaBetaPlayer(score_fn=improved_score), "AB_Improved"),
        Agent(AlphaBetaPlayer(score_fn=weighted_score(0.5)), "AB_Aggr_0.5"),
        Agent(AlphaBetaPlayer(score_fn=weighted_score(1.5)), "AB_Aggr_1.5"),
        Agent(AlphaBetaPlayer(score_fn=weighted_score(2.0)), "AB_Aggr_2.0"),
    ]
def test_score_time(self):
    """Smoke-test that every custom heuristic evaluates on a live board.

    Bug fix: the ``custom_score*`` heuristics take ``(game, player)``, but the
    original passed ``game.get_player_location(...)`` — the player's (row, col)
    tuple — as the player argument. Pass the active player object instead.
    """
    player1 = game_agent.MinimaxPlayer()
    player2 = sample_players.GreedyPlayer()
    game = isolation.Board(player1, player2)
    # Play two random opening moves so both players are on the board.
    for _ in range(2):
        game.apply_move(random.choice(game.get_legal_moves()))
    # Each heuristic expects the player object, not its board location.
    game_agent.custom_score(game, game.active_player)
    game_agent.custom_score_2(game, game.active_player)
    game_agent.custom_score_3(game, game.active_player)
def test_custom_score_2(self):
    """custom_score_2 returns a float for a position with both players placed."""
    self.setUp()
    # Place player 1 at (2, 1) and player 2 at (3, 3).
    for opening_move in ((2, 1), (3, 3)):
        self.game.apply_move(opening_move)
    heuristic_value = game_agent.custom_score_2(self.game, self.player1)
    self.assertIsNotNone(heuristic_value)
    self.assertIsInstance(heuristic_value, float)
def test6():
    """Play a fixed 5x5 game and print every heuristic for both players."""
    player1 = AlphaBetaPlayer(search_depth=1, score_fn=custom_score)
    player2 = AlphaBetaPlayer(search_depth=1, score_fn=custom_score_3)
    game = isolation.Board(player1, player2, height=5, width=5)

    # Scripted move sequence; a True flag means "print the board afterwards",
    # matching the original print cadence exactly.
    scripted_moves = [
        ((0, 3), True),
        ((4, 4), True),
        ((3, 2), True),
        ((1, 1), False),
        ((2, 0), False),
        ((3, 0), False),
        ((1, 2), True),
    ]
    for move, show_board in scripted_moves:
        game.apply_move(move)
        if show_board:
            print(game.to_string())

    # Evaluate each heuristic from both players' perspectives.
    for heuristic in (custom_score, custom_score_2, custom_score_3,
                      improved_score):
        print(heuristic(game, game.active_player))
        print(heuristic(game, game.inactive_player))
def test_get_long_path_potential_score(self):
    """Time the path-based heuristics on a fixed late-game 9x9 position.

    Fixes: the locals holding the results of ``max_avg_path`` and ``dfs``
    shadowed the builtin ``max``; they are renamed. The occupied-cell count
    now uses ``sum`` instead of ``len`` over a throwaway list. Output and
    behavior are otherwise unchanged.
    """
    d = 9
    self.player1 = game_agent.MinimaxPlayer(search_depth=2)
    self.player2 = game_agent.MinimaxPlayer(search_depth=2)
    self.game = isolation.Board(self.player1, self.player2, width=d, height=d)
    # Hand-built board snapshot: 81 cell flags followed by bookkeeping slots
    # (the trailing values 51/42 look like the two player squares — confirm
    # against isolation.Board's _board_state layout).
    self.game._board_state = [
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0,
        0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0,
        0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0,
        0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 51, 42
    ]
    # Count occupied cells; the last four slots are not board cells.
    self.game.move_count = sum(
        1 for x in self.game._board_state[0:-4] if x == 1)
    print(self.game.to_string())

    start = time.time()
    score = game_agent.custom_score_2(self.game, self.player1)
    elapsed = time.time() - start
    print('generations_weighted_difference {}'.format(score))
    print('elapsed {}'.format(elapsed))

    start = time.time()
    score = game_agent.custom_score_3(self.game, self.player1)
    elapsed = time.time() - start
    print('get_long_path_potential_score {}'.format(score))
    print('elapsed {}'.format(elapsed))

    moves = game_agent.get_moves(
        self.game, [self.game.get_player_location(self.player1)])

    start = time.time()
    # Renamed from `max`/`avg` to avoid shadowing the builtin.
    max_path, avg_path = game_agent.max_avg_path(self.game, moves)
    elapsed = time.time() - start
    print('max_avg_path {} {}'.format(max_path, avg_path))
    print('elapsed {}'.format(elapsed))

    start = time.time()
    max_pos = 100
    dfs_len = game_agent.dfs(self.game,
                             self.game.get_player_location(self.player1),
                             max_pos)
    elapsed = time.time() - start
    print('dfs {} max positions {}'.format(dfs_len, max_pos))
    print('elapsed {}'.format(elapsed))
# Reproduction script: AlphaBeta (improved_score) vs a random player on an
# 8x8 board, replaying the moves of a match that ended in a forfeit.
player1 = AlphaBetaPlayer(score_fn=improved_score)
player2 = RandomPlayer()
# Create an isolation board (by default 7x7)
game = Board(player1, player2, 8, 8)
# Move list captured from the forfeited match; replayed below to rebuild
# the failing position.
forfeited_match = [[2, 3], [4, 4], [0, 4], [5, 6], [2, 5], [6, 4],
                   [1, 3], [4, 5], [0, 1], [2, 6], [2, 2]]
for move in forfeited_match:
    game.apply_move(move)
# print(game.get_legal_moves())
# print(len(game.get_legal_moves()))
# Evaluate the heuristic on the reproduced position.
score = custom_score_2(game, player1)
# Millisecond wall clock used to emulate the tournament timer.
time_millis = lambda: 1000 * timeit.default_timer()
move_start = time_millis()
# Remaining budget of a 100 ms move window.
time_left = lambda: 100 - (time_millis() - move_start)
# next_move = player1.get_move(game, time_left)
# print(next_move)
# Raw _board_state snapshot from another failing game (84 entries: cell
# flags plus trailing bookkeeping values 51/23 — presumably the two player
# squares of a 9x9 board; TODO confirm against isolation.Board's layout).
failed_test_case_7 = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                     0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1,
                     0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0,
                     1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0,
                     0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 51, 23]
from game_agent import MinimaxPlayer, AlphaBetaPlayer, custom_score, custom_score_2, custom_score_3, cell_distance # create an isolation board (by default 7x7) player1 = AlphaBetaPlayer(score_fn=custom_score_2) player2 = AlphaBetaPlayer(score_fn=custom_score) game = Board(player1, player2) # place player 1 on the board at row 2, column 3, then place player 2 on # the board at row 0, column 5; display the resulting board state. Note # that the .apply_move() method changes the calling object in-place. game.apply_move((2, 3)) game.apply_move((0, 5)) print(game.to_string()) print(cell_distance(game, game.active_player)) print(cell_distance(game, game.inactive_player)) print(custom_score_2(game, game.active_player)) # players take turns moving on the board, so player1 should be next to move assert (player1 == game.active_player) # get a list of the legal moves available to the active player print(game.get_legal_moves()) # get a successor of the current state by making a copy of the board and # applying a move. Notice that this does NOT change the calling object # (unlike .apply_move()). new_game = game.forecast_move((1, 1)) assert (new_game.to_string() != game.to_string()) print("\nOld state:\n{}".format(game.to_string())) print("\nNew state:\n{}".format(new_game.to_string()))