def setUp(self):
    """Build a Game wired to mock CLI I/O, plus a test-database handle."""
    self.mock_cli_input = MockCLIInput()
    self.mock_cli_output = MockCLIOutput()
    self.ui = UIWrapper(self.mock_cli_output)
    self.rules = Rules()
    # Player X shares the fixture's mock input; player O gets its own.
    self.game = Game(
        Player("X", self.mock_cli_input, self.ui),
        Player("O", MockCLIInput(), self.ui),
        self.ui,
        Validations(),
        self.rules,
        Board(),
    )
    self.engine = create_engine(TEST_DB_ADDRESS)
    self.db = Database(self.engine)
def first_move_played_test():
    """notify_play on a fresh controller must prompt X, the first mover."""
    player_x = Player("X")
    player_x.play_notification = MagicMock()
    player_o = Player("O")
    controller = GameController(player_x, player_o, None, None)
    controller.notify_play()
    player_x.play_notification.assert_called_with(controller)
def place_move_test():
    """place_move records the symbol on the board, then notifies the opponent."""
    player_x = Player("X")
    player_o = Player("O")
    player_o.play_notification = MagicMock()
    controller = GameController(player_x, player_o, None, None)
    controller.place_move(player_x, 8)
    # The X mark must land exactly at position 8.
    assert_equal(8, controller._board.positions.index("X"))
    player_o.play_notification.assert_called_with(controller)
def reset_board_test():
    """reset() must return every board position to the empty string."""
    player_x = Player("X")
    player_o = Player("O")
    player_o.play_notification = MagicMock()
    controller = GameController(player_x, player_o, None, None)
    controller.place_move(player_x, 8)
    controller.reset()
    positions = controller._board.positions
    # After a reset every cell is open again, so the count of "" cells
    # equals the board size.
    open_positions = positions.count("")
    assert_equal(len(positions), open_positions)
def second_move_played_test():
    """With one X move already played, notify_play must prompt player O."""
    player_x = Player("X")
    player_o = Player("O")
    player_o.play_notification = MagicMock()
    controller = GameController(player_x, player_o, None, None)
    # Pre-load a board where X has taken position 0.
    board = GameBoard()
    board.play_move("X", 0)
    controller._board = board
    controller.notify_play()
    player_o.play_notification.assert_called_with(controller)
def setUp(self):
    """Create mock I/O, two standalone players, and a Game with a test DB."""
    self.mock_cli_input = MockCLIInput()
    self.output = MockCLIOutput()
    # Standalone players used directly by some tests.
    self.player1 = Player("X", MockCLIInput(), self.output)
    self.player2 = Player("O", MockCLIInput(), self.output)
    self.rules = Rules()
    self.board = Board()
    # The Game gets its own Player instances; X shares the fixture's input.
    self.game = Game(
        Player("X", self.mock_cli_input, self.output),
        Player("O", MockCLIInput(), self.output),
        self.output,
        Validations(),
        self.rules,
        self.board,
    )
    self.engine = create_engine(TEST_DB_ADDRESS)
    self.db = Database(self.engine)
def __init__(self, app):
    """Set up the menu window: icons, players, algorithm list, and signals.

    NOTE(review): icon paths mix "assets/" and "assets//" separators; both
    resolve on common filesystems, but normalising them would be tidier.
    """
    super().__init__()
    self.app = app
    # Load the UI layout generated from the Designer file.
    self.setupUi(self)
    # Set window icon.
    self.setWindowIcon(QtGui.QIcon("assets/emoji_x.png"))
    # One Player object per piece; the chosen one is selected below.
    self.player_x = Player(constants.CROSS, None)
    self.player_o = Player(constants.NOUGHT, None)
    self.enabled_button_stylesheet = "background-color: PaleGreen; border: 2px solid limegreen;"
    # Button icons.
    self.playAsCross.setIcon(QtGui.QIcon("assets//emoji_x.png"))
    self.playAsNought.setIcon(QtGui.QIcon("assets//emoji_o.png"))
    self.play3x3.setIcon(QtGui.QIcon("assets//3x3_board.png"))
    self.play5x5.setIcon(QtGui.QIcon("assets//5x5_board.png"))
    self.play7x7.setIcon(QtGui.QIcon("assets//7x7_board.png"))
    # Populate the algorithm dropdown; each item's data is the algorithm's
    # name so its description can be refreshed on selection later.
    self.algorithmDropdown.clear()
    for algorithm in constants.DESCRIPTIONS:
        self.algorithmDropdown.addItem(algorithm, algorithm)
    # Wire widget signals to their handlers.
    self.playAsCross.clicked.connect(
        functools.partial(self.select_player, self.player_x))
    self.playAsNought.clicked.connect(
        functools.partial(self.select_player, self.player_o))
    self.play3x3.clicked.connect(
        functools.partial(self.select_board_size, 3))
    self.play5x5.clicked.connect(
        functools.partial(self.select_board_size, 5))
    self.play7x7.clicked.connect(
        functools.partial(self.select_board_size, 7))
    self.algorithmDropdown.currentIndexChanged.connect(
        self.select_algorithm)
    self.playButton.clicked.connect(self.play_game)
    # Defaults: play as cross, on a 3x3 board, first algorithm selected.
    self.select_player(self.player_x)
    self.select_board_size(3)
    self.select_algorithm()
def create_player_o_object(self, saved_game, cli_input, ui):
    """Rebuild player O for a saved game: AI if stored as AI, else a human.

    NOTE(review): assumes a PlayerO row always exists for the saved game;
    `.first()` returning None would raise TypeError here — confirm callers.
    """
    session = self.create_session()
    row = session.query(PlayerO.is_ai).filter(
        PlayerO.saved_game_id == saved_game.id).first()
    is_ai = row[0]
    return AIMinimax("O", Rules()) if is_ai else Player("O", cli_input, ui)
def setUp(self):
    """Create a Game, a SetupGame, and reflect saved_games for DB checks."""
    self.engine = create_engine(TEST_DB_ADDRESS)
    self.db = Database(self.engine)
    self.mock_cli_input = MockCLIInput()
    self.mock_cli_output = MockCLIOutput()
    self.ui = UIWrapper(self.mock_cli_output)
    self.game = Game(
        Player("X", self.mock_cli_input, self.ui),
        Player("O", self.mock_cli_input, self.ui),
        self.ui,
        Validations(),
        Rules(),
        Board(),
    )
    self.setup_game = SetupGame(self.mock_cli_input, self.mock_cli_output, self.engine)
    # Reflect the saved_games table so tests can query it directly.
    meta = MetaData(bind=self.engine)
    self.game_table = Table("saved_games", meta, autoload=True, autoload_with=self.engine)
def playIncompleteGameAIPlaysSecond(self):
    """Persist a half-played game where the AI plays O (moves second)."""
    ai_game = Game(
        Player("X", self.mock_cli_input, self.ui),
        AIMinimax("O", Rules()),
        self.ui,
        Validations(),
        Rules(),
        Board(),
    )
    # Two opening moves so the saved game is mid-play.
    ai_game._board.make_move(1, self.game._player1._symbol)
    ai_game._board.make_move(3, self.game._player2._symbol)
    self.db.add_game_to_database(ai_game)
class PlayerTest(unittest.TestCase):
    """Unit tests for the human Player driven by mock CLI input/output."""

    def setUp(self):
        self.mock_cli_input = MockCLIInput()
        self.mock_cli_output = MockCLIOutput()
        self.ui = UIWrapper(self.mock_cli_output)
        self.player1 = Player("X", self.mock_cli_input, self.ui)

    def testPlayerIsInitializedWithASymbol(self):
        result = self.player1._symbol
        expected_result = "X"
        self.assertEqual(result, expected_result,
                         msg='\nRetrieved:\n{0} \nExpected:\n{1}'.format(
                             result, expected_result))

    def testPlayerMove(self):
        # The mock input's default drives the first move to position 1.
        result = self.player1.move(Board())
        expected_result = 1
        self.assertEqual(result, expected_result,
                         msg='\nRetrieved:\n{0} \nExpected:\n{1}'.format(
                             result, expected_result))

    def testPlayerMoveAgain(self):
        self.mock_cli_input.set_value(5)
        result = self.player1.move(Board())
        expected_result = 5
        self.assertEqual(result, expected_result,
                         msg='\nRetrieved:\n{0} \nExpected:\n{1}'.format(
                             result, expected_result))

    def testPlayerSymbolDisplayedWhenPlayersTurnToMakeAMove(self):
        self.player1.move(Board())
        result = self.mock_cli_output._last_output
        expected_result = "Player X, please make a move or type 'q' to save and quit game:"
        # BUG FIX: the original `result in expected_result` passed vacuously
        # whenever the captured output was "" (the empty string is a
        # substring of anything). Require a non-empty output that is still
        # contained in the expected prompt.
        self.assertTrue(result and result in expected_result,
                        msg='\nRetrieved:\n{0} \nExpected:\n{1}'.format(
                            result, expected_result))
def __init__(self, name, mark):
    """Initialise this player via the Player base, with a preset next move.

    next_move is hard-coded to 10; presumably an out-of-range placeholder
    meaning "no move chosen yet" — TODO confirm against board size.
    """
    Player.__init__(self, name, mark)
    self.next_move = 10
    # BUG FIX: removed a dangling, unterminated triple-quoted fragment
    # ('''super.name = name ...) — commented-out dead code left behind.
from tictactoe.board import Board
from tictactoe.player import Player
from tictactoe.bot_random import BotRandom

# Simple CLI match: a human ('x') against the random bot ('o').
b = Board()
p1 = Player('x', b)
p2 = BotRandom('o', b)
player_time = True  # True -> human's turn, False -> bot's turn
while not b.get_winner():
    if player_time:
        print('-' * 80)
        print(b)
        # Read two space-separated integers as board coordinates.
        row, col = (int(token) for token in input('choose x y: ').split(' '))
        p1.play(row, col)
    else:
        p2.play()
    player_time = not player_time
print('-' * 20, 'final score', '-' * 20)
print(b)
print('The winner is', b.get_winner())
def setUp(self):
    """Create a human player "X" backed by mock CLI input and output."""
    self.mock_cli_output = MockCLIOutput()
    self.mock_cli_input = MockCLIInput()
    self.ui = UIWrapper(self.mock_cli_output)
    self.player1 = Player("X", self.mock_cli_input, self.ui)
results.increment_win(winner)
# NOTE(review): the lines above/below are the tail of a training loop whose
# enclosing def/for headers lie outside this chunk — left byte-identical.
# Progress report roughly every 10% of the games.
# NOTE(review): integer-divides by (game_number // 10); this raises
# ZeroDivisionError when game_number < 10 — confirm callers guarantee >= 10.
if i % (game_number // 10) == 0:
    print("Games completed: {} ({:.0f}%)".format(i, (i * 100 / game_number)))
return results

# --- module-level training setup below ---

# Train on a 3x3 board by default
current_board = Board(BOARD_LAYOUT[3])
# Bind a path to save the data to
algorithm_data_path = os.path.join(
    "q_learning_trained_data",
    f"{current_board.dimensions}x{current_board.dimensions}")
previous_algorithms = {}
algorithms = ""
# Instantiation of Player objects with their respective pieces
nought_player, cross_player = Player(NOUGHT, None), Player(CROSS, None)
# Linking the board with the Players
current_board.player_nought, current_board.player_cross = nought_player, cross_player
# Prompt until the user enters a valid integer choice.
while True:
    try:
        algorithm_to_train = int(input("""[*] There are three training methods:\n [*] (1) Reinforcement Learning\n [*] (2) Minimax - ABP model\n [*] (3) MCTS\n > """))
        print(f"[*] Input successful: {algorithm_to_train}")
        break
    except ValueError:
        print("[*] Need to be one of the digits inside the brackets - please try again")
""" counter = Counter(winning_history) p1_win = 100 * counter[1.0] / sum(counter.values()) p2_win = 100 * counter[2.0] / sum(counter.values()) print('Player 1 Wins:', p1_win, '%') print('Player 2 Wins:', p2_win, '%') if __name__ == '__main__': # Create an instance of a Tic Tac Toe game. game = TicTacToe() # The learner object is used to initialize a learning player. # Note: Player one will always start the game. learner = Learner(game, learning_rate=LEARNING_RATE, discount_factor=DISCOUNT_FACTOR) game.p1 = Player(id=1, type='learner', learner=learner) # Let the learning player play against a random player. game.p2 = Player(id=2, type='random') # Play N_ROUNDS rounds of N_GAMES games and print the progress of the winning rate. for i in range(N_ROUNDS): print('Round', i + 1) winning_history = one_round_of_training(game) print_winner_stats(winning_history) # Let the trained agent play against a human player. game.p2.type = 'human' while True: game.start()
def __init__(self, name, mark):
    """Delegate initialisation to Player with the same (name, mark) pair."""
    Player.__init__(self, name, mark)