Example #1
0
 def replay(self):
     """Replay the recorded game from its initial position.

     Prints the starting board, then for each recorded move prints the
     Q assessment for that step, applies the move, and prints the
     resulting board.
     """
     board = Board.load(self.initial_state)
     print(board.pprint())
     for step, played in enumerate(self.moves):
         # Indexing (rather than zip) deliberately raises if the
         # assessments list is shorter than the move list.
         assessment = self.q_assessments[step]
         print('Q', assessment)
         board.move(played)
         print(board.pprint())
Example #2
0
    def test_export_parse(self):
        """Round-trip the board through export()/load() after each of ten
        random moves and check the reloaded copy matches the original."""
        board = Board(size=9, win_chain_length=5)
        for _ in range(10):
            board.make_random_move()
            reloaded = Board.load(board.export())
            # Disable last-move highlighting so the textual renderings of
            # the live board and the reloaded board are comparable.
            self.assertEqual(
                board.pprint(lastmove_highlight=False),
                reloaded.pprint(lastmove_highlight=False),
            )
            self.assertFalse(reloaded.game_over())
            self.assertFalse(reloaded.game_won())

        # Final sanity check on the last reloaded copy: the underlying
        # matrices must be element-wise identical.
        self.assertTrue(np.equal(reloaded._matrix, board._matrix).all())
Example #3
0
    def iterate_on(self, record):
        """Replay a finished game record, emitting feature tensors for the
        positions worth learning from.

        Walks *record* from its initial state. For each move, feature
        tensors are built when the move was played by the eventual winner,
        or — for drawn games (winner is ``Player.NONE``) — for every move.
        The move is then applied to advance the board.

        :param record: a game record exposing ``get_initial_state()``,
            ``get_q_assessments()``, ``get_moves()`` and
            ``get_winning_player()``.
        """
        board = Board.load(record.get_initial_state())
        q_assessments = record.get_q_assessments()

        for i, move in enumerate(record.get_moves()):
            winner = record.get_winning_player()
            # The original code had two identical make_feature_tensors
            # calls in separate if/elif branches; they are merged into a
            # single condition. Drawn positions (Player.NONE) are learned
            # from both sides.
            if winner == board.get_player_to_move() or winner == Player.NONE:
                self.make_feature_tensors(board, move, q_assessments[i][0],
                                          q_assessments[i][1])
            board.move(move)