Пример #1
0
 def __init__(self):
     """Create a fresh AI handler with an empty transposition table."""
     self.__best_move = None
     self.__depth = AI.DEFAULT_DEPTH
     self.__move_selection_delegate = None
     self.__tp_table = TranspositionTable()
Пример #2
0
 def __init__(self, depth):
     """Set up search configuration, game state and statistics counters."""
     super(Bot, self).__init__()
     # Search configuration.
     self._depth = depth
     self.transpositionTable = TranspositionTable()
     # Game state.
     self.color = 0
     self.last_move = None
     # Search statistics.
     self.cuts = 0
     self.fails = 0
     self.nodes_explored = 0
     self.transposition_hits = 0
Пример #3
0
	def __init__(self, depth):
		"""Initialize the bot with a fixed search depth."""
		super(Bot, self).__init__()
		self.last_move = None
		self._depth = depth
		self.transpositionTable = TranspositionTable()
Пример #4
0
class Bot(LiacBot):
	name = 'Bot'

	def __init__(self, depth):
		super(Bot, self).__init__()
		self.transpositionTable = TranspositionTable()
		self._depth = depth
		self.last_move = None
		
	# Retorna board de maior value da lista
	def __maxTake(self, moves):
		(move, board) = moves.head()
		maxi = (move, board, board.value)
		
		for (m, board) in moves:
			val = board.value
			if val > maxi[2]:
				maxi = (m, board, val)
		newMoves = moves.remove(maxi[0:2])
		return (maxi, newMoves)		
	
	def __sortMoves(self, moves):
		for _ in range(5):
			(maxi, newMoves) = self.__maxTake(moves)
		for (_, board) in moves:
			val = board.value
	
	def __negaScout(self, board, depth, alpha, beta):
		if board.value == POS_INF: # WIN
			return (None, POS_INF)
		if board.value == NEG_INF: # LOSE
			return (None, NEG_INF)
		if depth == 0:
			return (None, board.value)
		
		# busca na tabela de transposicao
		ttEntry = self.transpositionTable.look_up(board.string, depth)
		if ttEntry != None:
			move, value = ttEntry
			return (move, value)
		
		moves = board.generate()
		if moves == []: # DRAW
			return (None, 0)
		sorted(moves, key=itemgetter(1), reverse=True)
		
		firstChild = True
		for moveValue in moves:
			(move, _) = moveValue
			board.move(move)
			if firstChild:
				firstChild = False
				bestValue = -(self.__negaScout(board, depth - 1, -beta, -alpha)[1])
				bestMove = move
			else:
				score = -(self.__negaScout(board, depth - 1, -alpha -1, -alpha)[1])
				if alpha < score and score < beta:
					score = -(self.__negaScout(board, depth - 1, -beta, -alpha)[1])
				if bestValue < score:
					bestValue = score
					bestMove = move
			board.unmove(move)
			alpha = max(alpha, bestValue)
			if alpha >= beta:
				break
				
		# armazena na tabela de transposicao
		self.transpositionTable.store(board.string, depth, bestMove, bestValue)
		
		return (bestMove, bestValue)
		
	# gera o proximo movimento a partir da poda alfa e beta
	def __alphaBeta(self, board, depth, alpha, beta):
		if board.value == POS_INF: # WIN
			return (None, POS_INF)
		if board.value == NEG_INF: # LOSE
			return (None, NEG_INF)
		if depth == 0:
			return (None, board.value)
		
		# busca na tabela de transposicao
		ttEntry = self.transpositionTable.look_up(board.string, depth)
		if ttEntry != None:
			move, value = ttEntry
			return move, value
		
		moves = board.generate()
		if moves == []: # DRAW
			return (None, 0)
		moves = sorted(moves, key=itemgetter(1), reverse=True)
		
		bestValue = NEG_INF
		(bestMove, _) = moves[0]
		for moveValue in moves:
			(move, _) = moveValue
			
			board.move(move)
			val = -(self.__alphaBeta(board, depth - 1, -beta, -alpha)[1])
			board.unmove(move)
			
			if bestValue < val:
				bestValue = val
				bestMove = move
			alpha = max(alpha, val)
			if alpha >= beta:
				break
		
		# armazena na tabela de transposicao
		self.transpositionTable.store(board.string, depth, bestMove, bestValue)
		
		return (bestMove, bestValue)

	def on_move(self, state):
		print 'Generating a move...\n',
		board = Board(state)

		if state['bad_move']:
			print state['board']
			raw_input()

		t0 = time.time()
		move, value = self.__negaScout(board, self._depth, NEG_INF, POS_INF)
		t = time.time()
		print 'Time:', t - t0

		self.last_move = move
		print move, ' value: ', value
		self.send_move(move[0], move[1])
		self.color = state["who_moves"]

	def on_game_over(self, state):
		if state['draw']:
			print 'Draw!'
		elif state['winner'] == self.color:
			print 'We won!'
		else:
			print 'We lost!'
Пример #5
0
 def __init__(self, depth):
     """Build a bot configured to search ``depth`` plies deep."""
     super(Bot, self).__init__()
     self.last_move = None
     self._depth = depth
     self.transpositionTable = TranspositionTable()
Пример #6
0
class Bot(LiacBot):
    name = 'BotTESTE'

    def __init__(self, depth):
        super(Bot, self).__init__()
        self.transpositionTable = TranspositionTable()
        self._depth = depth
        self.last_move = None

    # Retorna board de maior value da lista
    def __maxTake(self, moves):
        (move, board) = moves.head()
        maxi = (move, board, board.value)

        for (m, board) in moves:
            val = board.value
            if val > maxi[2]:
                maxi = (m, board, val)
        newMoves = moves.remove(maxi[0:2])
        return (maxi, newMoves)

    def __sortMoves(self, moves):
        for _ in range(5):
            (maxi, newMoves) = self.__maxTake(moves)
        for (_, board) in moves:
            val = board.value

    def __negaScout(self, board, depth, alpha, beta):
        if board.value == POS_INF:  # WIN
            return (None, POS_INF)
        if board.value == NEG_INF:  # LOSE
            return (None, NEG_INF)
        if depth == 0:
            return (None, board.value)

        # busca na tabela de transposicao
        ttEntry = self.transpositionTable.look_up(board.string, depth)
        if ttEntry != None:
            move, value = ttEntry
            return (move, value)

        moves = board.generate()
        if moves == []:  # DRAW
            return (None, 0)
        sorted(moves, key=itemgetter(1), reverse=False)

        firstChild = True
        for moveValue in moves:
            (move, _) = moveValue
            board.move(move)
            if firstChild:
                firstChild = False
                bestValue = -(self.__negaScout(board, depth - 1, -beta,
                                               -alpha)[1])
                bestMove = move
            else:
                score = -(self.__negaScout(board, depth - 1, -alpha - 1,
                                           -alpha)[1])
                if alpha < score and score < beta:
                    score = -(self.__negaScout(board, depth - 1, -beta,
                                               -alpha)[1])
                if bestValue < score:
                    bestValue = score
                    bestMove = move
            board.unmove(move)
            alpha = max(alpha, bestValue)
            if alpha >= beta:
                break

        # armazena na tabela de transposicao
        self.transpositionTable.store(board.string, depth, bestMove, bestValue)

        return (bestMove, bestValue)

    # gera o proximo movimento a partir da poda alfa e beta
    def __alphaBeta(self, board, depth, alpha, beta):
        if board.value == POS_INF:  # WIN
            return (None, POS_INF)
        if board.value == NEG_INF:  # LOSE
            return (None, NEG_INF)
        if depth == 0:
            return (None, board.value)

        # busca na tabela de transposicao
        ttEntry = self.transpositionTable.look_up(board.string, depth)
        if ttEntry != None:
            move, value = ttEntry
            return move, value

        moves = board.generate()
        if moves == []:  # DRAW
            return (None, 0)
        moves = sorted(moves, key=itemgetter(1), reverse=True)

        bestValue = NEG_INF
        (bestMove, _) = moves[0]
        for moveValue in moves:
            (move, _) = moveValue

            board.move(move)
            val = -(self.__alphaBeta(board, depth - 1, -beta, -alpha)[1])
            board.unmove(move)

            if bestValue < val:
                bestValue = val
                bestMove = move
            alpha = max(alpha, val)
            if alpha >= beta:
                break

        # armazena na tabela de transposicao
        self.transpositionTable.store(board.string, depth, bestMove, bestValue)

        return (bestMove, bestValue)

    def on_move(self, state):
        print 'Generating a move...\n',
        board = Board(state)

        if state['bad_move']:
            print state['board']
            raw_input()

        t0 = time.time()
        move, value = self.__negaScout(board, self._depth, NEG_INF, POS_INF)
        t = time.time()
        print 'Time:', t - t0

        self.last_move = move
        print move, ' value: ', value
        self.send_move(move[0], move[1])
        self.color = state["who_moves"]

    def on_game_over(self, state):
        if state['draw']:
            print 'Draw!'
        elif state['winner'] == self.color:
            print 'We won!'
        else:
            print 'We lost!'
Пример #7
0
class AI:
    """
    Describes the class that provides AI to the Connect4 game.
    This class implementation is based on the negamax variant of the alpha
    beta pruning algorithm, combined with caching transposition table and
    our Board bitboard style container.
    For more information & references see the comments below.
    """

    #: Describes the default moves exploring order (center-first).
    MOVE_EXPLORING_ORDER = [3, 4, 2, 5, 1, 6, 0]

    #: Creates a map between the given timeout and the search depth.
    TIMEOUT_DEPTH_MAP = {
        0.001: 1,
        0.010: 3,
        0.050: 4,
        0.1: 4,
        0.3: 5,
        0.5: 6,
        3: 7
    }

    #: Defines the default depth
    DEFAULT_DEPTH = 8

    #: Describe the exception message in case no move available.
    NO_AVAILABLE_MOVE_MESSAGE = "No possible AI moves"

    def __init__(self):
        """
        Initializes a new AI handler instance.
        """
        self.__tp_table = TranspositionTable()
        self.__move_selection_delegate = None
        self.__depth = AI.DEFAULT_DEPTH
        self.__best_move = None

    def reset(self):
        """
        Resets the AI data.
        """
        self.__tp_table.reset()
        self.__best_move = None
        self.__depth = AI.DEFAULT_DEPTH
        self.__move_selection_delegate = None

    def find_legal_move(self, g, func, timeout=None):
        """
        Looks for a legal move.
        :param g: The game instance.
        :param func: A callback fired when a move is selected. Note that
        this callback can be fired multiple times.
        :param timeout: A search timeout [optional].
        :raises RuntimeError: If no move is available.
        """
        board = g.get_board()

        # Can we perform any move?
        valid_moves = board.get_valid_moves()
        if board.is_full() or valid_moves == 0:
            raise RuntimeError(AI.NO_AVAILABLE_MOVE_MESSAGE)

        # Init
        self.reset()
        self.__move_selection_delegate = func

        # Handle obvious moves manually
        if self.__handle_obvious_moves(g.get_board()):
            return

        # Did we get a timeout?
        if timeout is not None:
            for v in AI.TIMEOUT_DEPTH_MAP:
                if timeout <= v:
                    self.__depth = AI.TIMEOUT_DEPTH_MAP[v]
                    break

        # Perform an IDS
        self.__iterative_deepening_search(g.get_board())

        # If nothing chosen, just get SOMETHING
        if self.__best_move is None:
            non_losing_moves = board.get_valid_non_losing_moves()
            if non_losing_moves != 0:
                # Do something
                func(self.__get_column_from_bitmask(non_losing_moves))
            else:
                # We're gonna lose....
                func(self.__get_column_from_bitmask(valid_moves))

    # region Private API

    def __handle_obvious_moves(self, board):
        """
        Attempt to create some manual, quick response cases.
        :return: True if the move has been handled, false otherwise.
        """
        # We are going to win?
        winning_moves = board.get_winning_moves()
        if winning_moves != 0:
            self.__move_selection_delegate(
                self.__get_column_from_bitmask(winning_moves))
            return True

        # We have only one valid non-losing move to perform? It'll be
        # usually when we'll gonna lose if we won't respond accordingly.
        non_losing_moves = board.get_valid_non_losing_moves()
        if non_losing_moves == 0:
            # We don't have anything safe we can play so we just choose
            # whatever we can. We lost :'(.
            self.__move_selection_delegate(
                self.__get_column_from_bitmask(board.get_valid_moves()))
            return True

        # This is a self-defense move?
        if Board.popcount(non_losing_moves) == 1:
            self.__move_selection_delegate(
                self.__get_column_from_bitmask(non_losing_moves))
            return True

        return False

    def __get_column_from_bitmask(self, bitmask):
        """
        Get the given bitmask move column.
        :param bitmask: The move bitmask.
        :return: The integer column, or None if no column bit is set.
        """
        for i in range(Board.WIDTH):
            if bitmask & Board.create_column_bitmask(i) != 0:
                return i

    def __iterative_deepening_search(self, board):
        """
        Performs an Iterative Deepening Search (IDS).
        See: https://chessprogramming.wikispaces.com/Iterative+Deepening.
        """
        for depth in range(0, self.__depth):
            pv_table, score = self.__tp_table_based_search(board,
                                                           Board.MIN_SCORE,
                                                           Board.MAX_SCORE,
                                                           depth + 1,
                                                           ply=1)

            if len(pv_table) > 0:
                self.__best_move = pv_table[0]
                self.__move_selection_delegate(pv_table[0])

    def __tp_table_based_search(self, board, alpha, beta, depth, ply=1):
        """
        Perform a transposition table based graph search. This function
        invokes the actual search algorithm only in case it can't find the
        board evaluation value in the TP table. In case it does, it returns
        it. Note that if a search is really performed, the TP table is
        being updated automatically by this function.
        :param board: The board instance.
        :param alpha: The alpha/beta cutoff algorithm alpha value.
        :param beta: The alpha/beta cutoff algorithm beta value
        :param depth: The searching depth.
        :param ply: The current play.
        :return: A tuple contains the Principal Variation table and the
        evaluation score.
        """
        # Gets the board hash
        key = board.get_hash()

        # Check if we got something in our transposition table
        has_hit, move_column, evaluation_score = self.__tp_table.thaw(
            key, alpha, beta, depth)

        if has_hit:
            if move_column is not None:
                return [move_column], evaluation_score
            return [], evaluation_score

        # Search the move using the PVS variation of the Alpha Beta algorithm.
        move_column, evaluation_score = self.__pvs_search(
            board, alpha, beta, depth, ply)

        # Store the move in our transposition table.
        self.__tp_table.freeze(key, move_column, evaluation_score, alpha, beta,
                               depth)

        return move_column, evaluation_score

    def __pvs_search(self, board, alpha, beta, depth, ply):
        """
        Implements a PVS search to evaluate the game board and look for
        the best board score in the Connect4 game. This algorithm is
        based on the negamax with alpha-beta pruning algorithm.

        For the used searching algorithms, see:
        PV: https://chessprogramming.wikispaces.com/Principal+Variation+Search
        Negamax: https://chessprogramming.wikispaces.com/Negamax
        Alpha-Beta: https://chessprogramming.wikispaces.com/Alpha-Beta

        :param board: The current board state to evaluate and performs the
        moves on. Note that this method assumes that nobody already won and
        that the current player cannot win next move. Thus these conditions
        must be checked before invoking this method.
        :param alpha: The score window within which we are evaluating the
        position for the maximizer (alpha < beta).
        :param beta: The score window within which we are evaluating the
        position for the minimizer (alpha < beta).
        :param depth: The current search depth.
        :param ply: The current ply number.
        :return: A tuple of (principal variation table, best score).
        """
        # We gonna have a draw?
        if board.get_performed_moves_count() >= \
                (Board.WIDTH * Board.HEIGHT - 2):
            return [], Board.DRAW_SCORE  # We're "meh" about it.

        # Do we have any more moves to do?
        valid_moves = board.get_valid_non_losing_moves()
        if valid_moves == 0:
            # No more valid moves, so return the (relatively) worst scoring
            # value we can have
            return [], Board.MIN_SCORE / ply

        if depth <= 0:
            return [], board.evaluate(0)

        # Iterate on the moves and search for the best move to perform.
        best_pv_table = []
        best_score = alpha
        for i, move_column in enumerate(AI.MOVE_EXPLORING_ORDER):
            # Get the move
            move_mask = valid_moves & Board.create_column_bitmask(move_column)
            if move_mask == 0:
                continue

            # Gets a clone of this board
            new_board = copy(board)

            # Perform 'dat move!
            new_board.perform_move_with_mask(move_mask)

            # We're implementing the PVS algorithm and thus we need to
            # reduce the window based on our iteration, depth and a/b
            # values, so what should we do now?
            if depth == 1 or i == 0 or (beta - alpha) == 1:
                pv_table, score = self.__tp_table_based_search(
                    new_board, -beta, -best_score, depth - 1, ply + 1)

            else:
                # Since we're implementing the PVS alpha/beta variation,
                # we use a zero-window for all of our other searches.
                # BUG FIX: the last argument is the ply, not the depth;
                # the original passed `depth + 1` here.
                _, score = self.__tp_table_based_search(
                    new_board, -best_score - 1, -best_score, depth - 1,
                    ply + 1)
                score = -score
                if score > best_score:
                    # Fail-high: re-search with the full window.
                    pv_table, score = self.__tp_table_based_search(
                        new_board, -beta, -best_score, depth - 1, ply + 1)
                else:
                    continue

            # Check the result score
            score = -score
            if score > best_score:
                # Save this score
                best_score = score
                best_pv_table = [move_column] + pv_table
            elif not best_pv_table:
                # Make sure we got a table...
                best_pv_table = [move_column] + pv_table

            if best_score >= beta:
                # Perform a beta cutoff
                break

        return best_pv_table, best_score
Пример #8
0
class Bot(LiacBot):
    name = 'Terminator'

    def __init__(self, depth):
        super(Bot, self).__init__()
        self.transpositionTable = TranspositionTable()
        self._depth = depth
        self.last_move = None
        self.color = 0
        self.transposition_hits = 0
        self.nodes_explored = 0
        self.cuts = 0
        self.fails = 0

    # Retorna board de maior value da lista
    def _maxTake(self, moves):
        (move, board) = moves.head()
        maxi = (move, board, board.value)

        for (m, board) in moves:
            val = board.value
            if val > maxi[2]:
                maxi = (m, board, val)
        newMoves = moves.remove(maxi[0:2])
        return (maxi, newMoves)

    def _sortMoves(self, moves):
        for _ in range(5):
            (maxi, newMoves) = self._maxTake(moves)
        for (_, board) in moves:
            val = board.value

    def _nega_scout(self, board, depth, alpha, beta, color, scout=False):
        self.nodes_explored += 1
        if board.value == POS_INF:  # WIN
            self.cuts += 1
            return None, POS_INF * color
        if board.value == NEG_INF:  # LOSE
            self.cuts += 1
            return None, NEG_INF * color
        if depth == 0:
            return None, board.value * color

        # busca na tabela de transposicao
        tt_entry = self.transpositionTable.look_up((board.string, color), depth)
        if None != tt_entry:
            tt_move, tt_value = tt_entry
            self.transposition_hits += 1
            return tt_move, tt_value

        moves = board.generate(color)
        if not moves:  # DRAW
            self.cuts += 1
            return None, 0

        moves = sorted(moves, key=itemgetter(1), reverse=(color==WHITE))

        first_child = True
        for moveValue in moves:
            move, value = moveValue
            board.move(move, color, value)
            if first_child:
                first_child = False
                best_value = -(self._nega_scout(board, depth - 1, -beta, -alpha, -color, scout)[1])
                best_move = move
            else:
                score = -(self._nega_scout(board, depth - 1, -alpha - 1, -alpha, -color, True)[1])
                if alpha < score < beta:
                    self.fails += 1
                    score = -(self._nega_scout(board, depth - 1, -beta, -alpha, -color, scout)[1])
                if best_value < score:
                    best_value = score
                    best_move = move
            board.unmove(move, color)
            alpha = max(alpha, best_value)
            if alpha >= beta:
                self.cuts += 1
                break
            
        # armazena na tabela de transposicao se nao for o scout
        if not scout:
            self.transpositionTable.store((board.string, color), depth, best_move, best_value)

        return best_move, best_value

    # gera o proximo movimento a partir da poda alfa e beta
    def _alpha_beta(self, board, depth, alpha, beta, color):
        self.nodes_explored += 1
        if board.value == POS_INF:  # WIN
            self.cuts += 1
            return None, POS_INF * color
        if board.value == NEG_INF:  # LOSE
            self.cuts += 1
            return None, NEG_INF * color
        if depth == 0:
            return None, board.value * color

        # busca na tabela de transposicao
        tt_entry = self.transpositionTable.look_up((board.string, color), depth)
        if None != tt_entry:
            tt_move, tt_value = tt_entry
            self.transposition_hits += 1
            return tt_move, tt_value

        moves = board.generate(color)
        if not moves:  # DRAW
            self.cuts += 1
            return None, 0

        moves = sorted(moves, key=itemgetter(1), reverse=(color==WHITE))

        best_move, best_value = moves[0], NEG_INF
        for moveValue in moves:
            move, _ = moveValue

            board.move(move, color)
            val = -(self._alpha_beta(board, depth - 1, -beta, -alpha, -color)[1])
            board.unmove(move, color)

            if best_value < val:
                best_value = val
                best_move = move
            alpha = max(alpha, val)
            if alpha >= beta:
                self.cuts += 1
                break

        # armazena na tabela de transposicao
        self.transpositionTable.store((board.string, color), depth, best_move, best_value * color)

        return best_move, best_value

    def on_move(self, state):
        self.color = state["who_moves"]
        print '--------------------------------------'
        print 'Talk to the hand...\n',
        board = Board(state)

        if state['bad_move']:
            print state['board']
            raw_input()

        self.transposition_hits = 0
        self.nodes_explored = 0
        self.cuts = 0
        t0 = time.time()
        move, value = self._nega_scout(board, self._depth, NEG_INF, POS_INF, self.color)
        #value *= self.color
        t = time.time()
        print 'Time:', t - t0
        print 'TT Size: ', len(self.transpositionTable._table)
        print 'TT Hits: ', self.transposition_hits
        print 'Nodes Explored: ', self.nodes_explored
        print 'Cuts: ', self.cuts
        print 'Scout Fails: ', self.fails
        self.last_move = move
        print move, ' value: ', value
        self.send_move(move[0], move[1])

    def on_game_over(self, state):
        if state['draw']:
            print 'No problemo.'
        elif state['winner'] == self.color:
            print 'You have been terminated.'
        else:
            print 'I\'ll be back.'
Пример #9
0
    def __init__(self,
                 max_iterations=config.MAX_ITER_MTD,
                 max_search_depth=config.MAX_DEPTH,
                 max_score=config.MAX_SCORE,
                 evaluator="negamax"):
        """
        Initialize the evaluator's search limits and piece-square tables.

        :param max_iterations: Iteration cap (named after MTD in the
            config constant — presumably for MTD(f); TODO confirm against
            the search code).
        :param max_search_depth: Maximum search depth.
        :param max_score: Score bound used by the search.
        :param evaluator: Evaluator name. NOTE(review): not stored or used
            in this constructor — verify whether callers rely on it.
        """
        self.max_iterations = max_iterations
        self.max_search_depth = max_search_depth
        self.max_score = max_score
        # Transposition table for caching evaluated positions.
        self.tt = TranspositionTable()

        # Piece-square tables keyed by piece code (1-7), each a flat
        # 64-entry array of positional bonuses/penalties.
        # NOTE(review): orientation (which end is rank 1) and units are
        # not visible here — confirm against the board representation.
        self.square_values = {
            # Pawn
            1:
            np.array([
                0, 0, 0, 0, 0, 0, 0, 0, 5, 10, 10, -20, -20, 10, 10, 5, 5, -5,
                -10, 0, 0, -10, -5, 5, 0, 0, 0, 20, 20, 0, 0, 0, 5, 5, 10, 25,
                25, 10, 5, 5, 10, 10, 20, 30, 30, 20, 10, 10, 50, 50, 50, 50,
                50, 50, 50, 50, 0, 0, 0, 0, 0, 0, 0, 0
            ]),
            # Knight
            2:
            np.array([
                -50, -40, -30, -30, -30, -30, -40, -50, -40, -20, 0, 5, 5, 0,
                -20, -40, -30, 5, 10, 15, 15, 10, 5, -30, -30, 0, 15, 20, 20,
                15, 0, -30, -30, 5, 15, 20, 20, 15, 5, -30, -30, 0, 10, 15, 15,
                10, 0, -30, -40, -20, 0, 0, 0, 0, -20, -40, -50, -40, -30, -30,
                -30, -30, -40, -50
            ]),
            # Bishop
            3:
            np.array([
                -20, -10, -10, -10, -10, -10, -10, -20, -10, 5, 0, 0, 0, 0, 5,
                -10, -10, 10, 10, 10, 10, 10, 10, -10, -10, 0, 10, 10, 10, 10,
                0, -10, -10, 5, 5, 10, 10, 5, 5, -10, -10, 0, 5, 10, 10, 5, 0,
                -10, -10, 0, 0, 0, 0, 0, 0, -10, -20, -10, -10, -10, -10, -10,
                -10, -20
            ]),
            # Rook
            4:
            np.array([
                0, 0, 0, 5, 5, 0, 0, 0, -5, 0, 0, 0, 0, 0, 0, -5, -5, 0, 0, 0,
                0, 0, 0, -5, -5, 0, 0, 0, 0, 0, 0, -5, -5, 0, 0, 0, 0, 0, 0,
                -5, -5, 0, 0, 0, 0, 0, 0, -5, 5, 10, 10, 10, 10, 10, 10, 5, 0,
                0, 0, 0, 0, 0, 0, 0
            ]),
            # Queen
            5:
            np.array([
                -20, -10, -10, -5, -5, -10, -10, -20, -10, 0, 5, 0, 0, 0, 0,
                -10, -10, 5, 5, 5, 5, 5, 0, -10, 0, 0, 5, 5, 5, 5, 0, -5, -5,
                0, 5, 5, 5, 5, 0, -5, -10, 0, 5, 5, 5, 5, 0, -10, -10, 0, 0, 0,
                0, 0, 0, -10, -20, -10, -10, -5, -5, -10, -10, -20
            ]),
            # King
            6:
            np.array([
                20, 30, 10, 0, 0, 10, 30, 20, 20, 20, 0, 0, 0, 0, 20, 20, -10,
                -20, -20, -20, -20, -20, -20, -10, -20, -30, -30, -40, -40,
                -30, -30, -20, -30, -40, -40, -50, -50, -40, -40, -30, -30,
                -40, -40, -50, -50, -40, -40, -30, -30, -40, -40, -50, -50,
                -40, -40, -30, -30, -40, -40, -50, -50, -40, -40, -30
            ]),
            # King End Game
            7:
            np.array([
                -50, -30, -30, -30, -30, -30, -30, -50, -30, -30, 0, 0, 0, 0,
                -30, -30, -30, -10, 20, 30, 30, 20, -10, -30, -30, -10, 30, 40,
                40, 30, -10, -30, -30, -10, 30, 40, 40, 30, -10, -30, -30, -10,
                20, 30, 30, 20, -10, -30, -30, -20, -10, 0, 0, -10, -20, -30,
                -50, -40, -30, -20, -20, -30, -40, -50
            ])
        }
Пример #10
0
class Evaluator():
    def __init__(self,
                 max_iterations=config.MAX_ITER_MTD,
                 max_search_depth=config.MAX_DEPTH,
                 max_score=config.MAX_SCORE,
                 evaluator="negamax"):
        """Search/evaluation engine combining negamax, MTD(f) and a
        transposition table.

        max_iterations: cap on zero-window passes inside one _mtd() call.
        max_search_depth: deepest ply reached by selectMove's deepening loop.
        max_score: magnitude of the initial MTD(f) score window bounds.
        evaluator: NOTE(review) — accepted but never stored; the evaluator
            choice is actually passed per call to negamax(). Confirm whether
            this parameter should be removed or wired up.
        """
        self.max_iterations = max_iterations
        self.max_search_depth = max_search_depth
        self.max_score = max_score
        # Shared cache of previously searched positions.
        self.tt = TranspositionTable()

        # Piece-square bonus tables (centipawn-style values), keyed by piece
        # type 1..6 (pawn..king) plus 7 for a separate king endgame table.
        # Laid out from White's point of view; negamax_evaluate mirrors the
        # square index (-(sq + 1)) for Black.
        self.square_values = {
            # Pawn
            1:
            np.array([
                0, 0, 0, 0, 0, 0, 0, 0, 5, 10, 10, -20, -20, 10, 10, 5, 5, -5,
                -10, 0, 0, -10, -5, 5, 0, 0, 0, 20, 20, 0, 0, 0, 5, 5, 10, 25,
                25, 10, 5, 5, 10, 10, 20, 30, 30, 20, 10, 10, 50, 50, 50, 50,
                50, 50, 50, 50, 0, 0, 0, 0, 0, 0, 0, 0
            ]),
            # Knight
            2:
            np.array([
                -50, -40, -30, -30, -30, -30, -40, -50, -40, -20, 0, 5, 5, 0,
                -20, -40, -30, 5, 10, 15, 15, 10, 5, -30, -30, 0, 15, 20, 20,
                15, 0, -30, -30, 5, 15, 20, 20, 15, 5, -30, -30, 0, 10, 15, 15,
                10, 0, -30, -40, -20, 0, 0, 0, 0, -20, -40, -50, -40, -30, -30,
                -30, -30, -40, -50
            ]),
            # Bishop
            3:
            np.array([
                -20, -10, -10, -10, -10, -10, -10, -20, -10, 5, 0, 0, 0, 0, 5,
                -10, -10, 10, 10, 10, 10, 10, 10, -10, -10, 0, 10, 10, 10, 10,
                0, -10, -10, 5, 5, 10, 10, 5, 5, -10, -10, 0, 5, 10, 10, 5, 0,
                -10, -10, 0, 0, 0, 0, 0, 0, -10, -20, -10, -10, -10, -10, -10,
                -10, -20
            ]),
            # Rook
            4:
            np.array([
                0, 0, 0, 5, 5, 0, 0, 0, -5, 0, 0, 0, 0, 0, 0, -5, -5, 0, 0, 0,
                0, 0, 0, -5, -5, 0, 0, 0, 0, 0, 0, -5, -5, 0, 0, 0, 0, 0, 0,
                -5, -5, 0, 0, 0, 0, 0, 0, -5, 5, 10, 10, 10, 10, 10, 10, 5, 0,
                0, 0, 0, 0, 0, 0, 0
            ]),
            # Queen
            5:
            np.array([
                -20, -10, -10, -5, -5, -10, -10, -20, -10, 0, 5, 0, 0, 0, 0,
                -10, -10, 5, 5, 5, 5, 5, 0, -10, 0, 0, 5, 5, 5, 5, 0, -5, -5,
                0, 5, 5, 5, 5, 0, -5, -10, 0, 5, 5, 5, 5, 0, -10, -10, 0, 0, 0,
                0, 0, 0, -10, -20, -10, -10, -5, -5, -10, -10, -20
            ]),
            # King
            6:
            np.array([
                20, 30, 10, 0, 0, 10, 30, 20, 20, 20, 0, 0, 0, 0, 20, 20, -10,
                -20, -20, -20, -20, -20, -20, -10, -20, -30, -30, -40, -40,
                -30, -30, -20, -30, -40, -40, -50, -50, -40, -40, -30, -30,
                -40, -40, -50, -50, -40, -40, -30, -30, -40, -40, -50, -50,
                -40, -40, -30, -30, -40, -40, -50, -50, -40, -40, -30
            ]),
            # King End Game
            7:
            np.array([
                -50, -30, -30, -30, -30, -30, -30, -50, -30, -30, 0, 0, 0, 0,
                -30, -30, -30, -10, 20, 30, 30, 20, -10, -30, -30, -10, 30, 40,
                40, 30, -10, -30, -30, -10, 30, 40, 40, 30, -10, -30, -30, -10,
                20, 30, 30, 20, -10, -30, -30, -20, -10, 0, 0, -10, -20, -30,
                -50, -40, -30, -20, -20, -30, -40, -50
            ])
        }

    # Negamax with alpha beta pruning, transposition table, move ordering and iterative deepening.
    def negamax(self, board, depth, alpha, beta, move, evaluator):
        """Negamax search with alpha-beta pruning and a transposition table.

        Returns a (moves, value, board) triple, where *moves* accumulates the
        line of best moves found (leaf first, best root move appended last).
        Flag convention (matches the store at the bottom): 0 = exact value,
        -1 = lower bound, 1 = upper bound.
        """
        old_alpha = alpha
        # Past moves.
        moves = []
        # Get player number according to the library.
        player = 1 if board.turn else -1
        if depth == 0 or board.is_game_over():
            # entry.score = evaluator(board, entry.result)
            moves.append(move)
            if evaluator == "negamax":
                # Static eval is White-relative, so adjust by side to move.
                return moves, self.negamax_evaluate(board) * player, board
            else:
                # nn_evaluate already flips its score for the side to move.
                return moves, self.nn_evaluate(board), board
        # Transposition Table Entry
        ttEntry = self.tt.lookup(board)

        # Transposition Table usage.
        if ttEntry and ttEntry.depth >= depth:
            if ttEntry.flag == 0:
                # Exact score: reuse it (and its move, if we have none).
                if not move:
                    move = ttEntry.move
                moves.append(move)
                return moves, ttEntry.value, board
            elif ttEntry.flag == -1:
                # Lower bound: raise alpha.
                alpha = max(alpha, ttEntry.value)
            elif ttEntry.flag == 1:
                # Upper bound: lower beta.
                beta = min(beta, ttEntry.value)

            # alpha-beta cut-off.
            if alpha >= beta:
                if not move:
                    move = ttEntry.move
                moves.append(move)
                return moves, ttEntry.value, board

        value = float("-inf")
        best_move = None
        legal_moves = board.legal_moves

        # For each legal move in the current board state.
        for i in legal_moves:
            # Push the current move to the board.
            board.push(i)

            # Generate new moves and a value (recursive).
            new_moves, current_value, b = self.negamax(board, depth - 1, -beta,
                                                       -alpha, i, evaluator)
            current_value = -current_value

            # Pop so the current displayed board does not change.
            board.pop()

            # Get the maximum value and set best move.
            if current_value > value:
                value = current_value
                moves = new_moves
                best_move = i

            # Redefine alpha.
            alpha = max(alpha, value)

            # alpha-beta cut-off.
            if alpha >= beta:
                break

        # If there is no best move, get current move.
        if not best_move:
            best_move = move

        # Classify the score relative to the original window for the TT.
        if value <= old_alpha:
            flag = 1
        elif value >= beta:
            flag = -1
        else:
            flag = 0

        # Store the current board state with other parameters in the transposition table.
        self.tt.store(board, value, flag, depth, best_move)
        moves.append(best_move)
        return moves, value, board

    def _mtd(self, board, depth, firstGuess):
        """MTD(f) driver: converge on the minimax value with repeated
        zero-window negamax searches.

        board: position to search. depth: fixed search depth for every pass.
        firstGuess: starting guess for the value (typically the result of the
        previous, shallower iteration).
        Returns (move, guess, finalBoard) as produced by the last negamax
        call (move is the move-list negamax returns), or (None, firstGuess,
        None) if the loop never runs.

        Bug fixes vs. the previous version:
        - the iteration counter was only incremented on fail-high passes, so
          max_iterations did not bound a sequence of fail-low passes; it now
          counts every pass.
        - `move` could be referenced unbound when the loop body never
          executed; it is now initialised to None.
        """
        guess = firstGuess
        move = None
        finalBoard = None
        upperBound = self.max_score
        lowerBound = -self.max_score
        i = 0

        while lowerBound < upperBound and i < self.max_iterations:
            # Pick the zero-window test value: never test at the lower bound
            # itself, or a fail-high there would not raise it.
            if guess == lowerBound:
                gamma = guess + 1
            else:
                gamma = guess
            # Null-window search (gamma - 1, gamma) in "neural" mode.
            move, guess, finalBoard = self.negamax(board, depth, gamma - 1,
                                                   gamma, None, "neural")
            if guess < gamma:
                # Failed low: the true value is below gamma.
                upperBound = guess
            else:
                # Failed high: the true value is at least gamma.
                lowerBound = guess
            # Count every zero-window pass so max_iterations caps the loop.
            i = i + 1
        return move, guess, finalBoard

    # MTDf
    def selectMove(self, board):
        """Iteratively deepen MTD(f) from depth 2 up to max_search_depth.

        Even and odd depths keep independent first guesses and result
        boards, because scores at alternating depths tend to oscillate;
        the result matching the parity of the final depth is returned as
        a (move, guess, board) triple.
        """
        # Slot 0 tracks even depths, slot 1 odd depths.
        guesses = [1 << 64, 1 << 64]
        boards = [None, None]

        for depth in range(2, self.max_search_depth + 1):
            slot = depth % 2
            move, guesses[slot], boards[slot] = self._mtd(
                board, depth, guesses[slot])

        slot = self.max_search_depth % 2
        return (move, guesses[slot], boards[slot])

    def clearTranspositionTable(self):
        # Drop all cached search results by replacing the table wholesale.
        self.tt = TranspositionTable()

    # Normal Evaluation Function
    def negamax_evaluate(self, board):
        """Classical static evaluation: material plus piece-square bonuses,
        White-positive, normalised and clamped to [-1.0, 1.0].

        Piece types 1..5 (pawn..queen) are counted; the king is skipped.
        """
        base_values = {
            chess.PAWN: 1,
            chess.KNIGHT: 3,
            chess.BISHOP: 3,
            chess.ROOK: 5,
            chess.QUEEN: 9,
            chess.KING: 100
        }

        white_total, black_total = 0, 0
        for piece_type in range(1, 6):
            white_squares = board.pieces(piece_type, True)
            black_squares = board.pieces(piece_type, False)
            table = self.square_values[piece_type]
            # Material contribution in centipawns.
            centipawns = base_values[piece_type] * 100

            white_bonus, black_bonus = 0, 0
            if len(white_squares) > 0:
                white_bonus = table[np.array(list(white_squares))]
            if len(black_squares) > 0:
                # Black reads the mirrored table: square s -> index -(s + 1).
                black_bonus = table[-(np.array(list(black_squares)) + 1)]

            white_total += (np.sum(white_bonus) +
                            len(white_squares) * centipawns)
            black_total -= (np.sum(black_bonus) +
                            len(black_squares) * centipawns)

        # Normalise; 3500 roughly caps the attainable material swing.
        score = (white_total + black_total) / 3500
        if score > 1.0:
            return 1.0
        if score < -1.0:
            return -1.0
        return score

    # Neural Network Evaluation Function
    def nn_evaluate(self, board):
        """Tapered evaluation used by the 'neural' search mode.

        Blends opening (weights.initPosPnts) and endgame
        (weights.finalPosPnts) piece-square weights according to the game
        phase, adds material, and returns the score from the side to move's
        perspective.

        Bug fix: the game-over branch used two independent `if` statements,
        so a "1-0" result fell into the `else` of the second test and was
        scored -100 instead of 100; the tests are now an if/elif/else chain.
        """
        pawnScore = config.PAWN_SCORE
        # Material values for piece types 1..6 (king contributes 0).
        materialPnts = [
            pawnScore, 4 * pawnScore,
            int(round(4.1 * pawnScore)), 6 * pawnScore, 12 * pawnScore, 0
        ]
        # Phase weight per piece type; total over the start position is 24.
        phasePoints = [0, 1, 1, 2, 4, 0]
        points = 0

        if board.is_game_over():
            result = board.result()
            if result == "1-0":
                points = 100
            elif result == "1/2-1/2":
                points = 0
            else:
                points = -100
            # Report relative to the side to move.
            if not board.turn:
                points = -points
            return points

        materialPoints = initialPoints = endPoints = 0
        phase = totalPhase = 24

        for i in range(0, 64):
            piece = board.piece_at(i)
            if piece:
                phase -= phasePoints[piece.piece_type - 1]
                piece_value = materialPnts[piece.piece_type - 1]
                initial = weights.initPosPnts[piece.piece_type - 1]
                final = weights.finalPosPnts[piece.piece_type - 1]

                if piece.color:
                    materialPoints += piece_value
                    initialPoints += initial[i]
                    endPoints += final[i]
                else:
                    # Black uses the vertically mirrored square index.
                    materialPoints -= piece_value
                    initialPoints -= initial[63 - i]
                    endPoints -= final[63 - i]

        # Rescale phase to ~0..256; higher phase weights the endgame table.
        phase = (phase * 256 + (totalPhase / 2)) / totalPhase
        points = materialPoints + \
            ((initialPoints * (256 - phase)) + (endPoints * phase)) / 256

        # Flip the White-relative score when it is Black to move.
        if (board.turn == False):
            points = -points
        return points

    def findFeatures(self, board, color):
        """Build phase-weighted piece-square feature vectors for training.

        Produces two flat 384-entry lists (6 piece types x 64 squares): one
        weighted toward the opening, one toward the endgame, with entries
        signed positively for *color*'s pieces and negatively (and square-
        mirrored) for the opponent's.
        """
        phase_weights = [0, 1, 1, 2, 4, 0]
        raw_init = [[0] * 64 for _ in range(6)]
        raw_fin = [[0] * 64 for _ in range(6)]
        sign = 1 if color else -1
        total_phase = 24

        # First pass: derive the game phase from the remaining material.
        phase = total_phase
        for square in range(0, 64):
            piece = board.piece_at(square)
            if piece:
                phase -= phase_weights[piece.piece_type - 1]
        phase = (phase * 256 + (total_phase / 2)) / total_phase

        # Blend factors are fixed once the phase is known, so hoist them.
        opening_weight = (256.0 - phase) / 256.0
        ending_weight = phase / 256.0

        # Second pass: accumulate per-square, per-piece-type features.
        for square in range(0, 64):
            piece = board.piece_at(square)
            if piece is None:
                continue
            type_index = piece.piece_type - 1
            if piece.color:
                raw_init[type_index][square] += opening_weight * sign
                raw_fin[type_index][square] += ending_weight * sign
            else:
                # Opponent pieces use the mirrored square and opposite sign.
                raw_init[type_index][63 - square] -= opening_weight * sign
                raw_fin[type_index][63 - square] -= ending_weight * sign

        featuresInit = [value for row in raw_init for value in row]
        featuresFin = [value for row in raw_fin for value in row]
        return (featuresInit, featuresFin)
Пример #11
0
 def clearTranspositionTable(self):
     # Reset the search cache by swapping in a fresh transposition table.
     self.tt = TranspositionTable()
Пример #12
0
 def __init__(self, fen=None, cache_size=1e7):
     """Engine constructor: position encoder, board (optionally from a FEN
     string), bounded transposition table, and search bookkeeping."""
     self.p_encoder = Position()
     self.board = chess.Board(fen) if fen else chess.Board()
     self.ttable = TranspositionTable(cache_size)
     # (move, eval) results collected by tree-search variants.
     self.MOVE_VAL = []
     # Counter of nodes/evaluations, reported as 'num_pred'.
     self.X = 0
Пример #13
0
class ChessEngine:
    def __init__(self, fen=None, cache_size=1e7):
        """Create an engine with a fresh board and transposition table.

        fen: optional FEN string of the starting position (default: the
            standard initial position).
        cache_size: capacity passed to the transposition table.
        """
        self.p_encoder = Position()
        self.board = chess.Board(fen) if fen else chess.Board()
        self.ttable = TranspositionTable(cache_size)
        # (move, eval) pairs produced by the tree-search variants.
        self.MOVE_VAL = []
        # Node/evaluation counter, reported to callers as 'num_pred'.
        self.X = 0

    def reset(self, fen=None):
        """Reset the game to a fresh board.

        fen: optional FEN string describing the position to reset to.
            Bug fix: this argument was previously accepted but ignored (the
            board always reset to the initial position); it is now honoured,
            matching __init__'s behaviour.
        """
        self.board = chess.Board(fen) if fen else chess.Board()

    def get_results(self):
        res = self.board.result()
        if res == '0-1':
            return -1.0
        elif res == '1-0':
            return 1.0
        return 0.0

    def nn_eval(self):
        """Score the current position with the neural-network model: encode
        the board, run one prediction, and return the scalar output."""
        # return random.uniform(-1.0, 1.0)
        return model.predict(
            np.asarray([self.p_encoder.binary_encode(self.board)]))[0, 0]

    def conventional_eval(self):
        """Classical evaluation: material plus piece-square bonuses over all
        piece types, White-positive, normalised and clamped to [-1.0, 1.0]."""
        # TODO enable 3fold rep/50 move rule by improving client-server interaction
        score = 0
        for piece_type in range(1, 7):
            white_squares = self.board.pieces(piece_type, True)
            black_squares = self.board.pieces(piece_type, False)
            table = piece_square_tables[piece_type]

            white_bonus = table[np.array(
                list(white_squares))] if len(white_squares) > 0 else 0
            # Black reads the mirrored table: square s -> index -(s + 1).
            black_bonus = table[-(np.array(list(black_squares)) +
                                  1)] if len(black_squares) > 0 else 0

            score += (np.sum(white_bonus) +
                      len(white_squares) * piece_values[piece_type])
            score -= (np.sum(black_bonus) +
                      len(black_squares) * piece_values[piece_type])

        normalised = score / 3500  #normalize between -1, 1
        return max(-1.0, min(1.0, normalised))

    def move(self, san):
        """Play a move given in SAN notation (e.g. 'Nf3')."""
        self.board.push_san(san)

    def _move(self, san):
        # Despite the parameter name, this receives a Move object and
        # pushes it directly (see computer_move, which passes moves[-1]).
        self.board.push(san)

    def reset_X(self):
        # Reset the node/prediction counter reported as 'num_pred'.
        self.X = 0

    def clear_move_list(self):
        # Forget (move, eval) pairs accumulated by previous searches.
        self.MOVE_VAL.clear()

    def take_back(self):
        """Undo the last move; if it is then Black's turn, undo one more —
        presumably to rewind a full move pair so the human (White) moves
        again. TODO confirm intended side convention."""
        self.board.pop()
        if not self.board.turn:
            self.board.pop()

    def move_order(self, root, depth):
        """Extract the principal line from a search tree built by
        min_max_tree_alpa_beta_nodes2.

        Starting at *root*, at each level pick the child with the best value
        for the side to move (max for White, min for Black) and collect
        (rounded value, node name) pairs, up to *depth* levels or until a
        leaf is reached.
        """
        move_order = []
        node = root
        turn = self.board.turn
        for i in range(depth):
            if len(node.children) == 0:
                return move_order
            # node = sorted(node.children, key = lambda x: x.val, reverse = turn)[0]
            # o(n) time instead of sorting
            nodes = np.array([[n.val, n] for n in node.children])
            best = nodes[np.argmax(nodes[:, 0])] if turn else nodes[np.argmin(
                nodes[:, 0])]
            move_order.append((round(best[0], 4), best[1].name))
            # Alternate sides as we descend the tree.
            turn = not turn
            node = best[1]
        return move_order

    def computer_move(self, depth=4, verbose='n', depth_to_sort=1, nn=False):
        """Pick and play the engine's move; return a result dict or None.

        depth: search depth. depth_to_sort: how many plies from the root use
        heuristic move ordering. nn: use the neural evaluation in the cached
        search. verbose: 't'/'t+' runs the tree-building minimax (and with
        't+' prints the rendered tree); anything else runs the cached
        iterative-deepening negamax.
        The returned dict contains the chosen move, its evaluation, the
        number of evaluations ('num_pred') and, in tree mode, the move order.
        """
        if 't' in verbose:
            # Tree mode: build an explicit anytree search tree for display.
            root = Node('root', depth=0, val=None, no=0)
            self.min_max_tree_alpa_beta_nodes2(-1.0, 1.0, depth, 1, depth,
                                               root)
            move_order = self.move_order(root, depth)
            print(move_order)
            if verbose == 't+':
                for row in RenderTree(
                        root,
                        childiter=lambda x: sorted(
                            x, key=lambda y: y.val, reverse=self.board.turn)):
                    print("%s%d %s %.3f" %
                          (row.pre, row.node.no, row.node.name, row.node.val))
            # print(RenderTree(root))
            # UniqueDotExporter(root, indent=4, nodeattrfunc=lambda node:
            # 'label="%s. %s = %.3f"' % (str(node.no), node.name, node.val)).to_picture("udo.pdf")
            if len(self.MOVE_VAL) == 1:
                val = self.MOVE_VAL[0]
                self.clear_move_list()
                data = {
                    'move': val[0],
                    'eval': val[1],
                    'moveOrder': move_order,
                    'num_pred': self.X
                }
                self.reset_X()
                self.move(val[0])
                return data
            return None
        else:
            # Search mode: cached iterative-deepening negamax.
            moves, val = self.iterative_deepening_cache(
                depth, depth_to_sort, nn)
            if moves:
                print(moves)
                # moves[-1] is the best root move (appended last by the search).
                data = {
                    'move': self.board.san(moves[-1]),
                    'eval': -val,
                    'num_pred': self.X
                }
                self.reset_X()
                self._move(moves[-1])
                return data
            return None

    def attacked_by_inferior_piece(self, move, sqr):
        """Return True if *sqr* is attacked by an enemy piece 'inferior' to
        the piece making *move* (pawn < minor < rook < queen), judging the
        moving piece from the first letter of its SAN string."""
        san = self.board.san(move)
        # Enemy piece types counted as inferior, keyed by SAN piece letter.
        # Pawn moves (lowercase file letter) and king moves have no entry.
        inferior_attackers = {
            'B': (chess.PAWN,),
            'N': (chess.PAWN,),
            'R': (chess.PAWN, chess.BISHOP, chess.KNIGHT),
            'Q': (chess.PAWN, chess.BISHOP, chess.KNIGHT, chess.ROOK),
        }
        threats = inferior_attackers.get(san[0])
        if not threats:
            return False
        for square in self.board.attackers(not self.board.turn, sqr):
            if self.board.piece_type_at(square) in threats:
                return True
        return False

    def num_defenders_to_square(self, move):
        # Friendly pieces attacking (defending) the destination square.
        return len(self.board.attackers(self.board.turn, move.to_square))

    def num_attackers_to_square(self, move):
        # Enemy pieces attacking the destination square.
        return len(self.board.attackers(not self.board.turn, move.to_square))

    def sort_moves(self):
        """Heuristically order the legal moves for alpha-beta search.

        Likely-good moves (pawn captures, captures of undefended pieces,
        well-defended escapes from an inferior attacker) go to the front;
        likely-bad moves (queen into an attacked square, moving into an
        inferior piece's attack) go to the back; the rest slot in between
        at index *post_smart*. A mating move short-circuits and is returned
        as the only candidate.
        """
        legals = []
        # Boundary index between the "probably good" prefix and the rest.
        post_smart = 0
        for move in self.board.legal_moves:
            m = self.board.san(move)
            # checks if move is checkmate
            if m[-1] == '#':
                return [move]
            # check capture moves
            if 'x' in m:
                # checks if move is a pawn capture, probably a good move
                if m[0].islower():
                    legals.insert(0, move)
                    post_smart += 1
                    continue
                # takes undefended piece, probably a good move
                elif not self.board.is_attacked_by(not self.board.turn,
                                                   move.to_square):
                    legals.insert(0, move)
                    post_smart += 1
                    continue
                else:
                    legals.insert(post_smart, move)
                    continue

            # checks bad queen moves
            if m[0] == 'Q':
                # check if queen going to square controlled by other player, probably a bad move
                if self.board.is_attacked_by(not self.board.turn,
                                             move.to_square):
                    legals.append(move)
                    continue
                else:
                    legals.insert(post_smart, move)
                    continue

            # check if current piece being moved is being attacked by a piece inferior to it and if so,
            # is it being moved to a place where an inferior piece will attack it
            elif self.attacked_by_inferior_piece(move, move.from_square):
                # piece moves to square where different inferior piece is attacking it probably bad move
                if self.attacked_by_inferior_piece(move, move.to_square):
                    legals.append(move)
                    continue
                else:
                    # check square that piece is going to is defended more than attacked - probably a good move
                    if self.num_defenders_to_square(
                            move) >= self.num_attackers_to_square(move):
                        legals.insert(0, move)
                        post_smart += 1
                        continue
                    # if more attackers than defenders, probably a bad move
                    else:
                        legals.append(move)
                        continue
            # piece moves to square where inferior piece is attacking it probably bad move
            elif self.attacked_by_inferior_piece(move, move.to_square):
                legals.append(move)
                continue

            # TODO check bad moves of other pieces
            legals.insert(post_smart, move)

        return legals

    def store_in_table(self, val, flag, depth_, move):
        """Cache (value, bound flag, depth, best move) for the current
        position. All four arguments are required."""
        return self.ttable.store(self.board, val, flag, depth_, move)

    def get_from_table(self):
        """Look up the current position; returns the cached entry or None."""
        res = self.ttable[self.board]
        return res if res else None

    def empty_table(self):
        # Discard every cached entry.
        self.ttable.empty_cache()

    def iterative_deepening_cache(self, depth, depth_to_sort, nn=False):
        """Iteratively deepen negamax_cache from depth 1 to *depth*, feeding
        each iteration's move list into the next as an ordering hint.
        Returns the final (moves, value) pair."""
        best_moves, score = self.negamax_cache(-1.0, 1.0, 1, None, 1,
                                               depth_to_sort, None, nn)
        for current_depth in range(2, depth + 1):
            best_moves, score = self.negamax_cache(-1.0, 1.0, current_depth,
                                                   None, current_depth,
                                                   depth_to_sort, best_moves,
                                                   nn)
        return best_moves, score

    def iterative_deepening(self, depth, depth_to_sort):
        """Iteratively deepen the uncached negamax from depth 1 to *depth*,
        reusing each iteration's move list as the next ordering hint.
        Returns the final (moves, value) pair."""
        best_moves, score = self.negamax_it_deep(-1.0, 1.0, 1, None, 1,
                                                 depth_to_sort, None)
        for current_depth in range(2, depth + 1):
            best_moves, score = self.negamax_it_deep(-1.0, 1.0, current_depth,
                                                     None, current_depth,
                                                     depth_to_sort, best_moves)
        return best_moves, score

    def negamax_cache(self, alpha, beta, depth, move, original_depth,
                      depth_to_sort, prev_moves, nn):
        """Negamax with alpha-beta pruning, transposition table and
        iterative-deepening move hints.

        move: the move that led to this node (None at the root).
        prev_moves: move list from the previous (shallower) iteration; the
            entry for this depth is tried first.
        nn: evaluate leaves with the neural network instead of the
            conventional evaluation.
        Returns (move_set, value): the line of best moves (leaf first, root
        move last) and its negamax score for the side to move.
        """
        self.X += 1
        move_set = []
        alpha_orig = alpha
        # +1 for White to move, -1 for Black (negamax sign convention).
        color = 1 if self.board.turn else -1
        if self.board.is_game_over(claim_draw=False):
            move_set.append(move)
            return move_set, self.get_results() * color

        # Probe the transposition table for this position.
        cached = self.get_from_table()
        if cached and cached.entry_depth >= depth:
            if cached.flag == EXACT:
                move = cached.move if not move else move
                move_set.append(move)
                return move_set, cached.val
            elif cached.flag == LOWER:
                alpha = max(alpha, cached.val)
            elif cached.flag == UPPER:
                beta = min(beta, cached.val)
            if alpha >= beta:
                # Bounds from the table alone are enough to cut off.
                move = cached.move if not move else move
                move_set.append(move)
                return move_set, cached.val

        if depth == 0:
            move_set.append(move)
            val = self.nn_eval() if nn else self.conventional_eval()
            return move_set, val * color
        best_move = None
        max_val = -1.0

        # Only spend time on heuristic ordering near the root.
        if original_depth - depth < depth_to_sort:
            legals = self.sort_moves()
        else:
            legals = list(self.board.legal_moves)

        # Try the previous iteration's move for this ply first.
        if prev_moves and len(prev_moves) >= depth and prev_moves[depth -
                                                                  1] in legals:
            legals.insert(0, prev_moves[depth - 1])

        for m in legals:
            self.board.push(m)
            new_move_set, pred_eval = self.negamax_cache(
                -beta, -alpha, depth - 1, m, original_depth, depth_to_sort,
                prev_moves, nn)
            pred_eval = -pred_eval
            self.board.pop()
            if pred_eval > max_val:
                move_set = new_move_set
                max_val, best_move = pred_eval, m

            alpha = max(max_val, alpha)
            if alpha >= beta:
                break
        # Classify the score against the original window for the TT store.
        if max_val <= alpha_orig:
            flag = UPPER
        elif max_val >= beta:
            flag = LOWER
        else:
            flag = EXACT

        # Fall back to the last tried move if nothing improved on -1.0.
        if not best_move: best_move = m
        self.store_in_table(max_val, flag, depth, best_move)
        move_set.append(best_move)
        return move_set, max_val

    def negamax_it_deep(self, alpha, beta, depth, move, original_depth,
                        depth_to_sort, prev_moves):
        """Negamax with alpha-beta pruning and iterative-deepening move
        hints, but without the transposition table (compare negamax_cache).

        Returns (move_set, value): line of best moves (leaf first, root move
        last) and its score for the side to move.
        """
        self.X += 1
        move_set = []
        # +1 for White to move, -1 for Black (negamax sign convention).
        color = 1 if self.board.turn else -1
        if self.board.is_game_over(claim_draw=False):
            move_set.append(move)
            return move_set, self.get_results() * color
        if depth == 0:
            move_set.append(move)
            return move_set, self.conventional_eval() * color
        best_move = None
        max_val = -1.0

        # Only spend time on heuristic ordering near the root.
        if original_depth - depth < depth_to_sort:
            legals = self.sort_moves()
        else:
            legals = list(self.board.legal_moves)

        # Try the previous iteration's move for this ply first.
        if prev_moves and len(prev_moves) >= depth and prev_moves[depth -
                                                                  1] in legals:
            legals.insert(0, prev_moves[depth - 1])

        for m in legals:
            self.board.push(m)
            new_move_set, pred_eval = self.negamax_it_deep(
                -beta, -alpha, depth - 1, m, original_depth, depth_to_sort,
                prev_moves)
            pred_eval = -pred_eval
            self.board.pop()
            if pred_eval > max_val:
                move_set = new_move_set
                max_val, best_move = pred_eval, m

            alpha = max(max_val, alpha)
            if alpha >= beta:
                break
        # Fall back to the last tried move if nothing improved on -1.0.
        if not best_move: best_move = m
        move_set.append(best_move)
        return move_set, max_val

    def negamax(self, alpha, beta, depth, original_depth, depth_to_sort):
        """Plain negamax with alpha-beta pruning and a transposition table.

        At the root (depth == original_depth) the chosen move and score are
        appended to self.MOVE_VAL. Returns the negamax value for the side to
        move.

        Bug fix: store_in_table requires four arguments (val, flag, depth,
        move); the previous call passed only three and raised TypeError as
        soon as a position was stored. The best move is now passed, with a
        fallback to the last examined move so a valid move is always stored.
        """
        self.X += 1
        alpha_orig = alpha

        # +1 for White to move, -1 for Black (negamax sign convention).
        color = 1 if self.board.turn else -1
        if self.board.is_game_over(claim_draw=False):
            return self.get_results() * color

        # Probe the transposition table for this position.
        cached = self.get_from_table()
        if cached and cached.entry_depth >= depth:
            if cached.flag == EXACT:
                return cached.val
            elif cached.flag == LOWER:
                alpha = max(alpha, cached.val)
            elif cached.flag == UPPER:
                beta = min(beta, cached.val)
            if alpha >= beta:
                return cached.val

        if depth == 0:
            return self.conventional_eval() * color
        best_move = None
        max_val = -1.0
        # Only spend time on heuristic ordering near the root.
        if original_depth - depth < depth_to_sort:
            legals = self.sort_moves()
        else:
            legals = self.board.legal_moves
        for move in legals:
            self.board.push(move)
            pred_eval = -self.negamax(-beta, -alpha, depth - 1, original_depth,
                                      depth_to_sort)
            self.board.pop()
            if pred_eval > max_val:
                max_val = pred_eval
                best_move = move
            alpha = max(max_val, alpha)
            if alpha >= beta:
                break

        # Fall back to the last examined move if nothing improved on -1.0
        # (legals is non-empty here, since game-over was handled above).
        if not best_move:
            best_move = move
        if depth == original_depth:
            self.MOVE_VAL.append((self.board.san(best_move), max_val * color))

        # Classify the score against the original window for the TT store.
        if max_val <= alpha_orig:
            flag = UPPER
        elif max_val >= beta:
            flag = LOWER
        else:
            flag = EXACT

        self.store_in_table(max_val, flag, depth, best_move)
        return max_val

    def min_max_tree_alpa_beta_fast(self, alpha, beta, depth, original_depth,
                                    depth_to_sort):
        """Explicit minimax with alpha-beta pruning (no negamax trick, no
        node objects): White maximises, Black minimises.

        At the root the chosen move and its value are appended to
        self.MOVE_VAL. Returns the minimax value of the position.
        """
        self.X += 1
        if self.board.is_game_over(claim_draw=False):
            return self.get_results()
        if depth == 0:
            return self.conventional_eval()
        best_move = None
        # Only spend time on heuristic ordering near the root.
        if original_depth - depth < depth_to_sort:
            legals = self.sort_moves()
        else:
            legals = self.board.legal_moves

        if self.board.turn:
            # White: maximising player.
            minmax = -1.0
            for move in legals:
                self.board.push(move)

                pred_eval = self.min_max_tree_alpa_beta_fast(
                    alpha, beta, depth - 1, original_depth, depth_to_sort)
                self.board.pop()

                if pred_eval > minmax:
                    minmax, best_move = pred_eval, move

                alpha = max(pred_eval, alpha)
                if alpha >= beta:
                    break
            if depth == original_depth:
                # Fall back to the last tried move if nothing improved.
                if not best_move: best_move = move
                self.MOVE_VAL.append((self.board.san(best_move), minmax))
        else:
            # Black: minimising player.
            minmax = 1.0
            for move in legals:
                self.board.push(move)
                pred_eval = self.min_max_tree_alpa_beta_fast(
                    alpha, beta, depth - 1, original_depth, depth_to_sort)
                self.board.pop()
                if pred_eval < minmax:
                    minmax, best_move = pred_eval, move
                beta = min(pred_eval, beta)
                if alpha >= beta:
                    break
            if depth == original_depth:
                if not best_move: best_move = move
                self.MOVE_VAL.append((self.board.san(best_move), minmax))
        return minmax

    def min_max_tree_alpa_beta_nodes2(self, alpha, beta, depth, opst_depth,
                                      original_depth, node):
        """Minimax with alpha-beta pruning that also builds an explicit
        anytree search tree for visualisation (used by computer_move's
        't'/'t+' verbose modes).

        node: the tree node for the current position; children are created
            for every examined move and each node's .val is set to its
            minimax value.
        opst_depth: depth label stored on the created child nodes.
        At the root the chosen move is appended to self.MOVE_VAL and the
        root node is renamed to show it. Returns the minimax value.
        """
        self.X += 1
        if self.board.is_game_over():
            res = self.get_results()
            node.val = res
            return res
        if depth == 0:
            eval_ = self.conventional_eval()
            node.val = eval_
            return eval_
        best_move = None
        # Heuristic ordering only at the root level here.
        if depth == original_depth:
            leg = self.sort_moves()
        else:
            leg = self.board.legal_moves
        if self.board.turn:
            # White: maximising player.
            max_eval = -1.0
            for move in leg:

                new_node = Node(self.board.san(move),
                                parent=node,
                                depth=opst_depth,
                                val=None,
                                no=self.X)
                self.board.push(move)
                pred_eval = self.min_max_tree_alpa_beta_nodes2(
                    alpha, beta, depth - 1, opst_depth + 1, original_depth,
                    new_node)
                self.board.pop()

                if pred_eval > max_eval:
                    max_eval, best_move = pred_eval, move

                alpha = max(pred_eval, alpha)
                if alpha >= beta:
                    break
            if depth == original_depth:
                # Fall back to the last tried move if nothing improved.
                if not best_move: best_move = move
                self.MOVE_VAL.append((self.board.san(best_move), max_eval))
                node.name = 'root: best is ' + self.board.san(best_move)
            node.val = max_eval
            return max_eval
        else:
            # Black: minimising player.
            min_eval = 1.0
            for move in leg:
                new_node = Node(self.board.san(move),
                                parent=node,
                                depth=opst_depth,
                                val=None,
                                no=self.X)
                self.board.push(move)
                pred_eval = self.min_max_tree_alpa_beta_nodes2(
                    alpha, beta, depth - 1, opst_depth + 1, original_depth,
                    new_node)
                self.board.pop()
                if pred_eval < min_eval:
                    min_eval, best_move = pred_eval, move
                beta = min(pred_eval, beta)
                if alpha >= beta:
                    break

            if depth == original_depth:
                if not best_move: best_move = move
                self.MOVE_VAL.append((self.board.san(best_move), min_eval))
                node.name = 'root: best is ' + self.board.san(best_move)
            node.val = min_eval
            return min_eval