Code example #1
import copy


def minimax_helper(game, depth, cur_depth):
    # Terminal states: a draw scores 0; a finished game scores +/-1000
    # depending on whose turn it is when the win is detected.
    if game.draw():
        return 0
    if game.has_won():
        if game.turn == 1:
            return -1000
        return 1000
    # Depth limit reached: fall back to the heuristic evaluation.
    if cur_depth == depth:
        return score(game)
    if game.turn == 1:
        # Maximizing player (player 1): take the best value over all columns.
        maxi = -10000
        for col in range(7):
            if game.legal_move(col):
                # copy.copy is a shallow copy; if the game's board is a mutable
                # list, copy.deepcopy(game) would be needed here.
                game_next = copy.copy(game)
                game_next.play(col)
                maxi = max(maxi, minimax_helper(game_next, depth, cur_depth + 1))
        return maxi
    if game.turn == 2:
        # Minimizing player (player 2): take the worst value over all columns.
        mini = 10000
        for col in range(7):
            if game.legal_move(col):
                game_next = copy.copy(game)
                game_next.play(col)
                mini = min(mini, minimax_helper(game_next, depth, cur_depth + 1))
        return mini
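
For context, a top-level driver for a helper like this might look like the hypothetical sketch below; it assumes the same Game interface used above (game.turn, game.legal_move(col), game.play(col)) and is not part of the original listing.

import copy

def choose_move(game, depth=5):
    # Hypothetical wrapper: returns the column whose minimax value is best
    # for the player to move (player 1 maximizes, player 2 minimizes).
    best_col, best_val = None, None
    for col in range(7):
        if not game.legal_move(col):
            continue
        nxt = copy.deepcopy(game)   # deep copy so the live game is untouched
        nxt.play(col)
        val = minimax_helper(nxt, depth, 0)
        if best_val is None or (val > best_val if game.turn == 1 else val < best_val):
            best_col, best_val = col, val
    return best_col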
Code example #2
def alphabeta(board, color, alpha, beta, moving_plr, depth):
    global nNode
    if moving_plr is None or depth == 0:
        nNode += 1
        return score(board, color), None
    best = NEGATIVE_INF if moving_plr == color else POSITIVE_INF
    bestmv = None
    moves = legal_moves(board, moving_plr)
    for move in moves:
        board2 = copy_board(board)
        make_move(board2, moving_plr, move)
        mmx = alphabeta(board2, color, alpha, beta,
                        next_player(board2, moving_plr), depth - 1)[0]
        # print(f"depth: {depth} alpha:{alpha} beta:{beta}")
        # print_board(board2)
        if moving_plr == color:
            bestmv = bestmv if best > mmx else move
            best = max(best, mmx)
            alpha = max(best, alpha)
        else:
            best = min(best, mmx)
            beta = min(best, beta)
        if beta <= alpha:  # alpha-beta cutoff
            break
    return best, bestmv
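
A search like this is normally started from the root with an unbounded window; a hypothetical wrapper, reusing the constants and helpers referenced above, could look like:

def best_move(board, color, depth=6):
    # Hypothetical root call (not in the listing): full (-inf, +inf) window,
    # with `color` to move; the second return value is the chosen move.
    value, move = alphabeta(board, color, NEGATIVE_INF, POSITIVE_INF, color, depth)
    return move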
Code example #3
def alphabeta(node: Node, color, alpha: int, beta: int, depth: int):
    global nNode
    if node.moving_plr is None or depth == 0:
        nNode += 1
        return score(node), None
    best = NEGATIVE_INF if node.moving_plr == color else POSITIVE_INF
    bestmv = None
    for move in node.legal_moves:
        board2 = copy_board(node.board)
        make_move(board2, node.moving_plr, move)
        node2 = Node(board2, node.moving_plr)
        # Recurse with the current alpha/beta window.
        mmx = alphabeta(node2, color, alpha, beta, depth - 1)[0]
        if node.moving_plr == color:
            bestmv = bestmv if best > mmx else move
            best = max(best, mmx)
            alpha = max(best, alpha)
        else:
            best = min(best, mmx)
            beta = min(best, beta)
        if beta <= alpha:  # alpha-beta cutoff
            break
    return best, bestmv
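
The Node class itself is not shown in this listing. A minimal sketch consistent with how it is used above (node.board, node.moving_plr, node.legal_moves, constructed from a board plus the player who just moved) might be:

class Node:
    # Hypothetical sketch, not the project's class: bundles a position with
    # the player to move and that player's legal moves.
    def __init__(self, board, last_mover):
        self.board = board
        self.moving_plr = next_player(board, last_mover)
        self.legal_moves = (legal_moves(board, self.moving_plr)
                            if self.moving_plr is not None else [])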
Code example #4
def negamax_ab(board, color, alpha, beta, moving_plr, depth):
    init_alpha = alpha
    ttval = negatrans_lookup(board, moving_plr)
    if ttval is not None and ttval[0] >= depth:
        if ttval[2] == FLAG_EXACT:
            return ttval[1]
        elif ttval[2] == FLAG_LOWER:
            alpha = max(alpha, ttval[1])
        elif ttval[2] == FLAG_UPPER:
            beta = min(beta, ttval[1])
    if depth == 0 or moving_plr is None:
        return score(board, color) * (1 if color == BLACK else -1)
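
The listing is cut off after the leaf evaluation. Purely for orientation, a textbook-style continuation of a negamax search with a transposition-table store might look like the sketch below; it is not the project's code, negatrans_add and the flag classification are assumptions modeled on the lookup above, and the sign flip only works if the leaf score is taken from the side to move's perspective.

    best = NEGATIVE_INF
    for move in legal_moves(board, moving_plr):
        board2 = copy_board(board)
        make_move(board2, moving_plr, move)
        nxt = next_player(board2, moving_plr)
        if nxt == moving_plr:
            # Opponent must pass: the same side moves again, so no sign flip.
            val = negamax_ab(board2, color, alpha, beta, nxt, depth - 1)
        else:
            val = -negamax_ab(board2, color, -beta, -alpha, nxt, depth - 1)
        best = max(best, val)
        alpha = max(alpha, best)
        if alpha >= beta:  # fail-high cutoff
            break
    # Store the result with a bound flag mirroring the lookup logic above.
    if best <= init_alpha:
        flag = FLAG_UPPER
    elif best >= beta:
        flag = FLAG_LOWER
    else:
        flag = FLAG_EXACT
    negatrans_add(board, moving_plr, depth, best, flag)  # assumed store helper
    return best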
Code example #5
File: ai.py Project: DylanJones/othello
def lookahead_helper(board, color, depth, nextmv):
    # Naive lookahead: maximizes at every ply instead of alternating between
    # maximizing and minimizing players.
    if depth == 0:
        return score(board, color)
    best = NEGATIVE_INF
    for move in legal_moves(board, nextmv):
        board2 = [b[:] for b in board]
        make_move(board2, nextmv, move)   # apply the move to the copy, not the original
        num = lookahead_helper(board2, color, depth - 1,
                               next_player(board2, nextmv))
        if num > best:
            best = num
    return best
Code example #6
File: ai.py Project: DylanJones/othello
def minimax_helper2(board, color, last_color, depth):
    moving_plr = next_player(board, last_color)
    if moving_plr is None or depth == 0:
        return score(board, color)
    if moving_plr == cinv(color):
        # Opponent to move: re-enter with the perspective flipped so the side
        # to move is always treated as the maximizer.
        return minimax_helper2(board, cinv(color), last_color, depth)
    best = NEGATIVE_INF
    for move in legal_moves(board, moving_plr):
        board2 = copy_board(board)
        make_move(board2, moving_plr, move)
        worstcase = minimax_helper2(board2, color, moving_plr, depth - 1)
        if worstcase > best:
            best = worstcase
    return best
Code example #7
File: ai.py Project: DylanJones/othello
def minimax_helper(board, color, last_color, depth):
    moving_plr = next_player(board, last_color)
    if moving_plr is None or depth == 0:
        return score(board, color)
    best = NEGATIVE_INF if moving_plr == color else POSITIVE_INF
    for move in legal_moves(board, moving_plr):
        board2 = copy_board(board)
        make_move(board2, moving_plr, move)
        worstcase = minimax_helper(board2, color, moving_plr, depth - 1)
        if moving_plr == color:  # pick the move with the best worst case
            if worstcase > best:
                best = worstcase
        else:  # other player's turn - pick the worst case
            if worstcase < best:
                best = worstcase
    return best
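
This helper returns only a value, so a root-level wrapper has to pick the move itself; a hypothetical sketch reusing the helpers above:

def minimax_move(board, color, depth):
    # Hypothetical wrapper (not in the listing): plays each legal move for
    # `color` and keeps the one with the highest minimax value.
    best_val, best_move = NEGATIVE_INF, None
    for move in legal_moves(board, color):
        board2 = copy_board(board)
        make_move(board2, color, move)
        val = minimax_helper(board2, color, color, depth - 1)
        if val > best_val:
            best_val, best_move = val, move
    return best_move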
Code example #8
def alphabeta_memory(board, color, alpha, beta, moving_plr, depth):
    # print_board(board)
    global nNode
    if moving_plr is None or depth == 0:
        nNode += 1
        return score(board, color), None
    moves = legal_moves(board, moving_plr)
    nodes = []
    for move in moves:
        board2 = copy_board(board)
        make_move(board2, moving_plr, move)
        cval = transposition_lookup(board2, moving_plr)
        if cval is not None:
            nodes.append((cval[0], cval[1], move, board2))
        else:
            nodes.append((NEGATIVE_INF, NEGATIVE_INF, move, board2))
    # Search children with the best cached values first to improve cutoffs.
    nodes.sort(key=lambda x: x[1], reverse=True)
    best = NEGATIVE_INF if moving_plr == color else POSITIVE_INF
    bestmv = None
    for node in nodes:
        if node[0] < depth:
            mmx = alphabeta_memory(node[3], color, alpha, beta,
                                   next_player(node[3], moving_plr),
                                   depth - 1)[0]
            transposition_add(node[3], moving_plr, depth, mmx)
        else:
            # print('Hit')
            mmx = node[1]
        if moving_plr == color:
            bestmv = bestmv if best > mmx else node[2]
            best = max(best, mmx)
            alpha = max(best, alpha)
        else:
            best = min(best, mmx)
            beta = min(best, beta)
        if beta <= alpha:  # alpha-beta cutoff
            # print("alpha beta cutoff")
            break
    print(
        f'Depth: {depth}, heuristic: {score(board, color)}, minimax value: {best}'
    )
    return best, bestmv
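
The transposition_lookup / transposition_add helpers are not shown in this listing. A minimal dictionary-based sketch that matches how they are used above (lookup returns a (depth, value) pair or None) could be:

transposition_table = {}

def _board_key(board, player):
    # Assumes the board is a nested list; convert it to something hashable.
    return (tuple(tuple(row) for row in board), player)

def transposition_add(board, player, depth, value):
    transposition_table[_board_key(board, player)] = (depth, value)

def transposition_lookup(board, player):
    return transposition_table.get(_board_key(board, player))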
Code example #9
File: ai.py Project: DylanJones/othello
def alphabeta_helper(board, color, alpha, beta, last_color, depth):
    moving_plr = next_player(board, last_color)
    if moving_plr is None or depth == 0:
        return score(board, color)
    best = NEGATIVE_INF if moving_plr == color else POSITIVE_INF
    moves = legal_moves(board, moving_plr)
    for move in moves:
        board2 = copy_board(board)
        make_move(board2, moving_plr, move)
        mmx = alphabeta_helper(board2, color, alpha, beta, moving_plr,
                               depth - 1)
        if moving_plr == color:
            best = max(best, mmx)
            alpha = max(best, alpha)
        else:
            best = min(best, mmx)
            beta = min(best, beta)
        if beta <= alpha:  # alpha-beta cutoff
            break
    return best
Code example #10
File: ai.py Project: DylanJones/othello
def alphabeta_helper_memory(board, color, alpha, beta, last_color, depth):
    moving_plr = next_player(board, last_color)
    if moving_plr is None or depth == 0:
        return score(board, color)
    best = NEGATIVE_INF if moving_plr == color else POSITIVE_INF
    moves = legal_moves(board, moving_plr)
    nmv = []
    for move in moves:
        # Look up the position reached by each move, so cached values can be
        # used both for move ordering and to avoid re-searching.
        board2 = copy_board(board)
        make_move(board2, moving_plr, move)
        cval = transposition_lookup(board2, moving_plr)
        if cval is not None:
            nmv.append((cval[0], cval[1], move, board2))
        else:
            nmv.append((-1, -1, move, board2))
    # Search the moves with the best cached values first.
    nmv.sort(key=lambda x: x[1], reverse=True)
    for node in nmv:
        if node[0] < depth:
            mmx = alphabeta_helper_memory(node[3], color, alpha, beta,
                                          moving_plr, depth - 1)
            # Cache the child position with the depth it was searched to.
            transposition_add(node[3], moving_plr, depth - 1, mmx)
        else:
            print("hit", node[0], node[1])
            mmx = node[1]
        if moving_plr == color:
            best = max(best, mmx)
            alpha = max(best, alpha)
        else:
            best = min(best, mmx)
            beta = min(best, beta)
        if beta <= alpha:  # alpha-beta cutoff
            break
    return best