Example #1
def min_value(state, alpha, beta, depth):
    #print(depth)
    if cutoff_test(state, depth):
        return eval_fn(state, player)
    v = infinity
    for a in game.actions(state):
        v = min(
            v,
            max_value(game.result(state, a, player, extra_fn), alpha, beta,
                      depth + 1))
        if v <= alpha:  # alpha cut: MAX above already has a better option
            return v
        beta = min(beta, v)
    return v
Example #2
def max_value(state, alpha, beta, depth):
    #print(depth)
    if cutoff_test(state, depth):
        return eval_fn(state, player)
    v = -infinity
    for a in game.actions(state):
        v = max(
            v,
            min_value(game.result(state, a, player, extra_fn), alpha, beta,
                      depth + 1))
        if v >= beta:  # beta cut: MIN above will never allow this branch
            return v
        alpha = max(alpha, v)
    return v
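Examples 1 and 2 are the MIN and MAX halves of a depth-limited alpha-beta search; they are the nested helpers from Example 5 and rely on game, player, cutoff_test, eval_fn and extra_fn from the enclosing scope. The sketch below is not taken from those snippets: it is a minimal, self-contained illustration of the same two pruning tests (v <= alpha and v >= beta) on a game tree encoded as nested lists, with invented function names.

from math import inf

def ab_max(node, alpha, beta):
    # Leaves are plain integers holding their utility.
    if isinstance(node, int):
        return node
    v = -inf
    for child in node:
        v = max(v, ab_min(child, alpha, beta))
        if v >= beta:        # beta cut
            return v
        alpha = max(alpha, v)
    return v

def ab_min(node, alpha, beta):
    if isinstance(node, int):
        return node
    v = inf
    for child in node:
        v = min(v, ab_max(child, alpha, beta))
        if v <= alpha:       # alpha cut
            return v
        beta = min(beta, v)
    return v

# Classic three-branch tree: the root value is 3, and the second and third
# leaves of the middle branch are pruned by the alpha cut.
tree = [[3, 12, 8], [2, 4, 6], [14, 5, 2]]
print(ab_max(tree, -inf, inf))  # -> 3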
Example #3
def min_value(state: np.ndarray) -> Tuple[int, GameMove]:
    """Look for the move generating the minimum value.

    Args:
        state (np.ndarray): Current state

    Returns:
        Tuple[int, GameMove]: Tuple of value and move
    """
    move = None
    if game.is_terminal(state):
        return game.utility(state), move

    v = 20  # stands in for +infinity (utilities are assumed to stay below 20)
    for act in game.actions(state):
        v2, act2 = max_value(game.result(state, act))

        if v2 < v:
            v = v2
            move = act

    return v, move
Example #4
def max_value(state: np.ndarray) -> Tuple[int, GameMove]:
    """Look for the move generating the maximum value.

    Args:
        state (np.ndarray): Current state

    Returns:
        Tuple[int, GameMove]: Tuple of value and move
    """
    move = None
    if game.is_terminal(state):
        return game.utility(state), move

    v = -20  # stands in for -infinity (utilities are assumed to stay above -20)
    for act in game.actions(state):
        v2, act2 = min_value(game.result(state, act))

        if v2 > v:
            v = v2
            move = act

    return v, move
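Examples 3 and 4 are the plain minimax pair (no pruning, no depth cutoff) and depend on a module-level game object and a GameMove alias that the snippets do not show. The stand-in below is hypothetical, written only so the pair can be exercised; states are plain strings here instead of the ndarrays the type hints suggest.

from typing import Tuple

GameMove = str  # assumed alias; the real definition is not shown above

class TreeGame:
    # "A" is the MAX root, "B"/"C"/"D" are MIN nodes, the rest are terminal leaves.
    successors = {
        "A": {"a1": "B", "a2": "C", "a3": "D"},
        "B": {"b1": "B1", "b2": "B2"},
        "C": {"c1": "C1", "c2": "C2"},
        "D": {"d1": "D1", "d2": "D2"},
    }
    utilities = {"B1": 5, "B2": 9, "C1": -3, "C2": 7, "D1": 2, "D2": 8}

    def actions(self, state):
        return list(self.successors.get(state, {}))

    def result(self, state, move):
        return self.successors[state][move]

    def is_terminal(self, state):
        return state in self.utilities

    def utility(self, state):
        return self.utilities[state]

game = TreeGame()
# With max_value and min_value from Examples 3-4 in scope:
#   max_value("A") -> (5, "a1"), since MIN replies with 5, -3 and 2 in B, C and D.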
Example #5
import random

infinity = float('inf')  # module-level names this snippet depends on


def alphabeta_cutoff_search(state,
                            game,
                            d=4,
                            cutoff_test=None,
                            eval_fn=None,
                            extra_fn=None):
    """Search game to determine best action; use alpha-beta pruning.
    This version cuts off search and uses an evaluation function."""

    player = game.to_move(state)

    # Functions used by alphabeta
    def max_value(state, alpha, beta, depth):
        #print(depth)
        if cutoff_test(state, depth):
            return eval_fn(state, player)
        v = -infinity
        for a in game.actions(state):
            v = max(
                v,
                min_value(game.result(state, a, player, extra_fn), alpha, beta,
                          depth + 1))
            if v >= beta:
                return v
            alpha = max(alpha, v)
        return v

    def min_value(state, alpha, beta, depth):
        #print(depth)
        if cutoff_test(state, depth):
            return eval_fn(state, player)
        v = infinity
        for a in game.actions(state):
            v = min(
                v,
                max_value(game.result(state, a, player, extra_fn), alpha, beta,
                          depth + 1))
            if v <= alpha:
                return v
            beta = min(beta, v)
        return v

    # Body of alphabeta_cutoff_search starts here:
    # The default test cuts off at depth d or at a terminal state
    cutoff_test = (
        cutoff_test
        or (lambda state, depth: depth >= d or game.terminal_test(state)))
    eval_fn = eval_fn or (lambda state: game.utility(state, player))
    extra_fn = extra_fn or (lambda st1: st1.extra)
    #print("Well, I am inside alphabeta and i am going to apply...",extra_fn)
    best_score = -infinity
    beta = infinity
    best_action = None
    movimentos = game.actions(state)
    if len(movimentos) == 1:
        return movimentos[0]
    else:
        random.shuffle(movimentos)  # shuffle the moves to add variability between games
        for a in movimentos:
            v = min_value(game.result(state, a, player, extra_fn), best_score,
                          beta, 1)
            if v > best_score:
                best_score = v
                best_action = a
        return best_action
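A hedged usage sketch for Example 5, assuming alphabeta_cutoff_search (and the infinity/random names it uses) is in scope. The original game class is not shown in these examples, so CountdownGame below is a hypothetical stand-in that only mirrors the interface the function expects: to_move, actions, terminal_test, utility(state, player), a four-argument result(state, move, player, extra_fn), and states carrying an extra attribute for the default extra_fn.

class CountdownState:
    """Players alternately take 1 or 2 items from a pile; whoever takes the last item wins."""

    def __init__(self, n, player):
        self.n = n
        self.player = player  # side to move: 'MAX' or 'MIN'
        self.extra = None     # present because the default extra_fn expects this attribute

class CountdownGame:
    def to_move(self, state):
        return state.player

    def actions(self, state):
        return [a for a in (1, 2) if a <= state.n]

    def result(self, state, move, player, extra_fn):
        # player and extra_fn are accepted only to match the four-argument
        # call made inside alphabeta_cutoff_search; this toy game ignores them.
        nxt = 'MIN' if state.player == 'MAX' else 'MAX'
        return CountdownState(state.n - move, nxt)

    def terminal_test(self, state):
        return state.n == 0

    def utility(self, state, player):
        # At n == 0 the side to move has just lost: the opponent took the last item.
        winner = 'MIN' if state.player == 'MAX' else 'MAX'
        return 1 if winner == player else -1

toy_game = CountdownGame()
root = CountdownState(7, 'MAX')
print(alphabeta_cutoff_search(root, toy_game, d=10))  # -> 1, the only winning move for MAX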