Example #1
File: offer.py  Project: ecolitan/fatics
    def __init__(self, game, user):
        Offer.__init__(self, 'draw offer')

        self.a = user
        self.b = game.get_opp(user)
        self.game = game
        offers = [o for o in game.pending_offers if o.name == self.name]
        if len(offers) > 1:
            raise RuntimeError('more than one draw offer in game %d' \
                % game.number)
        if len(offers) > 0:
            o = offers[0]
            if o.a == self.a:
                user.write_('You are already offering a draw.\n')
            else:
                o.accept()
        else:
            # check for draw by 50-move rule, repetition
            # The old fics checked for 50-move draw before repetition,
            # and we do the same so the adjudications are identical.
            if game.variant.pos.is_draw_fifty():
                game.result('Game drawn by the 50 move rule', '1/2-1/2')
                return
            elif game.variant.pos.is_draw_repetition(game.get_user_side(
                    self.a)):
                game.result('Game drawn by repetition', '1/2-1/2')
                return

            game.pending_offers.append(self)
            user.write_('Offering a draw to %s.\n', (self.b.name,))
            self.b.write_('%s offers a draw.\n', (user.name,))
            for p in self.game.observers:
                p.write_('%s offers a draw.\n', (user.name,))

            self._register()
Example #2
File: offer.py  Project: pi4chess/fatics
    def __init__(self, game, user):
        Offer.__init__(self, 'draw offer')

        self.a = user
        self.b = game.get_opp(user)
        self.game = game
        offers = [o for o in game.pending_offers if o.name == self.name]
        if len(offers) > 1:
            raise RuntimeError('more than one draw offer in game %d' \
                % game.number)
        if len(offers) > 0:
            o = offers[0]
            if o.a == self.a:
                user.write_('You are already offering a draw.\n')
            else:
                o.accept()
        else:
            # check for draw by 50-move rule, repetition
            # The old fics checked for 50-move draw before repetition,
            # and we do the same so the adjudications are identical.
            if game.variant.pos.is_draw_fifty():
                game.result('Game drawn by the 50 move rule', '1/2-1/2')
                return
            elif game.variant.pos.is_draw_repetition(game.get_user_side(
                    self.a)):
                game.result('Game drawn by repetition', '1/2-1/2')
                return

            game.pending_offers.append(self)
            user.write_('Offering a draw to %s.\n', (self.b.name, ))
            self.b.write_('%s offers a draw.\n', (user.name, ))
            for p in self.game.observers:
                p.write_('%s offers a draw.\n', (user.name, ))

            self._register()
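
The draw-offer snippets above call o.accept() without showing it. A purely hypothetical sketch (not taken from the fatics source) of how accepting a pending offer could end the game through the same game.result() call used for the 50-move and repetition draws; the exact result message is an assumption:

    def accept(self):
        # Hypothetical: clear the pending offer and record the agreed draw.
        self.game.pending_offers.remove(self)
        self.game.result('Game drawn by agreement', '1/2-1/2')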
Example #3
def create_policytable() -> Dict[GameState, Tuple[int, MoveList]]:
    """Create policytable by iterating through game state space.

    Returns:
        Dict[GameState, Tuple[int, MoveList]]: Map of state to utility and list of moves
    """
    space = game.create_statespace()
    pol_tab = {}

    for state in space:
        move_list = []

        # Convert 1x10 vector into game state tuple
        state = (state[0], np.asarray(state[1:]).reshape(3, 3))

        # Play the game out with minimax, recording each chosen move exactly once.
        s = state
        while not game.is_terminal(s):
            next_move = minimax_search(s)
            move_list.append(next_move)
            s = game.result(s, next_move)

        u = game.utility(s)

        pol_tab[tuple(game.to_vector(state))] = (u, move_list)

    return pol_tab
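
A hypothetical usage sketch for the table built above (the names policy_table and policy_move are illustrative, not from the source); lookups use the same tuple(game.to_vector(state)) key that create_policytable() writes:

policy_table = create_policytable()

def policy_move(state):
    # Return the first recorded move for this state; terminal states have no moves.
    utility, moves = policy_table[tuple(game.to_vector(state))]
    return moves[0] if moves else None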
Example #4
 def min_value(state, alpha, beta, depth):
     #print(depth)
     if cutoff_test(state, depth):
         return eval_fn(state, player)
     v = infinity
     for a in game.actions(state):  ##
         v = min(
             v,
             max_value(game.result(state, a, player, extra_fn), alpha, beta,
                       depth + 1))
         if v <= alpha:
             return v
         beta = min(beta, v)
     return v
Example #5
 def max_value(state, alpha, beta, depth):
     #print(depth)
     if cutoff_test(state, depth):
         return eval_fn(state, player)
     v = -infinity
     for a in game.actions(state):  ##
         v = max(
             v,
             min_value(game.result(state, a, player, extra_fn), alpha, beta,
                       depth + 1))
         if v >= beta:  # alpha-beta cutoff (prune)
             return v
         alpha = max(alpha, v)
     return v
Example #6
def min_value(state: np.ndarray) -> Tuple[int, GameMove]:
    """Look for the move generating minimum value.

    Args:
        state (np.ndarray): Current state

    Returns:
        Tuple[int, GameMove]: Tuple of value and move
    """
    move = None
    # Mirror max_value: a terminal state has no moves, so return its utility.
    if game.is_terminal(state):
        return game.utility(state), move

    v = 20
    for act in game.actions(state):
        v2, act2 = max_value(game.result(state, act))

        if v2 < v:
            v = v2
            move = act

    return v, move
Example #7
def max_value(state: np.ndarray) -> Tuple[int, GameMove]:
    """Look for the move generating the maximum value.

    Args:
        state (np.ndarray): Current state

    Returns:
        Tuple[int, GameMove]: Tuple of value and move
    """
    move = None
    if game.is_terminal(state):
        return game.utility(state), move

    v = -20
    for act in game.actions(state):
        v2, act2 = min_value(game.result(state, act))

        if v2 > v:
            v = v2
            move = act

    return v, move
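
Examples #3, #6, and #7 all rely on a minimax_search() driver that is not shown. A minimal sketch, assuming the maximizing player is to move in state (dispatch to min_value instead if your game object reports that it is the minimizing side's turn):

def minimax_search(state):
    # Choose the action with the best backed-up minimax value for the maximizer.
    value, move = max_value(state)
    return move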
Example #8
def main():
    """
    Methode principale du programme,
    porte l'initialisation, la boucle de
    jeu et le calcul du résultat.
    """

    # Ask for the player's name
    print("Bonjour ! Prêt pour une nouvelle partie de pendu ?")
    name = input("Entrez votre nom :")

    score.init(name)

    format_print("Votre score actuel est de : {}", score.get())

    game.init(random_word.get())

    print("Mot choisi !\nC'est parti !")
    # While the word has not been found, show the partially revealed word
    first = True
    while game.can_play():

        if first:
            first = False
        else:
            print("Rejouez !")

        format_print("Il vous reste {} coups", game.get_countdown())
        format_print("Mot à trouver : {}", game.get_found())

        # Look for the player's guess in the word to find
        proposal = input("Proposez une lettre :")

        try:
            # Letter found: show it and the player gets another turn
            if game.find(proposal):
                format_print("Oui ! Il y a bien un {} dans le mot.", proposal)

            # Otherwise a turn is lost
            else:
                format_print("Et non, pas de {} dans le mot !", proposal)

        # If the guess is not valid
        except ValueError as error:
            format_print("Erreur de proposition : {}", error)

    # If the word was found within 8 guesses, record the score
    if game.result():
        format_print(
            "Gagné !\nLe mot était bien {word}\nVous gagnez {score} points de score",
            word=game.get_to_find(),
            score=game.get_countdown())

        score.add(game.get_countdown())
        score.save()

    # Otherwise, the game is lost!
    else:
        format_print("Perdu !\nLe mot était : {}", game.get_to_find())
Example #9
def alphabeta_cutoff_search(state,
                            game,
                            d=4,
                            cutoff_test=None,
                            eval_fn=None,
                            extra_fn=None):
    """Search game to determine best action; use alpha-beta pruning.
    This version cuts off search and uses an evaluation function."""

    player = game.to_move(state)

    # Functions used by alphabeta
    def max_value(state, alpha, beta, depth):
        #print(depth)
        if cutoff_test(state, depth):
            return eval_fn(state, player)
        v = -infinity
        for a in game.actions(state):  ##
            v = max(
                v,
                min_value(game.result(state, a, player, extra_fn), alpha, beta,
                          depth + 1))
            if v >= beta:
                return v
            alpha = max(alpha, v)
        return v

    def min_value(state, alpha, beta, depth):
        #print(depth)
        if cutoff_test(state, depth):
            return eval_fn(state, player)
        v = infinity
        for a in game.actions(state):  ##
            v = min(
                v,
                max_value(game.result(state, a, player, extra_fn), alpha, beta,
                          depth + 1))
            if v <= alpha:
                return v
            beta = min(beta, v)
        return v

    # Body of alphabeta_cutoff_search starts here:
    # The default test cuts off at depth d or at a terminal state
    cutoff_test = (
        cutoff_test
        or (lambda state, depth: depth >= d or game.terminal_test(state)))
    eval_fn = eval_fn or (lambda state: game.utility(state, player))
    extra_fn = extra_fn or (lambda st1: st1.extra)
    #print("Well, I am inside alphabeta and i am going to apply...",extra_fn)
    best_score = -infinity
    beta = infinity
    best_action = None
    movimentos = game.actions(state)  ## jb
    if len(movimentos) == 1:
        return movimentos[0]
    else:
        random.shuffle(movimentos)  ## shuffle to add variety between games
        for a in movimentos:  ##
            v = min_value(game.result(state, a, player, extra_fn), best_score,
                          beta, 1)
            if v > best_score:
                best_score = v
                best_action = a
        return best_action
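
A hypothetical call site, assuming state and game implement the interface used above (to_move, actions, result(state, a, player, extra_fn), terminal_test, utility):

player = game.to_move(state)
best_action = alphabeta_cutoff_search(state, game, d=3)
state = game.result(state, best_action, player, lambda st: st.extra)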