Example #1
    def kb_consistent_trumpmarriage(self, state, move):
        # type: (State, tuple) -> bool

        # each time we check for consistency we initialise a new knowledge-base
        kb = KB()

        # Add general information about the game

        suit = State.get_trump_suit(state)

        # The knowledge base identifies the trump marriage by the fixed deck
        # indices of the trump king and queen (see _deck.py for the mapping).
        marriage_cards = {"C": (2, 3), "D": (7, 8), "H": (12, 13), "S": (17, 18)}

        card1, card2 = marriage_cards[suit]
        variable_string = "m" + str(card1) + str(card2)
        strategy_variable = Boolean(variable_string)
        kb.add_clause(strategy_variable)

        load.general_information(kb)

        # Add the necessary knowledge about the strategy
        load.strategy_knowledge(kb)

        # This line stores the index of the card in the deck.
        # If this doesn't make sense, refer to _deck.py for the card index mapping
        index = move[0]

        variable_string = "pm" + str(index)
        strategy_variable = Boolean(variable_string)

        # Add the relevant clause to the loaded knowledge base
        kb.add_clause(~strategy_variable)

        # If the knowledge base is not satisfiable, the strategy variable is
        # entailed (proof by refutation)

        return kb.satisfiable()
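
The consistency check above only evaluates a single move; a knowledge-based bot still needs a move-selection method that calls it. The following is a minimal sketch under the assumptions that `state.moves()` lists the legal moves, `random` is imported at module level, and the method above belongs to the same class; it is an illustration, not the assignment's reference implementation.

    def get_move(self, state):
        # type: (State) -> tuple
        moves = state.moves()
        random.shuffle(moves)

        for move in moves:
            # If adding the negated strategy variable made the KB unsatisfiable,
            # playing this move is entailed by the strategy, so play it.
            if not self.kb_consistent_trumpmarriage(state, move):
                return move

        # No move is entailed by the strategy; fall back to a random legal move.
        return random.choice(moves)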
Example #2
from itertools import chain  # needed for chain(*perspective) below

# `State` is assumed to be imported from the game framework at module level.


def features(state):
    # type: (State) -> tuple[float, ...]
    """
    Extract features from this state. Remember that every feature vector returned should have the same length.

    :param state: A state to be converted to a feature vector
    :return: A tuple of floats: a feature vector representing this state.
    """

    feature_set = []

    # Add player 1's points to feature set
    p1_points = State.get_points(state, 1)

    # Add player 2's points to feature set
    p2_points = State.get_points(state, 2)

    # Add player 1's pending points to feature set
    p1_pending_points = State.get_pending_points(state, 1)

    # Add player 2's pending points to feature set
    p2_pending_points = State.get_pending_points(state, 2)

    # Get trump suit
    trump_suit = State.get_trump_suit(state)

    # Add phase to feature set
    phase = State.get_phase(state)

    # Add stock size to feature set
    stock_size = State.get_stock_size(state)

    # Add leader to feature set
    leader = State.leader(state)

    # Add whose turn it is to feature set
    whose_turn = State.whose_turn(state)

    # Add opponent's played card to feature set
    opponents_played_card = State.get_opponents_played_card(state)

    ################## You do not need to do anything below this line ########################

    perspective = state.get_perspective()

    # Perform one-hot encoding on the perspective.
    # Learn more about one-hot here: https://machinelearningmastery.com/how-to-one-hot-encode-sequence-data-in-python/
    perspective = [
        card if card != 'U' else [1, 0, 0, 0, 0, 0] for card in perspective
    ]
    perspective = [
        card if card != 'S' else [0, 1, 0, 0, 0, 0] for card in perspective
    ]
    perspective = [
        card if card != 'P1H' else [0, 0, 1, 0, 0, 0] for card in perspective
    ]
    perspective = [
        card if card != 'P2H' else [0, 0, 0, 1, 0, 0] for card in perspective
    ]
    perspective = [
        card if card != 'P1W' else [0, 0, 0, 0, 1, 0] for card in perspective
    ]
    perspective = [
        card if card != 'P2W' else [0, 0, 0, 0, 0, 1] for card in perspective
    ]

    # Append one-hot encoded perspective to feature_set
    feature_set += list(chain(*perspective))

    # Append normalized points to feature_set
    total_points = p1_points + p2_points
    feature_set.append(p1_points / total_points if total_points > 0 else 0.)
    feature_set.append(p2_points / total_points if total_points > 0 else 0.)

    # Append normalized pending points to feature_set
    total_pending_points = p1_pending_points + p2_pending_points
    feature_set.append(
        p1_pending_points /
        total_pending_points if total_pending_points > 0 else 0.)
    feature_set.append(
        p2_pending_points /
        total_pending_points if total_pending_points > 0 else 0.)

    # Convert trump suit to id and add to feature set
    # You don't need to add anything to this part
    suits = ["C", "D", "H", "S"]
    trump_suit_onehot = [0, 0, 0, 0]
    trump_suit_onehot[suits.index(trump_suit)] = 1
    feature_set += trump_suit_onehot

    # Append one-hot encoded phase to feature set
    feature_set += [1, 0] if phase == 1 else [0, 1]

    # Append normalized stock size to feature set
    feature_set.append(stock_size / 10)

    # Append one-hot encoded leader to feature set
    feature_set += [1, 0] if leader == 1 else [0, 1]

    # Append one-hot encoded whose_turn to feature set
    feature_set += [1, 0] if whose_turn == 1 else [0, 1]

    # Append one-hot encoded opponent's card to feature set
    # (20 card indices plus one extra slot for "no card played")
    opponents_played_card_onehot = [0] * 21
    opponents_played_card_onehot[
        opponents_played_card if opponents_played_card is not None else 20] = 1
    feature_set += opponents_played_card_onehot

    # Replace any missing values so the vector stays purely numeric
    for index, element in enumerate(feature_set):
        if element is None:
            feature_set[index] = 0
    # Return feature set
    return feature_set
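
To see what the one-hot step does in isolation, here is a small self-contained sketch (illustrative only, not framework code): every symbolic perspective label is replaced by a fixed-length 0/1 vector, and the vectors are flattened into a single numeric list, which is exactly what `chain(*perspective)` produces above.

from itertools import chain

labels = ['U', 'S', 'P1H', 'P2H', 'P1W', 'P2W']
# One-hot vector per label: a 1 at the label's position, 0 elsewhere.
encoding = {label: [1 if i == j else 0 for j in range(len(labels))]
            for i, label in enumerate(labels)}

perspective = ['U', 'P1H', 'S']                     # toy perspective, three cards
one_hot = [encoding[card] for card in perspective]  # three 6-element vectors
print(list(chain(*one_hot)))
# [1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0]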
Example #3
from itertools import chain  # needed for chain(*perspective) below

# `State` and `Deck` are assumed to be imported from the game framework
# at module level.


def features(state):
    # type: (State) -> tuple[float, ...]
    """
    Extract features from this state. Remember that every feature vector returned should have the same length.

    :param state: A state to be converted to a feature vector
    :return: A tuple of floats: a feature vector representing this state.
    """

    feature_set = []

    # Add player 1's points to feature set
    p1_points = state.get_points(1)
    # Add player 2's points to feature set
    p2_points = state.get_points(2)
    # Add player 1's pending points to feature set
    p1_pending_points = state.get_pending_points(1)
    # Add player 2's pending points to feature set
    p2_pending_points = state.get_pending_points(2)
    # Get trump suit
    trump_suit = state.get_trump_suit()
    # Add phase to feature set
    phase = state.get_phase()
    # Add stock size to feature set
    stock_size = state.get_stock_size()
    # Add leader to feature set
    leader = state.leader()
    # Add whose turn it is to feature set
    whose_turn = state.whose_turn()
    # Add opponent's played card to feature set
    opponents_played_card = state.get_opponents_played_card()

    ############################# CUSTOM #################################

    # Get the current player's hand (used below to build count features)
    hand = state.hand()

    # Number of trump cards in hand
    number_of_trump_cards_in_hand = 0
    for h in hand:
        if Deck.get_suit(h) == trump_suit:
            number_of_trump_cards_in_hand += 1

    # Number of cards in hand that follow the suit of the opponent's played card
    number_of_suit_following_cards_in_hand = 0
    if opponents_played_card is not None:  # the opponent has played first
        suit_of_played_card = Deck.get_suit(opponents_played_card)
        for h in hand:
            if Deck.get_suit(h) == suit_of_played_card:
                number_of_suit_following_cards_in_hand += 1

    ################## You do not need to do anything below this line ########################

    perspective = state.get_perspective()

    # Perform one-hot encoding on the perspective.
    # Learn more about one-hot here: https://machinelearningmastery.com/how-to-one-hot-encode-sequence-data-in-python/
    perspective = [
        card if card != 'U' else [1, 0, 0, 0, 0, 0] for card in perspective
    ]
    perspective = [
        card if card != 'S' else [0, 1, 0, 0, 0, 0] for card in perspective
    ]
    perspective = [
        card if card != 'P1H' else [0, 0, 1, 0, 0, 0] for card in perspective
    ]
    perspective = [
        card if card != 'P2H' else [0, 0, 0, 1, 0, 0] for card in perspective
    ]
    perspective = [
        card if card != 'P1W' else [0, 0, 0, 0, 1, 0] for card in perspective
    ]
    perspective = [
        card if card != 'P2W' else [0, 0, 0, 0, 0, 1] for card in perspective
    ]
    # Append one-hot encoded perspective to feature_set
    feature_set += list(chain(*perspective))
    # Append normalized points to feature_set
    total_points = p1_points + p2_points
    feature_set.append(p1_points / total_points if total_points > 0 else 0.)
    feature_set.append(p2_points / total_points if total_points > 0 else 0.)
    # Append normalized pending points to feature_set
    total_pending_points = p1_pending_points + p2_pending_points
    feature_set.append(
        p1_pending_points /
        total_pending_points if total_pending_points > 0 else 0.)
    feature_set.append(
        p2_pending_points /
        total_pending_points if total_pending_points > 0 else 0.)
    # Convert trump suit to id and add to feature set
    # You don't need to add anything to this part
    suits = ["C", "D", "H", "S"]
    trump_suit_onehot = [0, 0, 0, 0]
    trump_suit_onehot[suits.index(trump_suit)] = 1
    feature_set += trump_suit_onehot
    # Append one-hot encoded phase to feature set
    feature_set += [1, 0] if phase == 1 else [0, 1]
    # Append normalized stock size to feature set
    feature_set.append(stock_size / 10)
    # Append one-hot encoded leader to feature set
    feature_set += [1, 0] if leader == 1 else [0, 1]
    # Append one-hot encoded whose_turn to feature set
    feature_set += [1, 0] if whose_turn == 1 else [0, 1]
    # Append one-hot encoded opponent's card to feature set
    # (20 card indices plus one extra slot for "no card played")
    opponents_played_card_onehot = [0] * 21
    opponents_played_card_onehot[
        opponents_played_card if opponents_played_card is not None else 20] = 1
    feature_set += opponents_played_card_onehot

    ############################# CUSTOM #################################

    # Append one-hot encoded number of trump cards in hand to feature set
    # (a hand holds at most 5 cards, so counts 0..5 need 6 slots)
    trumps_in_hand_onehot = [0] * 6
    trumps_in_hand_onehot[number_of_trump_cards_in_hand] = 1
    feature_set += trumps_in_hand_onehot

    # Append one-hot encoded number of suit-following cards in hand to feature set
    # (the led card itself is out of hand, so at most 4 of its suit remain: 5 slots)
    suit_following_in_hand_onehot = [0] * 5
    suit_following_in_hand_onehot[number_of_suit_following_cards_in_hand] = 1
    feature_set += suit_following_in_hand_onehot

    # Return feature set
    return feature_set
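
Feature vectors like the ones returned above are normally consumed by a learner. The sketch below is a hedged, self-contained illustration using scikit-learn's MLPClassifier on random stand-in data; FEATURE_LENGTH, the labels, and the network size are hypothetical and only show that every vector must have the same fixed length.

import random
from sklearn.neural_network import MLPClassifier

FEATURE_LENGTH = 154  # hypothetical; the real length is fixed by the encoding above
X = [[random.random() for _ in range(FEATURE_LENGTH)] for _ in range(200)]
y = [random.choice(['won', 'lost']) for _ in range(200)]  # stand-in game outcomes

model = MLPClassifier(hidden_layer_sizes=(64,), max_iter=300)
model.fit(X, y)                     # train on the fixed-length feature vectors
print(model.predict_proba([X[0]]))  # class probabilities for one feature vector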