Example #1
# Assumed framework imports (the schnapsen course's api module):
from api import State, util


def heuristic(state):
    # type: (State) -> tuple
    """
    Estimate the value of this state: -1.0 is a certain win for player 2, 1.0 is a certain win for player 1

    :param state:
    :return: A heuristic evaluation for the given state (between -1.0 and 1.0)
    """
    # Returns a (value, move) pair, following the convention minimax-style
    # players use. Note that the raw point difference is negated (so it
    # favours player 2) and is not scaled to the [-1.0, 1.0] range the
    # docstring promises.
    return -util.difference_points(state, 1), None
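If the [-1.0, 1.0] contract matters, a normalized variant is straightforward. The sketch below is an illustration only: MAX_DIFFERENCE is a hypothetical bound on the point difference, not a framework constant.

def heuristic_normalized(state):
    # type: (State) -> tuple
    MAX_DIFFERENCE = 66.0  # hypothetical bound; adjust to your scoring rules
    value = util.difference_points(state, 1) / MAX_DIFFERENCE
    # Clamp defensively and keep the (value, move) tuple convention.
    return max(-1.0, min(1.0, value)), None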
Example #2
    def heuristics(self, state, player, phase, phaseEnterence):
        bonus = 0.0
        winner, points = state.winner()

        # When far ahead, reward winning big (3 game points); otherwise
        # reward securing even a narrow win (1 game point).
        if util.difference_points(state, self.player) >= 40:
            if points == 3:
                bonus += 3
            elif points == 2:
                bonus += 1
        else:
            if points == 2:
                bonus += 1
            elif points == 1:
                bonus += 3

        # If the game reaches phase 2 at some point, holding more trump
        # cards when entering it means more points.
        if phase == 2:
            for card in phaseEnterence.moves():
                if card[0] is not None and util.get_suit(card[0]) == state.get_trump_suit():
                    bonus += 3

        # Reward ending the game with almost no trumps left in hand
        # (each remaining non-trump move scores a bonus).
        for card in state.moves():
            if card[0] is not None and util.get_suit(card[0]) != state.get_trump_suit():
                bonus += 3

        return 1 + bonus if winner == self.player else -2
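Both trump-scanning loops share the same shape; a small helper, hypothetical rather than from the source, makes the pattern explicit:

def count_suit_moves(moves, trump_suit, matching=True):
    # Count regular-card moves (card index in slot 0) whose suit does,
    # or does not, match trump_suit.
    return sum(1 for move in moves
               if move[0] is not None
               and (util.get_suit(move[0]) == trump_suit) == matching)

With it, the first loop becomes bonus += 3 * count_suit_moves(phaseEnterence.moves(), state.get_trump_suit()), and the second is the same call on state.moves() with matching=False.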
Example #3
    def heuristic_5b(self, player: int, depth: int,
                     curr_state: State) -> float:
        # Normalize the point difference by the score needed to win.
        return util.difference_points(curr_state, self.__me) / self.__WIN_SCORE
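For intuition: with a hypothetical __WIN_SCORE of 66, a 33-point lead for self.__me yields 33 / 66 = 0.5 and a 33-point deficit yields -0.5, so the value stays roughly within [-1.0, 1.0].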
Example #4
# Assumed framework imports, plus itertools.chain used below to flatten the
# one-hot encoded perspective:
from itertools import chain

from api import State, util


def features(state):
    # type: (State) -> list[float]
    """
    Extract features from this state. Remember that every feature vector returned should have the same length.

    :param state: A state to be converted to a feature vector
    :return: A list of floats: a feature vector representing this state.
    """

    feature_set = []

    # Get player 1's points (appended to the feature set further down)
    p1_points = State.get_points(state, 1)

    # Get player 2's points
    p2_points = State.get_points(state, 2)

    # Get player 1's pending points
    p1_pending_points = State.get_pending_points(state, 1)

    # Get player 2's pending points
    p2_pending_points = State.get_pending_points(state, 2)

    # Get the trump suit
    trump_suit = State.get_trump_suit(state)

    # Get the phase (1 or 2)
    phase = State.get_phase(state)

    # Get the stock size
    stock_size = State.get_stock_size(state)

    # Get the trick leader
    leader = State.leader(state)

    # Get whose turn it is
    whose_turn = State.whose_turn(state)

    # Get the card the opponent has played this trick, if any
    opponents_played_card = State.get_opponents_played_card(state)

    ############# custom added features #########################
    # Sum of the two card indices from the previous trick (0 if no trick yet).
    prev_trick = State.get_prev_trick(state)
    previous_trick_size = 0
    if prev_trick[0] is not None:
        previous_trick_size += int(prev_trick[0])
    if prev_trick[1] is not None:
        previous_trick_size += int(prev_trick[1])

    # Shift the point difference by 100 so that negative differences map to
    # valid (non-negative) one-hot indices further down.
    difference_in_points = util.difference_points(state, 1) + 100

    ################## You do not need to do anything below this line ########################

    perspective = state.get_perspective()

    # Perform one-hot encoding on the perspective.
    # Learn more about one-hot here: https://machinelearningmastery.com/how-to-one-hot-encode-sequence-data-in-python/
    one_hot = {
        'U':   [1, 0, 0, 0, 0, 0],
        'S':   [0, 1, 0, 0, 0, 0],
        'P1H': [0, 0, 1, 0, 0, 0],
        'P2H': [0, 0, 0, 1, 0, 0],
        'P1W': [0, 0, 0, 0, 1, 0],
        'P2W': [0, 0, 0, 0, 0, 1],
    }
    perspective = [one_hot.get(card, card) for card in perspective]

    # Append the flattened one-hot encoded perspective to feature_set
    feature_set += list(chain(*perspective))

    # Append normalized points to feature_set
    total_points = p1_points + p2_points
    feature_set.append(p1_points / total_points if total_points > 0 else 0.)
    feature_set.append(p2_points / total_points if total_points > 0 else 0.)

    # Append normalized pending points to feature_set
    total_pending_points = p1_pending_points + p2_pending_points
    feature_set.append(p1_pending_points / total_pending_points
                       if total_pending_points > 0 else 0.)
    feature_set.append(p2_pending_points / total_pending_points
                       if total_pending_points > 0 else 0.)

    # One-hot encode the trump suit and add it to the feature set
    # You don't need to add anything to this part
    suits = ["C", "D", "H", "S"]
    trump_suit_onehot = [0, 0, 0, 0]
    trump_suit_onehot[suits.index(trump_suit)] = 1
    feature_set += trump_suit_onehot

    # Append one-hot encoded phase to feature set
    feature_set += [1, 0] if phase == 1 else [0, 1]

    # Append normalized stock size to feature set
    feature_set.append(stock_size / 10)

    # Append one-hot encoded leader to feature set
    feature_set += [1, 0] if leader == 1 else [0, 1]

    # Append one-hot encoded whose_turn to feature set
    feature_set += [1, 0] if whose_turn == 1 else [0, 1]

    # Append one-hot encoded opponent's card to feature set
    # (20 card indices plus a final slot meaning "no card played yet")
    opponents_played_card_onehot = [0] * 21
    opponents_played_card_onehot[
        opponents_played_card if opponents_played_card is not None else 20] = 1
    feature_set += opponents_played_card_onehot

    # Append the custom features
    max_trick_size = [0] * 40  # royal marriage
    max_point_difference = [0] * 200  # twice as big to incorporate negative numbers
    max_trick_size[previous_trick_size] = 1
    max_point_difference[difference_in_points] = 1

    feature_set += max_trick_size
    feature_set += max_point_difference

    # Replace any remaining None entries with 0
    for index, element in enumerate(feature_set):
        if element is None:
            feature_set[index] = 0
    # Return feature set
    return feature_set
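Since every feature vector must have the same length, a quick check is worth running after changing the custom features. This sketch assumes the course framework's State.generate() constructor for random states (an assumption, not shown in the source):

# Minimal sanity check; State.generate() is assumed from the course framework.
lengths = {len(features(State.generate(seed))) for seed in range(1, 11)}
assert len(lengths) == 1, "feature vectors differ in length: %s" % sorted(lengths)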
Example #5
    def bottom_decision(self, player, depth, curr_state) -> float:
        # Same normalization as heuristic_5b above: the point difference
        # scaled by the score needed to win.
        return util.difference_points(curr_state, self.__me) / self.__WIN_SCORE