Example #1
def pieces_threatened(game_state, player):
    other = game.other_player(player)

    # Friendly piece count before any simulated enemy move-and-boom
    home_b = count_pieces(game_state[player])
    pieces = 0

    for enemy in game_state[other]:
        # Each stack is stored as [token_count, x, y]
        xy = enemy[1], enemy[2]

        for move in tokens.available_moves(other):
            if move == "Boom":
                continue
            # Try every distance this stack could travel, farthest first
            for dist in range(enemy[0], 0, -1):
                xy2 = game.dir_to_xy(xy, move, dist)
                temp_game = game.Game(game_state)

                if (tokens.out_of_board(xy2)
                        or not temp_game.board.is_cell_empty(xy2)):
                    continue

                # Move one enemy token there, then detonate it
                temp_game.move_token(1, xy, move, dist, other)
                temp_game.boom(xy2, other)
                home_a = count_pieces(temp_game.get_game_state()[player])

                pieces += home_b - home_a

    return pieces
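For every enemy stack, the function simulates each legal non-boom move to every reachable empty square and detonates at the landing square, accumulating the friendly pieces that would be lost. A piece threatened by several distinct moves is counted once per move, so the result is a threat weight rather than a count of distinct pieces.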
Example #2
    def __init__(self, game_, game_state, player):

        self.game = game_
        self.game_state = game_state
        self.player = player
        self.other = game.other_player(player)

        self.turn = 0

        self.tt = {}
        self.away_recently_moved = None
        self.home_recently_moved = None

        self.past_states = []
        self.root = None

        weights = pd.read_csv("genetic_programming/weights.csv",
                              sep=",",
                              header=0)

        data = weights.sample(axis=0, random_state=random.randint(0, 1000000))

        self.weight_index = data.iloc[0, 0]
        self.weight_score = data.iloc[0, 1]
        self.weight_games = data.iloc[0, 2]
        self.weights = data.iloc[0, 3:].astype(float)
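The iloc calls imply a row layout of index, score, games played, then the feature weights; eval_function (Example #10) builds seven features, so seven weight columns are assumed. A minimal sketch that writes a compatible file (the column names here are hypothetical):

import pandas as pd

# Hypothetical layout: column 0 = row index, 1 = score, 2 = games played,
# columns 3+ = the seven feature weights used by eval_function.
# Assumes the genetic_programming/ directory already exists.
df = pd.DataFrame(
    [[0, 0.0, 0, 1.0, 0.5, 0.2, 0.1, 0.3, 0.4, 0.6]],
    columns=["index", "score", "games"] + ["w%d" % i for i in range(7)],
)
df.to_csv("genetic_programming/weights.csv", sep=",", index=False)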
Example #3
    def score(self,
              curr_state,
              game_state,
              decision,
              player,
              alpha=None,
              beta=None,
              extend=False):

        game_state_str = get_str(game_state)

        if game_state_str in self.tt:
            home_eval, away_eval = self.tt[game_state_str]
        else:
            if not game_state[player]:
                return float("-inf")
            elif not game_state[game.other_player(player)]:
                return float("inf")

            # Not a quiet node
            if extend and features.pieces_threatened(game_state,
                                                     self.player) > 0:
                return self.quiesce(curr_state, game_state, alpha, beta,
                                    self.player)

            home_eval, away_eval = features.eval_function(
                self, curr_state, game_state, self.player, self.turn)

            self.tt[game_state_str] = (home_eval, away_eval)

        if decision == "p":
            return home_eval
        else:
            return home_eval - away_eval
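Evaluations are memoized in self.tt, a transposition table keyed on the serialized game state, and terminal win/loss states short-circuit to -inf/+inf before anything is cached. The decision flag then selects between the raw home evaluation and the home/away differential; given the mp_mix call in Example #4, "p" plausibly selects the paranoid strategy of the MP-Mix algorithm, though that is a reading of the code rather than something it states.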
Example #4
    def action(self):
        """
        This method is called at the beginning of each of your turns to request 
        a choice of action from your program.

        Based on the current state of the game, your player should select and 
        return an allowed action to play on this turn. The action must be
        represented based on the spec's instructions for representing actions.
        """

        self.past_states.append(self.game_state[self.colour])
        self.home_tokens = sum(x[0] for x in self.game_state[self.colour])
        self.away_tokens = sum(
            x[0] for x in self.game_state[game.other_player(self.colour)])

        if self.away_tokens == 1 and self.home_tokens >= 1:
            strategy = self.agent.one_enemy_endgame(self.threshold,
                                                    self.max_depth)
        elif self.away_tokens == 2 and self.home_tokens >= 2:
            strategy = self.agent.two_enemy_endgame(self.threshold,
                                                    self.max_depth)
        else:
            strategy, val = self.agent.mp_mix(self.threshold, self.max_depth)

        n, xy, move, distance = strategy
        if move == "Boom":

            return "BOOM", xy

        else:
            x_a, y_a = xy
            x_b, y_b = game.dir_to_xy(xy, move, distance)

            return "MOVE", n, (x_a, y_a), (x_b, y_b)
Example #5
    def end(self):

        game_state = self.game.get_game_state()
        self.agent.update_weights(game_state)

        with open("genetic_programming/score.json") as file:
            data = json.load(file)

        other = game.other_player(self.colour)

        if game_state[self.colour] and not game_state[other]:
            data[self.colour] += 1
        elif not game_state[self.colour] and game_state[other]:
            data[other] += 1
        else:
            data["draw"] += 1

        with open("genetic_programming/score.json", 'w') as file:
            json.dump(data, file)
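end() assumes genetic_programming/score.json already exists with one counter per colour plus a "draw" key. A minimal bootstrap for that file, assuming the colours are named "white" and "black" (only the "draw" key is visible in the code above):

import json

initial = {"white": 0, "black": 0, "draw": 0}  # colour names are an assumption
with open("genetic_programming/score.json", "w") as file:
    json.dump(initial, file)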
Example #6
from math import ceil


def min_dist_to_boom(game_state, player):

    if not (game_state[player] and game_state[game.other_player(player)]):
        return 0

    minimum = float("inf")

    for piece1 in game_state[player]:
        x1, y1 = piece1[1], piece1[2]

        for piece2 in game_state[game.other_player(player)]:
            x2, y2 = piece2[1], piece2[2]

            # Manhattan distance between the two stacks
            dist = abs(x2 - x1) + abs(y2 - y1)

            # A stack of n tokens can move up to n squares per turn
            minimum = min(minimum, ceil(dist / piece1[0]))

    return minimum
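A quick sanity check of the estimate, assuming piece1[0] is the stack size and a stack of n tokens can move up to n squares per turn: a stack of 2 at (0, 0) needs ceil((3 + 4) / 2) = 4 turns to reach an enemy at (3, 4).

from math import ceil

# Manhattan distance 7, covered 2 squares per turn -> 4 turns
assert ceil((abs(3 - 0) + abs(4 - 0)) / 2) == 4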
Example #7
    def get_children(self, curr_state, game_state, player, strategy):
        children = []
        next_states = self.available_states(game_state, player)

        for next_strategy, next_state in next_states:
            state_score = self.utility(curr_state, next_state, strategy,
                                       player)
            children.append((state_score, (next_strategy, next_state)))

        ordered_children = self.reorder_nodes(children)
        children = [
            self.Node(x, game.other_player(player), None)
            for x in ordered_children
        ]
        return children
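Each child is scored with utility and the list is reordered before Node construction, presumably so that alpha-beta search visits the most promising states first; the nodes record the opposing player because turns alternate.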
Example #8
    def quiesce(self, curr_state, game_state, alpha, beta, player):
        home_eval, away_eval = features.eval_function(self, curr_state,
                                                      game_state, self.player,
                                                      self.turn)
        stand_pat = (home_eval - away_eval)

        if stand_pat >= beta:
            return beta
        if alpha < stand_pat:
            alpha = stand_pat

        next_moves = self.available_states(game_state, player)
        for move, next_state in next_moves:
            score = -self.quiesce(curr_state, next_state, -beta, -alpha,
                                  game.other_player(player))
            if score >= beta:
                return beta
            if score > alpha:
                alpha = score
        return alpha
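This is the standard fail-hard negamax quiescence pattern: the stand-pat evaluation can cut off immediately or raise alpha, and each reply is searched with the window negated and swapped. Note that available_states appears to generate every move; quiescence searches conventionally restrict this loop to "loud" moves (here, booms), since otherwise the recursion has no natural cutoff.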
Example #9
def pieces_per_boom(game_state, player):
    other = game.other_player(player)

    damages = []
    away_before = len(game_state[other])

    for piece in game_state[player]:
        temp_game = game.Game(game_state)
        xy = (piece[1], piece[2])

        temp_game.boom(xy, player)
        temp_game_state = temp_game.get_game_state()

        away_after = len(temp_game_state[other])

        damage = away_before - away_after

        damages.append(damage)

    if not damages:
        return 0

    # Square the best single-boom damage so big chain reactions dominate
    best_damage = max(damages)
    return best_damage * best_damage
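Squaring the best single-boom damage makes large chain reactions dominate the feature, mirroring the squared piece differences in eval_function (Example #10).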
Example #10
def eval_function(agent, curr_state, game_state, player, turn):
    other = game.other_player(player)

    b_home_pieces = curr_state[player]
    b_away_pieces = curr_state[other]

    a_home_pieces = game_state[player]
    a_away_pieces = game_state[other]

    home_num = count_pieces(a_home_pieces)
    away_num = count_pieces(a_away_pieces)
    total_num = home_num + away_num

    if total_num == 0:
        return 0, 0

    home_pieces_diff = count_pieces(b_home_pieces) - home_num
    away_pieces_diff = count_pieces(b_away_pieces) - away_num

    # Higher differences have more impact on the game
    home_pieces_diff = home_pieces_diff * home_pieces_diff
    away_pieces_diff = away_pieces_diff * away_pieces_diff

    home_stacks = count_stacks(a_home_pieces)
    away_stacks = count_stacks(a_away_pieces)

    home_stacks = home_stacks * home_stacks
    away_stacks = away_stacks * away_stacks

    home_min_dist = min_dist_to_boom(game_state, player)
    away_min_dist = min_dist_to_boom(game_state, other)

    home_threatening = pieces_threatened(game_state, player)
    away_threatening = pieces_threatened(game_state, other)

    max_damage = pieces_per_boom(game_state, player)
    max_losses = pieces_per_boom(game_state, other)

    home_board_score = agent.get_board_score(game_state, player)
    away_board_score = agent.get_board_score(game_state, other)

    weights = agent.weights

    home_features = np.array([
        home_num - away_num, home_pieces_diff - away_pieces_diff,
        turn * (home_stacks - away_stacks), turn * home_min_dist,
        max_damage - max_losses, home_threatening - away_threatening,
        home_board_score - away_board_score
    ])

    away_features = np.array([
        away_num - home_num, away_pieces_diff - home_pieces_diff,
        turn * (away_stacks - home_stacks), turn * away_min_dist,
        max_losses - max_damage, away_threatening - home_threatening,
        away_board_score - home_board_score
    ])

    home_final = np.dot(home_features, weights)
    away_final = np.dot(away_features, weights)

    return home_final, away_final
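Note that away_features mirrors home_features with each difference negated, except the min_dist_to_boom terms, which are computed from each side's own perspective; the two returned scores are therefore not exact negatives of one another. The weights vector must have length seven to match the feature arrays.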
Example #11
def count_all(game_state, player):
    home = sum(x[0] for x in game_state[player])
    away = sum(x[0] for x in game_state[game.other_player(player)])

    return home, away