Example #1
 def choose(self, moves: List[Moves.Move], player: Player,
            state: GameSession) -> Moves.Move:
     inpt = input(
         'Player {}, choose move by index (or n = nodes map, e = edges map, b = board, m = moves list):'
         '\n{}\n'.format(
             player, '\n'.join('{:3} - {}'.format(i, m.info())
                               for i, m in enumerate(moves))))
     while True:
         if inpt == 'n':
             print(state.board().nodes_map())
         elif inpt == 'e':
             print(state.board().edges_map())
         elif inpt == 'b':
             print(state.board())
         elif inpt == 'm':
             print(*(m.info() for m in moves), sep='\n')
         else:
             try:
                 idx = int(inpt)
             except ValueError:
                 idx = -1  # unrecognized, non-numeric input: fail the range check below
             if not 0 <= idx < len(moves):
                 print('supply an integer in the range [0, {}] please'.
                       format(len(moves) - 1))
             else:
                 return moves[idx]
         inpt = input(
             'Player {}, choose move by index (or n = nodes map, e = edges map, b = board, m = moves list):'
             '\n'.format(player))
Example #2
 def _calc(self, session: GameSession, player: Player) -> float:
     # score the strongest opponent (most victory points) by a weighted sum of
     # hand size, victory points and road length, and return its inverse so a
     # stronger opponent yields a lower value for this player
     max_vp_player = max([p for p in session.players() if p != player],
                         key=lambda p: p.vp())
     opp_score = (self.hand_size.value(session, max_vp_player) +
                  10 * self.vp.value(session, max_vp_player) +
                  1.5 * self.road.value(session, max_vp_player))
     return (1 / opp_score) if opp_score != 0 else 0
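For intuition (made-up numbers, assuming hand_size, vp and road return the raw card count, victory points and road length): if the leading opponent holds 5 cards, has 4 victory points and a road of length 3, then opp_score = 5 + 10 * 4 + 1.5 * 3 = 49.5 and the heuristic returns 1 / 49.5 ≈ 0.02, while a weaker opponent produces a larger value.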
Example #3
    def optimize_monopoly_choice(session: GameSession, player: Player,
                                 move: Moves):
        """
        finds the most common resource among all other players and calculates
        if it is the best choice for the player, considering the player's hand

        :return:
        """
        p = find_sim_player(session, player)
        score = 0
        all_res_from_players = Hand()
        # collect all resources held by the other players:
        for other_player in session.players():
            if other_player != p:
                all_res_from_players.insert(other_player.resource_hand())
        res_values_all_players = \
            all_res_from_players.map_resources_by_quantity()
        res_values_curr_player = p.resource_hand().map_resources_by_quantity()

        # discount half of what the player already holds of each resource
        for res_type in res_values_all_players:
            res_values_all_players[res_type] -= \
                res_values_curr_player.get(res_type, 0) / 2

        most_common_res = max(res_values_all_players,
                              key=res_values_all_players.get)

        if move.resource() == most_common_res:
            score += 0.5
        return score
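A toy walk-through (made-up hands; the resource names are illustrative, and map_resources_by_quantity is assumed to return a mapping from resource type to count): if the opponents together hold 6 WOOD and 5 BRICK while the simulated player holds 4 WOOD and no BRICK, the adjusted counts are WOOD: 6 - 4/2 = 4 and BRICK: 5 - 0 = 5, so BRICK is preferred even though WOOD is more common overall, because the player is already rich in WOOD.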
Example #4
def main(log: str = None,
         num_players: int = DEFAULT_NUM_PLAYERS,
         agents: List[str] = DEFAULT_AGENTS,
         **kwargs) -> None:
    players = init_players(num_players, *agents)
    catan_session = GameSession.GameSession(log, *players)
    catan_session.run_game()
Example #5
 def _calc(self, session: GameSession, player: Player) -> float:
     # reward settlements that touch many tiles, many distinct resource types
     # and high-probability dice tokens
     board = session.board()
     tiles_types = set()
     num_tiles = 0
     tiles_prob = 0
     for node in player.settlement_nodes():
         for tile in board.get_adj_tile_ids_to_node(node):
             hex_tile = board.hexes()[tile]
             num_tiles += 1
             tiles_types.add(hex_tile.resource())
             tiles_prob += Dice.PROBABILITIES[hex_tile.token()]
     return num_tiles * len(tiles_types) * tiles_prob
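For example (made-up board, assuming Dice.PROBABILITIES maps a token number to its two-dice probability): a single settlement adjacent to tiles with tokens 6, 8 and 5 of two distinct resource types gives num_tiles = 3, len(tiles_types) = 2 and tiles_prob = 5/36 + 5/36 + 4/36 ≈ 0.39, so the heuristic returns roughly 3 * 2 * 0.39 ≈ 2.3.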
Example #6
def objective_function(weights):
    # play two games pitting an agent that uses the candidate weights (p1)
    # against two agents using the default weights, and score p1's VP margin
    h = Everything(weights=tuple(weights))
    val = 0
    a = Agent.OneMoveHeuristicAgent(h)
    a2 = Agent.OneMoveHeuristicAgent(Everything())
    for _ in range(2):
        p1 = Player.Player(a, 'Roy')
        p2 = Player.Player(a2, 'Boaz')
        p3 = Player.Player(a2, 'Amoss')
        session = GameSession.GameSession(None, p1, p2, p3)
        session.run_game()
        val += p1.vp() - p2.vp() - p3.vp()
    return -val  # negated so that minimizing it maximizes p1's margin
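A minimal sketch of how this objective could be fed to a derivative-free optimizer; the use of scipy, the Nelder-Mead method, the starting vector and its length are assumptions for illustration, not from the source:

from scipy.optimize import minimize

x0 = [1.0] * 10  # hypothetical starting weights; the real length depends on Everything
result = minimize(objective_function, x0, method='Nelder-Mead',
                  options={'maxiter': 50})
print(result.x)  # weight vector with the best (lowest) objective value found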
Example #7
def main():  # start the game
    session = gs.GameSession()
    gameType = input('Choose the game type: solo (1) or bot (2) ')
    try:
        gameType = int(gameType)
    except ValueError:
        pass
    while gameType != 1 and gameType != 2:
        print("Please choose 1 or 2 ")
        try:
            gameType = int(input())
        except ValueError:
            continue
    session.choose_game(gameType)
Example #8
    def optimized_trading_choice(session: GameSession, player: Player,
                                 move: Moves):
        """prefer trading resources for resources you can't get from dice"""
        p = find_sim_player(session, player)
        res_hand = p.resource_hand()
        score = 0
        if move.get_type() == Moves.MoveType.TRADE:
            __board = session.board()
            res_types_from_dice = __board.resources_player_can_get(player)
            gets_type = move.gets().get_cards_types().pop()
            num_instances_gets_type = res_hand.cards_of_type(gets_type)

            # if the resource received from the trade cannot be obtained from
            # the dice, raise the score (the fewer copies already held, the
            # bigger the bonus; an empty holding gets the maximum bonus of 1)
            if gets_type not in res_types_from_dice:
                score += (1 / (2 * num_instances_gets_type)
                          if num_instances_gets_type else 1)
        return score
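A worked example with made-up numbers (assuming cards_of_type returns how many cards of that type the player currently holds, and BRICK is an illustrative resource): if the trade yields BRICK, none of the player's tiles produce BRICK from dice, and the player holds 2 BRICK cards, the bonus is 1 / (2 * 2) = 0.25; holding only 1 card raises it to 0.5, and holding none gives the full bonus of 1.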
Example #9
def play_round(vectors):
    """
    Plays one round with the given vectors, return the results
    :param vectors: 4 vectors representing the agents
    :return: tuple (list of number of wins, list of total vps) each list is 4 elements
    """
    wins = [0] * len(vectors)
    vps = [0] * len(vectors)
    agents = [vec_to_agent(vec) for vec in vectors]
    for _ in range(ROUND_SIZE):
        players = [Player.Player(agent) for agent in agents]
        session = GameSession.GameSession(None, *players)
        session.run_game()
        winner = session.winner()
        for i, player in enumerate(players):
            if winner == player:
                wins[i] += 1
            vps[i] += player.vp()
    return wins, vps
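A hypothetical call (the vector values are placeholders; their length and meaning depend on vec_to_agent, which is not shown here):

vectors = [[1.0, 0.5, 2.0], [0.0, 1.0, 1.0], [2.0, 2.0, 0.5], [1.0, 1.0, 1.0]]
wins, vps = play_round(vectors)
for i, (w, v) in enumerate(zip(wins, vps)):
    print('agent {}: {} wins, {} total VPs'.format(i, w, v))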
Example #10
    def choose(self, moves: List[Moves.Move], player: Player,
               state: GameSession) -> Moves.Move:
        # swap in the session's own Player instance (the state may be a copy)
        for p in state.players():
            if p == player:
                player = p
                break

        self.__curr_depth -= 1
        max_moves = moves
        all_move_values = []
        move_expected_vals = []

        # roll out each candidate move (rest of my turn plus __depth rounds of
        # play, __iterations times) and average the final state evaluations
        # into move_expected_vals
        for move_idx, move in enumerate(max_moves):
            all_move_values.append([])
            for _i in range(self.__iterations):
                move_state = deepcopy(state)
                move_state.simulate_game(move)
                self.sim_me(move_state, player)
                for _d in range(self.__depth):
                    self.sim_me(move_state, player)
                    self.sim_opps(move_state, player)
                value_reached = self.__h.value(move_state, player)
                all_move_values[move_idx].append(value_reached)
                del move_state
            avg_move_val = sum(all_move_values[move_idx]) / self.__iterations
            move_expected_vals.append(avg_move_val)

        # collect all moves tied for the best expected value
        max_val = max(move_expected_vals)
        best_moves = [m for m_i, m in enumerate(max_moves)
                      if move_expected_vals[m_i] == max_val]

        self.__curr_depth += 1
        if len(best_moves) == 1:  # shortcut to save time
            return best_moves[0]
        else:
            return self.__harry.choose(best_moves, player, state)
Example #11
 def _calc(self, session: GameSession, player: Player) -> float:
     if session.winner() is not None:
         return GameWon.INF if session.winner() == player else -GameWon.INF
     return 0
Example #12
def find_sim_player(session: GameSession, player: Player) -> Player:
    # find the player's counterpart inside the (possibly simulated) session
    for sim_player in session.players():
        if sim_player.get_id() == player.get_id():
            return sim_player
    return player  # fall back to the passed-in object if no counterpart exists
Example #13
 def _calc(self, session: GameSession, player: Player) -> float:
     return session.board().road_len(player)
Example #14
 def _calc(self, session: GameSession, player: Player) -> float:
     return sum((session.board().probability_score(player),
                 session.board().expectation_score(player),
                 session.potential_probability_score(player)))
Example #15
 def value(self, session: GameSession, player: Player) -> float:
     # swap in the session's own Player instance; it can differ from the one
     # passed in when the session is a simulated (deep-copied) state
     for p in session.players():
         if p == player:
             player = p
             break
     return self._calc(session, player) * self.norm
Example #16
#
# my_exp_file.close()

#======================= analyze Heuristics - one move:

my_one_move = open("one_move.txt", "w")
# my_one_move.write("everything_heuristic againt prob\n")
# for i in range(10):
#     E1 = Player.Player(Agent.OneMoveHeuristicAgent(Heuristics.everything_heuristic), "E1")
#     p2 = Player.Player(Agent.OneMoveHeuristicAgent(Heuristics.probability_score_heuristic), "E2")
#     p3 = Player.Player(Agent.RandomAgent(), "p3")
#     p4 = Player.Player(Agent.RandomAgent(), "p4")
#     game = GameSession.GameSession(None, E1, p2, p3, p4)
#     game.run_game()
#     my_one_move.write(game.winning_player.get_name() + '\n')

my_one_move.write("exptimaxprob\n")
for i in range(20):
    E = Player.Player(Agent.MonteCarloAgent(Heuristics.everything_heuristic),
                      "E")
    RE = Player.Player(
        Agent.MonteCarloAgent(Heuristics.relative_everything_heuristic), "RE")
    P = Player.Player(
        Agent.MonteCarloAgent(Heuristics.probability_score_heuristic), "P")
    R4 = Player.Player(Agent.RandomAgent(), "R4")
    game = GameSession.GameSession(None, E, RE, P, R4)
    game.run_game()
    my_one_move.write(str(game.winning_player) + '\n')

my_one_move.close()
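A small follow-up sketch (not from the source) for tallying the winners written to one_move.txt above, using only the standard library:

from collections import Counter

with open("one_move.txt") as f:
    results = [line.strip() for line in f if line.strip()]
print(Counter(results[1:]))  # skip the "exptimaxprob" label line; count wins per player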