Example #1
    def play(self, state, remaining_time):
        self.move_nb += 1
        # Rebind the incoming state to the local State subclass so its helper methods are available.
        state.__class__ = State
        print(
            f"\nPlayer {self.ME} is playing with {remaining_time} seconds remaining for move #{self.move_nb}"
        )
        print(f"CacheInfo : "
              f"hits={self.cache_successors['hits']}, "
              f"misses={self.cache_successors['misses']}, "
              f"currsize={len(self.cache_successors) - 2}")
        print(f"{state} evaluation={self.evaluate(state):.2f}\n")

        # TODO remove obsolete since stuck player fix
        # if self.repeat_boring_moves:  # fast-forward to save time
        #     assert state.get_latest_player() == self.ME, \
        #         " - ERROR : May not repeat boring moves, latest player isn't self"
        #     print(" - PLAYING BOREDOM")
        #     return self.reverse_last_move(state)

        if self.max_time is None:
            # First call: record the total time budget and derive a per-move estimate.
            self.max_time = remaining_time
            self.typical_time = remaining_time / self.max_nb_moves
        self.remaining_time = remaining_time

        possible_actions = SeegaRules.get_player_actions(
            state, self.color.value)
        if len(possible_actions) == 1:
            best_action = possible_actions[0]
        elif state.phase == 1:
            best_action = SeegaRules.random_play(
                state, self.ME)  # TODO play smart during phase 1
        else:  # phase == 2
            # TODO remove obsolete since stuck player fix
            # if self.can_start_self_play(state):
            #     best_action = self.make_self_play_move(state, fallback_function=self.iterative_deepening)
            best_action = self.iterative_deepening(state)

        print(f" - SELECTED ACTION : {best_action}")
        self.last_action = best_action
        return best_action
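
Example #1 defers phase-2 move selection to self.iterative_deepening(state), which is not shown here. Below is a minimal sketch of what such a driver could look like, assuming a hypothetical depth-limited search helper self.minimax_root(state, depth, deadline) and a simple per-move budget derived from self.typical_time and self.remaining_time; it illustrates the iterative-deepening pattern under those assumptions and is not the author's implementation.

    def iterative_deepening(self, state):
        # Sketch only: search with increasing depth until the per-move time budget
        # is spent, keeping the best action from the deepest fully completed search.
        import time
        budget = min(self.typical_time, self.remaining_time / 2)  # assumed budgeting rule
        deadline = time.time() + budget
        best_action = SeegaRules.random_play(state, self.ME)  # safe fallback
        depth = 1
        while time.time() < deadline:
            # self.minimax_root is a hypothetical helper returning the best action
            # found by a depth-limited search, or None if it overran the deadline.
            action = self.minimax_root(state, depth, deadline)
            if action is None:
                break
            best_action = action
            depth += 1
        return best_action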
Example #2
    def play(self, state, remain_time):
        # Baseline agent: print the current situation and pick a random legal move.
        print(f"\nPlayer {self.position} is playing.")
        print(f"Time remaining: {remain_time} seconds")
        print(state)
        return SeegaRules.random_play(state, self.position)