Example #1
File: agents.py Project: jubra97/speedos
    def act(self, state):
        model = state_to_model(state)
        own_id = state["you"]
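        # Voronoi analysis of the board: reports whether the game has reached
        # its endgame and which opponents to treat as min players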
        _, _, is_endgame, min_player_ids = voronoi(model, own_id)
        if own_id in min_player_ids:
            min_player_ids.remove(own_id)
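        # Outside the endgame, shrink the state to a sliding window whose size
        # is derived from the distance to the first remaining opponent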
        if not is_endgame and len(min_player_ids) > 1:
            pos = model.get_agent_by_id(own_id).pos
            opponent_pos = model.get_agent_by_id(min_player_ids[0]).pos
            distance_to_next_opponent = distance.euclidean(pos, opponent_pos)
            state = reduce_state_to_sliding_window(
                state,
                distance_to_next_opponent,
                min_sliding_window_size=self.min_sliding_window_size,
                sliding_window_size_offset=self.sliding_window_size_offset)

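        # Run the iterative-deepening search in a worker process so it can be
        # stopped once the move time is up; the shared Values hold the chosen
        # move and the depth that was reached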
        move = multiprocessing.Value('i', 4)
        reached_depth = multiprocessing.Value('i', 0)
        p = multiprocessing.Process(
            target=self.depth_first_iterative_deepening,
            name="DFID",
            args=(move, reached_depth, state))
        p.start()
        p.join(self.time_for_move)

        # Force termination
        if p.is_alive():
            p.terminate()
            p.join()

        return Action(move.value)
Example #2
File: agents.py Project: jubra97/speedos
    def reset_model(pre_state, max_player, min_player):
        # Rebuild the model from the previous state (trace-aware) and re-fetch
        # both players from the new model instance
        model = state_to_model(pre_state, trace_aware=True)
        own_id = max_player.unique_id
        max_player = model.get_agent_by_id(own_id)
        min_player_id = min_player.unique_id
        min_player = model.get_agent_by_id(min_player_id)
        return model, max_player, min_player
Example #3
    def test_default_params(self):
        for game in self.test_games:
            # uncomment this for debugging to find out which game has failed
            # print(f"Checking Game: {game}")
            path_to_game = self.original_games_path + game
            with open(path_to_game, "r") as file:
                game = json.load(file)

            game = self.remove_duplicates(game)

            initial_state = game[0]
            model = state_to_model(
                initial_state, False,
                [DummyAgent for _ in range(len(initial_state["players"]))])
            own_agent = model.get_agent_by_id(1)
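            # Replay the recorded game step by step and check the simulated
            # model against the logged states after every step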
            while model.running:
                state = get_state(model, own_agent)

                # execute unit tests
                self.compare_states(game, model, state)
                self.compare_grid_with_cells(model)

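                # Feed every agent the action recorded for this step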
                for agent in model.speed_agents:
                    agent.action = self.get_action(game, model,
                                                   str(agent.unique_id))
                    if agent.action == "set_inactive" and agent.active:
                        agent.set_inactive()
                model.step()
Example #4
File: agents.py Project: jubra97/speedos
    def deep_search(self, state, depth, initial_action):
        own_id = state["you"]

        if not state["players"][str(own_id)]["active"]:
            return
        elif depth == 0:
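            # Our agent is still active at the target depth, so credit the
            # initial action with one surviving branch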
            self.survival[initial_action] += 1
        else:
            model = state_to_model(state)
            nb_active_agents = len(model.active_speed_agents)
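            # Enumerate ordered assignments of distinct actions to the agents
            # that are still active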
            action_permutations = list(
                permutations(list(Action), nb_active_agents))
            for action_permutation in action_permutations:
                own_agent = model.get_agent_by_id(own_id)
                for idx, agent in enumerate(model.active_speed_agents):
                    agent.action = action_permutation[idx]
                model.step()
                new_state = get_state(model, own_agent, self.deadline)
                # recursion
                if initial_action is None:
                    self.deep_search(new_state, depth - 1, own_agent.action)
                else:
                    self.deep_search(new_state, depth - 1, initial_action)
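                # Rebuild the model so the next permutation starts from the
                # original state again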
                model = state_to_model(state)
Example #5
File: agents.py Project: jubra97/speedos
    def init_multi_minimax(self, game_state):
        game_state["step"] = self.game_step
        model = state_to_model(game_state)
        own_id = game_state["you"]
        _, _, is_endgame, _ = voronoi(model, own_id)
        max_player = model.get_agent_by_id(own_id)

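        # Every other active agent is treated as a minimizing player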
        min_player_ids = list(
            map(lambda a: a.unique_id, model.active_speed_agents))
        min_player_ids.remove(own_id)

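        # Default move plus worst-case starting values for the maximizer and alpha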
        move_to_make = Action.CHANGE_NOTHING
        max_move = float("-inf")
        alpha = float("-inf")
        actions = self.init_actions()
        return model, max_player, min_player_ids, is_endgame, move_to_make, max_move, alpha, actions
Example #6
File: agents.py Project: jubra97/speedos
    def act(self, state):
        self.reached_depth = (False, 0)
        model = state_to_model(state)
        own_id = state["you"]
        _, _, is_endgame, min_player_ids = voronoi(model, own_id)
        if own_id in min_player_ids:
            min_player_ids.remove(own_id)
        self.is_endgame = is_endgame
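        # Outside the endgame, restrict the state to a sliding window sized by
        # the distance to the first remaining opponent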
        if not is_endgame and len(min_player_ids) > 1:
            pos = model.get_agent_by_id(own_id).pos
            opponent_pos = model.get_agent_by_id(min_player_ids[0]).pos
            distance_to_next_opponent = distance.euclidean(pos, opponent_pos)
            state = reduce_state_to_sliding_window(
                state,
                distance_to_next_opponent,
                min_sliding_window_size=self.min_sliding_window_size,
                sliding_window_size_offset=self.sliding_window_size_offset)

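        # Search synchronously; depth_first_iterative_deepening stores its
        # result in self.move_to_make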
        self.depth_first_iterative_deepening(state)
        return Action(self.move_to_make)
Example #7
File: agents.py Project: jubra97/speedos
    def act(self, state):
        model = state_to_model(state)
        own_id = state["you"]
        _, _, is_endgame, min_player_ids = voronoi(model, own_id)
        if own_id in min_player_ids:
            min_player_ids.remove(own_id)
        if not is_endgame:
            pos = model.get_agent_by_id(own_id).pos
            opponent_pos = model.get_agent_by_id(min_player_ids[0]).pos
            distance_to_next_opponent = distance.euclidean(pos, opponent_pos)
            state = reduce_state_to_sliding_window(
                state,
                distance_to_next_opponent,
                min_sliding_window_size=self.min_sliding_window_size,
                sliding_window_size_offset=self.sliding_window_size_offset)

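        # Run the iterative-deepening search in a worker process so it can be
        # terminated once the round deadline is reached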
        move = multiprocessing.Value('i', 4)
        reached_depth = multiprocessing.Value('i', 0)
        p = multiprocessing.Process(
            target=self.depth_first_iterative_deepening,
            name="DFID",
            args=(move, reached_depth, state))
        p.start()
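        # Derive the time budget from the round deadline and the current server
        # time, minus a one-second buffer for sending the response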
        send_time = 1
        deadline = datetime.datetime.strptime(state["deadline"],
                                              "%Y-%m-%dT%H:%M:%SZ")
        response = requests.get(self.server_time_url)
        server_time = datetime.datetime.strptime(response.json()["time"],
                                                 "%Y-%m-%dT%H:%M:%SZ")
        av_time = (deadline - server_time).total_seconds() - send_time
        p.join(av_time)

        # If the search process is still running, force-terminate it
        if p.is_alive():
            p.terminate()
            p.join()

        self.game_step += 1
        return Action(move.value)