# Module-level imports assumed at the top of this file; State and the action
# constants RIGHT, LEFT, and STRAIGHT are defined elsewhere in the project.
import random
from typing import List, Tuple

import numpy as np


    def get_next_state(self, player_ids: List[int], state: State,
                       actions: List[int]) -> State:
        # Advance a copy of the state by action_sampling_rate game ticks,
        # with every player holding its given action for the whole interval.
        next_state = State.from_state(state)
        for _ in range(self.action_sampling_rate):
            for i, player in enumerate(player_ids):
                # Read angle and position from next_state so successive
                # ticks accumulate; reading from the original state would
                # replay the same tick action_sampling_rate times.
                angle = self.calculate_new_angle(next_state.get_angle(player),
                                                 actions[i])
                next_state.set_angle(player, angle)
                position = self.calculate_new_position(
                    next_state.get_position(player), angle)
                next_state.set_position(player, position)
                next_state.draw_head(self.get_head_position(position, angle))
                next_state.draw_player(player, True)
        # TODO: Look into saving into a designated memory space by
        # specifying the destination of the copy.
        return next_state
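
    # A minimal sketch of the kinematics helpers that get_next_state relies
    # on, assuming the usual curve-game model of a fixed turning rate and a
    # constant per-tick speed. The attributes `turning_rate` and `speed` are
    # assumptions introduced for illustration; the real implementations in
    # this project may differ.
    def calculate_new_angle(self, angle: float, action: int) -> float:
        # RIGHT and LEFT rotate the heading by a fixed increment per tick;
        # STRAIGHT leaves it unchanged.
        if action == RIGHT:
            return angle - self.turning_rate
        if action == LEFT:
            return angle + self.turning_rate
        return angle

    def calculate_new_position(self, position: Tuple[float, float],
                               angle: float) -> Tuple[float, float]:
        # Advance one tick along the current heading at constant speed.
        x, y = position
        return x + self.speed * np.cos(angle), y + self.speed * np.sin(angle)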
    def get_action(self, state):
        state_copy = State.from_state(state)
        # Refresh the tracked opponents while the round is still contested
        # but some opponents have died, or on the very first round.
        if 1 < len(state_copy.alive) < self.n_of_opp or self.first_round:
            self.update_opponents(state_copy)
        values = []
        initial_position = self.get_initial_positions([self.id], state_copy)
        possible_actions = [RIGHT, LEFT, STRAIGHT]
        for action in possible_actions:
            self.successors_generated += 1
            # Apply the candidate action in place, score it with alpha-beta,
            # then undo it so state_copy is clean for the next candidate.
            self.update_successor_state([self.id], state_copy, [action],
                                        initial_position, True)
            values.append(
                self.alpha_beta(state_copy, self.depth, -np.inf, np.inf,
                                False, action))
            self.update_successor_state([self.id], state_copy, [action],
                                        initial_position, False)

        # Tie-breaking: with at most two equally good actions, prefer the
        # one listed first in possible_actions; if all three tie, choose at
        # random.
        max_vals = [i for i, val in enumerate(values) if val == max(values)]
        chosen_index = min(max_vals) if len(max_vals) <= 2 else random.choice(
            max_vals)

        # Map the winning index back to its action constant rather than
        # returning the raw index (the two coincide only when RIGHT, LEFT,
        # and STRAIGHT happen to be 0, 1, and 2).
        return possible_actions[chosen_index]
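
    # A minimal sketch of the alpha_beta recursion invoked above, assuming a
    # standard minimax search with alpha-beta pruning over the same three
    # actions. `get_successor`, `heuristic`, and `opponent_id` are
    # hypothetical names introduced for illustration only; the original
    # method expands nodes via update_successor_state and may be structured
    # differently.
    def alpha_beta(self, state, depth, alpha, beta, maximizing, action):
        # Stop at the depth limit or once the round is decided, and score
        # the line of play that started with `action` at the root.
        if depth == 0 or len(state.alive) <= 1:
            return self.heuristic(state, action)
        if maximizing:
            value = -np.inf
            for own_action in [RIGHT, LEFT, STRAIGHT]:
                child = self.get_successor(state, self.id, own_action)
                value = max(value, self.alpha_beta(child, depth - 1, alpha,
                                                   beta, False, action))
                alpha = max(alpha, value)
                if alpha >= beta:
                    # Beta cutoff: the minimizer already has a better
                    # alternative, so this branch cannot reach the root.
                    break
            return value
        value = np.inf
        for opp_action in [RIGHT, LEFT, STRAIGHT]:
            child = self.get_successor(state, self.opponent_id, opp_action)
            value = min(value, self.alpha_beta(child, depth - 1, alpha,
                                               beta, True, action))
            beta = min(beta, value)
            if beta <= alpha:
                # Alpha cutoff: the maximizer can already guarantee more
                # elsewhere.
                break
        return value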