Example 1
    def run(self, start_frame: Dict[int, ip.AgentState]) \
            -> Tuple[ip.StateTrajectory, Dict[int, ip.AgentState], bool, bool, List[ip.Agent]]:
        """ Execute current macro action of ego and forward the state of the environment with collision checking.

        Returns:
            A 5-tuple giving the trajectory driven by the ego, the final frame of the simulation,
            whether the ego has reached its goal, whether the ego is still alive, and the agents
            (possibly more than one) it has collided with, given as an empty list if there was no collision.
        """
        ego = self._agents[self._ego_id]
        current_observation = ip.Observation(start_frame, self._scenario_map)

        goal_reached = False
        collisions = []

        start_time = len(ego.trajectory_cl.states)
        t = 0
        while t < self._t_max and ego.alive and not goal_reached and not ego.done(
                current_observation):
            new_frame = {}

            # Update agent states
            for agent_id, agent in self._agents.items():
                if not agent.alive:
                    continue
                if agent.done(current_observation):
                    agent.alive = False
                    continue

                new_state = agent.next_state(current_observation)
                agent.trajectory_cl.add_state(new_state, reload_path=False)
                new_frame[agent_id] = new_state

                # An agent stays alive only while it is still on the road network.
                agent.alive = len(self._scenario_map.roads_at(new_state.position)) > 0

            current_observation = ip.Observation(new_frame, self._scenario_map)

            collisions = self._check_collisions(ego)
            if collisions:
                ego.alive = False
            else:
                goal_reached = ego.goal.reached(ego.state.position)

            t += 1

        ego.trajectory_cl.calculate_path_and_velocity()
        driven_trajectory = ego.trajectory_cl.slice(start_time, start_time + t)
        return driven_trajectory, current_observation.frame, goal_reached, ego.alive, collisions
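
For orientation, the pattern above is: advance every live agent, rebuild the frame, then check collisions and the goal before the next step, stopping on timeout, goal, or the ego dying. Below is a minimal, self-contained sketch of that loop with toy objects; every name in it is a hypothetical stand-in and none of it is igp2 API.

from dataclasses import dataclass
from typing import Dict, List, Tuple

import numpy as np


@dataclass
class ToyAgent:
    """Hypothetical agent that drives in a straight line towards a goal."""
    position: np.ndarray
    velocity: np.ndarray
    goal: np.ndarray
    alive: bool = True

    def next_state(self, dt: float) -> np.ndarray:
        return self.position + self.velocity * dt


def run_toy_simulation(agents: Dict[int, ToyAgent], ego_id: int, t_max: int = 100, dt: float = 0.1,
                       collision_radius: float = 1.0, goal_radius: float = 1.0
                       ) -> Tuple[List[np.ndarray], bool, bool, List[int]]:
    """Advance all agents step by step; stop on timeout, goal, or ego death."""
    ego = agents[ego_id]
    trajectory, goal_reached, collisions = [], False, []

    t = 0
    while t < t_max and ego.alive and not goal_reached:
        # Update every live agent and rebuild the frame of current positions.
        frame = {}
        for aid, agent in agents.items():
            if not agent.alive:
                continue
            agent.position = agent.next_state(dt)
            frame[aid] = agent.position
        trajectory.append(ego.position.copy())

        # Collision check: any other agent within collision_radius of the ego.
        collisions = [aid for aid, pos in frame.items()
                      if aid != ego_id and np.linalg.norm(pos - ego.position) < collision_radius]
        if collisions:
            ego.alive = False
        else:
            goal_reached = np.linalg.norm(ego.position - ego.goal) < goal_radius
        t += 1

    return trajectory, goal_reached, ego.alive, collisions
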
Example 2
    def _apply_view_radius(self, observation: ip.Observation):
        """ Remove agents outside the ego's view radius from the observation, if a view radius is set. """
        if hasattr(self.agent, "view_radius"):
            pos = observation.frame[self.agent_id].position
            new_frame = {
                aid: state for aid, state in observation.frame.items()
                if np.linalg.norm(pos - state.position) <= self.agent.view_radius
            }
            return ip.Observation(new_frame, observation.scenario_map)
        return observation
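
The core of this filter is just a Euclidean-distance cut-off over the frame dictionary. Here is a minimal standalone version of the same idiom with made-up positions and radius (plain dicts and arrays, no igp2 objects):

import numpy as np

# Hypothetical frame: agent ID -> 2D position.
frame = {
    0: np.array([0.0, 0.0]),    # ego
    1: np.array([3.0, 4.0]),    # 5 m away -> kept
    2: np.array([30.0, 40.0]),  # 50 m away -> dropped
}
ego_position = frame[0]
view_radius = 10.0

# Keep only agents within view_radius of the ego (the ego itself included).
visible = {aid: pos for aid, pos in frame.items()
           if np.linalg.norm(ego_position - pos) <= view_radius}

print(sorted(visible))  # [0, 1]
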
Example 3
    def update_ego_action(self, action: ip.MacroAction, args: Dict,
                          frame: Dict[int, ip.AgentState]) -> ip.MacroAction:
        """ Update the current macro action of the ego vehicle.

        Args:
            action: new macro action to execute
            args: MA initialisation arguments
            frame: Current state of the environment

        Returns:
            The MA currently being executed by the ego
        """
        observation = ip.Observation(frame, self._scenario_map)
        ma = self._agents[self._ego_id].update_macro_action(
            action, args, observation)
        return ma
Example 4
    def __init__(self, agent_id: int, frame: Dict[int, ip.AgentState], scenario_map: ip.Map, open_loop: bool = True,
                 **kwargs):
        """ Initialise a new MacroAction (MA)

        Args:
            agent_id: The ID of the agent this MA is made for
            frame: The start state of the environment
            scenario_map: The road layout of the scenario
            open_loop: If True then use open loop control, else use closed loop control
        """
        self.open_loop = open_loop
        self.agent_id = agent_id
        self.start_frame = frame
        self.final_frame = None
        self.scenario_map = scenario_map

        self._maneuvers = self.get_maneuvers()
        self._current_maneuver = None
        self._current_maneuver_id = 0
        if not self.open_loop:
            self._advance_maneuver(ip.Observation(frame, scenario_map))
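
The open_loop flag only changes the last step of the constructor: in closed-loop mode the first maneuver is advanced against the current observation immediately, whereas in open-loop mode the maneuvers are merely planned. A rough, self-contained sketch of that split, using hypothetical toy classes rather than igp2 API:

from typing import List


class ToyManeuver:
    """Hypothetical maneuver that simply counts down a fixed number of steps."""

    def __init__(self, steps: int):
        self.steps_left = steps

    def done(self) -> bool:
        return self.steps_left <= 0

    def step(self) -> None:
        self.steps_left -= 1


class ToyMacroAction:
    """Stores a planned sequence of maneuvers; only tracks an executing
    maneuver when run in closed-loop mode."""

    def __init__(self, maneuvers: List[ToyManeuver], open_loop: bool = True):
        self.open_loop = open_loop
        self._maneuvers = maneuvers
        self._current_id = 0
        self._current = None
        if not self.open_loop:
            # Closed loop: start executing the first maneuver straight away.
            self._current = self._maneuvers[self._current_id]

    def next_step(self) -> None:
        """Closed-loop execution: advance the current maneuver and move on to
        the next one once it has finished."""
        if self._current.done() and self._current_id + 1 < len(self._maneuvers):
            self._current_id += 1
            self._current = self._maneuvers[self._current_id]
        self._current.step()
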
Example 5
    def __get_current_observation(self) -> ip.Observation:
        """ Build an ip.Observation from the current state of every vehicle in the CARLA world. """
        actor_list = self.__world.get_actors()
        vehicle_list = actor_list.filter("*vehicle*")
        agent_id_lookup = {a.actor_id: a.agent_id for a in self.agents.values() if a is not None}
        frame = {}
        for vehicle in vehicle_list:
            transform = vehicle.get_transform()
            heading = np.deg2rad(-transform.rotation.yaw)
            velocity = vehicle.get_velocity()
            acceleration = vehicle.get_acceleration()
            # The y-components and yaw are negated to convert from CARLA's left-handed
            # coordinate frame to the right-handed frame used by the planner.
            state = ip.AgentState(time=self.__timestep,
                                  position=np.array([transform.location.x, -transform.location.y]),
                                  velocity=np.array([velocity.x, -velocity.y]),
                                  acceleration=np.array([acceleration.x, -acceleration.y]),
                                  heading=heading)
            # Map the CARLA actor ID back to our own agent ID where one exists.
            agent_id = agent_id_lookup.get(vehicle.id, vehicle.id)
            frame[agent_id] = state
        return ip.Observation(frame, self.scenario_map)
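
The sign flips above come from a change of coordinate conventions: CARLA (built on Unreal Engine) uses a left-handed frame, while the planner works in a conventional right-handed frame, so the y-axis is mirrored and the yaw negated. A small standalone sketch of that conversion with made-up numbers and a hypothetical helper name:

import numpy as np


def carla_to_right_handed(location_xy, velocity_xy, yaw_deg):
    """Mirror the y-axis and negate the yaw to go from CARLA's left-handed
    frame to a right-handed frame (assumed convention; made-up helper)."""
    position = np.array([location_xy[0], -location_xy[1]])
    velocity = np.array([velocity_xy[0], -velocity_xy[1]])
    heading = np.deg2rad(-yaw_deg)
    return position, velocity, heading


# A vehicle at (10, 5) with yaw 90 degrees in CARLA maps to (10, -5)
# with heading -90 degrees in the right-handed frame.
pos, vel, heading = carla_to_right_handed((10.0, 5.0), (2.0, 0.0), 90.0)
print(pos, vel, np.rad2deg(heading))  # [10. -5.] [ 2. -0.] -90.0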