Example 1
    def __spawn_agent(self, simulation):
        """Spawn new agents acting as traffic through the given callback function. """
        spawn_points = np.array(self.spawns)
        spawn_locations = np.array([[p.location.x, p.location.y]
                                    for p in spawn_points])

        blueprint = self.__random_blueprint(simulation)

        # Calculate valid spawn points based on spawn radius
        valid_spawns = spawn_points
        if self.__ego is not None:
            ego_position = self.__ego.state.position
            ego_position[1] *= -1
            distances = np.linalg.norm(spawn_locations - ego_position, axis=1)
            valid_spawns = spawn_points[(self.__ego.view_radius <= distances)
                                        & (distances <= self.__spawn_radius)]

        # Sample spawn state and spawn actor
        try_count = 0
        speed = random.uniform(self.__speed_lims[0], self.__speed_lims[1])
        while try_count < self._max_spawn_tries:
            spawn = random.choice(valid_spawns)
            spawn.location.z = 0.1
            heading = np.deg2rad(-spawn.rotation.yaw)
            try:
                vehicle = simulation.world.spawn_actor(blueprint, spawn)
                velocity = carla.Vector3D(speed * np.cos(heading),
                                          -speed * np.sin(heading), 0.0)
                vehicle.set_target_velocity(velocity)
                break
            except Exception:  # spawn failed, e.g. due to a collision at the spawn point
                try_count += 1
        else:
            logger.debug("Couldn't spawn vehicle!")
            return

        # Create agent and set properties
        initial_state = ip.AgentState(
            time=simulation.timestep,
            position=np.array([spawn.location.x, -spawn.location.y]),
            velocity=np.array([velocity.x, -velocity.y]),
            acceleration=np.array([0.0, 0.0]),
            heading=heading)
        agent = ip.carla.TrafficAgent(vehicle.id,
                                      initial_state,
                                      fps=simulation.fps)
        self.__find_destination(agent)

        # Wrap agent for CARLA control
        agent = ip.carla.CarlaAgentWrapper(agent, vehicle)
        self.__agents[agent.agent_id] = agent
        simulation.agents[agent.agent_id] = agent

        logger.debug(
            f"Agent {agent.agent_id} spawned at {spawn.location} with speed {speed}"
        )
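CARLA uses a left-handed, y-down world frame, which is why the snippet above negates the y coordinate and the yaw before building the ip.AgentState. Below is a minimal standalone sketch of that conversion, assuming only a CARLA transform object; the helper name is hypothetical and not part of the source.

import numpy as np

def carla_transform_to_igp2(transform):
    """Map a CARLA transform to the right-handed (x, -y) position and -yaw heading used above."""
    position = np.array([transform.location.x, -transform.location.y])
    heading = np.deg2rad(-transform.rotation.yaw)  # CARLA yaw is given in degrees
    return position, heading
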
Example 2
 def get_state(self, time: float = None) -> ip.AgentState:
     """ Return current state of the vehicle. """
     return ip.AgentState(
         time=time,
         position=self.center.copy(),
         velocity=self.velocity *
         np.array([np.cos(self.heading),
                   np.sin(self.heading)]),
         acceleration=self.acceleration *
         np.array([np.cos(self.heading),
                   np.sin(self.heading)]),
         heading=self.heading)
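Here the scalar speed and acceleration magnitudes are expanded into Cartesian components along the heading. The inverse mapping recovers them with a norm and arctan2; a small round-trip check in plain NumPy, independent of any igp2 types:

import numpy as np

speed, heading = 4.0, np.pi / 6
velocity = speed * np.array([np.cos(heading), np.sin(heading)])
assert np.isclose(np.linalg.norm(velocity), speed)
assert np.isclose(np.arctan2(velocity[1], velocity[0]), heading)
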
Example 3
    def _state_from_tracks(track,
                           idx,
                           scale: float = None,
                           metadata: ip.AgentMetadata = None):
        """ Build an ip.AgentState from a single timestep of a track record. """
        time = track['frame'][idx]
        heading = np.deg2rad(track['heading'][idx])
        heading = np.unwrap([0, heading])[1]  # normalise the heading to (-pi, pi]
        position = np.array([track['xCenter'][idx],
                             track['yCenter'][idx]]) * (scale if scale else 1)
        velocity = np.array([track['xVelocity'][idx], track['yVelocity'][idx]])
        acceleration = np.array(
            [track['xAcceleration'][idx], track['yAcceleration'][idx]])

        return ip.AgentState(time, position, velocity, acceleration, heading,
                             metadata)
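The function only indexes a mapping with the column names shown above, so a plain dictionary of lists is enough to exercise it. A hypothetical usage sketch with made-up values, assuming the method is reachable as a static method:

track = {
    "frame": [0, 1],
    "heading": [45.0, 46.0],                     # degrees; converted to radians inside
    "xCenter": [12.3, 12.8], "yCenter": [-4.1, -4.0],
    "xVelocity": [3.5, 3.6], "yVelocity": [0.1, 0.1],
    "xAcceleration": [0.0, 0.0], "yAcceleration": [0.0, 0.0],
}
state = _state_from_tracks(track, idx=0)         # ip.AgentState at the first timestep
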
Example 4
    def next_state(self, observation: ip.Observation) -> ip.AgentState:
        """ Calculate next action based on trajectory, set appropriate fields in vehicle
        and returns the next agent state. """
        assert self._trajectory is not None, f"Trajectory of Agent {self.agent_id} was None!"
        if self.done(observation):
            return self.state

        action = self.next_action(observation)

        if self.open_loop:
            new_state = ip.AgentState(self._t, self._trajectory.path[self._t],
                                      self._trajectory.velocity[self._t],
                                      self._trajectory.acceleration[self._t],
                                      self._trajectory.heading[self._t])
        else:
            new_state = None

        self.vehicle.execute_action(action, new_state)
        return self.vehicle.get_state(observation.frame[self.agent_id].time +
                                      1)
Example 5
def generate_random_frame(ego: int,
                          layout: ip.Map,
                          spawn_vel_ranges: List[Tuple[ip.Box, Tuple[float, float]]]) -> Dict[int, ip.AgentState]:
    """ Generate a new frame with randomised spawns and velocities for each vehicle.

    Args:
        ego: The id of the ego
        layout: The road layout
        spawn_vel_ranges: A list of pairs of spawn ranges and velocity ranges.

    Returns:
        A new randomly generated frame
    """
    ret = {}
    for i, (spawn, vel) in enumerate(spawn_vel_ranges, ego):
        poly = Polygon(spawn.boundary)
        best_lane = None
        max_overlap = 0.0
        for road in layout.roads.values():
            for lane_section in road.lanes.lane_sections:
                for lane in lane_section.all_lanes:
                    overlap = lane.boundary.intersection(poly).area
                    if overlap > max_overlap:
                        best_lane = lane
                        max_overlap = overlap

        intersections = list(best_lane.midline.intersection(poly).coords)
        start_d = best_lane.distance_at(intersections[0])
        end_d = best_lane.distance_at(intersections[1])
        if start_d > end_d:
            start_d, end_d = end_d, start_d
        position_d = (end_d - start_d) * np.random.random() + start_d
        spawn_position = np.array(best_lane.point_at(position_d))

        ret[i] = ip.AgentState(time=0,
                               position=spawn_position,
                               velocity=(vel[1] - vel[0]) * np.random.random() + vel[0],
                               acceleration=np.array([0.0, 0.0]),
                               heading=best_lane.get_heading_at(position_d))

    return ret
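The lane-selection loop above keeps the lane whose boundary has the largest overlap area with the spawn box. The same criterion in isolation, using only shapely and made-up placeholder polygons:

from shapely.geometry import Polygon

spawn_poly = Polygon([(0, 0), (4, 0), (4, 2), (0, 2)])
lane_boundaries = {  # hypothetical lane boundaries
    "lane_a": Polygon([(0, 0), (3, 0), (3, 2), (0, 2)]),
    "lane_b": Polygon([(2, 0), (6, 0), (6, 2), (2, 2)]),
}
best_lane = max(lane_boundaries,
                key=lambda k: lane_boundaries[k].intersection(spawn_poly).area)
# best_lane == "lane_a": overlap area 6.0 vs. 4.0
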
Example 6
 def __get_current_observation(self) -> ip.Observation:
     actor_list = self.__world.get_actors()
     vehicle_list = actor_list.filter("*vehicle*")
     agent_id_lookup = dict([(a.actor_id, a.agent_id) for a in self.agents.values() if a is not None])
     frame = {}
     for vehicle in vehicle_list:
         transform = vehicle.get_transform()
         heading = np.deg2rad(-transform.rotation.yaw)
         velocity = vehicle.get_velocity()
         acceleration = vehicle.get_acceleration()
         state = ip.AgentState(time=self.__timestep,
                               position=np.array([transform.location.x, -transform.location.y]),
                               velocity=np.array([velocity.x, -velocity.y]),
                                acceleration=np.array([acceleration.x, -acceleration.y]),
                               heading=heading)
         if vehicle.id in agent_id_lookup:
             agent_id = agent_id_lookup[vehicle.id]
         else:
             agent_id = vehicle.id
         frame[agent_id] = state
     return ip.Observation(frame, self.scenario_map)
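Since the returned frame is a plain dictionary keyed by agent id, downstream code can iterate over it directly. An illustrative sketch, assuming an ip.Observation named observation obtained as above and that the AgentState fields are readable as attributes, as the other examples do for position and time:

for agent_id, state in observation.frame.items():
    print(agent_id, state.position, state.heading)
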
Example 7
    with open(filename, 'wb') as f:
        dill.dump(objects, f)


SCENARIOS = {
    "heckstrasse":
    ip.Map.parse_from_opendrive("scenarios/maps/heckstrasse.xodr"),
    "round": ip.Map.parse_from_opendrive("scenarios/maps/round.xodr"),
    "town1": ip.Map.parse_from_opendrive("scenarios/maps/town01.xodr")
}

round_frame = {
    0:
    ip.AgentState(time=0,
                  position=np.array([96.8, -0.2]),
                  velocity=4,
                  acceleration=0.0,
                  heading=-2 * np.pi / 3),
    1:
    ip.AgentState(time=0,
                  position=np.array([25.0, -36.54]),
                  velocity=4,
                  acceleration=0.0,
                  heading=-0.3),
    2:
    ip.AgentState(time=0,
                  position=np.array([133.75, -61.67]),
                  velocity=4,
                  acceleration=0.0,
                  heading=5 * np.pi / 6),
    3:
Example 8
    3: 5.5,
}

position = {
    0: np.array([6.0, 0.7]),
    1: np.array([19.7, -13.5]),
    2: np.array([73.2, -47.1]),
    3: np.array([61.7, -15.2]),
}

heckstrasse_frame = {}
for aid in position.keys():
    heckstrasse_frame[aid] = ip.AgentState(
        time=0,
        position=position[aid],
        velocity=speed[aid] * np.array(
            [np.cos(heading[aid]), np.sin(heading[aid])]),
        acceleration=np.array([0., 0.]),
        heading=heading[aid])

heckstrasse_goals = {
    0: ip.PointGoal(np.array([17.40, -4.97]), 2),
    1: ip.PointGoal(np.array([75.18, -56.65]), 2),
    2: ip.PointGoal(np.array([62.47, -17.54]), 2),
}

# CHANGE SCENARIOS HERE
scenario_map = SCENARIOS["heckstrasse"]
frame = heckstrasse_frame
goals = heckstrasse_goals
ego_id = 2
Example 9
 def final_agent_state(self) -> ip.AgentState:
     """ AgentState calculated from the final point along the path. """
     return ip.AgentState(0, self.path[-1], self.velocity[-1], self.acceleration[-1], self.heading[-1])
Example 10
 def initial_agent_state(self) -> ip.AgentState:
     """ AgentState calculated from the first point along the path. """
     return ip.AgentState(0, self.path[0], self.velocity[0], self.acceleration[0], self.heading[0])