def process_imu(self, imu_msg):
        """Invoked when an IMU message is received from the simulator.

        Sends IMU measurements to downstream operators.

        Args:
            imu_msg (carla.IMUMeasurement): IMU reading.
        """
        game_time = int(imu_msg.timestamp * 1000)
        timestamp = erdos.Timestamp(coordinates=[game_time])
        watermark_msg = erdos.WatermarkMessage(timestamp)
        with erdos.profile(self.config.name + '.process_imu',
                           self,
                           event_data={'timestamp': str(timestamp)}):
            with self._lock:
                msg = IMUMessage(
                    timestamp,
                    Transform.from_simulator_transform(imu_msg.transform),
                    Vector3D.from_simulator_vector(imu_msg.accelerometer),
                    Vector3D.from_simulator_vector(imu_msg.gyroscope),
                    imu_msg.compass)
                self._imu_stream.send(msg)
                # Note: The operator is set not to automatically propagate
                # watermarks received on input streams. Thus, we can issue
                # watermarks only after the simulator callback is invoked.
                self._imu_stream.send(watermark_msg)
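For context, a minimal sketch of how such a callback might be attached to a simulator IMU sensor. Everything in it (world, ego_vehicle, driver_op) is an assumption, not part of the snippet above:

# Hypothetical wiring: spawn a CARLA IMU sensor on the ego vehicle and
# register process_imu as its callback.
imu_blueprint = world.get_blueprint_library().find('sensor.other.imu')
imu_sensor = world.spawn_actor(imu_blueprint, carla.Transform(),
                               attach_to=ego_vehicle)
imu_sensor.listen(driver_op.process_imu)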
Example #2
    def compute_waypoints(self, source_loc: Location,
                          destination_loc: Location):
        """Computes waypoints between two locations.

        Assumes that the ego vehicle has the same orientation as the lane
        it is on.

        Args:
            source_loc (:py:class:`~pylot.utils.Location`): Source location in
                world coordinates.
            destination_loc (:py:class:`~pylot.utils.Location`): Destination
                location in world coordinates.

        Returns:
            deque(:py:class:`~pylot.utils.Transform`): Deque of waypoint
            transforms.
        """
        start_waypoint = self._get_waypoint(source_loc,
                                            project_to_road=True,
                                            lane_type=carla.LaneType.Driving)
        end_waypoint = self._get_waypoint(destination_loc,
                                          project_to_road=True,
                                          lane_type=carla.LaneType.Driving)
        assert start_waypoint and end_waypoint, 'Map could not find waypoints'
        route = self._grp.trace_route(start_waypoint.transform.location,
                                      end_waypoint.transform.location)
        # TODO(ionel): The planner returns several options in intersections.
        # We always take the first one, but this is not correct.
        return deque([
            Transform.from_simulator_transform(waypoint[0].transform)
            for waypoint in route
        ])
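A hedged usage sketch: the coordinates are made up, and hd_map stands in for an instance of the map class this method belongs to.

# Hypothetical usage: plan a route between two world locations.
source = Location(x=10.0, y=5.0, z=0.0)
destination = Location(x=150.0, y=5.0, z=0.0)
waypoints = hd_map.compute_waypoints(source, destination)
print('Route has {} waypoints'.format(len(waypoints)))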
Example #3
    def get_right_lane(self, location: Location):
        """Returns the transform of the lane to the right of the given
        location, or None if no right lane exists."""
        waypoint = self._get_waypoint(location)
        if waypoint:
            right_lane_waypoint = waypoint.get_right_lane()
            if right_lane_waypoint:
                return Transform.from_simulator_transform(
                    right_lane_waypoint.transform)
        return None
Example #4
    def get_right_lane(self, location: Location):
        """Variant of the method above that does not project the location
        onto the road and accepts any lane type."""
        waypoint = self._get_waypoint(location,
                                      project_to_road=False,
                                      lane_type=carla.LaneType.Any)
        if waypoint:
            right_lane_waypoint = waypoint.get_right_lane()
            if right_lane_waypoint:
                return Transform.from_simulator_transform(
                    right_lane_waypoint.transform)
        return None
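The two variants differ only in the _get_waypoint call: the first relies on the helper's defaults, while the second disables projection onto the road and accepts any lane type. A usage sketch, with a made-up location and a hypothetical hd_map instance:

# Hypothetical usage: look up the lane to the right of a location.
right_lane = hd_map.get_right_lane(Location(x=42.0, y=-7.0, z=0.0))
if right_lane is None:
    print('No right lane at this location.')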
Example #5
    def from_simulator_actor(cls, actor):
        """Creates a detected speed limit sign from a simulator actor.

        Args:
            actor: A simulator speed limit sign actor.

        Returns:
            :py:class:`.SpeedLimitSign`: A detected speed limit sign.
        """
        from carla import TrafficSign
        if not isinstance(actor, TrafficSign):
            raise ValueError('actor should be of type TrafficSign')
        transform = Transform.from_simulator_transform(actor.get_transform())
        speed_limit = int(actor.type_id.split('.')[-1])
        return cls(speed_limit, 1.0, id=actor.id, transform=transform)
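A sketch of how this constructor might be used to collect every speed limit sign in a running simulation; world is assumed to be a connected carla.World:

# Hypothetical usage: convert all speed limit sign actors in the world.
signs = [
    SpeedLimitSign.from_simulator_actor(actor)
    for actor in world.get_actors().filter('traffic.speed_limit.*')
]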
Example #6
    def get_closest_lane_waypoint(self, location: Location) -> Transform:
        """Returns the road closest waypoint to location.

        Args:
            location (:py:class:`~pylot.utils.Location`): Location in world
                coordinates.

        Returns:
            :py:class:`~pylot.utils.Transform`: Transform or None if no
            waypoint is found.
        """
        waypoint = self._get_waypoint(location, project_to_road=True)
        if waypoint:
            return Transform.from_simulator_transform(waypoint.transform)
        else:
            return None
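Usage mirrors the lane queries above; a hedged one-liner with a made-up location and a hypothetical hd_map instance:

# Hypothetical usage: snap a location to the nearest road waypoint.
lane_transform = hd_map.get_closest_lane_waypoint(Location(x=3.0, y=9.0, z=0.0))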
Example #7
    def process_gnss(self, gnss_msg):
        """Invoked when a GNSS measurement is received from the simulator.

        Sends GNSS measurements to downstream operators.

        Args:
            gnss_msg (carla.GnssMeasurement): GNSS reading.
        """
        game_time = int(gnss_msg.timestamp * 1000)
        timestamp = erdos.Timestamp(coordinates=[game_time])
        watermark_msg = erdos.WatermarkMessage(timestamp)
        with erdos.profile(self.config.name + '.process_gnss',
                           self,
                           event_data={'timestamp': str(timestamp)}):
            with self._lock:
                msg = GNSSMessage(
                    timestamp,
                    Transform.from_simulator_transform(gnss_msg.transform),
                    gnss_msg.altitude, gnss_msg.latitude, gnss_msg.longitude)
                self._gnss_stream.send(msg)
                self._gnss_stream.send(watermark_msg)
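The wiring mirrors the IMU case in the first snippet; a minimal sketch under the same hypothetical names:

# Hypothetical wiring: spawn a CARLA GNSS sensor on the ego vehicle and
# register process_gnss as its callback.
gnss_blueprint = world.get_blueprint_library().find('sensor.other.gnss')
gnss_sensor = world.spawn_actor(gnss_blueprint, carla.Transform(),
                                attach_to=ego_vehicle)
gnss_sensor.listen(driver_op.process_gnss)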
Example #8
def main(args):
    """ The main function that orchestrates the setup of the world, connection
    to the scenario and the subsequent logging of the frames for computation
    of the mIoU.

    Args:
        args: The argparse.Namespace instance that is retrieved from parsing
        the arguments.
    """

    # Setup the world.
    world = setup_world(args.host, args.port, args.delta)

    # Retrieve the ego-vehicle from the simulation.
    ego_vehicle = None
    while ego_vehicle is None:
        print("Waiting for the scenario to be ready ...")
        sleep(1)
        ego_vehicle = retrieve_actor(world, 'vehicle.*', 'hero')
        world.tick()

    # Transform of the cameras.
    camera_transform = carla.Transform(location=carla.Location(1.0, 0.0, 1.8),
                                       rotation=carla.Rotation(0, 0, 0))

    # Connect the RGB camera to the vehicle.
    rgb_camera = spawn_camera('sensor.camera.rgb', camera_transform,
                              ego_vehicle, *args.res.split('x'))

    # Connect the Semantic segmentation camera to the vehicle.
    semantic_camera = spawn_camera('sensor.camera.semantic_segmentation',
                                   camera_transform, ego_vehicle,
                                   *args.res.split('x'))

    # Connect the depth camera to the vehicle.
    depth_camera = spawn_camera('sensor.camera.depth', camera_transform,
                                ego_vehicle, *args.res.split('x'))

    # Open the CSV file for writing.
    csv_file = open(args.output, 'w')
    csv_writer = csv.writer(csv_file)

    # Create the cleanup function.
    global CLEANUP_FUNCTION
    CLEANUP_FUNCTION = functools.partial(
        cleanup_function,
        world=world,
        cameras=[rgb_camera, semantic_camera, depth_camera],
        csv_file=csv_file)

    # Create a PyGame surface for debugging purposes.
    width, height = map(int, args.res.split('x'))
    surface = None
    if args.visualize:
        surface = pygame.display.set_mode((width, height),
                                          pygame.HWSURFACE | pygame.DOUBLEBUF)

    # Register a callback function with the camera.
    rgb_camera.listen(process_rgb_images)
    semantic_camera.listen(process_semantic_images)

    depth_camera_setup = DepthCameraSetup(
        "depth_camera", width, height,
        Transform.from_simulator_transform(camera_transform))
    depth_camera.listen(
        functools.partial(process_depth_images,
                          depth_camera_setup=depth_camera_setup,
                          ego_vehicle=ego_vehicle,
                          speed=args.speed,
                          csv=csv_writer,
                          surface=surface,
                          visualize=args.visualize))

    try:
        # Sleep to keep the main thread alive so that the sensor callbacks
        # can process the incoming images.
        while True:
            sleep(1)
    except KeyboardInterrupt:
        CLEANUP_FUNCTION()
        sys.exit(0)
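Judging by the attributes read from args, the script presumably builds a parser along these lines; the flag names and defaults below are assumptions, not taken from the source:

# Hypothetical argparse setup inferred from the attributes used in main().
import argparse

parser = argparse.ArgumentParser(description='Evaluation script.')
parser.add_argument('--host', default='localhost', help='Simulator host.')
parser.add_argument('--port', default=2000, type=int, help='Simulator port.')
parser.add_argument('--delta', default=0.05, type=float,
                    help='Fixed simulation step in seconds.')
parser.add_argument('--res', default='1280x720', help='Camera resolution WxH.')
parser.add_argument('--output', default='results.csv', help='Output CSV path.')
parser.add_argument('--speed', default=10.0, type=float,
                    help='Ego vehicle speed in m/s.')
parser.add_argument('--visualize', action='store_true',
                    help='Show a PyGame debug window.')
main(parser.parse_args())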
Example #9
def process_depth_images(msg,
                         depth_camera_setup,
                         ego_vehicle,
                         speed,
                         csv,
                         surface,
                         visualize=False):
    print("Received a message for the time: {}".format(msg.timestamp))

    # If we are within 5 meters of the destination, stop the vehicle and
    # exit with success.
    if ego_vehicle.get_location().distance(VEHICLE_DESTINATION) <= 5:
        ego_vehicle.set_velocity(carla.Vector3D())
        CLEANUP_FUNCTION()
        sys.exit(0)

    # Get the RGB image corresponding to the given depth image timestamp.
    rgb_image = retrieve_rgb_image(msg.timestamp)

    # Get the semantic image corresponding to the given depth image timestamp.
    # The segmented frame is constructed further below, once the depth frame
    # and its camera setup are available.
    semantic_image = retrieve_semantic_image(msg.timestamp)

    # Visualize the image and the bounding boxes if needed.
    if visualize:
        draw_image(rgb_image, surface)

    # Transform people into obstacles relative to the current frame.
    bb_surface = None
    resolution = (depth_camera_setup.width, depth_camera_setup.height)
    if visualize:
        bb_surface = pygame.Surface(resolution)
        bb_surface.set_colorkey((0, 0, 0))

    vehicle_transform = Transform.from_simulator_transform(
        ego_vehicle.get_transform())

    depth_frame = DepthFrame.from_simulator_frame(msg, depth_camera_setup)
    # Transform the static camera setup with respect to the location of the
    # vehicle in the world.
    depth_frame.camera_setup.set_transform(vehicle_transform *
                                           depth_frame.camera_setup.transform)
    # Construct the segmented frame here (rather than earlier) so that
    # depth_frame is defined before its camera setup is used.
    semantic_frame = SegmentedFrame.from_simulator_image(
        semantic_image, depth_frame.camera_setup)

    detected_people = []
    for person in ego_vehicle.get_world().get_actors().filter('walker.*'):
        obstacle = Obstacle.from_simulator_actor(person)
        if obstacle._distance(vehicle_transform) > 125:
            bbox = None
        else:
            bbox = obstacle.populate_bounding_box_2D(depth_frame,
                                                     semantic_frame.frame)
        if bbox is not None:
            detected_people.append(bbox)
            if visualize:
                draw_bounding_box(bbox, bb_surface)

    # We have drawn all the bounding boxes on the bb_surface, now put it on
    # the RGB image surface.
    if visualize:
        surface.blit(bb_surface, (0, 0))
        pygame.display.flip()

    # Compute the mAP.
    print("We detected a total of {} people.".format(len(detected_people)))
    compute_and_log_map(detected_people, msg.timestamp, csv)

    # Move the ego_vehicle according to the given speed.
    ego_vehicle.set_velocity(carla.Vector3D(x=-speed))

    ego_vehicle.get_world().tick()
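Since the callback drives the simulation by calling world.tick(), the simulator must run in synchronous mode with a fixed time step. setup_world is not shown in the source; a sketch of what it presumably does:

# Hypothetical setup_world: connect to the simulator and enable synchronous
# mode with a fixed step so that world.tick() advances the simulation.
def setup_world(host, port, delta):
    client = carla.Client(host, port)
    client.set_timeout(10.0)
    world = client.get_world()
    settings = world.get_settings()
    settings.synchronous_mode = True
    settings.fixed_delta_seconds = delta
    world.apply_settings(settings)
    return world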