Example #1
def log_bounding_boxes(carla_image, depth_msg, segmented_frame, traffic_lights,
                       tl_color, speed_signs, stop_signs, weather, town):
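    """Saves the RGB frame, an optional annotated copy, and a JSON file with
    the detected speed-limit, stop-sign, and traffic-light bounding boxes for
    a single simulator tick."""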
    game_time = int(carla_image.timestamp * 1000)
    print("Processing game time {} in {} with weather {}".format(
        game_time, town, weather))
    transform = pylot.utils.Transform.from_carla_transform(
        carla_image.transform)
    camera_setup = RGBCameraSetup("rgb_camera", FLAGS.frame_width,
                                  FLAGS.camera_height, transform,
                                  FLAGS.camera_fov)
    frame = CameraFrame.from_carla_frame(carla_image, camera_setup)
    _, world = get_world()
    town_name = world.get_map().name

    speed_limit_det_obstacles = []
    if speed_signs:
        speed_limit_det_obstacles = pylot.simulation.utils.get_detected_speed_limits(
            speed_signs, depth_msg.frame, segmented_frame)

    traffic_stop_det_obstacles = []
    if stop_signs:
        traffic_stop_det_obstacles = pylot.simulation.utils.get_detected_traffic_stops(
            stop_signs, depth_msg.frame)

    traffic_light_det_obstacles = []
    if traffic_lights:
        traffic_light_det_obstacles = get_traffic_light_obstacles(
            traffic_lights, depth_msg.frame, segmented_frame, tl_color,
            town_name)

    det_obstacles = (speed_limit_det_obstacles + traffic_stop_det_obstacles +
                     traffic_light_det_obstacles)
    # Log the frame.
    file_name = '{}signs-{}_{}_{}.png'.format(FLAGS.data_path, game_time,
                                              weather, town)
    rgb_img = Image.fromarray(frame.as_rgb_numpy_array())
    rgb_img.save(file_name)

    if FLAGS.log_bbox_images:
        frame.annotate_with_bounding_boxes(game_time, det_obstacles)
        file_name = '{}annotated-signs-{}_{}_{}.png'.format(
            FLAGS.data_path, game_time, weather, town)
        rgb_img = Image.fromarray(frame.as_rgb_numpy_array())
        rgb_img.save(file_name)

    # Log the bounding boxes.
    bboxes = [obstacle.get_bbox_label() for obstacle in det_obstacles]
    file_name = '{}bboxes-{}_{}_{}.json'.format(FLAGS.data_path, game_time,
                                                weather, town)
    with open(file_name, 'w') as outfile:
        json.dump(bboxes, outfile)

    if FLAGS.visualize_bboxes:
        frame.annotate_with_bounding_boxes(game_time, det_obstacles)
        frame.visualize('bboxes')
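
Each call above writes one signs-*.png frame and one matching bboxes-*.json label file keyed by game time, weather, and town. The pair can be reloaded for a quick sanity check as sketched below; the data/ prefix and the sample key are placeholders, not values taken from the example:

import json
from PIL import Image

# Hypothetical sample key; substitute values actually produced by
# log_bounding_boxes().
data_path, game_time, weather, town = 'data/', 81450, 'ClearNoon', 'Town01'

# Reload the logged RGB frame and its bounding-box labels.
rgb_img = Image.open('{}signs-{}_{}_{}.png'.format(data_path, game_time,
                                                   weather, town))
with open('{}bboxes-{}_{}_{}.json'.format(data_path, game_time, weather,
                                          town)) as infile:
    bboxes = json.load(infile)
print(rgb_img.size, len(bboxes))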

Example #2
def on_camera_msg(carla_image):
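    """Camera callback: stores the carla.Image as a CameraFrame in the
    module-level last_frame variable."""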
    game_time = int(carla_image.timestamp * 1000)
    print("Received camera msg {}".format(game_time))

    camera_transform = pylot.utils.Transform.from_carla_transform(
        carla_image.transform)

    camera_setup = CameraSetup("rgb_camera",
                               "sensor.camera.rgb",
                               800,
                               600,
                               camera_transform,
                               fov=90.0)
    global last_frame
    last_frame = CameraFrame.from_carla_frame(carla_image, camera_setup)
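
on_camera_msg() is meant to be registered as the callback of a CARLA camera sensor. A minimal sketch of that wiring is shown below; it assumes a connected carla.World handle named world and an already spawned ego vehicle actor, neither of which appears in the example above:

import carla

# Attach an 800x600 RGB camera to the ego vehicle and register the callback.
bp = world.get_blueprint_library().find('sensor.camera.rgb')
bp.set_attribute('image_size_x', '800')
bp.set_attribute('image_size_y', '600')
bp.set_attribute('fov', '90')
camera_transform = carla.Transform(carla.Location(x=1.5, z=2.4))
camera = world.spawn_actor(bp, camera_transform, attach_to=vehicle)
camera.listen(on_camera_msg)  # Invoked once per simulated camera frame.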

Example #3
    def process_images(self, carla_image):
        """ Invoked when an image is received from the simulator.

        Args:
            carla_image: a carla.Image.
        """
        game_time = int(carla_image.timestamp * 1000)
        timestamp = erdos.Timestamp(coordinates=[game_time])
        watermark_msg = erdos.WatermarkMessage(timestamp)
        with erdos.profile(self.config.name + '.process_images',
                           self,
                           event_data={'timestamp': str(timestamp)}):
            # Ensure that the code executes serially
            with self._lock:
                msg = None
                if self._camera_setup.camera_type == 'sensor.camera.rgb':
                    msg = FrameMessage(
                        timestamp,
                        CameraFrame.from_carla_frame(carla_image,
                                                     self._camera_setup))
                elif self._camera_setup.camera_type == 'sensor.camera.depth':
                    # Include the transform relative to the vehicle. CARLA's
                    # carla_image.transform is the world transform, not the
                    # vehicle-relative one, so we do not use it directly.
                    msg = DepthFrameMessage(
                        timestamp,
                        DepthFrame.from_carla_frame(
                            carla_image,
                            self._camera_setup,
                            save_original_frame=self._flags.visualize_depth_camera))
                elif (self._camera_setup.camera_type ==
                      'sensor.camera.semantic_segmentation'):
                    msg = SegmentedFrameMessage(
                        timestamp,
                        SegmentedFrame.from_carla_image(
                            carla_image, self._camera_setup))

                if self._release_data:
                    self._camera_stream.send(msg)
                    self._camera_stream.send(watermark_msg)
                else:
                    # Pickle the data, and release it upon release msg receipt.
                    pickled_msg = pickle.dumps(
                        msg, protocol=pickle.HIGHEST_PROTOCOL)
                    with self._pickle_lock:
                        self._pickled_messages[msg.timestamp] = pickled_msg
                    self._notify_reading_stream.send(watermark_msg)
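
When _release_data is false, the frame is pickled and buffered instead of being sent, and only a watermark is pushed on the notify stream. The counterpart that forwards the buffered frame once a release message arrives is not part of this excerpt; a hypothetical sketch of what it could look like, reusing only the bookkeeping shown above, is:

import pickle

    def on_release_msg(self, msg):
        # Hypothetical release handler: unpickle the frame that was buffered
        # for this timestamp in process_images() and forward it downstream.
        with self._pickle_lock:
            pickled_msg = self._pickled_messages.pop(msg.timestamp)
        self._camera_stream.send(pickle.loads(pickled_msg))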

Example #4
    def process_images(self, carla_image):
        """ Invoked when an image is received from the simulator.

        Args:
            carla_image: a carla.Image.
        """
        # Ensure that the code executes serially
        with self._lock:
            game_time = int(carla_image.timestamp * 1000)
            timestamp = erdos.Timestamp(coordinates=[game_time])
            watermark_msg = erdos.WatermarkMessage(timestamp)

            msg = None
            if self._camera_setup.camera_type == 'sensor.camera.rgb':
                msg = FrameMessage(
                    timestamp,
                    CameraFrame.from_carla_frame(carla_image,
                                                 self._camera_setup))
            elif self._camera_setup.camera_type == 'sensor.camera.depth':
                # Include the transform relative to the vehicle. CARLA's
                # carla_image.transform is the world transform, not the
                # vehicle-relative one, so we do not use it directly.
                msg = DepthFrameMessage(
                    timestamp,
                    DepthFrame.from_carla_frame(carla_image,
                                                self._camera_setup))
            elif (self._camera_setup.camera_type ==
                  'sensor.camera.semantic_segmentation'):
                msg = SegmentedFrameMessage(
                    timestamp,
                    SegmentedFrame.from_carla_image(carla_image,
                                                    self._camera_setup))
            # Send the message containing the frame.
            self._camera_stream.send(msg)
            # Note: The operator is set not to automatically propagate
            # watermark messages received on input streams. Thus, we can
            # issue watermarks only after the Carla callback is invoked.
            self._camera_stream.send(watermark_msg)