Example #1
    def on_msg_camera_stream(self, msg, detected_lanes_stream):
        """Invoked whenever a frame message is received on the stream.

        Args:
            msg: A :py:class:`~pylot.perception.messages.FrameMessage`.
            detected_lanes_stream (:py:class:`erdos.WriteStream`): Stream on
                which the operator sends
                :py:class:`~pylot.perception.messages.DetectedLaneMessage`
                messages.
        """
        self._logger.debug('@{}: {} received message'.format(
            msg.timestamp, self.config.name))
        assert msg.frame.encoding == 'BGR', 'Expects BGR frames'
        # Make a copy of the image coming into the operator.
        image = np.copy(msg.frame.as_numpy_array())

        # Get the dimensions of the image.
        x_lim, y_lim = image.shape[1], image.shape[0]

        # Convert to grayscale.
        image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

        # Apply Gaussian blur.
        image = cv2.GaussianBlur(image, (self._kernel_size, self._kernel_size),
                                 0)

        # Apply the Canny Edge Detector.
        image = cv2.Canny(image, 30, 60)

        # Define a region of interest.
        points = np.array(
            [[
                (0, y_lim),  # Bottom left corner.
                (0, y_lim - 60),
                (x_lim // 2 - 20, y_lim // 2),
                (x_lim // 2 + 20, y_lim // 2),
                (x_lim, y_lim - 60),
                (x_lim, y_lim),  # Bottom right corner.
            ]],
            dtype=np.int32)
        image = self._region_of_interest(image, points)

        # Detect lane-line segments with the Hough transform and draw them.
        image = self._draw_lines(image)

        if self._flags.visualize_lane_detection:
            final_img = np.copy(msg.frame.as_numpy_array())
            final_img = cv2.addWeighted(final_img, 0.8, image, 1.0, 0.0)
            frame = CameraFrame(final_img, 'BGR', msg.frame.camera_setup)
            frame.visualize(self.config.name,
                            msg.timestamp,
                            pygame_display=pylot.utils.PYGAME_DISPLAY)

        detected_lanes_stream.send(erdos.Message(msg.timestamp, image))
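
The helpers `_region_of_interest` and `_draw_lines` are referenced above but not shown. Below is a minimal sketch of common implementations (assumed, not taken from the source); the Hough parameters are illustrative.

    # Hedged sketch: plausible bodies for the two helpers used above. The
    # names come from the example; the implementations are assumptions.
    def _region_of_interest(self, image, points):
        # Keep only the pixels inside the polygon defined by `points`.
        mask = np.zeros_like(image)
        cv2.fillPoly(mask, points, 255)
        return cv2.bitwise_and(image, mask)

    def _draw_lines(self, image):
        # Detect line segments on the edge image and draw them on a blank
        # 3-channel canvas, so the result can be blended with the BGR frame.
        lines = cv2.HoughLinesP(image, rho=1, theta=np.pi / 180, threshold=20,
                                minLineLength=20, maxLineGap=300)
        line_img = np.zeros((image.shape[0], image.shape[1], 3),
                            dtype=np.uint8)
        if lines is not None:
            for x1, y1, x2, y2 in lines.reshape(-1, 4):
                cv2.line(line_img, (x1, y1), (x2, y2), (255, 0, 0), 2)
        return line_img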
Example #2
    def on_msg_camera_stream(self, msg, detected_lanes_stream):
        self._logger.debug('@{}: {} received message'.format(
            msg.timestamp, self._name))
        start_time = time.time()
        assert msg.frame.encoding == 'BGR', 'Expects BGR frames'
        image_np = msg.frame.frame

        # TODO(ionel): Implement lane detection.
        edges = self.apply_canny(image_np)
        lines_edges = self.apply_hough(image_np, edges)

        kernel_size = 3
        grad_x = self.apply_sobel(image_np,
                                  orient='x',
                                  sobel_kernel=kernel_size,
                                  thresh_min=0,
                                  thresh_max=255)
        grad_y = self.apply_sobel(image_np,
                                  orient='y',
                                  sobel_kernel=kernel_size,
                                  thresh_min=0,
                                  thresh_max=255)
        mag_binary = self.magnitude_threshold(image_np,
                                              sobel_kernel=kernel_size,
                                              thresh_min=0,
                                              thresh_max=255)
        dir_binary = self.direction_threshold(image_np,
                                              sobel_kernel=kernel_size,
                                              thresh_min=0,
                                              thresh_max=np.pi / 2)

        s_binary = self.extract_s_channel(image_np)

        # Select the pixels where both the x and y gradients meet the
        # threshold criteria, or the gradient magnitude and direction are
        # both within the threshold values.
        combined = np.zeros_like(dir_binary)
        combined[((grad_x == 1) & (grad_y == 1)) | ((mag_binary == 1) &
                                                    (dir_binary == 1))] = 1

        combined_binary = np.zeros_like(grad_x)
        combined_binary[(s_binary == 1) | (grad_x == 1)] = 1

        # Get runtime in ms.
        runtime = (time.time() - start_time) * 1000
        self._csv_logger.info('{},{},"{}",{}'.format(time_epoch_ms(),
                                                     self._name, msg.timestamp,
                                                     runtime))

        if self._flags.visualize_lane_detection:
            frame = CameraFrame(lines_edges, 'BGR', msg.frame.camera_setup)
            frame.visualize(self._name, msg.timestamp)

        detected_lanes_stream.send(erdos.Message(msg.timestamp, image_np))
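
The gradient helpers used above follow the standard thresholded-Sobel recipe for lane detection; a minimal sketch of `apply_sobel` and `extract_s_channel` under that assumption (not taken from the source):

    # Hedged sketch (assumption): thresholded Sobel gradient along one axis.
    def apply_sobel(self, image, orient='x', sobel_kernel=3, thresh_min=0,
                    thresh_max=255):
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        dx, dy = (1, 0) if orient == 'x' else (0, 1)
        abs_sobel = np.absolute(
            cv2.Sobel(gray, cv2.CV_64F, dx, dy, ksize=sobel_kernel))
        scaled = np.uint8(255 * abs_sobel / np.max(abs_sobel))
        # Binary image: 1 where the scaled gradient falls inside the window.
        binary = np.zeros_like(scaled)
        binary[(scaled >= thresh_min) & (scaled <= thresh_max)] = 1
        return binary

    # Hedged sketch (assumption): saturation-channel threshold in HLS space;
    # the default thresholds are illustrative.
    def extract_s_channel(self, image, thresh_min=170, thresh_max=255):
        s_channel = cv2.cvtColor(image, cv2.COLOR_BGR2HLS)[:, :, 2]
        binary = np.zeros_like(s_channel)
        binary[(s_channel >= thresh_min) & (s_channel <= thresh_max)] = 1
        return binary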
Example #3
    def run_step(self, input_data, timestamp):
        self._logger.debug("Current game time {}".format(timestamp))
        erdos_timestamp = erdos.Timestamp(coordinates=[timestamp])

        self.send_waypoints_msg(erdos_timestamp)

        for key, val in input_data.items():
            # print("{} {} {}".format(key, val[0], type(val[1])))
            if key in self._camera_streams:
                self._camera_streams[key].send(
                    pylot.perception.messages.FrameMessage(
                        CameraFrame(val[1], 'BGR', self._camera_setups[key]),
                        erdos_timestamp))
                self._camera_streams[key].send(
                    erdos.WatermarkMessage(erdos_timestamp))
            elif key == 'can_bus':
                self.send_can_bus_msg(val[1], erdos_timestamp)
            elif key == 'hdmap':
                self.send_hd_map_msg(val[1], erdos_timestamp)
            elif key == 'LIDAR':
                self.send_lidar_msg(val[1], self._lidar_transform,
                                    erdos_timestamp)
            else:
                self._logger.warning("Sensor {} not used".format(key))

        # Wait until the control is set.
        control_msg = self._control_stream.read()
        output_control = carla.VehicleControl()
        output_control.throttle = control_msg.throttle
        output_control.brake = control_msg.brake
        output_control.steer = control_msg.steer
        output_control.reverse = control_msg.reverse
        output_control.hand_brake = control_msg.hand_brake
        output_control.manual_gear_shift = False
        return output_control
Example #4
def log_bounding_boxes(simulator_image, depth_msg, segmented_frame,
                       traffic_lights, tl_color, speed_signs, stop_signs,
                       weather, town):
    game_time = int(simulator_image.timestamp * 1000)
    print("Processing game time {} in {} with weather {}".format(
        game_time, town, weather))
    transform = pylot.utils.Transform.from_simulator_transform(
        simulator_image.transform)
    camera_setup = RGBCameraSetup("rgb_camera", FLAGS.frame_width,
                                  FLAGS.camera_height, transform,
                                  FLAGS.camera_fov)
    frame = CameraFrame.from_simulator_frame(simulator_image, camera_setup)
    _, world = get_world()
    town_name = world.get_map().name

    speed_limit_det_obstacles = []
    if speed_signs:
        speed_limit_det_obstacles = \
            pylot.simulation.utils.get_detected_speed_limits(
                speed_signs, depth_msg.frame, segmented_frame)

    traffic_stop_det_obstacles = []
    if stop_signs:
        traffic_stop_det_obstacles = \
            pylot.simulation.utils.get_detected_traffic_stops(
                stop_signs, depth_msg.frame)

    traffic_light_det_obstacles = []
    if traffic_lights:
        traffic_light_det_obstacles = get_traffic_light_obstacles(
            traffic_lights, depth_msg.frame, segmented_frame, tl_color,
            town_name)

    det_obstacles = (speed_limit_det_obstacles + traffic_stop_det_obstacles +
                     traffic_light_det_obstacles)
    # Log the frame.
    file_name = '{}signs-{}_{}_{}.png'.format(FLAGS.data_path, game_time,
                                              weather, town)
    rgb_img = Image.fromarray(frame.as_rgb_numpy_array())
    rgb_img.save(file_name)

    if FLAGS.log_bbox_images:
        frame.annotate_with_bounding_boxes(game_time, det_obstacles)
        file_name = '{}annotated-signs-{}_{}_{}.png'.format(
            FLAGS.data_path, game_time, weather, town)
        rgb_img = Image.fromarray(frame.as_rgb_numpy_array())
        rgb_img.save(file_name)

    # Log the bounding boxes.
    bboxes = [obstacle.get_in_log_format() for obstacle in det_obstacles]
    file_name = '{}bboxes-{}_{}_{}.json'.format(FLAGS.data_path, game_time,
                                                weather, town)
    with open(file_name, 'w') as outfile:
        json.dump(bboxes, outfile)

    if FLAGS.visualize_bboxes:
        frame.annotate_with_bounding_boxes(game_time, det_obstacles)
        frame.visualize('bboxes')
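
`get_world` comes from the simulation utilities and is not shown; a minimal sketch consistent with its use above (the host, port, and timeout defaults are assumptions):

# Hedged sketch (assumption): connect to a running simulator instance and
# return the client together with the world handle.
def get_world(host='localhost', port=2000, timeout=10):
    client = carla.Client(host, port)
    client.set_timeout(timeout)
    return client, client.get_world()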
Example #5
    def on_position_update(self, timestamp: Timestamp,
                           detected_lane_stream: WriteStream):
        """Invoked on the receipt of an update to the position of the vehicle.

        Uses the position of the vehicle to get future waypoints and draw
        lane markings using those waypoints.

        Args:
            timestamp (:py:class:`erdos.Timestamp`): Timestamp for which the
                buffered camera frame and pose messages are processed.
            detected_lane_stream (:py:class:`erdos.WriteStream`): Stream on
                which the operator sends the detected lanes.
        """
        self._logger.debug('@{}: received watermark'.format(timestamp))
        bgr_msg = self._bgr_msgs.popleft()
        pose_msg = self._pose_msgs.popleft()
        vehicle_location = pose_msg.data.transform.location
        if self._map:
            lanes = self._map.get_all_lanes(vehicle_location)
            for lane in lanes:
                lane.draw_on_world(self._world)
            if self._flags.log_lane_detection_camera:
                camera_setup = bgr_msg.frame.camera_setup
                black_img = np.zeros(
                    (camera_setup.height, camera_setup.width, 3),
                    dtype=np.dtype("uint8"))
                frame = CameraFrame(black_img, 'BGR', camera_setup)
                for lane in lanes:
                    lane.draw_on_frame(frame,
                                       inverse_transform=pose_msg.data.
                                       transform.inverse_transform())
                self._logger.debug('@{}: detected {} lanes'.format(
                    bgr_msg.timestamp, len(lanes)))
                frame.save(bgr_msg.timestamp.coordinates[0],
                           self._flags.data_path, "lane")
        else:
            self._logger.debug('@{}: map is not ready yet'.format(
                pose_msg.timestamp))
            lanes = []
        output_msg = LanesMessage(pose_msg.timestamp, lanes)
        detected_lane_stream.send(output_msg)
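
The deques popped at the top of the method imply companion callbacks that buffer incoming camera and pose messages until they are consumed; a minimal sketch under that assumption (the callback names are hypothetical):

    # Hedged sketch (assumption): buffering callbacks registered on the
    # camera and pose input streams; they feed the deques popped above.
    def on_bgr_camera_update(self, msg):
        self._bgr_msgs.append(msg)

    def on_pose_msg_update(self, msg):
        self._pose_msgs.append(msg)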
Example #6
def on_camera_msg(carla_image):
    game_time = int(carla_image.timestamp * 1000)
    print("Received camera msg {}".format(game_time))

    camera_transform = pylot.utils.Transform.from_carla_transform(
        carla_image.transform)

    camera_setup = CameraSetup("rgb_camera",
                               "sensor.camera.rgb",
                               800,
                               600,
                               camera_transform,
                               fov=90.0)
    global last_frame
    last_frame = CameraFrame.from_carla_frame(carla_image, camera_setup)
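
A usage sketch of how the callback above would typically be attached to a simulator camera (the connection parameters, blueprint lookup, and transform are assumptions):

# Hedged usage sketch (assumption): connect to the simulator, spawn an RGB
# camera, and register on_camera_msg so it runs on every captured frame.
client = carla.Client('localhost', 2000)
world = client.get_world()
blueprint = world.get_blueprint_library().find('sensor.camera.rgb')
camera = world.spawn_actor(blueprint, carla.Transform())
camera.listen(on_camera_msg)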
Example #7
    def process_images(self, simulator_image):
        """ Invoked when an image is received from the simulator.

        Args:
            simulator_image: a carla.Image.
        """
        game_time = int(simulator_image.timestamp * 1000)
        timestamp = erdos.Timestamp(coordinates=[game_time])
        watermark_msg = erdos.WatermarkMessage(timestamp)
        with erdos.profile(self.config.name + '.process_images',
                           self,
                           event_data={'timestamp': str(timestamp)}):
            # Ensure that the code executes serially
            with self._lock:
                msg = None
                if self._camera_setup.camera_type == 'sensor.camera.rgb':
                    msg = FrameMessage(
                        timestamp,
                        CameraFrame.from_simulator_frame(
                            simulator_image, self._camera_setup))
                elif self._camera_setup.camera_type == 'sensor.camera.depth':
                    # Include the transform relative to the vehicle.
                    # simulator_image.transform returns the world transform,
                    # but we do not use it directly.
                    msg = DepthFrameMessage(
                        timestamp,
                        DepthFrame.from_simulator_frame(
                            simulator_image,
                            self._camera_setup,
                            save_original_frame=self._flags.
                            visualize_depth_camera))
                elif (self._camera_setup.camera_type ==
                      'sensor.camera.semantic_segmentation'):
                    msg = SegmentedFrameMessage(
                        timestamp,
                        SegmentedFrame.from_simulator_image(
                            simulator_image, self._camera_setup))

                if self._release_data:
                    self._camera_stream.send(msg)
                    self._camera_stream.send(watermark_msg)
                else:
                    # Pickle the data, and release it upon release msg receipt.
                    pickled_msg = pickle.dumps(
                        msg, protocol=pickle.HIGHEST_PROTOCOL)
                    with self._pickle_lock:
                        self._pickled_messages[msg.timestamp] = pickled_msg
                    self._notify_reading_stream.send(watermark_msg)
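
The pickling branch buffers messages until a release notification arrives; a minimal sketch of the matching release callback under that assumption (the method name is hypothetical):

    # Hedged sketch (assumption): on a release notification, forward the
    # buffered message for that timestamp and follow it with a watermark.
    def on_release_msg(self, timestamp):
        with self._pickle_lock:
            pickled_msg = self._pickled_messages.pop(timestamp)
        msg = pickle.loads(pickled_msg)
        self._camera_stream.send(msg)
        self._camera_stream.send(erdos.WatermarkMessage(timestamp))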
Example #8
    def on_camera_frame(self, data):
        self._counter += 1
        if self._counter % self._modulo_to_send != 0:
            return
        cv2_image = self._bridge.imgmsg_to_cv2(data, "bgr8")
        resized_image = cv2.resize(
            cv2.flip(cv2_image, -1),
            (self._flags.camera_image_width, self._flags.camera_image_height))
        numpy_array = np.asarray(resized_image)
        timestamp = erdos.Timestamp(coordinates=[self._msg_cnt])
        camera_frame = CameraFrame(numpy_array, 'BGR', self._camera_setup)
        self._camera_stream.send(FrameMessage(timestamp, camera_frame))
        watermark_msg = erdos.WatermarkMessage(timestamp)
        self._camera_stream.send(watermark_msg)
        self._logger.debug('@{}: sent message'.format(timestamp))
        self._msg_cnt += 1
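
How the callback gets invoked is not shown; a minimal sketch of the typical ROS wiring, assuming a run method, with a hypothetical node and topic name:

    # Hedged sketch (assumption): register the callback as a ROS subscriber
    # on the camera topic and hand control to the ROS event loop.
    def run(self):
        import rospy
        from sensor_msgs.msg import Image
        rospy.init_node('camera_driver', anonymous=True, disable_signals=True)
        rospy.Subscriber('/camera/image_color', Image, self.on_camera_frame)
        rospy.spin()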
Example #9
    def run_step(self, input_data, timestamp):
        game_time = int(timestamp * 1000)
        self._logger.debug("Current game time {}".format(game_time))
        erdos_timestamp = erdos.Timestamp(coordinates=[game_time])

        if not self._sent_open_drive and \
               self.track != Track.ALL_SENSORS_HDMAP_WAYPOINTS:
            # We do not have access to the open drive map. Send top watermark.
            self._sent_open_drive = True
            self._open_drive_stream.send(
                erdos.WatermarkMessage(erdos.Timestamp(is_top=True)))

        self.send_waypoints_msg(erdos_timestamp)

        for key, val in input_data.items():
            # print("{} {} {}".format(key, val[0], type(val[1])))
            if key in self._camera_streams:
                self._camera_streams[key].send(
                    pylot.perception.messages.FrameMessage(
                        erdos_timestamp,
                        CameraFrame(val[1][:, :, :3], 'BGR',
                                    self._camera_setups[key])))
                self._camera_streams[key].send(
                    erdos.WatermarkMessage(erdos_timestamp))
            elif key == 'can_bus':
                self.send_pose_msg(val[1], erdos_timestamp)
            elif key == 'hdmap':
                self.send_hd_map_msg(val[1], erdos_timestamp)
            elif key == 'LIDAR':
                self.send_lidar_msg(val[1], self._lidar_transform,
                                    erdos_timestamp)
            else:
                self._logger.warning("Sensor {} not used".format(key))

        # Wait until the control is set.
        while True:
            control_msg = self._control_stream.read()
            if not isinstance(control_msg, erdos.WatermarkMessage):
                output_control = carla.VehicleControl()
                output_control.throttle = control_msg.throttle
                output_control.brake = control_msg.brake
                output_control.steer = control_msg.steer
                output_control.reverse = control_msg.reverse
                output_control.hand_brake = control_msg.hand_brake
                output_control.manual_gear_shift = False
                return output_control
Example #10
    def process_images(self, carla_image):
        """ Invoked when an image is received from the simulator.

        Args:
            carla_image: a carla.Image.
        """
        # Ensure that the code executes serially
        with self._lock:
            game_time = int(carla_image.timestamp * 1000)
            timestamp = erdos.Timestamp(coordinates=[game_time])
            watermark_msg = erdos.WatermarkMessage(timestamp)

            msg = None
            if self._camera_setup.camera_type == 'sensor.camera.rgb':
                msg = FrameMessage(
                    timestamp,
                    CameraFrame.from_carla_frame(carla_image,
                                                 self._camera_setup))
            elif self._camera_setup.camera_type == 'sensor.camera.depth':
                # Include the transform relative to the vehicle.
                # carla_image.transform returns the world transform, but
                # we do not use it directly.
                msg = DepthFrameMessage(
                    timestamp,
                    DepthFrame.from_carla_frame(carla_image,
                                                self._camera_setup))
            elif self._camera_setup.camera_type == \
                 'sensor.camera.semantic_segmentation':
                msg = SegmentedFrameMessage(
                    timestamp,
                    SegmentedFrame.from_carla_image(carla_image,
                                                    self._camera_setup))
            # Send the message containing the frame.
            self._camera_stream.send(msg)
            # Note: The operator is set not to automatically propagate
            # watermark messages received on input streams. Thus, we can
            # issue watermarks only after the Carla callback is invoked.
            self._camera_stream.send(watermark_msg)
Example #11
    def run_step(self, input_data, timestamp):
        game_time = int(timestamp * 1000)
        self._logger.debug("Current game time {}".format(game_time))
        erdos_timestamp = erdos.Timestamp(coordinates=[game_time])

        if not self._sent_open_drive and self.track != Track.MAP:
            # We do not have access to the open drive map. Send top watermark.
            self._sent_open_drive = True
            self._open_drive_stream.send(
                erdos.WatermarkMessage(erdos.Timestamp(is_top=True)))
        # Send the waypoints.
        self.send_waypoints_msg(erdos_timestamp)

        speed_data = None
        imu_data = None
        gnss_data = None
        carla_pc = None
        for key, val in input_data.items():
            # val is a tuple of (data timestamp, data).
            # print("Sensor {} at {}".format(key, val[0]))
            if key in self._camera_streams:
                self._camera_streams[key].send(
                    pylot.perception.messages.FrameMessage(
                        erdos_timestamp,
                        CameraFrame(val[1][:, :, :3], 'BGR',
                                    self._camera_setups[key])))
                self._camera_streams[key].send(
                    erdos.WatermarkMessage(erdos_timestamp))
            elif key == 'imu':
                imu_data = val[1]
            elif key == 'speed':
                speed_data = val[1]
            elif key == 'gnss':
                gnss_data = val[1]
            elif key == 'opendrive':
                self.send_opendrive_map_msg(val[1], erdos_timestamp)
            elif key == 'LIDAR':
                carla_pc = val[1]
            else:
                self._logger.warning("Sensor {} not used".format(key))

        self.send_vehicle_id_msg()
        self.send_ground_data(erdos_timestamp)

        if FLAGS.localization:
            self.send_imu_msg(imu_data, erdos_timestamp)
            self.send_gnss_msg(gnss_data, erdos_timestamp)
            self.send_initial_pose_msg(erdos_timestamp)
        elif FLAGS.carla_localization:
            self.send_perfect_pose_msg(erdos_timestamp)
        else:
            self.send_pose_msg(speed_data, imu_data, gnss_data,
                               erdos_timestamp)

        if using_lidar():
            self.send_lidar_msg(carla_pc, self._lidar_transform,
                                erdos_timestamp)

        if pylot.flags.must_visualize():
            import pygame
            from pygame.locals import K_n
            events = pygame.event.get()
            for event in events:
                if event.type == pygame.KEYUP:
                    if event.key == K_n:
                        self._control_display_stream.send(
                            erdos.Message(erdos.Timestamp(coordinates=[0]),
                                          event.key))

        # Wait until the control is set.
        while True:
            control_msg = self._control_stream.read()
            if not isinstance(control_msg, erdos.WatermarkMessage):
                output_control = carla.VehicleControl()
                output_control.throttle = control_msg.throttle
                output_control.brake = control_msg.brake
                output_control.steer = control_msg.steer
                output_control.reverse = control_msg.reverse
                output_control.hand_brake = control_msg.hand_brake
                output_control.manual_gear_shift = False
                return output_control
Example #12
    def run_step(self, input_data, timestamp):
        start_time = time.time()
        game_time = int(timestamp * 1000)
        erdos_timestamp = erdos.Timestamp(coordinates=[game_time])

        # Parse the sensor data the agent receives from the leaderboard.
        speed_data = None
        imu_data = None
        gnss_data = None
        for key, val in input_data.items():
            # val is a tuple of (timestamp, data).
            if key in self._camera_streams:
                # The data is for one of the cameras. Transform it to a Pylot
                # CameraFrame, and send it on the camera's corresponding
                # stream.
                self._camera_streams[key].send(
                    pylot.perception.messages.FrameMessage(
                        erdos_timestamp,
                        CameraFrame(val[1][:, :, :3], 'BGR',
                                    self._camera_setups[key])))
                self._camera_streams[key].send(
                    erdos.WatermarkMessage(erdos_timestamp))
            elif key == 'imu':
                imu_data = val[1]
            elif key == 'speed':
                speed_data = val[1]
            elif key == 'gnss':
                gnss_data = val[1]
            elif key == 'opendrive':
                if not self._open_drive_stream.is_closed():
                    # The data is only sent once because it does not change
                    # throughout the duration of a scenario.
                    self._open_drive_stream.send(
                        erdos.Message(erdos_timestamp, val[1]['opendrive']))
                    # Inform the operators that read the open drive stream that
                    # they will not receive any other messages on this stream.
                    self._open_drive_stream.send(
                        erdos.WatermarkMessage(erdos.Timestamp(is_top=True)))
            elif key == 'LIDAR':
                # Send the LiDAR point cloud.
                self.send_lidar_msg(self._point_cloud_stream, val[1],
                                    erdos_timestamp, self._lidar_setup)
            else:
                self.logger.warning("Sensor {} not used".format(key))

        # Send the route the vehicle must follow.
        self.send_global_trajectory_msg(self._global_trajectory_stream,
                                        erdos_timestamp)

        # The following two methods are only relevant when the agent
        # is using perfect perception.
        self.send_vehicle_id_msg(self._vehicle_id_stream)
        self.send_perfect_detections(self._perfect_obstacles_stream,
                                     self._perfect_traffic_lights_stream,
                                     erdos_timestamp, CENTER_CAMERA_LOCATION)

        # Send localization data.
        self.send_localization(erdos_timestamp, imu_data, gnss_data,
                               speed_data)

        # Ensure that the open drive stream is closed when the agent
        # is not running on the MAP track.
        if not self._open_drive_stream.is_closed() and self.track != Track.MAP:
            # We do not have access to the open drive map. Send top watermark.
            self._open_drive_stream.send(
                erdos.WatermarkMessage(erdos.Timestamp(is_top=True)))

        sensor_send_runtime = (time.time() - start_time) * 1000
        self.csv_logger.info('{},{},sensor_send_runtime,{:.4f}'.format(
            pylot.utils.time_epoch_ms(), game_time, sensor_send_runtime))

        process_visualization_events(self._control_display_stream)

        # Return the control command received on the control stream.
        command = read_control_command(self._control_stream)
        e2e_runtime = (time.time() - start_time) * 1000
        self.csv_logger.info('{},{},e2e_runtime,{:.4f}'.format(
            pylot.utils.time_epoch_ms(), game_time, e2e_runtime))
        if FLAGS.simulator_mode == 'synchronous':
            return command
        elif FLAGS.simulator_mode == 'pseudo-asynchronous':
            return command, int(e2e_runtime - sensor_send_runtime)
        else:
            raise ValueError('Unexpected simulator_mode {}'.format(
                FLAGS.simulator_mode))
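
`read_control_command` is referenced but not shown; a minimal sketch that mirrors the inline control-reading loops of the earlier examples (the exact body is an assumption):

# Hedged sketch (assumption): block on the control stream, skip watermarks,
# and translate the first control message into a simulator control command.
def read_control_command(control_stream):
    while True:
        control_msg = control_stream.read()
        if isinstance(control_msg, erdos.WatermarkMessage):
            continue
        output_control = carla.VehicleControl()
        output_control.throttle = control_msg.throttle
        output_control.brake = control_msg.brake
        output_control.steer = control_msg.steer
        output_control.reverse = control_msg.reverse
        output_control.hand_brake = control_msg.hand_brake
        output_control.manual_gear_shift = False
        return output_control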
Example #13
    def on_watermark(self, timestamp):
        self._logger.debug("@{}: received watermark.".format(timestamp))
        pose_msg = self.get_message(self._pose_msgs, timestamp, "Pose")
        bgr_msg = self.get_message(self._bgr_msgs, timestamp, "BGR")
        tl_camera_msg = self.get_message(self._tl_camera_msgs, timestamp,
                                         "TLCamera")
        depth_msg = self.get_message(self._depth_msgs, timestamp, "Depth")
        point_cloud_msg = self.get_message(self._point_cloud_msgs, timestamp,
                                           "PointCloud")
        segmentation_msg = self.get_message(self._segmentation_msgs, timestamp,
                                            "Segmentation")
        imu_msg = self.get_message(self._imu_msgs, timestamp, "IMU")
        obstacle_msg = None
        # obstacle_msg = self.get_message(self._obstacle_msgs, timestamp,
        #                                 "Obstacle")
        obstacle_error_msg = self.get_message(self._obstacle_error_msgs,
                                              timestamp, "ObstacleError")
        traffic_light_msg = self.get_message(self._traffic_light_msgs,
                                             timestamp, "TrafficLight")
        tracked_obstacle_msg = self.get_message(self._tracked_obstacle_msgs,
                                                timestamp, "TrackedObstacle")
        lane_detection_msg = self.get_message(self._lane_detection_msgs,
                                              timestamp, "Lanes")
        prediction_camera_msg = self.get_message(self._prediction_camera_msgs,
                                                 timestamp, "PredictionCamera")
        prediction_msg = self.get_message(self._prediction_msgs, timestamp,
                                          "Prediction")
        waypoint_msg = self.get_message(self._waypoint_msgs, timestamp,
                                        "Waypoint")
        control_msg = self.get_message(self._control_msgs, timestamp,
                                       "Control")
        if pose_msg:
            ego_transform = pose_msg.data.transform
        else:
            ego_transform = None

        # Add the visualizations on world.
        if self._flags.visualize_pose:
            self._visualize_pose(ego_transform)
        if self._flags.visualize_imu:
            self._visualize_imu(imu_msg)

        sensor_to_display = self.display_array[self.current_display]
        if sensor_to_display == "RGB" and bgr_msg:
            bgr_msg.frame.visualize(self.display, timestamp=timestamp)
        elif sensor_to_display == "Obstacle" and bgr_msg and obstacle_msg:
            bgr_msg.frame.annotate_with_bounding_boxes(timestamp,
                                                       obstacle_msg.obstacles,
                                                       ego_transform)
            bgr_msg.frame.visualize(self.display, timestamp=timestamp)
        elif sensor_to_display == "ObstacleError" and bgr_msg and obstacle_error_msg: # CHANGE W/ ERROR MESSAGE
            bgr_msg.frame.annotate_with_bounding_boxes(timestamp,
                                                       obstacle_error_msg.obstacles,
                                                       ego_transform)
            bgr_msg.frame.visualize(self.display, timestamp=timestamp)
        elif (sensor_to_display == "TLCamera" and tl_camera_msg
              and traffic_light_msg):
            tl_camera_msg.frame.annotate_with_bounding_boxes(
                timestamp, traffic_light_msg.obstacles)
            tl_camera_msg.frame.visualize(self.display, timestamp=timestamp)
        elif (sensor_to_display == "TrackedObstacle" and bgr_msg
              and tracked_obstacle_msg):
            bgr_msg.frame.annotate_with_bounding_boxes(
                timestamp, tracked_obstacle_msg.obstacle_trajectories,
                ego_transform)
            bgr_msg.frame.visualize(self.display)
        elif sensor_to_display == "Waypoint" and (bgr_msg and pose_msg
                                                  and waypoint_msg):
            bgr_frame = bgr_msg.frame
            if self._flags.draw_waypoints_on_camera_frames:
                bgr_frame.camera_setup.set_transform(
                    pose_msg.data.transform * bgr_frame.camera_setup.transform)
                waypoint_msg.waypoints.draw_on_frame(bgr_frame)
            if self._flags.draw_waypoints_on_world:
                waypoint_msg.waypoints.draw_on_world(self._world)
            bgr_frame.visualize(self.display, timestamp=timestamp)
        elif (sensor_to_display == "PredictionCamera" and prediction_camera_msg
              and prediction_msg):
            frame = prediction_camera_msg.frame
            frame.transform_to_cityscapes()
            for obstacle_prediction in prediction_msg.predictions:
                obstacle_prediction.draw_trajectory_on_frame(frame)
            frame.visualize(self.display, timestamp=timestamp)
        elif sensor_to_display == "PointCloud" and point_cloud_msg:
            point_cloud_msg.point_cloud.visualize(
                self.display, self._flags.camera_image_width,
                self._flags.camera_image_height)
        elif (sensor_to_display == "Lanes" and bgr_msg and lane_detection_msg):
            for lane in lane_detection_msg.data:
                lane.draw_on_frame(bgr_msg.frame)
            bgr_msg.frame.visualize(self.display, timestamp)
        elif sensor_to_display == "Depth" and depth_msg:
            depth_msg.frame.visualize(self.display, timestamp=timestamp)
        elif sensor_to_display == "Segmentation" and segmentation_msg:
            segmentation_msg.frame.visualize(self.display, timestamp=timestamp)
        elif sensor_to_display == "PlanningWorld":
            if prediction_camera_msg is None:
                # Top-down prediction is not available. Show planning
                # world on a black image.
                black_img = np.zeros((self._bird_eye_camera_setup.height,
                                      self._bird_eye_camera_setup.width, 3),
                                     dtype=np.dtype("uint8"))
                frame = CameraFrame(black_img, 'RGB',
                                    self._bird_eye_camera_setup)
            else:
                frame = prediction_camera_msg.frame
                frame.transform_to_cityscapes()
            if lane_detection_msg:
                lanes = lane_detection_msg.data
            else:
                lanes = None
            self._planning_world.update(timestamp,
                                        pose_msg.data,
                                        prediction_msg.predictions,
                                        traffic_light_msg.obstacles,
                                        None,
                                        lanes=lanes)
            self._planning_world.update_waypoints(None, waypoint_msg.waypoints)
            self._planning_world.draw_on_frame(frame)
            frame.visualize(self.display, timestamp=timestamp)

        self.render_text(pose_msg.data, control_msg, timestamp)
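
`get_message` is used throughout the method but not shown; a minimal sketch of the buffered lookup it implies (the behaviour on a miss is an assumption):

    # Hedged sketch (assumption): pop buffered messages until one matches the
    # watermark timestamp; return None if no matching message has arrived.
    def get_message(self, queue, timestamp, name):
        msg = None
        while queue:
            retrieved_msg = queue.popleft()
            if retrieved_msg.timestamp == timestamp:
                msg = retrieved_msg
                break
        if msg is None:
            self._logger.debug('@{}: did not find a {} message'.format(
                timestamp, name))
        return msg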