Example #1
    def on_msg_camera_stream(self, msg):
        """Camera stream callback method.
        Invoked upon the receipt of a message on the camera stream.
        """
        self._logger.info('{} received frame {}'.format(
            self.name, msg.timestamp))
        start_time = time.time()
        assert msg.encoding == 'BGR', 'Expects BGR frames'
        # Reorder the HWC frame to CHW and add a batch dimension.
        image = np.expand_dims(msg.frame.transpose([2, 0, 1]), axis=0)
        # Normalize pixel values to [0, 1] and move the tensor to the GPU.
        tensor = torch.tensor(image).float().cuda() / 255.0
        output = self._network(tensor)
        # XXX(ionel): Check if the model outputs Carla Cityscapes values or
        # correct Cityscapes values.
        output = transform_to_cityscapes_palette(
            torch.argmax(output, dim=1).cpu().numpy()[0])

        output = rgb_to_bgr(output)

        if self._flags.visualize_segmentation_output:
            add_timestamp(msg.timestamp, output)
            cv2.imshow(self.name, output)
            cv2.waitKey(1)

        # Get runtime in ms.
        runtime = (time.time() - start_time) * 1000
        self._csv_logger.info('{},{},"{}",{}'.format(time_epoch_ms(),
                                                     self.name, msg.timestamp,
                                                     runtime))

        output_msg = SegmentedFrameMessage(output, runtime, msg.timestamp)
        self.get_output_stream(self._output_stream_name).send(output_msg)
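
Every example on this page feeds a frame of per-pixel class labels through transform_to_cityscapes_palette to obtain a Cityscapes-colored image. The snippet below is a minimal sketch of that idea, not the project's actual implementation: it assumes the input is a 2-D array of integer class IDs, and the class-to-color table is a small hypothetical subset of the Cityscapes palette.

import numpy as np

# Hypothetical subset of a Cityscapes-style palette: class ID -> RGB color.
EXAMPLE_PALETTE = {
    0: (0, 0, 0),        # unlabeled
    1: (70, 70, 70),     # building
    7: (128, 64, 128),   # road
    8: (244, 35, 232),   # sidewalk
}


def to_cityscapes_palette_sketch(label_frame):
    """Maps a 2-D array of class IDs to an H x W x 3 RGB image."""
    colored = np.zeros(label_frame.shape + (3,), dtype=np.uint8)
    for class_id, color in EXAMPLE_PALETTE.items():
        colored[label_frame == class_id] = color
    return colored
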
Example #2
def save_frame(file_name, frame):
    """ Saves the given frame at the given file name.

    Args:
        file_name: The file name to save the segmented image to.
        frame: The segmented frame to save.
    """
    frame = transform_to_cityscapes_palette(frame)
    cv2.imwrite(file_name, frame)
Example #3
    def on_top_down_segmented_frame(self, msg):
        self._top_down_segmented_frame_cnt += 1
        if self._top_down_segmented_frame_cnt % self._flags.log_every_nth_frame != 0:
            return
        frame = transform_to_cityscapes_palette(msg.frame)
        # Write the segmented image.
        img = Image.fromarray(np.uint8(frame))
        file_name = '{}carla-top-down-segmented-{}.png'.format(
            self._flags.data_path, msg.timestamp.coordinates[0])
        img.save(file_name)
Example #4
        def on_segmented_frame(msg):

            # log every nth frame
            self._frame_cnt[identifier] += 1
            if self._frame_cnt[
                    identifier] % self._flags.log_every_nth_frame != 0:
                return

            # Write the segmented image.
            frame = transform_to_cityscapes_palette(msg.frame)
            img = Image.fromarray(np.uint8(frame))
            file_name = '{}-{}.png'.format(identifier,
                                           msg.timestamp.coordinates[0])
            path = os.path.join(self._flags.data_path, file_name)
            img.save(path)
Example #5
    def on_top_down_segmentation_update(self, msg):
        top_down = np.uint8(transform_to_cityscapes_palette(msg.frame))

        # Save the segmented channels
        for k, v in LABEL_2_PIXEL.items():
            mask = np.all(top_down == v, axis=-1)
            tmp = np.zeros(top_down.shape[:2])
            tmp[mask] = 1
            name = '{}{}-{}.png'.format(self._flags.data_path, k,
                                        msg.timestamp.coordinates[0])
            img = Image.fromarray(tmp)
            img = img.convert('RGB')
            img.save(name)

        top_down_img = Image.fromarray(top_down)
        top_down_img.save('{}{}-{}.png'.format(self._flags.data_path,
                                               "top_down_segmentation",
                                               msg.timestamp.coordinates[0]))
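
The loop in Example #5 recovers one binary mask per class by comparing every pixel of the colored frame against that class's palette color. The self-contained illustration below shows the same masking step on a hypothetical 2 x 2 image; the color value is the standard Cityscapes road color, used here purely for illustration.

import numpy as np

# Hypothetical 2 x 2 RGB image: top row road-colored, bottom row black.
img = np.array([[[128, 64, 128], [128, 64, 128]],
                [[0, 0, 0], [0, 0, 0]]], dtype=np.uint8)

road_color = (128, 64, 128)
# True wherever all three channels match the class color.
mask = np.all(img == road_color, axis=-1)
binary = np.zeros(img.shape[:2], dtype=np.uint8)
binary[mask] = 1
print(binary)  # [[1 1]
               #  [0 0]]
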
Example #6
    def on_notification(self, msg):
        # Pop the oldest message from each buffer.
        with self._lock:
            if not self.synchronize_msg_buffers(
                    msg.timestamp,
                    [self._tracking_msgs, self._top_down_segmentation_msgs]):
                return
            tracking_msg = self._tracking_msgs.popleft()
            segmentation_msg = self._top_down_segmentation_msgs.popleft()

        self._logger.info('Timestamps {} {}'.format(
            tracking_msg.timestamp, segmentation_msg.timestamp))

        assert (tracking_msg.timestamp == segmentation_msg.timestamp)

        self._frame_cnt += 1

        display_img = np.uint8(transform_to_cityscapes_palette(
            segmentation_msg.frame))
        for obj in tracking_msg.obj_trajectories:
            # Intrinsic matrix of the top down segmentation camera.
            intrinsic_matrix = pylot.simulation.utils.create_intrinsic_matrix(
                                   self._top_down_camera_setup.width,
                                   self._top_down_camera_setup.height,
                                   fov=self._top_down_camera_setup.fov)
            # Convert to screen points.
            screen_points = pylot.simulation.utils.locations_3d_to_view(
                                obj.trajectory,
                                self._top_down_camera_setup.transform.matrix,
                                intrinsic_matrix)

            # Draw trajectory points on segmented image.
            for point in screen_points:
                if (0 <= point.x <= self._flags.carla_camera_image_width) and \
                   (0 <= point.y <= self._flags.carla_camera_image_height):
                    cv2.circle(display_img,
                               (int(point.x), int(point.y)),
                               3, self._colors[obj.obj_class], -1)
        pylot.utils.add_timestamp(msg.timestamp, display_img)
        cv2.imshow('img', display_img)
        cv2.waitKey(1)
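
Example #6 builds an intrinsic matrix from the top-down camera's width, height, and field of view, then projects 3-D trajectory locations onto the image plane. The sketch below shows the standard pinhole construction such a helper typically performs; it assumes the point is already expressed in camera coordinates (x right, y down, z forward), whereas the project's locations_3d_to_view presumably also applies the world-to-camera transform. The numbers are illustrative only.

import numpy as np


def intrinsic_matrix_sketch(width, height, fov):
    """Pinhole intrinsics from image size and horizontal field of view (degrees)."""
    focal = width / (2.0 * np.tan(np.radians(fov) / 2.0))
    return np.array([[focal, 0.0, width / 2.0],
                     [0.0, focal, height / 2.0],
                     [0.0, 0.0, 1.0]])


# Hypothetical point in camera coordinates: 2 m to the right, 10 m ahead.
point_cam = np.array([2.0, 0.0, 10.0])
K = intrinsic_matrix_sketch(800, 600, 90.0)
u, v, w = K @ point_cam
print(u / w, v / w)  # pixel coordinates of the projected point: 480.0, 300.0
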
Example #7
    def on_ground_segmented_frame(self, msg):
        # Buffer the ground truth frames.
        game_time = msg.timestamp.coordinates[0]
        self._ground_frames.append(
            (game_time, transform_to_cityscapes_palette(msg.frame)))
Example #8
    def display_front_frame(self, msg):
        # Color the segmentation labels with the Cityscapes palette.
        frame = transform_to_cityscapes_palette(msg.frame)
        img = Image.fromarray(np.uint8(frame))
        # OpenCV expects images in BGR channel order for display.
        open_cv_image = rgb_to_bgr(np.array(img))
        cv2.imshow(self.name, open_cv_image)
        cv2.waitKey(1)
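
Examples #1 and #8 convert the palette image from RGB to BGR before handing it to cv2.imshow, because OpenCV expects BGR channel order. A minimal sketch of such a helper, assuming the input is an H x W x 3 array:

import numpy as np


def rgb_to_bgr_sketch(image):
    # Reverse the channel axis; works for RGB -> BGR and back.
    return image[:, :, ::-1]

The same conversion could also be done with cv2.cvtColor(image, cv2.COLOR_RGB2BGR); the slice-based version simply avoids an extra OpenCV call.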