def on_msg_camera_stream(self, msg, segmented_stream):
        """Invoked upon the receipt of a message on the camera stream.

        Runs the segmentation model on the received BGR frame, colorizes the
        per-pixel predictions with the class palette, and sends the result.

        Args:
            msg: A :py:class:`~pylot.perception.messages.FrameMessage`.
            segmented_stream (:py:class:`erdos.WriteStream`): Stream on which
                the operator sends
                :py:class:`~pylot.perception.messages.SegmentedFrameMessage`
                messages.
        """
        self._logger.debug('@{}: {} received message'.format(
            msg.timestamp, self.config.name))
        start_time = time.time()
        assert msg.frame.encoding == 'BGR', 'Expects BGR frames'
        # HWC BGR frame -> NCHW float tensor with a batch dimension.
        image = torch.from_numpy(msg.frame.frame.transpose(
            [2, 0, 1])).unsqueeze(0).float()
        # torch.no_grad() replaces the deprecated
        # Variable(..., volatile=True) idiom (`volatile` was removed in
        # PyTorch 0.4); it disables autograd bookkeeping during inference.
        with torch.no_grad():
            final = self._model(image)[0]
            _, pred = torch.max(final, 1)

        # No gradients are tracked under no_grad, so .numpy() is safe without
        # the deprecated .data access.
        pred = pred.cpu().numpy()[0]
        # After we apply the pallete, the image is in RGB format
        image_np = self._pallete[pred.squeeze()]

        # Get runtime in ms.
        runtime = (time.time() - start_time) * 1000
        frame = SegmentedFrame(image_np, 'cityscapes', msg.frame.camera_setup)
        if self._flags.visualize_segmentation_output:
            frame.visualize(self.config.name,
                            msg.timestamp,
                            pygame_display=pylot.utils.PYGAME_DISPLAY)
        segmented_stream.send(
            SegmentedFrameMessage(msg.timestamp, frame, runtime))
# Esempio n. 2
 def __send_sensor_data(self, sensor_data, timestamp, watermark):
     """Converts raw simulator measurements into Pylot messages and sends
     them on their per-sensor output streams.

     Each sensor's output stream is looked up by sensor name; the stream's
     labels ('camera_type' / 'sensor_type') select the conversion applied to
     the raw measurement. After every data message, the watermark is sent on
     the same stream.

     Args:
         sensor_data: Dict mapping sensor names to raw simulator
             measurements.
         timestamp: Timestamp attached to every outgoing message.
         watermark: Watermark message sent after each data message.
     """
     for name, measurement in sensor_data.items():
         data_stream = self.get_output_stream(name)
         if data_stream.get_label('camera_type') == 'sensor.camera.rgb':
             # Transform the Carla RGB images to BGR.
             data_stream.send(
                 pylot.simulation.messages.FrameMessage(
                     pylot.utils.bgra_to_bgr(to_bgra_array(measurement)),
                     timestamp))
         elif data_stream.get_label(
                 'camera_type') == 'sensor.camera.semantic_segmentation':
             frame = labels_to_array(measurement)
             # NOTE(review): the 0 appears to be a runtime-in-ms placeholder
             # (no inference was run) — confirm against SegmentedFrameMessage
             # signature in this version of the codebase.
             data_stream.send(SegmentedFrameMessage(frame, 0, timestamp))
         elif data_stream.get_label('camera_type') == 'sensor.camera.depth':
             # NOTE: depth_to_array flips the image.
             data_stream.send(
                 pylot.simulation.messages.DepthFrameMessage(
                     depth_to_array(measurement), self._transforms[name],
                     measurement.fov, timestamp))
         elif data_stream.get_label(
                 'sensor_type') == 'sensor.lidar.ray_cast':
             pc_msg = pylot.simulation.messages.PointCloudMessage(
                 measurement.data, self._transforms[name], timestamp)
             data_stream.send(pc_msg)
         else:
             # Unrecognized sensor: forward the raw measurement unchanged.
             data_stream.send(Message(measurement, timestamp))
         data_stream.send(watermark)
# Esempio n. 3
    def process_images(self, carla_image):
        """Callback invoked by the simulator for every new camera image.

        Converts the raw image to the message type matching the camera
        setup, sends it, and follows it with a watermark.

        Args:
            carla_image: a carla.Image.
        """
        # Serialize callback execution; the simulator may call back from
        # multiple threads.
        with self._lock:
            game_time = int(carla_image.timestamp * 1000)
            timestamp = Timestamp(coordinates=[game_time])
            watermark_msg = WatermarkMessage(timestamp)

            camera_type = self._camera_setup.camera_type
            msg = None
            if camera_type == 'sensor.camera.rgb':
                bgr_frame = pylot.utils.bgra_to_bgr(
                    to_bgra_array(carla_image))
                msg = pylot.simulation.messages.FrameMessage(
                    bgr_frame, timestamp)
            elif camera_type == 'sensor.camera.depth':
                # Include the transform relative to the vehicle. Carla's
                # carla_image.transform is the world transform, which we do
                # not use directly.
                msg = pylot.simulation.messages.DepthFrameMessage(
                    depth_to_array(carla_image),
                    self._camera_setup.get_transform(), carla_image.fov,
                    timestamp)
            elif camera_type == 'sensor.camera.semantic_segmentation':
                msg = SegmentedFrameMessage(
                    labels_to_array(carla_image), 0, timestamp)
                # Send the message containing the frame.
            self.get_output_stream(self._camera_setup.name).send(msg)
            # Note: The operator is set not to automatically propagate
            # watermark messages received on input streams. Thus, we can
            # issue watermarks only after the Carla callback is invoked.
            self.get_output_stream(self._camera_setup.name).send(
                watermark_msg)
# Esempio n. 4
    def on_msg_camera_stream(self, msg, segmented_stream):
        """Camera stream callback method.

        Invoked upon the receipt of a message on the camera stream. Runs the
        segmentation model on the BGR frame, logs the inference runtime to
        the CSV logger, and sends the segmented frame on segmented_stream.
        """
        self._logger.debug('@{}: {} received message'.format(
            msg.timestamp, self._name))
        start_time = time.time()
        assert msg.frame.encoding == 'BGR', 'Expects BGR frames'
        # HWC BGR frame -> NCHW float tensor with a batch dimension.
        image = torch.from_numpy(msg.frame.frame.transpose(
            [2, 0, 1])).unsqueeze(0).float()
        # torch.no_grad() replaces the deprecated
        # Variable(..., volatile=True) idiom (`volatile` was removed in
        # PyTorch 0.4); it disables autograd bookkeeping during inference.
        with torch.no_grad():
            final = self._model(image)[0]
            _, pred = torch.max(final, 1)

        # No gradients are tracked under no_grad, so .numpy() is safe without
        # the deprecated .data access.
        pred = pred.cpu().numpy()[0]
        # After we apply the pallete, the image is in RGB format
        image_np = self._pallete[pred.squeeze()]

        # Get runtime in ms.
        runtime = (time.time() - start_time) * 1000
        self._csv_logger.info('{},{},"{}",{}'.format(time_epoch_ms(),
                                                     self._name, msg.timestamp,
                                                     runtime))
        frame = SegmentedFrame(image_np, 'cityscapes', msg.frame.camera_setup)
        if self._flags.visualize_segmentation_output:
            frame.visualize(self._name, msg.timestamp)
        segmented_stream.send(
            SegmentedFrameMessage(frame, msg.timestamp, runtime))
    def on_msg_camera_stream(self, msg):
        """Camera stream callback method.

        Runs the segmentation network on the received BGR frame, colorizes
        the class predictions, optionally visualizes them, and forwards the
        result on the operator's output stream.
        """
        self._logger.info('{} received frame {}'.format(
            self.name, msg.timestamp))
        start_time = time.time()
        assert msg.encoding == 'BGR', 'Expects BGR frames'
        # HWC -> NCHW with a batch dimension, scaled to [0, 1] on the GPU.
        batched = np.expand_dims(msg.frame.transpose([2, 0, 1]), axis=0)
        tensor = torch.tensor(batched).float().cuda() / 255.0
        prediction = self._network(tensor)
        # XXX(ionel): Check if the model outputs Carla Cityscapes values or
        # correct Cityscapes values.
        class_ids = torch.argmax(prediction, dim=1).cpu().numpy()[0]
        output = transform_to_cityscapes_palette(class_ids)

        output = rgb_to_bgr(output)

        if self._flags.visualize_segmentation_output:
            add_timestamp(msg.timestamp, output)
            cv2.imshow(self.name, output)
            cv2.waitKey(1)

        # Get runtime in ms.
        runtime = (time.time() - start_time) * 1000
        self._csv_logger.info('{},{},"{}",{}'.format(time_epoch_ms(),
                                                     self.name, msg.timestamp,
                                                     runtime))

        output_msg = SegmentedFrameMessage(output, runtime, msg.timestamp)
        self.get_output_stream(self._output_stream_name).send(output_msg)
    def on_msg_camera_stream(self, msg):
        """Camera stream callback method.

        Invoked upon the receipt of a message on the camera stream. Runs the
        segmentation model, colorizes the prediction with the class palette,
        optionally visualizes it, and sends it on the output stream.
        """
        self._logger.info('{} received frame {}'.format(
            self.name, msg.timestamp))
        start_time = time.time()
        assert msg.encoding == 'BGR', 'Expects BGR frames'
        # HWC BGR frame -> NCHW float tensor with a batch dimension.
        image = torch.from_numpy(msg.frame.transpose([2, 0, 1
                                                      ])).unsqueeze(0).float()
        # torch.no_grad() replaces the deprecated
        # Variable(..., volatile=True) idiom (`volatile` was removed in
        # PyTorch 0.4); it disables autograd bookkeeping during inference.
        with torch.no_grad():
            final = self._model(image)[0]
            _, pred = torch.max(final, 1)

        # No gradients are tracked under no_grad, so .numpy() is safe without
        # the deprecated .data access.
        pred = pred.cpu().numpy()[0]
        image_np = self._pallete[pred.squeeze()]
        # After we apply the pallete, the image is in RGB format
        image_np = rgb_to_bgr(image_np)

        if self._flags.visualize_segmentation_output:
            add_timestamp(msg.timestamp, image_np)
            cv2.imshow(self.name, image_np)
            cv2.waitKey(1)

        # Get runtime in ms.
        runtime = (time.time() - start_time) * 1000
        self._csv_logger.info('{},{},"{}",{}'.format(time_epoch_ms(),
                                                     self.name, msg.timestamp,
                                                     runtime))

        output_msg = SegmentedFrameMessage(image_np, runtime, msg.timestamp)
        self.get_output_stream(self._output_stream_name).send(output_msg)
    def process_images(self, simulator_image):
        """ Invoked when an image is received from the simulator.

        Converts the raw simulator image into the Pylot message type matching
        the camera setup (RGB, depth, or semantic segmentation), then either
        sends it immediately (followed by a watermark) or pickles it for
        deferred release, depending on self._release_data.

        Args:
            simulator_image: a carla.Image.
        """
        game_time = int(simulator_image.timestamp * 1000)
        timestamp = erdos.Timestamp(coordinates=[game_time])
        watermark_msg = erdos.WatermarkMessage(timestamp)
        with erdos.profile(self.config.name + '.process_images',
                           self,
                           event_data={'timestamp': str(timestamp)}):
            # Ensure that the code executes serially
            with self._lock:
                msg = None
                if self._camera_setup.camera_type == 'sensor.camera.rgb':
                    msg = FrameMessage(
                        timestamp,
                        CameraFrame.from_simulator_frame(
                            simulator_image, self._camera_setup))
                elif self._camera_setup.camera_type == 'sensor.camera.depth':
                    # Include the transform relative to the vehicle.
                    # simulator_image.transform returns the world transform,
                    # but we do not use it directly.
                    msg = DepthFrameMessage(
                        timestamp,
                        DepthFrame.from_simulator_frame(
                            simulator_image,
                            self._camera_setup,
                            save_original_frame=self._flags.
                            visualize_depth_camera))
                elif (self._camera_setup.camera_type ==
                      'sensor.camera.semantic_segmentation'):
                    msg = SegmentedFrameMessage(
                        timestamp,
                        SegmentedFrame.from_simulator_image(
                            simulator_image, self._camera_setup))

                # NOTE(review): if the camera type matches none of the three
                # branches above, msg stays None and is sent/pickled as-is —
                # presumably the setup guarantees one of these types; confirm.
                if self._release_data:
                    self._camera_stream.send(msg)
                    self._camera_stream.send(watermark_msg)
                else:
                    # Pickle the data, and release it upon release msg receipt.
                    pickled_msg = pickle.dumps(
                        msg, protocol=pickle.HIGHEST_PROTOCOL)
                    with self._pickle_lock:
                        self._pickled_messages[msg.timestamp] = pickled_msg
                    # Only the watermark is sent now; the pickled frame is
                    # released later from self._pickled_messages.
                    self._notify_reading_stream.send(watermark_msg)
    def process_images(self, carla_image):
        """ Invoked when an image is received from the simulator.

        Builds the message type matching the camera setup, sends it on the
        camera stream, and follows it with a watermark.

        Args:
            carla_image: a carla.Image.
        """
        # Ensure that the code executes serially
        with self._lock:
            game_time = int(carla_image.timestamp * 1000)
            timestamp = erdos.Timestamp(coordinates=[game_time])
            watermark_msg = erdos.WatermarkMessage(timestamp)

            camera_type = self._camera_setup.camera_type
            msg = None
            if camera_type == 'sensor.camera.rgb':
                frame = CameraFrame.from_carla_frame(carla_image,
                                                     self._camera_setup)
                msg = FrameMessage(timestamp, frame)
            elif camera_type == 'sensor.camera.depth':
                # Include the transform relative to the vehicle.
                # Carla carla_image.transform returns the world transform, but
                # we do not use it directly.
                depth_frame = DepthFrame.from_carla_frame(
                    carla_image, self._camera_setup)
                msg = DepthFrameMessage(timestamp, depth_frame)
            elif camera_type == 'sensor.camera.semantic_segmentation':
                segmented_frame = SegmentedFrame.from_carla_image(
                    carla_image, self._camera_setup)
                msg = SegmentedFrameMessage(timestamp, segmented_frame)
            # Send the message containing the frame.
            self._camera_stream.send(msg)
            # Note: The operator is set not to automatically propagate
            # watermark messages received on input streams. Thus, we can
            # issue watermarks only after the Carla callback is invoked.
            self._camera_stream.send(watermark_msg)