def on_notification(self, msg):
        game_time = msg.timestamp.coordinates[0]
        if not self._last_notification:
            self._last_notification = game_time
            return
        self._sim_interval = game_time - self._last_notification
        self._last_notification = game_time

        while len(self._detector_start_end_times) > 0:
            # Peek at the heap entry with the earliest end time.
            (end_time, start_time) = self._detector_start_end_times[0]
            # We can compute mAP only once the ground time has caught up
            # with the end time.
            if end_time <= game_time:
                # Pop the entry; the buffered ground bboxes now cover it.
                heapq.heappop(self._detector_start_end_times)
                end_bboxes = self.__get_ground_obstacles_at(end_time)
                if self._flags.detection_eval_use_accuracy_model:
                    # Not using the detector's outputs => get ground bboxes.
                    start_bboxes = self.__get_ground_obstacles_at(start_time)
                    if (len(start_bboxes) > 0 or len(end_bboxes) > 0):
                        precisions = []
                        for iou in self._iou_thresholds:
                            (precision, _) = get_precision_recall_at_iou(
                                end_bboxes, start_bboxes, iou)
                            precisions.append(precision)
                        avg_precision = (float(sum(precisions)) /
                                         len(precisions))
                        self._logger.info('precision-IoU is: {}'.format(
                            avg_precision))
                        self._csv_logger.info('{},{},{},{}'.format(
                            time_epoch_ms(), self.name, 'precision-IoU',
                            avg_precision))
                else:
                    # Get detector output obstacles.
                    det_objs = self.__get_obstacles_at(start_time)
                    if (len(det_objs) > 0 or len(end_bboxes) > 0):
                        mAP = get_pedestrian_mAP(end_bboxes, det_objs)
                        self._logger.info('mAP is: {}'.format(mAP))
                        self._csv_logger.info('{},{},{},{}'.format(
                            time_epoch_ms(), self.name, 'mAP', mAP))
                self._logger.info('Computed accuracy for end time {} and '
                                  'start time {}'.format(end_time, start_time))
            else:
                # The remaining entries require newer ground bboxes.
                break

        self.__garbage_collect_obstacles()
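A minimal, self-contained sketch of the precision averaging used in the loop above. box_iou and the greedy matching below are illustrative stand-ins for get_precision_recall_at_iou, and boxes are assumed to be (xmin, xmax, ymin, ymax) tuples, as elsewhere in these examples.

def box_iou(a, b):
    # Intersection-over-union of two (xmin, xmax, ymin, ymax) boxes.
    ax_min, ax_max, ay_min, ay_max = a
    bx_min, bx_max, by_min, by_max = b
    inter_w = max(0, min(ax_max, bx_max) - max(ax_min, bx_min))
    inter_h = max(0, min(ay_max, by_max) - max(ay_min, by_min))
    inter = inter_w * inter_h
    union = ((ax_max - ax_min) * (ay_max - ay_min) +
             (bx_max - bx_min) * (by_max - by_min) - inter)
    return inter / union if union > 0 else 0.0

def average_precision_over_ious(pred_boxes, ground_boxes, iou_thresholds):
    # Average single-point precision over the IoU thresholds, mirroring
    # the loop above (greedy matching, no confidence sweep).
    precisions = []
    for threshold in iou_thresholds:
        tp = sum(1 for p in pred_boxes
                 if any(box_iou(p, g) >= threshold for g in ground_boxes))
        precisions.append(tp / len(pred_boxes) if pred_boxes else 0.0)
    return float(sum(precisions)) / len(precisions)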
Example 2
    def on_msg_camera_stream(self, msg):
        if self._last_seq_num + 1 != msg.timestamp.coordinates[1]:
            self._logger.error(
                'Expected msg with seq num {} but received {}'.format(
                    (self._last_seq_num + 1), msg.timestamp.coordinates[1]))
            if self._flags.fail_on_message_loss:
                assert self._last_seq_num + 1 == msg.timestamp.coordinates[1]
        self._last_seq_num = msg.timestamp.coordinates[1]

        self._logger.info('{} received frame {}'.format(
            self.name, msg.timestamp))
        start_time = time.time()
        image_np = msg.data
        results = self._detector.run(image_np)
        output = self.__get_output_bboxes(results['results'])
        if self._flags.visualize_detector_output:
            visualize_bboxes(self.name, msg.timestamp, image_np, output,
                             self._bbox_colors)

        # Get runtime in ms.
        runtime = (time.time() - start_time) * 1000
        self._csv_logger.info('{},{},"{}",{}'.format(time_epoch_ms(),
                                                     self.name, msg.timestamp,
                                                     runtime))
        output_msg = Message((output, runtime), msg.timestamp)
        self.get_output_stream(self._output_stream_name).send(output_msg)
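The sequence-number guard at the top of this callback recurs in most of the examples below. A hedged sketch of the same pattern factored into a helper; op stands in for the operator instance, and this helper is not part of the original operators.

def check_no_message_loss(op, msg):
    # Verify that messages arrive in order; log (and optionally fail)
    # when one was lost.
    received = msg.timestamp.coordinates[1]
    expected = op._last_seq_num + 1
    if expected != received:
        op._logger.error(
            'Expected msg with seq num {} but received {}'.format(
                expected, received))
        if op._flags.fail_on_message_loss:
            assert expected == received
    op._last_seq_num = received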
Example 3
    def on_msg_camera_stream(self, msg):
        if self._last_seq_num + 1 != msg.timestamp.coordinates[1]:
            self._logger.error(
                'Expected msg with seq num {} but received {}'.format(
                    (self._last_seq_num + 1), msg.timestamp.coordinates[1]))
            if self._flags.fail_on_message_loss:
                assert self._last_seq_num + 1 == msg.timestamp.coordinates[1]
        self._last_seq_num = msg.timestamp.coordinates[1]

        self._logger.info('{} received frame {}'.format(
            self.name, msg.timestamp))
        start_time = time.time()
        # The models expect BGR images.
        image_np = msg.data
        # Expand dimensions since the model expects images to have
        # shape: [1, None, None, 3]
        image_np_expanded = np.expand_dims(image_np, axis=0)
        (boxes, scores, classes, num_detections) = self._tf_session.run(
            [
                self._detection_boxes, self._detection_scores,
                self._detection_classes, self._num_detections
            ],
            feed_dict={self._image_tensor: image_np_expanded})

        num_detections = int(num_detections[0])
        classes = classes[0][:num_detections]
        labels = [self._coco_labels[label] for label in classes]
        boxes = boxes[0][:num_detections]
        scores = scores[0][:num_detections]

        self._logger.info('Object boxes {}'.format(boxes))
        self._logger.info('Object scores {}'.format(scores))
        self._logger.info('Object labels {}'.format(labels))

        output = []
        im_width = image_np.shape[1]
        im_height = image_np.shape[0]

        for box, score, label in zip(boxes, scores, labels):
            if score >= self._flags.detector_min_score_threshold:
                ymin = int(box[0] * im_height)
                xmin = int(box[1] * im_width)
                ymax = int(box[2] * im_height)
                xmax = int(box[3] * im_width)
                corners = (xmin, xmax, ymin, ymax)
                output.append((corners, score, label))

        if self._flags.visualize_detector_output:
            visualize_bboxes(self.name, msg.timestamp, image_np, output,
                             self._bbox_colors)

        # Get runtime in ms.
        runtime = (time.time() - start_time) * 1000
        self._csv_logger.info('{},{},"{}",{}'.format(time_epoch_ms(),
                                                     self.name, msg.timestamp,
                                                     runtime))
        output_msg = Message((output, runtime), msg.timestamp)
        self.get_output_stream(self._output_stream_name).send(output_msg)
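The TensorFlow Object Detection API returns boxes normalized as [ymin, xmin, ymax, xmax]; the loop above scales them to pixel corners one box at a time. The same conversion vectorized with NumPy, as a sketch; the (xmin, xmax, ymin, ymax) corner order matches the snippet.

import numpy as np

def boxes_to_corners(boxes, im_width, im_height):
    # boxes is an (N, 4) array of normalized [ymin, xmin, ymax, xmax].
    boxes = np.asarray(boxes, dtype=float).reshape(-1, 4)
    ymin = (boxes[:, 0] * im_height).astype(int)
    xmin = (boxes[:, 1] * im_width).astype(int)
    ymax = (boxes[:, 2] * im_height).astype(int)
    xmax = (boxes[:, 3] * im_width).astype(int)
    return list(zip(xmin, xmax, ymin, ymax))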
Example 4
    def on_msg_camera_stream(self, msg):
        """Camera stream callback method.
        Invoked upon the receipt of a message on the camera stream.
        """
        self._logger.info('{} received frame {}'.format(
            self.name, msg.timestamp))
        start_time = time.time()
        assert msg.encoding == 'BGR', 'Expects BGR frames'
        image = np.expand_dims(msg.frame.transpose([2, 0, 1]), axis=0)
        tensor = torch.tensor(image).float().cuda() / 255.0
        output = self._network(tensor)
        # XXX(ionel): Check if the model outputs Carla Cityscapes values or
        # correct Cityscapes values.
        output = transform_to_cityscapes_palette(
            torch.argmax(output, dim=1).cpu().numpy()[0])

        output = rgb_to_bgr(output)

        if self._flags.visualize_segmentation_output:
            add_timestamp(msg.timestamp, output)
            cv2.imshow(self.name, output)
            cv2.waitKey(1)

        # Get runtime in ms.
        runtime = (time.time() - start_time) * 1000
        self._csv_logger.info('{},{},"{}",{}'.format(time_epoch_ms(),
                                                     self.name, msg.timestamp,
                                                     runtime))

        output_msg = SegmentedFrameMessage(output, runtime, msg.timestamp)
        self.get_output_stream(self._output_stream_name).send(output_msg)
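The segmentation callbacks share the same preprocessing: the HWC BGR frame is transposed to CHW, given a batch dimension, and scaled to [0, 1]. That step in isolation, as a self-contained sketch; the device is a parameter here, unlike the hard-coded .cuda() above.

import numpy as np
import torch

def frame_to_tensor(frame, device='cpu'):
    # HWC uint8 BGR frame -> 1xCxHxW float tensor in [0, 1].
    chw = np.expand_dims(frame.transpose([2, 0, 1]), axis=0)
    return torch.tensor(chw, device=device).float() / 255.0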
Example 5
    def on_frame_msg(self, msg):
        # Ensure that we're not missing messages.
        if self._last_seq_num + 1 != msg.timestamp.coordinates[1]:
            self._logger.error(
                'Expected msg with seq num {} but received {}'.format(
                    (self._last_seq_num + 1), msg.timestamp.coordinates[1]))
            if self._flags.fail_on_message_loss:
                assert self._last_seq_num + 1 == msg.timestamp.coordinates[1]
        self._last_seq_num = msg.timestamp.coordinates[1]

        self._lock.acquire()
        start_time = time.time()
        frame = msg.data
        # Store frames so that they can be re-processed once we receive the
        # next update from the detector.
        self._to_process.append((msg.timestamp, frame))
        # Run the tracker only if it is ready to accept new frames.
        if self._ready_to_update:
            self.__track_bboxes_on_frame(frame, msg.timestamp, False)
        # Get runtime in ms.
        runtime = (time.time() - start_time) * 1000
        self._csv_logger.info('{},{},"{}",{}'.format(time_epoch_ms(),
                                                     self.name, msg.timestamp,
                                                     runtime))
        self._lock.release()
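The acquire/release pair above works, but a with block releases the lock even if processing raises. A minimal sketch of the same structure; process is a hypothetical stand-in for the buffering and tracking done in the callback.

import threading
import time

lock = threading.Lock()

def on_frame(msg, process):
    # Same critical section, written as a context manager so the lock
    # is always released.
    with lock:
        start_time = time.time()
        process(msg)
        return (time.time() - start_time) * 1000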
Example 6
    def publish_world_data(self):
        read_start_time = time.time()
        measurements, sensor_data = self.client.read_data()
        measure_time = time.time()

        self._logger.info(
            'Got readings for game time {} and platform time {}'.format(
                measurements.game_timestamp, measurements.platform_timestamp))

        timestamp = Timestamp(coordinates=[measurements.game_timestamp])
        watermark = WatermarkMessage(timestamp)

        # Send player data on data streams.
        self.__send_player_data(measurements.player_measurements, timestamp,
                                watermark)
        # Extract agent data from measurements.
        agents = self.__get_ground_agents(measurements)
        # Send agent data on data streams.
        self.__send_ground_agent_data(agents, timestamp, watermark)
        # Send sensor data on data stream.
        self.__send_sensor_data(sensor_data, timestamp, watermark)
        # Get Autopilot control data.
        if self._auto_pilot:
            self.control = measurements.player_measurements.autopilot_control
        end_time = time.time()
        measurement_runtime = (measure_time - read_start_time) * 1000
        total_runtime = (end_time - read_start_time) * 1000
        self._logger.info('Carla measurement time {}; total time {}'.format(
            measurement_runtime, total_runtime))
        self._csv_logger.info('{},{},{},{}'.format(time_epoch_ms(), self.name,
                                                   measurement_runtime,
                                                   total_runtime))
Example 7
    def eval_depth(self, msg):
        start_time = time.time()

        imgL = self._left_imgs.popleft()
        imgR = self._right_imgs.popleft()

        cudnn.benchmark = False
        self._model.eval()
        imgL = imgL.float().cuda().unsqueeze(0)
        imgR = imgR.float().cuda().unsqueeze(0)
        with torch.no_grad():
            outputs = self._model(imgL, imgR)
            output = torch.squeeze(outputs[2], 1)
        output = output.squeeze().cpu().numpy()
        # Convert the output (disparity) to depth; this step is model-dependent.
        depth = preprocess.disp2depth(output)
        # Get runtime in ms.
        runtime = (time.time() - start_time) * 1000
        self._csv_logger.info('{},{},"{}",{}'.format(time_epoch_ms(),
                                                     self.name, msg.timestamp,
                                                     runtime))

        if self._flags.visualize_depth_est:
            plt.imshow(output, cmap='viridis')
            plt.show()

        output_msg = DepthFrameMessage(depth, self._transform, 90,
                                       msg.timestamp)
        self.get_output_stream(self._output_stream_name).send(output_msg)
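preprocess.disp2depth above is model- and calibration-dependent. For a rectified stereo pair the standard relation is depth = focal_length_px * baseline_m / disparity_px; a hedged sketch with placeholder calibration values (fx and baseline below are not the snippet's).

import numpy as np

def disparity_to_depth(disparity, fx=700.0, baseline=0.54):
    # Pinhole-stereo conversion; fx in pixels, baseline in meters.
    # Non-positive disparities are left at depth 0 to avoid dividing
    # by zero.
    disparity = np.asarray(disparity, dtype=np.float32)
    depth = np.zeros_like(disparity)
    valid = disparity > 0
    depth[valid] = fx * baseline / disparity[valid]
    return depth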
Example 8
    def on_msg_camera_stream(self, msg):
        if self._last_seq_num + 1 != msg.timestamp.coordinates[1]:
            self._logger.error(
                'Expected msg with seq num {} but received {}'.format(
                    (self._last_seq_num + 1), msg.timestamp.coordinates[1]))
            if self._flags.fail_on_message_loss:
                assert self._last_seq_num + 1 == msg.timestamp.coordinates[1]
        self._last_seq_num = msg.timestamp.coordinates[1]

        self._logger.info('%s received frame %s', self.name, msg.timestamp)
        start_time = time.time()
        image = bgra_to_bgr(msg.data)
        image = np.expand_dims(image.transpose([2, 0, 1]), axis=0)
        tensor = torch.tensor(image).float().cuda() / 255.0
        output = self._network(tensor)
        # XXX(ionel): Check if the model outputs Carla Cityscapes values or
        # correct Cityscapes values.
        output = transform_to_cityscapes_palette(
            torch.argmax(output, dim=1).cpu().numpy()[0])

        output = rgb_to_bgr(output)

        if self._flags.visualize_segmentation_output:
            add_timestamp(msg.timestamp, output)
            cv2.imshow(self.name, output)
            cv2.waitKey(1)

        # Get runtime in ms.
        runtime = (time.time() - start_time) * 1000
        self._csv_logger.info('{},{},"{}",{}'.format(time_epoch_ms(),
                                                     self.name, msg.timestamp,
                                                     runtime))

        output_msg = Message((output, runtime), msg.timestamp)
        self.get_output_stream(self._output_stream_name).send(output_msg)
Example 9
    def on_msg_camera_stream(self, msg):
        """Camera stream callback method.
        Invoked upon the receipt of a message on the camera stream.
        """
        self._logger.info('{} received frame {}'.format(
            self.name, msg.timestamp))
        start_time = time.time()
        assert msg.encoding == 'BGR', 'Expects BGR frames'
        image = torch.from_numpy(
            msg.frame.transpose([2, 0, 1])).unsqueeze(0).float()
        image_var = Variable(image, requires_grad=False, volatile=True)

        final = self._model(image_var)[0]
        _, pred = torch.max(final, 1)

        pred = pred.cpu().data.numpy()[0]
        image_np = self._pallete[pred.squeeze()]
        # After we apply the palette, the image is in RGB format.
        image_np = rgb_to_bgr(image_np)

        if self._flags.visualize_segmentation_output:
            add_timestamp(msg.timestamp, image_np)
            cv2.imshow(self.name, image_np)
            cv2.waitKey(1)

        # Get runtime in ms.
        runtime = (time.time() - start_time) * 1000
        self._csv_logger.info('{},{},"{}",{}'.format(time_epoch_ms(),
                                                     self.name, msg.timestamp,
                                                     runtime))

        output_msg = SegmentedFrameMessage(image_np, runtime, msg.timestamp)
        self.get_output_stream(self._output_stream_name).send(output_msg)
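Variable(..., volatile=True) here (and in Example 20 below) is legacy, pre-0.4 PyTorch. On current versions the inference-only forward pass is written with torch.no_grad(); a sketch of the equivalent:

import torch

def segment_frame(model, image_tensor):
    # Modern equivalent of the volatile Variable forward pass above.
    with torch.no_grad():
        final = model(image_tensor)[0]
        _, pred = torch.max(final, 1)
    return pred.cpu().numpy()[0]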
Example 10
    def on_msg_camera_stream(self, msg):
        if self._last_seq_num + 1 != msg.timestamp.coordinates[1]:
            self._logger.error(
                'Expected msg with seq num {} but received {}'.format(
                    (self._last_seq_num + 1), msg.timestamp.coordinates[1]))
            if self._flags.fail_on_message_loss:
                assert self._last_seq_num + 1 == msg.timestamp.coordinates[1]
        self._last_seq_num = msg.timestamp.coordinates[1]

        start_time = time.time()
        image_np = bgra_to_bgr(msg.data)

        # TODO(ionel): Implement lane detection.

        # Get runtime in ms.
        runtime = (time.time() - start_time) * 1000
        self._csv_logger.info('{},{},"{}",{}'.format(time_epoch_ms(),
                                                     self.name, msg.timestamp,
                                                     runtime))

        if self._flags.visualize_lane_detection:
            add_timestamp(msg.timestamp, image_np)
            cv2.imshow(self.name, image_np)
            cv2.waitKey(1)

        output_msg = Message(image_np, msg.timestamp)
        self.get_output_stream(self._output_stream_name).send(output_msg)
Example 11
    def on_vehicle_pos_update(self, msg):
        start_time = time.time()
        payload = self.get_waypoints(msg.data)
        runtime = (time.time() - start_time) * 1000
        self._csv_logger.info('{},{},{}'.format(time_epoch_ms(), self.name,
                                                runtime))
        self.get_output_stream('waypoints').send(
            Message(payload, msg.timestamp))
Example 12
    def __compute_mean_iou(self, ground_frame, segmented_frame):
        (mean_iou, class_iou) = compute_semantic_iou(ground_frame,
                                                     segmented_frame)
        self._logger.info('IoU class scores: {}'.format(class_iou))
        self._logger.info('mean IoU score: {}'.format(mean_iou))
        self._csv_logger.info('{},{},{},{}'.format(
            time_epoch_ms(), self.name, self._flags.eval_segmentation_metric,
            mean_iou))
Example 13
    def on_ground_segmented_frame(self, msg):
        start_time = time.time()
        # We don't fully transform it to cityscapes palette to avoid
        # introducing extra latency.
        frame_masks = generate_masks(msg.frame)

        if len(self._ground_masks) > 0:
            if self._time_delta is None:
                self._time_delta = (msg.timestamp.coordinates[0] -
                                    self._ground_masks[0][0])
            else:
                # Check that no frames got dropped.
                recv_time_delta = (msg.timestamp.coordinates[0] -
                                   self._ground_masks[-1][0])
                assert self._time_delta == recv_time_delta

            # Pop the oldest frame if it's older than the max latency
            # we're interested in.
            if (msg.timestamp.coordinates[0] - self._ground_masks[0][0] >
                    self._flags.eval_ground_truth_max_latency):
                self._ground_masks.popleft()

            cur_time = time_epoch_ms()
            for timestamp, ground_masks in self._ground_masks:
                (mean_iou, class_iou) = compute_semantic_iou_from_masks(
                    ground_masks, frame_masks)
                time_diff = msg.timestamp.coordinates[0] - timestamp
                self._logger.info(
                    'Segmentation ground latency {} ; mean IoU {}'.format(
                        time_diff, mean_iou))
                self._csv_logger.info('{},{},mIoU,{},{}'.format(
                    cur_time, self.name, time_diff, mean_iou))
                # Carla semantic segmentation class id for pedestrians.
                pedestrian_key = 4
                if pedestrian_key in class_iou:
                    self._logger.info(
                        'Segmentation ground latency {} ; pedestrian IoU {}'.
                        format(time_diff, class_iou[pedestrian_key]))
                    self._csv_logger.info('{},{},pedestrianIoU,{},{}'.format(
                        cur_time, self.name, time_diff,
                        class_iou[pedestrian_key]))

                # Carla semantic segmentation class id for vehicles.
                vehicle_key = 10
                if vehicle_key in class_iou:
                    self._logger.info(
                        'Segmentation ground latency {} ; vehicle IoU {}'.
                        format(time_diff, class_iou[vehicle_key]))
                    self._csv_logger.info('{},{},vehicleIoU,{},{}'.format(
                        cur_time, self.name, time_diff,
                        class_iou[vehicle_key]))

        # Append the processed image to the buffer.
        self._ground_masks.append((msg.timestamp.coordinates[0], frame_masks))

        runtime = (time.time() - start_time) * 1000
        self._logger.info(
            'Segmentation eval ground runtime {}'.format(runtime))
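The operator above keeps a deque of (game_time, data) pairs bounded by eval_ground_truth_max_latency and scores each new frame against every entry still buffered; the detection-eval examples below reuse the same pattern for bounding boxes. The buffering in isolation, as a sketch:

from collections import deque

def evaluate_against_buffer(buffer, game_time, data, max_latency, compare):
    # Drop entries older than the maximum latency of interest, compare
    # the new data against what remains, then buffer the new data too.
    while len(buffer) > 0 and game_time - buffer[0][0] > max_latency:
        buffer.popleft()
    for old_time, old_data in buffer:
        compare(game_time - old_time, old_data, data)
    buffer.append((game_time, data))

# Usage sketch: create buffer = deque() once, then call on every frame.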
Example 14
    def on_msg_camera_stream(self, msg):
        """ Invoked when the operator receives a message on the data stream."""
        self._logger.info('{} received frame {}'.format(
            self.name, msg.timestamp))
        start_time = time.time()
        # The models expect BGR images.
        assert msg.encoding == 'BGR', 'Expects BGR frames'
        image_np = msg.frame
        # Expand dimensions since the model expects images to have
        # shape: [1, None, None, 3]
        image_np_expanded = np.expand_dims(image_np, axis=0)
        (boxes, scores, classes, num_detections) = self._tf_session.run(
            [
                self._detection_boxes, self._detection_scores,
                self._detection_classes, self._num_detections
            ],
            feed_dict={self._image_tensor: image_np_expanded})

        num_detections = int(num_detections[0])
        res_classes = classes[0][:num_detections]
        res_boxes = boxes[0][:num_detections]
        res_scores = scores[0][:num_detections]

        # TODO(ionel): BIG HACK TO FILTER OUT UNKNOWN CLASSES!
        boxes = []
        scores = []
        labels = []
        for i in range(0, num_detections):
            if res_classes[i] in self._coco_labels:
                labels.append(self._coco_labels[res_classes[i]])
                boxes.append(res_boxes[i])
                scores.append(res_scores[i])

        detected_objects = self.__convert_to_detected_objs(
            boxes, scores, labels, msg.height, msg.width)
        self._logger.info('Detected objects: {}'.format(detected_objects))

        if (self._flags.visualize_detector_output
                or self._flags.log_detector_output):
            annotate_image_with_bboxes(msg.timestamp, image_np,
                                       detected_objects, self._bbox_colors)
            if self._flags.visualize_detector_output:
                visualize_image(self.name, image_np)
            if self._flags.log_detector_output:
                save_image(bgr_to_rgb(image_np), msg.timestamp,
                           self._flags.data_path,
                           'detector-{}'.format(self.name))

        # Get runtime in ms.
        runtime = (time.time() - start_time) * 1000
        self._csv_logger.info('{},{},"{}",{}'.format(time_epoch_ms(),
                                                     self.name, msg.timestamp,
                                                     runtime))
        output_msg = DetectorMessage(detected_objects, runtime, msg.timestamp)
        self.get_output_stream(self._output_stream_name).send(output_msg)
Example 15
    def __compute_mean_iou(self, ground_frame, segmented_frame):
        # Transform the ground frame to the Cityscapes palette; the
        # segmented frame is transformed by the segmentation operators.
        ground_frame = labels_to_cityscapes_palette(ground_frame)
        (mean_iou, class_iou) = compute_semantic_iou(ground_frame,
                                                     segmented_frame)
        self._logger.info('IoU class scores: {}'.format(class_iou))
        self._logger.info('mean IoU score: {}'.format(mean_iou))
        self._csv_logger.info('{},{},{},{}'.format(
            time_epoch_ms(), self.name, self._flags.eval_segmentation_metric,
            mean_iou))
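compute_semantic_iou above consumes palette frames; the metric itself is easier to see on raw class-id maps. A minimal stand-in, assuming both inputs are integer label arrays of the same shape (illustrative, not the module's implementation):

import numpy as np

def semantic_iou(ground, predicted):
    # Per-class IoU, plus its unweighted mean over the classes that
    # appear in either frame.
    class_iou = {}
    classes = np.union1d(np.unique(ground), np.unique(predicted))
    for cls in classes:
        g = (ground == cls)
        p = (predicted == cls)
        union = np.logical_or(g, p).sum()
        if union > 0:
            class_iou[int(cls)] = float(np.logical_and(g, p).sum()) / union
    mean_iou = sum(class_iou.values()) / len(class_iou)
    return mean_iou, class_iou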
Example 16
    def on_msg_camera_stream(self, msg):
        start_time = time.time()
        assert msg.encoding == 'BGR', 'Expects BGR frames'
        image_np = msg.frame

        # TODO(ionel): Implement lane detection.
        edges = self.apply_canny(image_np)
        lines_edges = self.apply_hough(image_np, edges)

        kernel_size = 3
        grad_x = self.apply_sobel(image_np,
                                  orient='x',
                                  sobel_kernel=kernel_size,
                                  thresh_min=0,
                                  thresh_max=255)
        grad_y = self.apply_sobel(image_np,
                                  orient='y',
                                  sobel_kernel=kernel_size,
                                  thresh_min=0,
                                  thresh_max=255)
        mag_binary = self.magnitude_threshold(image_np,
                                              sobel_kernel=kernel_size,
                                              thresh_min=0,
                                              thresh_max=255)
        dir_binary = self.direction_threshold(image_np,
                                              sobel_kernel=kernel_size,
                                              thresh_min=0,
                                              thresh_max=np.pi / 2)

        s_binary = self.extract_s_channel(image_np)

        # Select the pixels where both the x and y gradients meet the
        # threshold criteria, or where the gradient magnitude and direction
        # are both within the threshold values.
        combined = np.zeros_like(dir_binary)
        combined[((grad_x == 1) & (grad_y == 1))
                 | ((mag_binary == 1) & (dir_binary == 1))] = 1

        combined_binary = np.zeros_like(grad_x)
        combined_binary[(s_binary == 1) | (grad_x == 1)] = 1

        # Get runtime in ms.
        runtime = (time.time() - start_time) * 1000
        self._csv_logger.info('{},{},"{}",{}'.format(
            time_epoch_ms(), self.name, msg.timestamp, runtime))

        if self._flags.visualize_lane_detection:
            add_timestamp(msg.timestamp, lines_edges)
            cv2.imshow(self.name, lines_edges)
            cv2.waitKey(1)

        output_msg = Message(image_np, msg.timestamp)
        self.get_output_stream(self._output_stream_name).send(output_msg)
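apply_sobel, magnitude_threshold, and direction_threshold are helpers assumed by the snippet above. A plausible cv2-based apply_sobel matching the snippet's signature, hedged as one common lane-detection formulation rather than the original implementation:

import cv2
import numpy as np

def apply_sobel(image, orient='x', sobel_kernel=3, thresh_min=0,
                thresh_max=255):
    # Threshold the rescaled absolute Sobel gradient along one axis
    # into a binary mask.
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    dx, dy = (1, 0) if orient == 'x' else (0, 1)
    sobel = np.absolute(cv2.Sobel(gray, cv2.CV_64F, dx, dy,
                                  ksize=sobel_kernel))
    scaled = np.uint8(255 * sobel / max(np.max(sobel), 1e-12))
    binary = np.zeros_like(scaled)
    binary[(scaled >= thresh_min) & (scaled <= thresh_max)] = 1
    return binary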
Example 17
    def on_can_bus_update(self, msg):
        start_time = time.time()
        (wp_angle, wp_vector, wp_angle_speed,
         wp_vector_speed) = self.get_waypoints(msg.data.transform)
        runtime = (time.time() - start_time) * 1000
        self._csv_logger.info('{},{},{}'.format(
            time_epoch_ms(), self.name, runtime))
        output_msg = WaypointsMessage(
            msg.timestamp,
            wp_angle=wp_angle,
            wp_vector=wp_vector,
            wp_angle_speed=wp_angle_speed,
            wp_vector_speed=wp_vector_speed)
        self.get_output_stream('waypoints').send(output_msg)
Example 18
    def on_frame_msg(self, msg):
        """ Invoked when a FrameMessage is received on the camera stream."""
        self._lock.acquire()
        start_time = time.time()
        assert msg.encoding == 'BGR', 'Expects BGR frames'
        frame = msg.frame
        # Store frames so that they can be re-processed once we receive the
        # next update from the detector.
        self._to_process.append((msg.timestamp, frame))
        # Run the tracker only if it is ready to accept new frames.
        if self._ready_to_update:
            self.__track_bboxes_on_frame(frame, msg.timestamp, False)
        # Get runtime in ms.
        runtime = (time.time() - start_time) * 1000
        self._csv_logger.info('{},{},"{}",{}'.format(time_epoch_ms(),
                                                     self.name, msg.timestamp,
                                                     runtime))
        self._lock.release()
Example 19
    def on_msg_camera_stream(self, msg):
        self._logger.info('{} received frame {}'.format(
            self.name, msg.timestamp))
        start_time = time.time()
        assert msg.encoding == 'BGR', 'Expects BGR frames'
        image_np = msg.frame
        results = self._detector.run(image_np)
        detected_objs = self.__get_output_bboxes(results['results'])
        self._logger.info('Detected objects: {}'.format(detected_objs))
        if self._flags.visualize_detector_output:
            visualize_bboxes(self.name, msg.timestamp, image_np, detected_objs,
                             self._bbox_colors)

        # Get runtime in ms.
        runtime = (time.time() - start_time) * 1000
        self._csv_logger.info('{},{},"{}",{}'.format(time_epoch_ms(),
                                                     self.name, msg.timestamp,
                                                     runtime))
        output_msg = DetectorMessage(detected_objs, runtime, msg.timestamp)
        self.get_output_stream(self._output_stream_name).send(output_msg)
Example 20
    def on_msg_camera_stream(self, msg):
        """Camera stream callback method.
        Invoked upon the receipt of a message on the camera stream.
        """
        if self._last_seq_num + 1 != msg.timestamp.coordinates[1]:
            self._logger.error(
                'Expected msg with seq num {} but received {}'.format(
                    (self._last_seq_num + 1), msg.timestamp.coordinates[1]))
            if self._flags.fail_on_message_loss:
                assert self._last_seq_num + 1 == msg.timestamp.coordinates[1]
        self._last_seq_num = msg.timestamp.coordinates[1]

        self._logger.info('{} received frame {}'.format(
            self.name, msg.timestamp))
        start_time = time.time()
        image = bgra_to_bgr(msg.data)
        image = torch.from_numpy(
            image.transpose([2, 0, 1])).unsqueeze(0).float()
        image_var = Variable(image, requires_grad=False, volatile=True)

        final = self._model(image_var)[0]
        _, pred = torch.max(final, 1)

        pred = pred.cpu().data.numpy()[0]
        image_np = self._pallete[pred.squeeze()]
        # After we apply the palette, the image is in RGB format.
        image_np = rgb_to_bgr(image_np)

        if self._flags.visualize_segmentation_output:
            add_timestamp(msg.timestamp, image_np)
            cv2.imshow(self.name, image_np)
            cv2.waitKey(1)

        # Get runtime in ms.
        runtime = (time.time() - start_time) * 1000
        self._csv_logger.info('{},{},"{}",{}'.format(time_epoch_ms(),
                                                     self.name, msg.timestamp,
                                                     runtime))

        output_msg = Message((image_np, runtime), msg.timestamp)
        self.get_output_stream(self._output_stream_name).send(output_msg)
Example 21
    def fuse(self):
        # Return if we don't have car position, distances or objects.
        start_time = time.time()
        if min(map(len, [self._car_positions, self._distances,
                         self._objects])) == 0:
            return
        self.__discard_old_data()
        object_positions = self.__calc_object_positions(
            self._objects[0][1], self._distances[0][1],
            self._car_positions[0][1][0],
            np.arccos(self._car_positions[0][1][1][0]))
        timestamp = self._objects[0][0]

        # Get runtime in ms.
        runtime = (time.time() - start_time) * 1000
        self._csv_logger.info('{},{},{}'.format(time_epoch_ms(), self.name,
                                                runtime))

        output_msg = Message(object_positions, timestamp)
        self.get_output_stream(self._output_stream_name).send(output_msg)
Example 22
    def on_ground_obstacles(self, msg):
        # Ignore the first several seconds of the simulation because the car is
        # not moving at the beginning.
        game_time = msg.timestamp.coordinates[0]
        bboxes = []
        # Select the pedestrian bounding boxes.
        for det_obj in msg.detected_objects:
            if det_obj.label == 'pedestrian':
                bboxes.append(det_obj.corners)

        # Remove the buffered bboxes that are too old.
        while (len(self._ground_bboxes) > 0
               and game_time - self._ground_bboxes[0][0] >
               self._flags.eval_ground_truth_max_latency):
            self._ground_bboxes.popleft()

        for (old_game_time, old_bboxes) in self._ground_bboxes:
            # Ideally, we would like to take multiple precision values at
            # different recalls and average them, but we can't vary model
            # confidence, so we just return the actual precision.
            if (len(bboxes) > 0 or len(old_bboxes) > 0):
                latency = game_time - old_game_time
                precisions = []
                for iou in self._iou_thresholds:
                    (precision,
                     _) = get_precision_recall_at_iou(bboxes, old_bboxes, iou)
                    precisions.append(precision)
                self._logger.info("Precision {}".format(precisions))
                avg_precision = float(sum(precisions)) / len(precisions)
                self._logger.info(
                    "The latency is {} and the average precision is {}".format(
                        latency, avg_precision))
                self._csv_logger.info('{},{},{},{}'.format(
                    time_epoch_ms(), self.name, latency, avg_precision))

        # Buffer the new bounding boxes.
        self._ground_bboxes.append((game_time, bboxes))
Example 23
    def read_carla_data(self):
        read_start_time = time.time()
        measurements, sensor_data = self.client.read_data()
        measure_time = time.time()

        self._logger.info(
            'Got readings for game time {} and platform time {}'.format(
                measurements.game_timestamp, measurements.platform_timestamp))

        # Send measurements
        player_measurements = measurements.player_measurements
        vehicle_pos = ((player_measurements.transform.location.x,
                        player_measurements.transform.location.y,
                        player_measurements.transform.location.z),
                       (player_measurements.transform.orientation.x,
                        player_measurements.transform.orientation.y,
                        player_measurements.transform.orientation.z))

        world_transform = Transform(player_measurements.transform)

        timestamp = Timestamp(
            coordinates=[measurements.game_timestamp, self.message_num])
        self.message_num += 1
        ray.register_custom_serializer(Message, use_pickle=True)
        ray.register_custom_serializer(WatermarkMessage, use_pickle=True)
        watermark = WatermarkMessage(timestamp)
        self.get_output_stream('world_transform').send(
            Message(world_transform, timestamp))
        self.get_output_stream('world_transform').send(watermark)
        self.get_output_stream('vehicle_pos').send(
            Message(vehicle_pos, timestamp))
        self.get_output_stream('vehicle_pos').send(watermark)
        acceleration = (player_measurements.acceleration.x,
                        player_measurements.acceleration.y,
                        player_measurements.acceleration.z)
        self.get_output_stream('acceleration').send(
            Message(acceleration, timestamp))
        self.get_output_stream('acceleration').send(watermark)
        self.get_output_stream('forward_speed').send(
            Message(player_measurements.forward_speed, timestamp))
        self.get_output_stream('forward_speed').send(watermark)
        self.get_output_stream('vehicle_collisions').send(
            Message(player_measurements.collision_vehicles, timestamp))
        self.get_output_stream('vehicle_collisions').send(watermark)
        self.get_output_stream('pedestrian_collisions').send(
            Message(player_measurements.collision_pedestrians, timestamp))
        self.get_output_stream('pedestrian_collisions').send(watermark)
        self.get_output_stream('other_collisions').send(
            Message(player_measurements.collision_other, timestamp))
        self.get_output_stream('other_collisions').send(watermark)
        self.get_output_stream('other_lane').send(
            Message(player_measurements.intersection_otherlane, timestamp))
        self.get_output_stream('other_lane').send(watermark)
        self.get_output_stream('offroad').send(
            Message(player_measurements.intersection_offroad, timestamp))
        self.get_output_stream('offroad').send(watermark)

        vehicles = []
        pedestrians = []
        traffic_lights = []
        speed_limit_signs = []

        for agent in measurements.non_player_agents:
            if agent.HasField('vehicle'):
                pos = messages.Transform(agent.vehicle.transform)
                bb = messages.BoundingBox(agent.vehicle.bounding_box)
                forward_speed = agent.vehicle.forward_speed
                vehicle = messages.Vehicle(pos, bb, forward_speed)
                vehicles.append(vehicle)
            elif agent.HasField('pedestrian'):
                if not self.agent_id_map.get(agent.id):
                    self.pedestrian_count += 1
                    self.agent_id_map[agent.id] = self.pedestrian_count

                pedestrian_index = self.agent_id_map[agent.id]
                pos = messages.Transform(agent.pedestrian.transform)
                bb = messages.BoundingBox(agent.pedestrian.bounding_box)
                forward_speed = agent.pedestrian.forward_speed
                pedestrian = messages.Pedestrian(pedestrian_index, pos, bb,
                                                 forward_speed)
                pedestrians.append(pedestrian)
            elif agent.HasField('traffic_light'):
                transform = messages.Transform(agent.traffic_light.transform)
                traffic_light = messages.TrafficLight(
                    transform, agent.traffic_light.state)
                traffic_lights.append(traffic_light)
            elif agent.HasField('speed_limit_sign'):
                transform = messages.Transform(
                    agent.speed_limit_sign.transform)
                speed_sign = messages.SpeedLimitSign(
                    transform, agent.speed_limit_sign.speed_limit)
                speed_limit_signs.append(speed_sign)

        vehicles_msg = Message(vehicles, timestamp)
        self.get_output_stream('vehicles').send(vehicles_msg)
        self.get_output_stream('vehicles').send(watermark)
        pedestrians_msg = Message(pedestrians, timestamp)
        self.get_output_stream('pedestrians').send(pedestrians_msg)
        self.get_output_stream('pedestrians').send(watermark)
        traffic_lights_msg = Message(traffic_lights, timestamp)
        self.get_output_stream('traffic_lights').send(traffic_lights_msg)
        self.get_output_stream('traffic_lights').send(watermark)
        traffic_signs_msg = Message(speed_limit_signs, timestamp)
        self.get_output_stream('traffic_signs').send(traffic_signs_msg)
        self.get_output_stream('traffic_signs').send(watermark)

        # Send sensor data
        for name, measurement in sensor_data.items():
            data_stream = self.get_output_stream(name)
            if data_stream.get_label('camera_type') == 'SceneFinal':
                # Transform the Carla RGB images to BGR.
                data_stream.send(
                    Message(
                        pylot_utils.bgra_to_bgr(to_bgra_array(measurement)),
                        timestamp))
            else:
                data_stream.send(Message(measurement, timestamp))
            data_stream.send(watermark)

        self.client.send_control(**self.control)
        end_time = time.time()

        measurement_runtime = (measure_time - read_start_time) * 1000
        total_runtime = (end_time - read_start_time) * 1000
        self._logger.info('Carla measurement time {}; total time {}'.format(
            measurement_runtime, total_runtime))
        self._csv_logger.info('{},{},{},{}'.format(time_epoch_ms(), self.name,
                                                   measurement_runtime,
                                                   total_runtime))
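Every stream in read_carla_data receives a data message immediately followed by the same watermark. A small hedged helper capturing that send-then-watermark pattern; op stands in for the operator, and this helper is not part of the original code.

def send_with_watermark(op, stream_name, data, timestamp, watermark):
    # Data message first, then the watermark that closes the timestamp.
    stream = op.get_output_stream(stream_name)
    stream.send(Message(data, timestamp))
    stream.send(watermark)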
Example 24
    def on_notification(self, msg):
        # Check that we didn't skip any notification. We only skip
        # notifications if messages or watermarks are lost.
        if self._last_notification != -1:
            assert self._last_notification + 1 == msg.timestamp.coordinates[1]
        self._last_notification = msg.timestamp.coordinates[1]

        # Ignore the first two messages. We use them to get sim time
        # between frames.
        if self._last_notification < 2:
            if self._last_notification == 0:
                self._sim_interval = int(msg.timestamp.coordinates[0])
            elif self._last_notification == 1:
                # Set the real simulation interval.
                self._sim_interval = int(
                    msg.timestamp.coordinates[0]) - self._sim_interval
            return

        game_time = msg.timestamp.coordinates[0]
        # Transform the 3D boxes at the watermark's game time to 2D.
        (ped_bboxes, vec_bboxes) = self.__get_bboxes()
        # Add the pedestrians to the ground obstacles buffer.
        self._ground_obstacles.append((game_time, ped_bboxes))

        while len(self._detector_start_end_times) > 0:
            (end_time, start_time) = self._detector_start_end_times[0]
            # We can compute mAP only once the ground time has caught up
            # with the end time.
            if end_time <= game_time:
                # Pop the entry; the buffered ground bboxes now cover it.
                heapq.heappop(self._detector_start_end_times)
                end_bboxes = self.__get_ground_obstacles_at(end_time)
                if self._flags.detection_eval_use_accuracy_model:
                    # Not using the detector's outputs => get ground bboxes.
                    start_bboxes = self.__get_ground_obstacles_at(start_time)
                    if (len(start_bboxes) > 0 or len(end_bboxes) > 0):
                        precisions = []
                        for iou in self._iou_thresholds:
                            (precision, _) = get_precision_recall_at_iou(
                                end_bboxes, start_bboxes, iou)
                            precisions.append(precision)
                        avg_precision = float(
                            sum(precisions)) / len(precisions)
                        self._logger.info(
                            'precision-IoU is: {}'.format(avg_precision))
                        self._csv_logger.info('{},{},{},{}'.format(
                            time_epoch_ms(), self.name, 'precision-IoU',
                            avg_precision))
                else:
                    # Get detector output obstacles.
                    det_output = self.__get_obstacles_at(start_time)
                    if (len(det_output) > 0 or len(end_bboxes) > 0):
                        mAP = detection_utils.get_pedestrian_mAP(
                            end_bboxes, det_output)
                        self._logger.info('mAP is: {}'.format(mAP))
                        self._csv_logger.info('{},{},{},{}'.format(
                            time_epoch_ms(), self.name, 'mAP', mAP))
                self._logger.info('Computed accuracy for end time {} and '
                                  'start time {}'.format(end_time, start_time))
            else:
                # The remaining entries require newer ground bboxes.
                break

        self.__garbage_collect_obstacles()
Example 25
    def on_notification(self, msg):
        # Check that we didn't skip any notification. We only skip
        # notifications if messages or watermarks are lost.
        if self._last_notification != -1:
            assert self._last_notification + 1 == msg.timestamp.coordinates[1]
        self._last_notification = msg.timestamp.coordinates[1]

        # Ignore the first several seconds of the simulation because the car is
        # not moving at the beginning.
        if (msg.timestamp.coordinates[0] <=
                self._flags.eval_ground_truth_ignore_first):
            return

        # Get the data from the start of all the queues and make sure that
        # we did not miss any data.
        depth_msg = self._depth_imgs.popleft()
        bgr_msg = self._bgr_imgs.popleft()
        world_trans_msg = self._world_transforms.popleft()
        pedestrians_msg = self._pedestrians.popleft()

        self._logger.info('Timestamps {} {} {} {}'.format(
            depth_msg.timestamp, bgr_msg.timestamp, world_trans_msg.timestamp,
            pedestrians_msg.timestamp))

        assert (depth_msg.timestamp == bgr_msg.timestamp ==
                world_trans_msg.timestamp == pedestrians_msg.timestamp)

        depth_array = depth_to_array(depth_msg.data)
        world_transform = world_trans_msg.data

        bboxes = []
        self._logger.info('Number of pedestrians {}'.format(
            len(pedestrians_msg.data)))
        for (pedestrian_index, obj_transform, obj_bbox,
             _) in pedestrians_msg.data:
            bbox = get_2d_bbox_from_3d_box(depth_array, world_transform,
                                           obj_transform, obj_bbox,
                                           self._rgb_transform,
                                           self._rgb_intrinsic,
                                           self._rgb_img_size, 1.5, 3.0)
            if bbox is not None:
                bboxes.append(bbox)

        # Remove the buffered bboxes that are too old.
        while (len(self._ground_bboxes) > 0 and msg.timestamp.coordinates[0] -
               self._ground_bboxes[0][0].coordinates[0] >
               self._flags.eval_ground_truth_max_latency):
            self._ground_bboxes.popleft()

        for (old_timestamp, old_bboxes) in self._ground_bboxes:
            # Ideally, we would like to take multiple precision values at
            # different recalls and average them, but we can't vary model
            # confidence, so we just return the actual precision.
            if (len(bboxes) > 0 or len(old_bboxes) > 0):
                latency = (msg.timestamp.coordinates[0] -
                           old_timestamp.coordinates[0])
                precisions = []
                for iou in self._iou_thresholds:
                    (precision,
                     _) = get_precision_recall_at_iou(bboxes, old_bboxes, iou)
                    precisions.append(precision)
                self._logger.info("Precision {}".format(precisions))
                avg_precision = float(sum(precisions)) / len(precisions)
                self._logger.info(
                    "The latency is {} and the average precision is {}".format(
                        latency, avg_precision))
                self._csv_logger.info('{},{},{},{}'.format(
                    time_epoch_ms(), self.name, latency, avg_precision))

        # Buffer the new bounding boxes.
        self._ground_bboxes.append((msg.timestamp, bboxes))
Example 26
    def compute_command(self, can_bus_msg, waypoint_msg, tl_msg, obstacles_msg,
                        pc_msg, depth_msg, timestamp):
        start_time = time.time()
        vehicle_transform = can_bus_msg.data.transform
        vehicle_speed = can_bus_msg.data.forward_speed
        wp_angle = waypoint_msg.wp_angle
        wp_vector = waypoint_msg.wp_vector
        wp_angle_speed = waypoint_msg.wp_angle_speed
        target_speed = waypoint_msg.target_speed
        # Transform point cloud to camera coordinates.
        point_cloud = None
        if pc_msg:
            point_cloud = pylot.simulation.utils.lidar_point_cloud_to_camera_coordinates(
                pc_msg.point_cloud)
        depth_frame = None
        if depth_msg:
            depth_frame = depth_msg.frame

        traffic_lights = self.__transform_tl_output(tl_msg, vehicle_transform,
                                                    point_cloud, depth_frame)
        game_time = timestamp.coordinates[0]
        if len(traffic_lights) > 0:
            self._last_traffic_light_game_time = game_time
        (pedestrians,
         vehicles) = self.__transform_detector_output(obstacles_msg,
                                                      vehicle_transform,
                                                      point_cloud, depth_frame)

        # if self._map.is_on_opposite_lane(vehicle_transform):
        #     # Ignore obstacles
        #     self._logger.info('Ego-vehicle {} on opposite lane'.format(
        #         vehicle_transform))
        #     pedestrians = []
        #     vehicles = []
        #     traffic_lights = []

        self._logger.info('{} Current speed {} and location {}'.format(
            timestamp, vehicle_speed, vehicle_transform))
        self._logger.info('{} Pedestrians {}'.format(timestamp, pedestrians))
        self._logger.info('{} Vehicles {}'.format(timestamp, vehicles))

        speed_factor, _ = self.__stop_for_agents(vehicle_transform, wp_angle,
                                                 wp_vector, vehicles,
                                                 pedestrians, traffic_lights,
                                                 timestamp)

        new_target_speed = self.reduce_speed_when_approaching_intersection(
            vehicle_transform, vehicle_speed, target_speed, game_time)
        if new_target_speed != target_speed:
            self._logger.info(
                'Proximity to intersection, reducing speed from {} to {}'.
                format(target_speed, new_target_speed))
            target_speed = new_target_speed

        self._logger.info('{} Current speed factor {}'.format(
            timestamp, speed_factor))

        control_msg = self.get_control_message(wp_angle, wp_angle_speed,
                                               speed_factor, vehicle_speed,
                                               target_speed, timestamp)

        if control_msg.throttle > 0.001:
            self._last_moving_time = game_time
            self._num_control_override = 0

        if self._num_control_override > 0:
            self._num_control_override -= 1
            control_msg.throttle = 0.75

        # The vehicle might be stuck because of a faulty detector; override
        # the control message if we haven't been moving for 30s (30000 ms
        # of game time).
        if game_time - self._last_moving_time > 30000:
            self._num_control_override = 6
            control_msg = ControlMessage(0, 0.75, 0, False, False, timestamp)

        # Get runtime in ms.
        runtime = (time.time() - start_time) * 1000
        self._csv_logger.info('{},{},"{}",{}'.format(time_epoch_ms(),
                                                     self.name, timestamp,
                                                     runtime))

        self.get_output_stream('control_stream').send(control_msg)