Code example #1
    def on_imu_update(self, msg):
        """ Invoked upon receipt of an IMU sensor update.

        Logs the lateral and longitudinal acceleration and jerk.

        Args:
            msg (:py:class:`pylot.perception.messages.IMUMessage`): The IMU
                message sent by the sensor.
        """
        sim_time = msg.timestamp.coordinates[0]
        lateral_acc, longitudinal_acc = msg.acceleration.y, msg.acceleration.x
        # Log the lateral and the longitudinal acceleration.
        self._csv_logger.info('{},{},acceleration,lateral,{:.4f}'.format(
            time_epoch_ms(), sim_time, lateral_acc))
        self._csv_logger.info('{},{},acceleration,longitudinal,{:.4f}'.format(
            time_epoch_ms(), sim_time, longitudinal_acc))

        # Calculate the jerk, and log both lateral and longitudinal jerk.
        if self._last_timestamp:
            time_diff_sec = (sim_time -
                             self._last_timestamp.coordinates[0]) / 1000.0
            # Jerk is the rate of change of acceleration:
            # (current - last) / dt.
            lateral_jerk = (lateral_acc -
                            self._last_lateral_acc) / time_diff_sec
            longitudinal_jerk = (longitudinal_acc -
                                 self._last_longitudinal_acc) / time_diff_sec
            self._csv_logger.info('{},{},jerk,lateral,{:.4f}'.format(
                time_epoch_ms(), sim_time, lateral_jerk))
            self._csv_logger.info('{},{},jerk,longitudinal,{:.4f}'.format(
                time_epoch_ms(), sim_time, longitudinal_jerk))

        # Save the new acceleration and timestamp.
        self._last_lateral_acc = lateral_acc
        self._last_longitudinal_acc = longitudinal_acc
        self._last_timestamp = msg.timestamp
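
The jerk logged above is just a backward difference of consecutive acceleration samples. A minimal, self-contained sketch of the same computation (the helper name finite_difference_jerk is ours, not Pylot's):

def finite_difference_jerk(prev_acc, cur_acc, dt_ms):
    """Approximate jerk (m/s^3) as a backward difference of acceleration.

    prev_acc and cur_acc are accelerations (m/s^2) from two consecutive
    IMU updates; dt_ms is the simulation time elapsed between them, in
    milliseconds (as in the timestamps above).
    """
    dt_sec = dt_ms / 1000.0
    return (cur_acc - prev_acc) / dt_sec

# E.g., acceleration rising from 1.0 to 1.5 m/s^2 over a 100 ms tick:
# finite_difference_jerk(1.0, 1.5, 100) == 5.0  (m/s^3)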
Code example #2
    def on_watermark(self, timestamp):
        """Invoked when all input streams have received a watermark.

        Args:
            timestamp (:py:class:`erdos.timestamp.Timestamp`): The timestamp of
                the watermark.
        """
        assert len(timestamp.coordinates) == 1
        op_start_time = time.time()
        game_time = timestamp.coordinates[0]
        if not self._last_notification:
            self._last_notification = game_time
            return
        else:
            self._sim_interval = (game_time - self._last_notification)
            self._last_notification = game_time

        while len(self._tracker_start_end_times) > 0:
            (end_time, start_time) = self._tracker_start_end_times[0]
            # We can compute the tracker metrics if the end time is not
            # greater than the current game time.
            if end_time <= game_time:
                # This is the closest ground bounding box to the end time.
                heapq.heappop(self._tracker_start_end_times)
                ground_obstacles = self.__get_ground_obstacles_at(end_time)
                # Get tracker output obstacles.
                tracker_obstacles = self.__get_tracked_obstacles_at(start_time)
                if (len(tracker_obstacles) > 0 or len(ground_obstacles) > 0):
                    metrics_summary_df = self.get_tracker_metrics(
                        tracker_obstacles, ground_obstacles)
                    # Get runtime in ms
                    runtime = (time.time() - op_start_time) * 1000
                    self._csv_logger.info("{},{},{},{}".format(
                        time_epoch_ms(), self.config.name, "runtime", runtime))
                    # Write metrics to csv log file
                    for metric_name in self._flags.tracking_metrics:
                        if metric_name in metrics_summary_df.columns:
                            self._csv_logger.info("{},{},{},{}".format(
                                time_epoch_ms(), self.config.name, metric_name,
                                metrics_summary_df[metric_name].values[0]))
                        else:
                            raise ValueError(
                                "Unexpected tracking metric: {}".format(
                                    metric_name))
                self._logger.debug('Computing accuracy for {} {}'.format(
                    end_time, start_time))
            else:
                # The remaining entries require newer ground obstacles.
                break

        self.__garbage_collect_obstacles()
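
The `_tracker_start_end_times` structure above is a min-heap of `(end_time, start_time)` tuples: `heapq` orders tuples lexicographically, so the entry whose ground truth matures earliest always sits at the front, and the loop pops entries until it reaches one that still needs newer ground obstacles. A toy sketch of that queue discipline:

import heapq

pending = []
# Pushed in arbitrary order; the heap keys on end_time (first element).
heapq.heappush(pending, (400, 300))
heapq.heappush(pending, (200, 100))

game_time = 250  # the watermark's game time
while len(pending) > 0 and pending[0][0] <= game_time:
    end_time, start_time = heapq.heappop(pending)
    print('evaluating output from {} against ground truth at {}'.format(
        start_time, end_time))
# Only (200, 100) is popped; (400, 300) waits for a later watermark.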
Code example #3
    def on_msg_camera_stream(self, msg, segmented_stream):
        """Camera stream callback method.
        Invoked upon the receipt of a message on the camera stream.
        """
        self._logger.debug('@{}: {} received message'.format(
            msg.timestamp, self._name))
        start_time = time.time()
        assert msg.frame.encoding == 'BGR', 'Expects BGR frames'
        image = torch.from_numpy(msg.frame.frame.transpose(
            [2, 0, 1])).unsqueeze(0).float()
        # Variable(volatile=True) was removed in PyTorch 0.4; torch.no_grad()
        # is the modern way to disable autograd during inference.
        with torch.no_grad():
            final = self._model(image)[0]
            _, pred = torch.max(final, 1)

        pred = pred.cpu().numpy()[0]
        # After we apply the palette, the image is in RGB format.
        image_np = self._pallete[pred.squeeze()]

        # Get runtime in ms.
        runtime = (time.time() - start_time) * 1000
        self._csv_logger.info('{},{},"{}",{}'.format(time_epoch_ms(),
                                                     self._name, msg.timestamp,
                                                     runtime))
        frame = SegmentedFrame(image_np, 'cityscapes', msg.frame.camera_setup)
        if self._flags.visualize_segmentation_output:
            frame.visualize(self._name, msg.timestamp)
        segmented_stream.send(
            SegmentedFrameMessage(frame, msg.timestamp, runtime))
Code example #4
    def on_notification(self, timestamp):
        assert len(timestamp.coordinates) == 1
        game_time = timestamp.coordinates[0]
        if not self._last_notification:
            self._last_notification = game_time
            return
        else:
            self._sim_interval = (game_time - self._last_notification)
            self._last_notification = game_time

        while len(self._detector_start_end_times) > 0:
            (end_time, start_time) = self._detector_start_end_times[0]
            # We can compute the mAP if the end time is not greater than the
            # current game time.
            if end_time <= game_time:
                # This is the closest ground bounding box to the end time.
                heapq.heappop(self._detector_start_end_times)
                ground_obstacles = self.__get_ground_obstacles_at(end_time)
                # Get detector output obstacles.
                obstacles = self.__get_obstacles_at(start_time)
                if (len(obstacles) > 0 or len(ground_obstacles) > 0):
                    mAP = get_mAP(ground_obstacles, obstacles)
                    self._logger.info('mAP is: {}'.format(mAP))
                    self._csv_logger.info('{},{},{},{}'.format(
                        time_epoch_ms(), self._name, 'mAP', mAP))
                self._logger.debug('Computing accuracy for {} {}'.format(
                    end_time, start_time))
            else:
                # The remaining entries require newer ground obstacles.
                break

        self.__garbage_collect_obstacles()
Code example #5
    def on_watermark(self, timestamp):
        """Invoked when all input streams have received a watermark.

        Args:
            timestamp (:py:class:`erdos.timestamp.Timestamp`): The timestamp of
                the watermark.
        """
        assert len(timestamp.coordinates) == 1
        op_start_time = time.time()
        game_time = timestamp.coordinates[0]
        if not self._last_notification:
            self._last_notification = game_time
            return
        else:
            self._sim_interval = (game_time - self._last_notification)
            self._last_notification = game_time

        sim_time = timestamp.coordinates[0]
        while len(self._detector_start_end_times) > 0:
            (end_time, start_time) = self._detector_start_end_times[0]
            # We can compute the mAP if the end time is not greater than the
            # current game time.
            if end_time <= game_time:
                # This is the closest ground bounding box to the end time.
                heapq.heappop(self._detector_start_end_times)
                ground_obstacles = self.__get_ground_obstacles_at(end_time)
                # Get detector output obstacles.
                obstacles = self.__get_obstacles_at(start_time)
                if (len(obstacles) > 0 or len(ground_obstacles) > 0):
                    mAP = pylot.perception.detection.utils.get_mAP(
                        ground_obstacles, obstacles)
                    # Get runtime in ms
                    runtime = (time.time() - op_start_time) * 1000
                    self._csv_logger.info('{},{},{},{},{:.4f}'.format(
                        time_epoch_ms(), sim_time, self.config.name, 'runtime',
                        runtime))
                    self._logger.info('mAP is: {}'.format(mAP))
                    self._csv_logger.info('{},{},{},{},{:.4f}'.format(
                        time_epoch_ms(), sim_time, self.config.name, 'mAP',
                        mAP))
                self._logger.debug('Computing accuracy for {} {}'.format(
                    end_time, start_time))
            else:
                # The remaining entries require newer ground obstacles.
                break

        self.__garbage_collect_obstacles()
Code example #6
    def on_watermark(self, timestamp: erdos.Timestamp,
                     prediction_stream: erdos.WriteStream):
        self._logger.debug('@{}: received watermark'.format(timestamp))
        if timestamp.is_top:
            return
        point_cloud_msg = self._point_cloud_msgs.popleft()
        tracking_msg = self._tracking_msgs.popleft()

        start_time = time.time()
        nearby_trajectories, nearby_vehicle_ego_transforms, \
            nearby_trajectories_tensor, binned_lidars_tensor = \
            self._preprocess_input(tracking_msg, point_cloud_msg)

        num_predictions = len(nearby_trajectories)
        self._logger.info(
            '@{}: Getting R2P2 predictions for {} vehicles'.format(
                timestamp, num_predictions))

        if num_predictions == 0:
            prediction_stream.send(PredictionMessage(timestamp, []))
            return

        # Run the forward pass.
        z = torch.tensor(
            np.random.normal(size=(num_predictions,
                                   self._flags.prediction_num_future_steps,
                                   2))).to(torch.float32).to(self._device)
        model_start_time = time.time()
        prediction_array, _ = self._r2p2_model.forward(
            z, nearby_trajectories_tensor, binned_lidars_tensor)
        model_runtime = (time.time() - model_start_time) * 1000
        self._csv_logger.debug("{},{},{},{:.4f}".format(
            time_epoch_ms(), timestamp.coordinates[0],
            'r2p2-modelonly-runtime', model_runtime))
        prediction_array = prediction_array.cpu().detach().numpy()

        obstacle_predictions_list = self._postprocess_predictions(
            prediction_array, nearby_trajectories,
            nearby_vehicle_ego_transforms)
        runtime = (time.time() - start_time) * 1000
        self._csv_logger.debug("{},{},{},{:.4f}".format(
            time_epoch_ms(), timestamp.coordinates[0], 'r2p2-runtime',
            runtime))
        prediction_stream.send(
            PredictionMessage(timestamp, obstacle_predictions_list))
Code example #7
 def __compute_mean_iou(self, timestamp, ground_frame, segmented_frame):
     ground_frame.transform_to_cityscapes()
     (mean_iou,
      class_iou) = ground_frame.compute_semantic_iou(segmented_frame)
     self._logger.info('IoU class scores: {}'.format(class_iou))
     self._logger.info('mean IoU score: {}'.format(mean_iou))
     self._csv_logger.info('{},{},{},{},{:.4f}'.format(
         time_epoch_ms(), timestamp.coordinates[0], self.config.name,
         self._flags.segmentation_metric, mean_iou))
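
`compute_semantic_iou` is a method on Pylot's segmented frames; the per-class IoU underneath it is standard. A minimal NumPy sketch, assuming both frames are integer label maps of equal shape (the helper name is ours):

import numpy as np

def semantic_iou(ground, predicted):
    """Return (mean IoU, per-class IoU dict) for two integer label maps."""
    assert ground.shape == predicted.shape, 'Label maps must match in shape'
    class_iou = {}
    for label in np.unique(ground):
        gt_mask = ground == label
        pred_mask = predicted == label
        union = np.logical_or(gt_mask, pred_mask).sum()
        if union > 0:
            intersection = np.logical_and(gt_mask, pred_mask).sum()
            class_iou[int(label)] = intersection / union
    mean_iou = sum(class_iou.values()) / len(class_iou)
    return mean_iou, class_iou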
Code example #8
    def on_pose_update(self, msg):
        vehicle_location = msg.data.transform.location
        x = vehicle_location.x
        y = vehicle_location.y
        z = vehicle_location.z

        self._csv_logger.info('{},{},pose,global,{}'.format(
            time_epoch_ms(), msg.timestamp.coordinates[0],
            "[{:.4f} {:.4f} {:.4f}]".format(x, y, z)))
Code example #9
    def on_msg_camera_stream(self, msg, detected_lanes_stream):
        self._logger.debug('@{}: {} received message'.format(
            msg.timestamp, self._name))
        start_time = time.time()
        assert msg.frame.encoding == 'BGR', 'Expects BGR frames'
        image_np = msg.frame.frame

        # TODO(ionel): Implement lane detection.
        edges = self.apply_canny(image_np)
        lines_edges = self.apply_hough(image_np, edges)

        kernel_size = 3
        grad_x = self.apply_sobel(image_np,
                                  orient='x',
                                  sobel_kernel=kernel_size,
                                  thresh_min=0,
                                  thresh_max=255)
        grad_y = self.apply_sobel(image_np,
                                  orient='y',
                                  sobel_kernel=kernel_size,
                                  thresh_min=0,
                                  thresh_max=255)
        mag_binary = self.magnitude_threshold(image_np,
                                              sobel_kernel=kernel_size,
                                              thresh_min=0,
                                              thresh_max=255)
        dir_binary = self.direction_threshold(image_np,
                                              sobel_kernel=kernel_size,
                                              thresh_min=0,
                                              thresh_max=np.pi / 2)

        s_binary = self.extract_s_channel(image_np)

        # Select the pixels where both the x and y gradients meet the
        # threshold criteria, or where the gradient magnitude and direction
        # are both within the threshold values.
        combined = np.zeros_like(dir_binary)
        combined[((grad_x == 1) & (grad_y == 1)) | ((mag_binary == 1) &
                                                    (dir_binary == 1))] = 1

        combined_binary = np.zeros_like(grad_x)
        combined_binary[(s_binary == 1) | (grad_x == 1)] = 1

        # Get runtime in ms.
        runtime = (time.time() - start_time) * 1000
        self._csv_logger.info('{},{},"{}",{}'.format(time_epoch_ms(),
                                                     self._name, msg.timestamp,
                                                     runtime))

        if self._flags.visualize_lane_detection:
            frame = CameraFrame(lines_edges, 'BGR', msg.frame.camera_setup)
            frame.visualize(self._name, msg.timestamp)

        detected_lanes_stream.send(erdos.Message(msg.timestamp, image_np))
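
The `apply_sobel`, `magnitude_threshold`, and `direction_threshold` helpers are defined elsewhere in the operator. A sketch of what an `apply_sobel`-style helper typically computes, under the assumption that it returns a binary map of pixels whose scaled gradient falls within the thresholds (details may differ from Pylot's implementation):

import cv2
import numpy as np

def sobel_binary(image_bgr, orient='x', sobel_kernel=3, thresh_min=0,
                 thresh_max=255):
    """Binary map of pixels whose scaled Sobel gradient is within range."""
    gray = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2GRAY)
    dx, dy = (1, 0) if orient == 'x' else (0, 1)
    sobel = np.absolute(cv2.Sobel(gray, cv2.CV_64F, dx, dy,
                                  ksize=sobel_kernel))
    # Scale to 0-255 so the thresholds are intensity-independent.
    scaled = np.uint8(255 * sobel / max(np.max(sobel), 1e-6))
    binary = np.zeros_like(scaled)
    binary[(scaled >= thresh_min) & (scaled <= thresh_max)] = 1
    return binary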
Code example #10
    def on_lane_invasion_update(self, msg):
        """ Invoked upon receipt of a lane invasion update.

        Logs the type of invasion and the timestamp.

        Args:
            msg (:py:class:`pylot.simulation.messages.LaneInvasionMessage`):
                The lane invasion message sent by the sensor.
        """
        sim_time = msg.timestamp.coordinates[0]
        # We log lane invasion events only if they are illegal.
        if any(
                map(EvalMetricLoggerOperator.check_illegal_lane_invasion,
                    msg.lane_markings)):
            self._csv_logger.info('{},{},invasion,lane'.format(
                time_epoch_ms(), sim_time))

        if msg.lane_type == LaneType.SIDEWALK:
            self._csv_logger.info('{},{},invasion,sidewalk'.format(
                time_epoch_ms(), sim_time))
Code example #11
 def compute_accuracy(self, frame_time, ground_time, end_anchored):
     anchor_type = "end" if end_anchored else "start"
     anchor_time = ground_time if end_anchored else frame_time
     predictions = self.get_prediction_at(frame_time)
     ground_truths = self.get_ground_truth_at(ground_time)
     self.scoring_modules[anchor_type].add_datapoint(
         predictions, ground_truths)
     new_scores = self.scoring_modules[anchor_type].get_scores()
     for k, v in new_scores.items():
         self._csv_logger.info("{},{},{},{},{},{},{:.4f}".format(
             time_epoch_ms(), anchor_time, self.config.name, anchor_type,
             self._matching_policy, k, v))
Code example #12
    def on_collision_update(self, msg):
        """ Invoked upon receipt of a collision update.

        Logs the collided actor and the intensity of the collision.

        Args:
            msg (:py:class:`pylot.simulation.messages.CollisionMessage`): The
                collision message sent by the sensor.
        """
        self._csv_logger.info('{},{},collision,{},{:.4f}'.format(
            time_epoch_ms(), msg.timestamp.coordinates[0],
            msg.collided_actor.split('.')[0], msg.intensity))
Code example #13
    def on_traffic_light_invasion_update(self, msg):
        """ Invoked upon receipt of a traffic light invasion update.

        Logs the timestamp of the invasion.

        Args:
            msg (:py:class:`pylot.simulation.messages.TrafficInfractionMessage`):  # noqa: E501
                The traffic infraction message sent by the sensor.
        """
        sim_time = msg.timestamp.coordinates[0]
        self._csv_logger.info('{},{},invasion,red_light'.format(
            time_epoch_ms(), sim_time))
Code example #14
    def on_ground_segmented_frame(self, msg, iou_stream):
        assert len(msg.timestamp.coordinates) == 1
        start_time = time.time()
        # We don't fully transform it to cityscapes palette to avoid
        # introducing extra latency.
        frame = msg.frame

        sim_time = msg.timestamp.coordinates[0]
        if len(self._ground_frames) > 0:
            # Pop the oldest frame if it's older than the max latency
            # we're interested in.
            if (msg.timestamp.coordinates[0] - self._ground_frames[0][0] >
                    self._flags.decay_max_latency):
                self._ground_frames.popleft()

            cur_time = time_epoch_ms()
            for timestamp, ground_frame in self._ground_frames:
                (mean_iou, class_iou) = \
                    ground_frame.compute_semantic_iou_using_masks(frame)
                time_diff = msg.timestamp.coordinates[0] - timestamp
                self._logger.info(
                    'Segmentation ground latency {} ; mean IoU {}'.format(
                        time_diff, mean_iou))
                self._csv_logger.info('{},{},{},mIoU,{},{:.4f}'.format(
                    cur_time, sim_time, self.config.name, time_diff, mean_iou))
                iou_stream.send(
                    erdos.Message(msg.timestamp, (time_diff, mean_iou)))
                person_key = 4
                if person_key in class_iou:
                    self._logger.info(
                        'Segmentation ground latency {} ; person IoU {}'.
                        format(time_diff, class_iou[person_key]))
                    self._csv_logger.info(
                        '{},{},{},personIoU,{},{:.4f}'.format(
                            cur_time, sim_time, self.config.name, time_diff,
                            class_iou[person_key]))

                vehicle_key = 10
                if vehicle_key in class_iou:
                    self._logger.info(
                        'Segmentation ground latency {} ; vehicle IoU {}'.
                        format(time_diff, class_iou[vehicle_key]))
                    self._csv_logger.info(
                        '{},{},{},vehicleIoU,{},{:.4f}'.format(
                            cur_time, sim_time, self.config.name, time_diff,
                            class_iou[vehicle_key]))

        # Append the processed image to the buffer.
        self._ground_frames.append((msg.timestamp.coordinates[0], frame))

        runtime = (time.time() - start_time) * 1000
        self._logger.info(
            'Segmentation eval ground runtime {}'.format(runtime))
Code example #15
File: control_eval_operator.py Project: wnklmx/pylot
    def on_watermark(self, timestamp: Timestamp):
        """Computes and logs the metrics of accuracy for the control module.

        This operator uses two different metrics of accuracy, as follows:
            1. Crosstrack error: The distance between the waypoint that the
                planner intended the vehicle to achieve, and the location
                achieved by the control module.
            2. Heading error: The angle between the heading of the vehicle,
                and the reference trajectory that the planner intended the
                vehicle to follow.

        The log format is (timestamp, crosstrack_error, heading_error).

        Args:
            timestamp (:py:class:`erdos.timestamp.Timestamp`): The timestamp
                of the watermark.
        """
        self._logger.debug('@{}: received watermark.'.format(timestamp))
        if timestamp.is_top:
            return

        # Get the transform of the ego vehicle.
        pose_msg = self._pose_messages.popleft()
        vehicle_transform = pose_msg.data.transform

        if len(self.last_waypoints) == 2:
            # Compute the metrics of accuracy using the last two waypoints.
            self._logger.debug(
                "@{}: vehicle location: {}, waypoints: {}".format(
                    timestamp, vehicle_transform.location,
                    self.last_waypoints))
            crosstrack_err, heading_err = ControlEvalOperator.\
                compute_control_metrics(vehicle_transform,
                                        self.last_waypoints)

            self._csv_logger.info("{},{},control,{:.4f},{:.4f}".format(
                time_epoch_ms(), timestamp.coordinates[0], crosstrack_err,
                heading_err))

        # Add the first waypoint from the last waypoints received
        # by the operator.
        waypoints = self._waypoints_msgs.popleft().waypoints.waypoints
        if len(waypoints) > 0:
            next_waypoint = waypoints.popleft()
            while len(self.last_waypoints) >= 1 and len(
                    waypoints) > 0 and np.isclose(
                        next_waypoint.location.distance(
                            self.last_waypoints[-1].location), 0.0):
                next_waypoint = waypoints.popleft()
            self.last_waypoints.append(next_waypoint)
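
`compute_control_metrics` is a static helper on the operator; geometrically, the two metrics it names reduce to a point-to-line distance and an angle difference. A 2-D sketch under those definitions (function and argument names are ours):

import numpy as np

def control_errors(vehicle_xy, vehicle_yaw, wp1_xy, wp2_xy):
    """Crosstrack and heading error against the segment wp1 -> wp2.

    vehicle_yaw is in radians; the heading error is returned in radians,
    wrapped to [-pi, pi].
    """
    traj = np.asarray(wp2_xy, dtype=float) - np.asarray(wp1_xy, dtype=float)
    to_vehicle = np.asarray(vehicle_xy, dtype=float) - np.asarray(
        wp1_xy, dtype=float)
    # Crosstrack error: perpendicular distance from the vehicle to the
    # reference line (2-D cross product magnitude over segment length).
    cross = traj[0] * to_vehicle[1] - traj[1] * to_vehicle[0]
    crosstrack_err = abs(cross) / np.linalg.norm(traj)
    # Heading error: signed angle between vehicle heading and trajectory.
    traj_yaw = np.arctan2(traj[1], traj[0])
    heading_err = np.arctan2(np.sin(vehicle_yaw - traj_yaw),
                             np.cos(vehicle_yaw - traj_yaw))
    return crosstrack_err, heading_err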
Code example #16
    def on_frame(self, msg, traffic_lights_stream):
        """ Invoked when the operator receives a message on the data stream."""
        self._logger.debug('@{}: {} received message'.format(
            msg.timestamp, self._name))
        start_time = time.time()
        assert msg.frame.encoding == 'BGR', 'Expects BGR frames'
        # Expand dimensions since the model expects images to have
        # shape: [1, None, None, 3]
        image_np_expanded = np.expand_dims(msg.frame.as_rgb_numpy_array(),
                                           axis=0)
        (boxes, scores, classes, num) = self._tf_session.run(
            [
                self._detection_boxes, self._detection_scores,
                self._detection_classes, self._num_detections
            ],
            feed_dict={self._image_tensor: image_np_expanded})

        num_detections = int(num[0])
        labels = [self._labels[label] for label in classes[0][:num_detections]]
        boxes = boxes[0][:num_detections]
        scores = scores[0][:num_detections]

        traffic_lights = self.__convert_to_detected_tl(
            boxes, scores, labels, msg.frame.camera_setup.height,
            msg.frame.camera_setup.width)

        self._logger.debug('@{}: {} detected traffic lights {}'.format(
            msg.timestamp, self._name, traffic_lights))

        if (self._flags.visualize_detected_traffic_lights
                or self._flags.log_traffic_light_detector_output):
            msg.frame.annotate_with_bounding_boxes(msg.timestamp,
                                                   traffic_lights,
                                                   self._bbox_colors)
            if self._flags.visualize_detected_traffic_lights:
                msg.frame.visualize(self._name)
            if self._flags.log_traffic_light_detector_output:
                msg.frame.save(msg.timestamp.coordinates[0],
                               self._flags.data_path,
                               'tl-detector-{}'.format(self._name))

        # Get runtime in ms.
        runtime = (time.time() - start_time) * 1000
        self._csv_logger.info('{},{},"{}",{}'.format(time_epoch_ms(),
                                                     self._name, msg.timestamp,
                                                     runtime))

        traffic_lights_stream.send(
            ObstaclesMessage(traffic_lights, msg.timestamp, runtime))
Code example #17
    def on_watermark(self, timestamp, control_stream):
        self._logger.debug('@{}: received watermark'.format(timestamp))
        start_time = time.time()
        can_bus_msg = self._can_bus_msgs.popleft()
        waypoint_msg = self._waypoint_msgs.popleft()
        tl_msg = self._traffic_lights_msgs.popleft()
        obstacles_msg = self._obstacles_msgs.popleft()
        point_cloud_msg = self._point_clouds.popleft()
        vehicle_transform = can_bus_msg.data.transform
        # Vehicle speed in m/s.
        vehicle_speed = can_bus_msg.data.forward_speed
        wp_angle = waypoint_msg.wp_angle
        wp_vector = waypoint_msg.wp_vector
        wp_angle_speed = waypoint_msg.wp_angle_speed
        target_speed = waypoint_msg.target_speed

        transformed_camera_setup = copy.deepcopy(self._camera_setup)
        transformed_camera_setup.set_transform(
            vehicle_transform * transformed_camera_setup.transform)

        traffic_lights = self.__transform_tl_output(
            tl_msg, point_cloud_msg.point_cloud, transformed_camera_setup)
        (people, vehicles) = self.__transform_detector_output(
            obstacles_msg, point_cloud_msg.point_cloud,
            transformed_camera_setup)

        self._logger.debug('@{}: speed {} and location {}'.format(
            timestamp, vehicle_speed, vehicle_transform))
        self._logger.debug('@{}: people {}'.format(timestamp, people))
        self._logger.debug('@{}: vehicles {}'.format(timestamp, vehicles))

        speed_factor, _ = self.__stop_for_agents(vehicle_transform.location,
                                                 wp_angle, wp_vector, vehicles,
                                                 people, traffic_lights,
                                                 timestamp)

        control_msg = self.get_control_message(wp_angle, wp_angle_speed,
                                               speed_factor, vehicle_speed,
                                               target_speed, timestamp)

        # Get runtime in ms.
        runtime = (time.time() - start_time) * 1000
        self._csv_logger.info('{},{},"{}",{}'.format(time_epoch_ms(),
                                                     self._name, timestamp,
                                                     runtime))

        control_stream.send(control_msg)
Code example #18
 def on_frame_msg(self, msg):
     """ Invoked when a FrameMessage is received on the camera stream."""
     self._logger.debug('@{}: {} received frame'.format(
         msg.timestamp, self._name))
     start_time = time.time()
     assert msg.frame.encoding == 'BGR', 'Expects BGR frames'
     camera_frame = msg.frame
     # Store frames so that they can be re-processed once we receive the
     # next update from the detector.
     self._to_process.append((msg.timestamp, camera_frame))
     # Track if we have a tracker ready to accept new frames.
     if self._ready_to_update:
         self.__track_bboxes_on_frame(camera_frame, msg.timestamp, False)
     # Get runtime in ms.
     runtime = (time.time() - start_time) * 1000
     self._csv_logger.info('{},{},"{}",{}'.format(time_epoch_ms(),
                                                  self._name, msg.timestamp,
                                                  runtime))
Code example #19
    def on_ground_obstacles(self, msg, map_stream):
        # Ignore the first several seconds of the simulation because the car is
        # not moving at the beginning.
        assert len(msg.timestamp.coordinates) == 1
        game_time = msg.timestamp.coordinates[0]
        bboxes = []
        # Select the person bounding boxes.
        for obstacle in msg.obstacles:
            if obstacle.label == 'person':
                bboxes.append(obstacle.bounding_box)

        # Remove the buffered bboxes that are too old.
        while (len(self._ground_bboxes) > 0
               and game_time - self._ground_bboxes[0][0] >
               self._flags.decay_max_latency):
            self._ground_bboxes.popleft()

        sim_time = msg.timestamp.coordinates[0]
        for (old_game_time, old_bboxes) in self._ground_bboxes:
            # Ideally, we would like to take multiple precision values at
            # different recalls and average them, but we can't vary model
            # confidence, so we just return the actual precision.
            if (len(bboxes) > 0 or len(old_bboxes) > 0):
                latency = game_time - old_game_time
                precisions = []
                for iou in self._iou_thresholds:
                    (precision,
                     _) = get_precision_recall_at_iou(bboxes, old_bboxes, iou)
                    precisions.append(precision)
                self._logger.info("Precision {}".format(precisions))
                avg_precision = float(sum(precisions)) / len(precisions)
                self._logger.info(
                    "The latency is {} and the average precision is {}".format(
                        latency, avg_precision))
                self._csv_logger.info('{},{},{},{},{:.4f}'.format(
                    time_epoch_ms(), sim_time, self.config.name, latency,
                    avg_precision))
                map_stream.send(
                    erdos.Message(msg.timestamp, (latency, avg_precision)))

        # Buffer the new bounding boxes.
        self._ground_bboxes.append((game_time, bboxes))
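
`get_precision_recall_at_iou` is a Pylot utility; since the callback cannot vary the detector's confidence, it averages plain precision over a set of IoU thresholds instead of integrating a precision-recall curve. A sketch of one such precision computation with greedy matching (the `iou_fn` argument is an assumption, standing in for whatever box-overlap helper is available):

def precision_at_iou(pred_boxes, gt_boxes, iou_threshold, iou_fn):
    """Precision of pred_boxes against gt_boxes at a single IoU threshold.

    Matching is greedy, and each ground-truth box is used at most once.
    """
    if len(pred_boxes) == 0:
        return 0.0
    unmatched_gt = list(gt_boxes)
    true_positives = 0
    for pred in pred_boxes:
        best = max(unmatched_gt, key=lambda gt: iou_fn(pred, gt),
                   default=None)
        if best is not None and iou_fn(pred, best) >= iou_threshold:
            true_positives += 1
            unmatched_gt.remove(best)
    return true_positives / len(pred_boxes)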
Code example #20
    def fuse(self):
        # Return if we don't have car position, distances or obstacles.
        start_time = time.time()
        if min(
                map(len,
                    [self._car_positions, self._distances, self._obstacles
                     ])) == 0:
            return
        self.__discard_old_data()
        obstacle_positions = self.__calc_obstacle_positions(
            self._obstacles[0][1], self._distances[0][1],
            self._car_positions[0][1][0],
            np.arccos(self._car_positions[0][1][1][0]))
        timestamp = self._obstacles[0][0]

        # Get runtime in ms.
        runtime = (time.time() - start_time) * 1000
        self._csv_logger.info('{},{},{}'.format(time_epoch_ms(), self._name,
                                                runtime))

        output_msg = ObstaclePositionsSpeedsMessage(obstacle_positions,
                                                    timestamp)
        self._fused_stream.send(output_msg)
Code example #21
    def compute_depth(self, timestamp, depth_estimation_stream):
        self._logger.debug('@{}: {} received watermark'.format(
            timestamp, self._name))
        start_time = time.time()

        imgL = self._left_imgs.pop(timestamp)
        imgR = self._right_imgs.pop(timestamp)

        cudnn.benchmark = False
        self._model.eval()
        imgL = imgL.float().cuda().unsqueeze(0)
        imgR = imgR.float().cuda().unsqueeze(0)
        with torch.no_grad():
            outputs = self._model(imgL, imgR)
            output = torch.squeeze(outputs[2], 1)
        output = output.squeeze().cpu().numpy()
        # Process the output (disparity) to depth, model-dependent
        depth = preprocess.disp2depth(output)
        # Get runtime in ms.
        runtime = (time.time() - start_time) * 1000
        self._csv_logger.info('{},{},"{}",{}'.format(time_epoch_ms(),
                                                     self._name, timestamp,
                                                     runtime))

        if self._flags.visualize_depth_est:
            cv2.imshow(self._name, output)
            cv2.waitKey(1)

        camera_setup = CameraSetup("depth_estimation",
                                   "estimation.anynet",
                                   depth.shape[1],
                                   depth.shape[0],
                                   self._transform,
                                   fov=self._fov)
        depth_estimation_stream.send(
            DepthFrameMessage(DepthFrame(depth, camera_setup), timestamp))
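
`preprocess.disp2depth` is specific to the AnyNet pipeline, but for a calibrated stereo rig the disparity-to-depth conversion is the classic pinhole relation depth = focal_length * baseline / disparity. A generic sketch (the calibration constants are placeholders, not AnyNet's):

import numpy as np

def disparity_to_depth(disparity, focal_length_px, baseline_m):
    """Pinhole stereo: depth (m) = f (px) * B (m) / disparity (px)."""
    # Zero disparity means the point is at infinity; clamp to avoid
    # division by zero.
    disparity = np.clip(disparity, 1e-6, None)
    return focal_length_px * baseline_m / disparity

# E.g., with a 400 px focal length and a 0.4 m baseline, a disparity of
# 8 px corresponds to a depth of 400 * 0.4 / 8 = 20 m.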
Code example #22
    def on_watermark(self, timestamp, obstacles_error_stream):
        """Invoked when all input streams have received a watermark.

        Args:
            timestamp (:py:class:`erdos.timestamp.Timestamp`): The timestamp of
                the watermark.
        """
        assert len(timestamp.coordinates) == 1
        op_start_time = time.time()
        game_time = timestamp.coordinates[0]
        if not self._last_notification:
            self._last_notification = game_time
            return
        else:
            self._sim_interval = (game_time - self._last_notification)
            self._last_notification = game_time

        sim_time = timestamp.coordinates[0]
        while len(self._detector_start_end_times) > 0:
            print("INSIDE EVAL WATERMARK", timestamp)
            (end_time, start_time) = self._detector_start_end_times[0]

            if end_time <= game_time:
                heapq.heappop(self._detector_start_end_times)

                ego_transform, ground_obstacles = \
                    self.__get_ground_obstacles_at(end_time)
                # Get detector output obstacles.
                obstacles = self.__get_obstacles_at(start_time)

                if (len(obstacles) > 0 or len(ground_obstacles) > 0):
                    errs = pylot.perception.detection.utils.get_errors(
                        ground_obstacles, obstacles, ego_transform)

                    # Get runtime in ms.
                    runtime = (time.time() - op_start_time) * 1000
                    self._csv_logger.info('{},{},{},{},{:.4f}'.format(
                        time_epoch_ms(), sim_time, self.config.name, 'runtime',
                        runtime))

                    self._logger.info('errors calculated')

                    for ground_ob, det_ob, err_val in errs:
                        det_id = 'NONE'
                        if det_ob is not None:
                            det_ob.vis_error = err_val
                            det_id = det_ob.id

                        # Express the ground obstacle's location relative to
                        # the ego vehicle.
                        ego_point = ego_transform.location.as_numpy_array(
                        ).reshape(1, 3)
                        ego_loc = ego_transform.inverse_transform_points(
                            ego_point).reshape(3, )
                        ob_point = \
                            ground_ob.transform.location.as_numpy_array(
                            ).reshape(1, 3)
                        ob_loc = ego_transform.inverse_transform_points(
                            ob_point).reshape(3, )
                        relative_dist = ob_loc - ego_loc

                        self._logger.debug('Ground obstacle {} matched to {} '
                                           'with error {}'.format(
                                               ground_ob.id, det_id, err_val))
                        self._csv_logger.info(
                            '{},{},{},{},{},{:.4f},{:.4f},{:.4f},{:.4f}'.
                            format(time_epoch_ms(), sim_time,
                                   self.config.name, ground_ob.id,
                                   ground_ob.label, relative_dist[0],
                                   relative_dist[1], relative_dist[2],
                                   err_val))

                self._logger.debug('Computing accuracy for {} {}'.format(
                    end_time, start_time))

                obstacles_error_stream.send(ObstaclesMessage(timestamp, obstacles))
                obstacles_error_stream.send(erdos.WatermarkMessage(timestamp))
            else:
                # The remaining entries require newer ground obstacles.
                break

        self.__garbage_collect_obstacles()
Code example #23
    def on_msg_camera_stream(self, msg, obstacles_stream):
        """ Invoked when the operator receives a message on the data stream."""
        self._logger.debug('@{}: {} received message'.format(
            msg.timestamp, self._name))
        start_time = time.time()
        # The models expect BGR images.
        assert msg.frame.encoding == 'BGR', 'Expects BGR frames'
        # Expand dimensions since the model expects images to have
        # shape: [1, None, None, 3]
        image_np_expanded = np.expand_dims(msg.frame.frame, axis=0)
        (boxes, scores, classes, num_detections) = self._tf_session.run(
            [
                self._detection_boxes, self._detection_scores,
                self._detection_classes, self._num_detections
            ],
            feed_dict={self._image_tensor: image_np_expanded})

        num_detections = int(num_detections[0])
        res_classes = classes[0][:num_detections]
        res_boxes = boxes[0][:num_detections]
        res_scores = scores[0][:num_detections]

        obstacles = []
        for i in range(0, num_detections):
            if (res_classes[i] in self._coco_labels and res_scores[i] >=
                    self._flags.obstacle_detection_min_score_threshold):
                obstacles.append(
                    DetectedObstacle(
                        BoundingBox2D(
                            int(res_boxes[i][1] *
                                msg.frame.camera_setup.width),
                            int(res_boxes[i][3] *
                                msg.frame.camera_setup.width),
                            int(res_boxes[i][0] *
                                msg.frame.camera_setup.height),
                            int(res_boxes[i][2] *
                                msg.frame.camera_setup.height)), res_scores[i],
                        self._coco_labels[res_classes[i]]))
            else:
                self._logger.warning('Filtering unknown class: {}'.format(
                    res_classes[i]))

        self._logger.debug('@{}: {} obstacles: {}'.format(
            msg.timestamp, self._name, obstacles))

        if (self._flags.visualize_detected_obstacles
                or self._flags.log_detector_output):
            msg.frame.annotate_with_bounding_boxes(msg.timestamp, obstacles,
                                                   self._bbox_colors)
            if self._flags.visualize_detected_obstacles:
                msg.frame.visualize(self._name)
            if self._flags.log_detector_output:
                msg.frame.save(msg.timestamp.coordinates[0],
                               self._flags.data_path,
                               'detector-{}'.format(self._name))

        # Get runtime in ms.
        runtime = (time.time() - start_time) * 1000
        self._csv_logger.info('{},{},"{}",{}'.format(time_epoch_ms(),
                                                     self._name, msg.timestamp,
                                                     runtime))
        # Send out obstacles.
        obstacles_stream.send(
            ObstaclesMessage(obstacles, msg.timestamp, runtime))
Code example #24
    def on_waypoints_update(self, msg):
        """ Invoked upon receipt of a waypoints message from the pipeline.

        This method retrieves the pose message for the timestamp, calculates
        the runtime of the pipeline, logs it and saves the waypoints for the
        future.

        Args:
            msg (:py:class:`pylot.planning.messages.WaypointsMessage`): The
                waypoints message received for the given timestamp.
        """
        waypoint_recv_time = time.time()
        self._logger.debug("@{}: received waypoints update.".format(
            msg.timestamp))

        # Retrieve the game time.
        game_time = msg.timestamp.coordinates[0]

        # Ensure that only a single invocation of the pipeline is in flight.
        assert self._last_localization_update == game_time, \
            "Concurrent execution of the pipeline."

        watermark = erdos.WatermarkMessage(msg.timestamp)
        if self._waypoint_num < 10:
            self._logger.debug(
                "@{}: received waypoint num {}. "
                "Skipping because the simulator might not be in sync.".format(
                    msg.timestamp, self._waypoint_num))
            self._first_waypoint = False
            self._waypoint_num += 1
            # Send a message on the notify stream to ask the simulator to send
            # a new sensor stream.
            self._pipeline_finish_notify_stream.send(watermark)
            return

        # Retrieve the pose message for this timestamp.
        (pose_msg, pose_recv_time) = self._pose_map[game_time]

        # Calculate and log the processing time for this waypoints message.
        processing_time = int((waypoint_recv_time - pose_recv_time) * 1000)
        self._csv_logger.info('{},{},{},{:.4f}'.format(time_epoch_ms(),
                                                       game_time,
                                                       'end-to-end-runtime',
                                                       processing_time))

        # Apply the waypoints at the timestamp + processing time.
        applicable_time = game_time + processing_time
        if (self._last_highest_applicable_time is None
                or self._last_highest_applicable_time < applicable_time):
            self._last_highest_applicable_time = applicable_time
            self._waypoints.append((applicable_time, msg))
            self._logger.debug(
                "@{}: waypoints will be applicable at {}".format(
                    msg.timestamp, applicable_time))
        else:
            # The last waypoint applicable time was higher, we should purge
            # the ones higher than this one and add this entry.
            self._logger.debug(
                "@{}: Popping the last applicable time: {}".format(
                    msg.timestamp, self._waypoints[-1][0]))

            assert (
                self._waypoints.pop()[0] == self._last_highest_applicable_time)
            while self._waypoints[-1][0] >= applicable_time:
                self._logger.debug(
                    "@{}: Popping the last applicable time: {}".format(
                        msg.timestamp, self._waypoints[-1][0]))
                self._waypoints.pop()
            self._last_highest_applicable_time = applicable_time
            self._waypoints.append((applicable_time, msg))
            self._logger.debug("@{}: the waypoints were adjusted "
                               "and will be applicable at {}".format(
                                   msg.timestamp, applicable_time))

        # Delete the pose from the map.
        self._pose_map.pop(game_time, None)

        # Send a message on the notify stream to ask the simulator to send a
        # new sensor stream.
        self._pipeline_finish_notify_stream.send(watermark)
Code example #25
    def on_watermark(self, timestamp, control_stream):
        """ The callback function invoked upon receipt of a WatermarkMessage.

        The function uses the latest location of the vehicle and drives to the
        next waypoint, while doing either a stop or a swerve upon the
        detection of a person.

        Args:
            timestamp: Timestamp for which the WatermarkMessage was received.
            control_stream: Output stream on which the callback can write to.
        """
        self._logger.debug('@{}: received watermark'.format(timestamp))
        pose_msg = self._pose_msgs.popleft()
        ground_obstacles_msg = self._ground_obstacles_msgs.popleft()
        obstacle_msg = self._obstacle_msgs.popleft()

        self._logger.debug(
            "The vehicle is travelling at a speed of {} m/s.".format(
                pose_msg.data.forward_speed))

        ego_transform = pose_msg.data.transform
        ego_location = ego_transform.location
        ego_wp = self._map.get_waypoint(ego_location.as_carla_location())

        people_obstacles = [
            obstacle for obstacle in ground_obstacles_msg.obstacles
            if obstacle.label == 'person'
        ]
        sim_time = timestamp.coordinates[0]
        # Heuristic to tell us how far away we detect the person.
        for person in people_obstacles:
            person_wp = self._map.get_waypoint(
                person.transform.location.as_carla_location(),
                project_to_road=False)
            if person_wp and person_wp.road_id == ego_wp.road_id:
                for obstacle in obstacle_msg.obstacles:
                    if obstacle.label == 'person':
                        self._csv_logger.info(
                            "{},{},{},{:.4f},detected a {} {:.4f} m away".
                            format(time_epoch_ms(), sim_time, self.config.name,
                                   self.SPEED, obstacle.label,
                                   person.distance(ego_transform)))
                        self._csv_logger.info(
                            "{},{},{},{:.4f},vehicle speed {:.4f} m/s".format(
                                time_epoch_ms(), sim_time, self.config.name,
                                self.SPEED, pose_msg.data.forward_speed))

        # Figure out the location of the ego vehicle and compute the next
        # waypoint.
        if self._goal_reached or ego_location.distance(
                self._goal) <= self.GOAL_DISTANCE:
            self._logger.info(
                "The distance was {} and we reached the goal.".format(
                    ego_location.distance(self._goal)))
            control_stream.send(
                ControlMessage(0.0, 0.0, 1.0, True, False, timestamp))
            self._goal_reached = True
        else:
            person_detected = False
            for person in people_obstacles:
                person_wp = self._map.get_waypoint(
                    person.transform.location.as_carla_location(),
                    project_to_road=False)
                if person_wp and ego_location.distance(
                        person.transform.location) <= self.DETECTION_DISTANCE:
                    person_detected = True
                    break

            if person_detected and self._flags.avoidance_behavior == 'stop':
                control_stream.send(
                    ControlMessage(0.0, 0.0, 1.0, True, False, timestamp))
                return

            # Get the waypoint that is SAMPLING_DISTANCE away.
            sample_distance = self.SAMPLING_DISTANCE if \
                ego_transform.forward_vector.x > 0 else \
                -1 * self.SAMPLING_DISTANCE
            steer_loc = ego_location + pylot.utils.Location(
                x=sample_distance, y=0, z=0)
            wp_steer = self._map.get_waypoint(steer_loc.as_carla_location())

            in_swerve = False
            fwd_vector = wp_steer.transform.get_forward_vector()
            waypoint_fwd = [fwd_vector.x, fwd_vector.y, fwd_vector.z]
            if person_detected:
                # A person was detected; swerve into the opposing lane
                # unless we are already driving in it.
                if np.dot(ego_transform.forward_vector.as_numpy_array(),
                          waypoint_fwd) > 0:
                    # We're still aligned with our own lane; swerve by
                    # taking the left lane waypoint.
                    if wp_steer.get_left_lane():
                        wp_steer = wp_steer.get_left_lane()
                        in_swerve = True
                else:
                    # We're already in the opposing lane; keep driving.
                    pass
            else:
                # The person was not detected, come back from the swerve.
                if np.dot(ego_transform.forward_vector.as_numpy_array(),
                          waypoint_fwd) < 0:
                    # We're driving in the wrong direction, get the left lane
                    # waypoint.
                    if wp_steer.get_left_lane():
                        wp_steer = wp_steer.get_left_lane()
                        in_swerve = True
                else:
                    # We're driving in the right direction, continue driving.
                    pass

            # self._world.debug.draw_point(wp_steer.transform.location,
            #                              size=0.2,
            #                              life_time=30000.0)

            wp_steer_vector, _, wp_steer_angle = \
                ego_transform.get_vector_magnitude_angle(
                    pylot.utils.Location.from_carla_location(
                        wp_steer.transform.location))
            current_speed = max(0, pose_msg.data.forward_speed)
            steer = pylot.control.utils.radians_to_steer(
                wp_steer_angle, self._flags.steer_gain)
            # target_speed = self.SPEED if not in_swerve else self.SPEED / 5.0
            target_speed = self.SPEED
            throttle, brake = pylot.control.utils.compute_throttle_and_brake(
                self._pid, current_speed, target_speed, self._flags)

            control_stream.send(
                ControlMessage(steer, throttle, brake, False, False,
                               timestamp))
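
`pylot.control.utils.radians_to_steer` maps the waypoint angle to a normalized steering command. A plausible sketch of that mapping, i.e. a gain followed by clamping to the simulator's [-1, 1] steering range (Pylot's actual implementation may differ):

def radians_to_steer(wp_angle, steer_gain):
    """Scale an angle (radians) by the gain and clamp to [-1.0, 1.0]."""
    return max(-1.0, min(1.0, steer_gain * wp_angle))

# E.g., radians_to_steer(0.2, 0.7) == 0.14; large angles saturate at +/-1.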
Code example #26
    def on_watermark(self, timestamp):
        """Invoked when all input streams have received a watermark.

        Args:
            timestamp (:py:class:`erdos.timestamp.Timestamp`): The timestamp of
                the watermark.
        """
        assert len(timestamp.coordinates) == 1
        op_start_time = time.time()
        game_time = timestamp.coordinates[0]
        if not self._last_notification:
            self._last_notification = game_time
            return
        else:
            self._sim_interval = (game_time - self._last_notification)
            self._last_notification = game_time

        sim_time = timestamp.coordinates[0]
        while len(self._tracker_start_end_times) > 0:
            (end_time, start_time) = self._tracker_start_end_times[0]
            # We can compute the tracker metrics if the end time is not
            # greater than the current game time.
            if end_time <= game_time:
                # This is the closest ground bounding box to the end time.
                heapq.heappop(self._tracker_start_end_times)
                ground_obstacles = self.__get_ground_obstacles_at(end_time)
                # Get tracker output obstacles.
                tracker_obstacles = self.__get_tracked_obstacles_at(start_time)
                if (len(tracker_obstacles) > 0 or len(ground_obstacles) > 0):
                    metrics_summary_df = self.get_tracker_metrics(
                        tracker_obstacles, ground_obstacles)
                    # Get runtime in ms
                    runtime = (time.time() - op_start_time) * 1000
                    self._csv_logger.info('{},{},{},{},{}'.format(
                        time_epoch_ms(), sim_time, self.config.name, 'runtime',
                        runtime))
                    # Write metrics to csv log file
                    for metric_name in self._flags.tracking_metrics:
                        if metric_name in metrics_summary_df.columns:
                            if (metric_name == 'mostly_tracked'
                                    or metric_name == 'mostly_lost'
                                    or metric_name == 'partially_tracked'):
                                ratio = metrics_summary_df[metric_name].values[
                                    0] / metrics_summary_df[
                                        'num_unique_objects'].values[0]
                                self._csv_logger.info(
                                    "{},{},{},{},{:.4f}".format(
                                        time_epoch_ms(), sim_time,
                                        self.config.name,
                                        'ratio_' + metric_name, ratio))
                            elif metric_name == 'motp':
                                # See https://github.com/cheind/py-motmetrics/issues/92  # noqa: E501
                                motp = (1 - metrics_summary_df[metric_name].
                                        values[0]) * 100
                                self._csv_logger.info(
                                    '{},{},{},{},{:.4f}'.format(
                                        time_epoch_ms(), sim_time,
                                        self.config.name, metric_name, motp))
                            elif (metric_name == 'idf1'
                                  or metric_name == 'mota'):
                                metric_val = metrics_summary_df[
                                    metric_name].values[0] * 100
                                self._csv_logger.info(
                                    '{},{},{},{},{:.4f}'.format(
                                        time_epoch_ms(), sim_time,
                                        self.config.name, metric_name,
                                        metric_val))
                            else:
                                self._csv_logger.info(
                                    '{},{},{},{},{:.4f}'.format(
                                        time_epoch_ms(), sim_time,
                                        self.config.name, metric_name,
                                        metrics_summary_df[metric_name].
                                        values[0]))
                        else:
                            raise ValueError(
                                'Unexpected tracking metric: {}'.format(
                                    metric_name))
                self._logger.debug('Computing accuracy for {} {}'.format(
                    end_time, start_time))
            else:
                # The remaining entries require newer ground obstacles.
                break

        self.__garbage_collect_obstacles()
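
`get_tracker_metrics` returns a one-row pandas DataFrame with columns such as `mota`, `motp`, and `num_unique_objects`, which strongly suggests the py-motmetrics library underneath. A minimal sketch of producing such a summary with that library (the ids and distances below are toy values):

import motmetrics as mm
import numpy as np

acc = mm.MOTAccumulator(auto_id=True)
# One frame: two ground-truth ids, two tracker hypotheses, and a
# ground-truth x hypothesis distance matrix (NaN = pairing not allowed).
acc.update(['gt_a', 'gt_b'], ['hyp_1', 'hyp_2'],
           [[0.2, np.nan], [np.nan, 0.5]])

mh = mm.metrics.create()
summary = mh.compute(acc, metrics=['mota', 'motp', 'num_unique_objects'],
                     name='tracker')
print(summary)  # a one-row DataFrame like metrics_summary_df above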
Code example #27
    def on_waypoints_update(self, msg):
        """ Invoked upon receipt of a waypoints message from the pipeline.

        This method retrieves the pose message for the timestamp, calculates
        the runtime of the pipeline, logs it and saves the waypoints for the
        future.

        Args:
            msg (:py:class:`pylot.planning.messages.WaypointsMessage`): The
                waypoints message received for the given timestamp.
        """
        waypoint_recv_time = time.time()
        self._logger.debug("@{}: received waypoints update.".format(
            msg.timestamp))

        if self._first_waypoint:
            self._logger.debug(
                "@{}: received first waypoint. "
                "Skipping because the simulator might not be in sync.".format(
                    msg.timestamp))
            self._first_waypoint = False
            return

        # Retrieve the game time.
        game_time = msg.timestamp.coordinates[0]

        # Retrieve the pose message for this timestamp.
        (pose_msg, pose_recv_time) = self._pose_map[game_time]

        # Calculate and log the processing time for this waypoints message.
        processing_time = int((waypoint_recv_time - pose_recv_time) * 1000)
        self._csv_logger.info('{},{},{},{:.4f}'.format(time_epoch_ms(),
                                                       game_time,
                                                       'end-to-end-runtime',
                                                       processing_time))

        # Apply the waypoints at the timestamp + processing time.
        applicable_time = game_time + processing_time
        if (self._last_highest_applicable_time is None
                or self._last_highest_applicable_time < applicable_time):
            self._last_highest_applicable_time = applicable_time
            self._waypoints.append((applicable_time, msg))
            self._logger.debug(
                "@{}: waypoints will be applicable at {}".format(
                    msg.timestamp, applicable_time))
        else:
            # These waypoints would become applicable earlier than ones
            # already queued. Instead, schedule them one localization period
            # after the last applicable time.
            sensor_frequency = self._flags.carla_fps
            if self._flags.carla_localization_frequency != -1:
                sensor_frequency = self._flags.carla_localization_frequency
            applicable_time = self._last_highest_applicable_time + int(
                1000 / sensor_frequency)
            self._last_highest_applicable_time = applicable_time
            self._waypoints.append((applicable_time, msg))
            self._logger.debug(
                "@{}: the waypoints were adjusted by the localization "
                "frequency and will be applicable at {}".format(
                    msg.timestamp, applicable_time))

        # Delete the pose from the map.
        del self._pose_map[game_time]
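
For the lookup of self._pose_map[game_time] above to succeed, a companion callback must have stored each pose together with its wall-clock arrival time. A hedged sketch of such a handler follows; the method name on_pose_update and the exact message layout are assumptions for illustration, mirroring the usage in on_waypoints_update.

    def on_pose_update(self, msg):
        """Sketch: records when each pose message arrived."""
        game_time = msg.timestamp.coordinates[0]
        # Store the message together with its wall-clock receive time, so
        # that the end-to-end runtime can later be computed as
        # waypoint_recv_time - pose_recv_time. The waypoints callback
        # deletes the entry once it has been consumed.
        self._pose_map[game_time] = (msg, time.time())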
Code Example #28
    def _calculate_metrics(self, timestamp, ground_trajectories, predictions):
        """ Calculates and logs MSD (mean squared distance), ADE (average
            displacement error), and FDE (final displacement error).

            Args:
                timestamp: The ERDOS timestamp whose first coordinate is the
                    simulation time at which the metrics are logged.
                ground_trajectories: A dict of perfect past trajectories.
                predictions: A list of obstacle predictions.
        """
        # Vehicle metrics.
        vehicle_cnt = 0
        vehicle_msd = 0.0
        vehicle_ade = 0.0
        vehicle_fde = 0.0

        # Person metrics.
        person_cnt = 0
        person_msd = 0.0
        person_ade = 0.0
        person_fde = 0.0

        for obstacle in predictions:
            # We remove altitude from the accuracy calculation because the
            # prediction operators do not currently predict altitude.
            predicted_trajectory = [
                Vector2D(transform.location.x, transform.location.y)
                for transform in obstacle.trajectory
            ]
            ground_trajectory = [
                Vector2D(transform.location.x, transform.location.y)
                for transform in ground_trajectories[obstacle.id].trajectory
            ]
            if obstacle.label in VEHICLE_LABELS:
                vehicle_cnt += 1
            elif obstacle.label == 'person':
                person_cnt += 1
            else:
                raise ValueError('Unexpected obstacle label {}'.format(
                    obstacle.label))
            l2_distance = 0.0
            l1_distance = 0.0
            for idx in range(1, len(predicted_trajectory) + 1):
                # Accumulate Euclidean (L2) distance for the MSD metric.
                # Note: despite the name, the distances are not squared
                # before averaging.
                l2_distance += predicted_trajectory[-idx].l2_distance(
                    ground_trajectory[-idx])
                # Accumulate Manhattan (L1) distance for ADE.
                l1_distance += predicted_trajectory[-idx].l1_distance(
                    ground_trajectory[-idx])
            l2_distance /= len(predicted_trajectory)
            l1_distance /= len(predicted_trajectory)
            fde = predicted_trajectory[-1].l1_distance(ground_trajectory[-1])
            if obstacle.label in VEHICLE_LABELS:
                vehicle_msd += l2_distance
                vehicle_ade += l1_distance
                vehicle_fde += fde
            elif obstacle.label == 'person':
                person_msd += l2_distance
                person_ade += l1_distance
                person_fde += fde
            else:
                raise ValueError('Unexpected obstacle label {}'.format(
                    obstacle.label))

        # Log metrics.
        sim_time = timestamp.coordinates[0]
        actor_cnt = person_cnt + vehicle_cnt
        if actor_cnt > 0:
            msd = (person_msd + vehicle_msd) / actor_cnt
            ade = (person_ade + vehicle_ade) / actor_cnt
            fde = (person_fde + vehicle_fde) / actor_cnt
            self._csv_logger.info('{},{},prediction,MSD,{:.4f}'.format(
                time_epoch_ms(), sim_time, msd))
            self._csv_logger.info('{},{},prediction,ADE,{:.4f}'.format(
                time_epoch_ms(), sim_time, ade))
            self._csv_logger.info('{},{},prediction,FDE,{:.4f}'.format(
                time_epoch_ms(), sim_time, fde))
        if person_cnt > 0:
            person_msd /= person_cnt
            person_ade /= person_cnt
            person_fde /= person_cnt
            self._logger.info('Person MSD is: {:.4f}'.format(person_msd))
            self._logger.info('Person ADE is: {:.4f}'.format(person_ade))
            self._logger.info('Person FDE is: {:.4f}'.format(person_fde))
            self._csv_logger.info('{},{},prediction,person-MSD,{:.4f}'.format(
                time_epoch_ms(), sim_time, person_msd))
            self._csv_logger.info('{},{},prediction,person-ADE,{:.4f}'.format(
                time_epoch_ms(), sim_time, person_ade))
            self._csv_logger.info('{},{},prediction,person-FDE,{:.4f}'.format(
                time_epoch_ms(), sim_time, person_fde))
        if vehicle_cnt > 0:
            vehicle_msd /= vehicle_cnt
            vehicle_ade /= vehicle_cnt
            vehicle_fde /= vehicle_cnt
            self._logger.info('Vehicle MSD is: {:.4f}'.format(vehicle_msd))
            self._logger.info('Vehicle ADE is: {:.4f}'.format(vehicle_ade))
            self._logger.info('Vehicle FDE is: {:.4f}'.format(vehicle_fde))
            self._csv_logger.info('{},{},prediction,vehicle-MSD,{:.4f}'.format(
                time_epoch_ms(), sim_time, vehicle_msd))
            self._csv_logger.info('{},{},prediction,vehicle-ADE,{:.4f}'.format(
                time_epoch_ms(), sim_time, vehicle_ade))
            self._csv_logger.info('{},{},prediction,vehicle-FDE,{:.4f}'.format(
                time_epoch_ms(), sim_time, vehicle_fde))
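
The example above assumes a Vector2D type exposing l1_distance and l2_distance. A self-contained sketch with the conventional definitions (Manhattan and Euclidean distance) is given below; Pylot's actual class may differ in detail.

import math
from dataclasses import dataclass


@dataclass
class Vector2D:
    """Sketch of the 2D vector the accuracy code above relies on."""
    x: float
    y: float

    def l1_distance(self, other):
        # Manhattan distance, used for the ADE and FDE metrics.
        return abs(self.x - other.x) + abs(self.y - other.y)

    def l2_distance(self, other):
        # Euclidean distance, used for the MSD metric.
        return math.hypot(self.x - other.x, self.y - other.y)

With these definitions, the per-obstacle loop averages the L2 distances for MSD and the L1 distances for ADE, and takes the L1 distance between the final trajectory points for FDE.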
Code Example #29
    def calculate_metrics(self, ground_trajectories, predictions):
        """ Calculates and logs MSD (mean squared distance), ADE (average
            displacement error), and FDE (final displacement error).

            Args:
                ground_trajectories: A dict of perfect past trajectories.
                predictions: A list of obstacle predictions.
        """
        # Vehicle metrics.
        vehicle_cnt = 0
        vehicle_msd = 0.0
        vehicle_ade = 0.0
        vehicle_fde = 0.0

        # Person metrics.
        person_cnt = 0
        person_msd = 0.0
        person_ade = 0.0
        person_fde = 0.0

        for obstacle in predictions:
            predicted_trajectory = obstacle.trajectory
            ground_trajectory = ground_trajectories[obstacle.id].trajectory
            if obstacle.label == 'vehicle':
                vehicle_cnt += 1
            elif obstacle.label == 'person':
                person_cnt += 1
            else:
                raise ValueError('Unexpected obstacle label {}'.format(
                    obstacle.label))
            l2_distance = 0.0
            l1_distance = 0.0
            for idx in range(1, len(predicted_trajectory) + 1):
                # Accumulate Euclidean (L2) distance for the MSD metric.
                l2_distance += predicted_trajectory[-idx].location.distance(
                    ground_trajectory[-idx].location)
                # Accumulate Manhattan (L1) distance for ADE.
                l1_distance += predicted_trajectory[-idx].location.l1_distance(
                    ground_trajectory[-idx].location)
            l2_distance /= len(predicted_trajectory)
            l1_distance /= len(predicted_trajectory)
            fde = predicted_trajectory[-1].location.l1_distance(
                ground_trajectory[-1].location)
            if obstacle.label == 'vehicle':
                vehicle_msd += l2_distance
                vehicle_ade += l1_distance
                vehicle_fde += fde
            elif obstacle.label == 'person':
                person_msd += l2_distance
                person_ade += l1_distance
                person_fde += fde
            else:
                raise ValueError('Unexpected obstacle label {}'.format(
                    obstacle.label))

        # Log metrics, guarding against division by zero when no obstacle of
        # a class was predicted.
        if vehicle_cnt > 0:
            vehicle_msd /= vehicle_cnt
            vehicle_ade /= vehicle_cnt
            vehicle_fde /= vehicle_cnt
            self._logger.info('Vehicle MSD is: {}'.format(vehicle_msd))
            self._logger.info('Vehicle ADE is: {}'.format(vehicle_ade))
            self._logger.info('Vehicle FDE is: {}'.format(vehicle_fde))
            self._csv_logger.info('{},{},{},{}'.format(
                time_epoch_ms(), self._name, 'vehicle-MSD', vehicle_msd))
            self._csv_logger.info('{},{},{},{}'.format(
                time_epoch_ms(), self._name, 'vehicle-ADE', vehicle_ade))
            self._csv_logger.info('{},{},{},{}'.format(
                time_epoch_ms(), self._name, 'vehicle-FDE', vehicle_fde))
        if person_cnt > 0:
            person_msd /= person_cnt
            person_ade /= person_cnt
            person_fde /= person_cnt
            self._logger.info('Person MSD is: {}'.format(person_msd))
            self._logger.info('Person ADE is: {}'.format(person_ade))
            self._logger.info('Person FDE is: {}'.format(person_fde))
            self._csv_logger.info('{},{},{},{}'.format(
                time_epoch_ms(), self._name, 'person-MSD', person_msd))
            self._csv_logger.info('{},{},{},{}'.format(
                time_epoch_ms(), self._name, 'person-ADE', person_ade))
            self._csv_logger.info('{},{},{},{}'.format(
                time_epoch_ms(), self._name, 'person-FDE', person_fde))
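
Unlike Example #28, this variant measures full 3D distances through each transform's location. A stub with the two distance methods the loop calls (conventional definitions; the real Pylot Location class may differ) could look like:

class Location:
    """Stub 3D location exposing the distance methods used above."""
    def __init__(self, x, y, z=0.0):
        self.x, self.y, self.z = x, y, z

    def distance(self, other):
        # Euclidean (L2) distance in 3D.
        return ((self.x - other.x) ** 2 + (self.y - other.y) ** 2 +
                (self.z - other.z) ** 2) ** 0.5

    def l1_distance(self, other):
        # Manhattan (L1) distance in 3D.
        return (abs(self.x - other.x) + abs(self.y - other.y) +
                abs(self.z - other.z))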