Example #1
    def _get_anns(self, ann_tokens, ref_pose_record):
        """
        Get all annotations for a given sample.
        :param ann_tokens (list): list of annotation tokens
        :param ref_pose_record (dict): ego pose record of the reference
            sample data; used for distance computation and for transforming
            boxes from global to vehicle coordinates
        :return: list of dicts, one per kept annotation, each holding the
            category, point counts, distance to the ego vehicle, and the 3D
            box in vehicle coordinates
        """
        ## TODO: add filtering based on num_radar/num_lidar points here
        ## TODO: for 'scene' samples, ref_pose_record stuff needs attention here

        annotations = []
        for token in ann_tokens:
            ann = self.get('sample_annotation', token)
            this_ann = {}
            try:
                this_ann['category'] = self.cfg.CATEGORIES[
                    ann['category_name']]
            except KeyError:
                ## Skip annotations whose category is not in the config mapping
                continue
            this_ann['category_id'] = self.cfg.CAT_ID[this_ann['category']]
            this_ann['num_lidar_pts'] = ann['num_lidar_pts']
            this_ann['num_radar_pts'] = ann['num_radar_pts']

            ## Create Box object (boxes are in global coordinates)
            box = Box(ann['translation'],
                      ann['size'],
                      Quaternion(ann['rotation']),
                      name=this_ann['category'],
                      token=ann['token'])

            ## Get distance to vehicle
            box_dist = nsutils.get_box_dist(box, ref_pose_record)
            this_ann['distance'] = box_dist
            ## Skip boxes farther than the configured maximum distance
            if self.cfg.MAX_BOX_DIST and box_dist > abs(self.cfg.MAX_BOX_DIST):
                continue

            ## Transform the box from global to vehicle coordinates
            box = nsutils.global_to_vehicle(box, ref_pose_record)

            ## Calculate box velocity
            if self.cfg.BOX_VELOCITY:
                box.velocity = self.box_velocity(box.token)

            this_ann['box_3d'] = box
            annotations.append(this_ann)
        return annotations
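
A minimal usage sketch for the method above, assuming the surrounding class subclasses the nuScenes devkit's NuScenes database (the method calls self.get and self.box_velocity, which NuScenes provides); dataset, sample_token, and the choice of LIDAR_TOP as the reference sensor are hypothetical:

## Hypothetical driver code; `dataset` is an instance of the (unshown) class
## that defines _get_anns() and inherits the devkit table accessor `get`.
sample = dataset.get('sample', sample_token)
ref_sd_record = dataset.get('sample_data', sample['data']['LIDAR_TOP'])
ref_pose_record = dataset.get('ego_pose', ref_sd_record['ego_pose_token'])

annotations = dataset._get_anns(sample['anns'], ref_pose_record)
for ann in annotations:
    ## Each entry holds the category, point counts, distance to the ego
    ## vehicle, and a Box already transformed to vehicle coordinates.
    print(ann['category'], ann['num_lidar_pts'], round(ann['distance'], 1))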
Example #2
    def get_boxes(self,
                  token: str,
                  filter_classes: Optional[List[str]] = None,
                  max_dist: Optional[float] = None) -> List[Box]:
        """
        Load up all the boxes associated with a sample.
        Boxes are in nuScenes lidar frame.
        :param token: KittiDB unique id.
        :param filter_classes: List of Kitti classes to use or None to use all.
        :param max_dist: Maximum distance in m to still draw a box.
        :return: Boxes in nuScenes lidar reference frame.
        """
        # Get transforms for this sample
        transforms = self.get_transforms(token, root=self.root)

        boxes = []
        if token.startswith('test_'):
            # No boxes to return for the test set.
            return boxes

        with open(KittiDB.get_filepath(token, 'label_2', root=self.root),
                  'r') as f:
            for line in f:
                # Parse this line into box information.
                parsed_line = self.parse_label_line(line)

                if parsed_line['name'] in {'DontCare', 'Misc'}:
                    continue

                center = parsed_line['xyz_camera']
                wlh = parsed_line['wlh']
                yaw_camera = parsed_line['yaw_camera']
                name = parsed_line['name']
                score = parsed_line['score']

                # Optional: Filter classes.
                if filter_classes is not None and name not in filter_classes:
                    continue

                # The Box class coord system is oriented the same way as KITTI LIDAR: x forward, y left, z up.
                # For details on the sensor orientation, see: http://www.cvlibs.net/datasets/kitti/setup.php.

                # 1: Create box in Box coordinate system with center at origin.
                # The second quaternion in yaw_box transforms the coordinate frame from the object frame
                # to KITTI camera frame. The equivalent cannot be naively done afterwards, as it's a rotation
                # around the local object coordinate frame, rather than the camera frame.
                quat_box = Quaternion(axis=(0, 1, 0),
                                      angle=yaw_camera) * Quaternion(
                                          axis=(1, 0, 0), angle=np.pi / 2)
                box = Box([0.0, 0.0, 0.0], wlh, quat_box, name=name)

                # 2: Translate: KITTI defines the box center as the bottom center of the vehicle. We use the true
                # center, so we add half the height in the negative y direction (y points downward) to adjust. The
                # center is already given in the camera coord system.
                box.translate(center + np.array([0, -wlh[2] / 2, 0]))

                # 3: Transform to KITTI LIDAR coord system. First transform from rectified camera to camera, then
                # camera to KITTI lidar.
                box.rotate(Quaternion(matrix=transforms['r0_rect']).inverse)
                box.translate(-transforms['velo_to_cam']['T'])
                box.rotate(
                    Quaternion(matrix=transforms['velo_to_cam']['R']).inverse)

                # 4: Transform to nuScenes LIDAR coord system.
                box.rotate(self.kitti_to_nu_lidar)

                # Set score or NaN.
                box.score = score

                # Set dummy velocity.
                box.velocity = np.array((0.0, 0.0, 0.0))

                # Optional: Filter by max_dist
                if max_dist is not None:
                    dist = np.linalg.norm(box.center[:2])
                    if dist > max_dist:
                        continue

                boxes.append(box)

        return boxes
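
A minimal sketch of driving this method, plus a check of the rotation-order point from step 1. The KittiDB import path and constructor default follow recent nuscenes-devkit versions but may differ in yours; the root path and token are placeholders, and the closing assertion only demonstrates that the two quaternions composed in step 1 do not commute:

import numpy as np
from pyquaternion import Quaternion
from nuscenes.utils.kitti import KittiDB  # path as in recent devkit versions

# Placeholder root; KittiDB tokens are '<split>_<frame>' strings.
kitti = KittiDB(root='/data/sets/kitti')
boxes = kitti.get_boxes('train_000000', filter_classes=['Car'], max_dist=50.0)
for box in boxes:
    print(box.name, box.center, box.wlh, box.score)

# Step 1 composes two rotations; swapping their order gives a different
# orientation, which is why the object-frame alignment cannot be applied
# naively afterwards.
yaw_camera = 0.3
a = Quaternion(axis=(0, 1, 0), angle=yaw_camera) * \
    Quaternion(axis=(1, 0, 0), angle=np.pi / 2)
b = Quaternion(axis=(1, 0, 0), angle=np.pi / 2) * \
    Quaternion(axis=(0, 1, 0), angle=yaw_camera)
assert not np.allclose(a.rotation_matrix, b.rotation_matrix)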