예제 #1
0
    def simple_test(self,
                    points,
                    img_metas,
                    imgs=None,
                    bev_seg_image=None,
                    rescale=False,
                    gt_bboxes_3d=None):
        """Test function without augmentation.

        Args:
            points (list[torch.Tensor]): Points of each sample.
            img_metas (list[dict]): Meta info of each sample.
            imgs (list[torch.Tensor], optional): Images of each sample;
                unused here. Defaults to None.
            bev_seg_image (optional): BEV segmentation input; unused here,
                kept for interface compatibility. Defaults to None.
            rescale (bool, optional): Whether to rescale the results.
                Defaults to False.
            gt_bboxes_3d (optional): Ground-truth boxes; not used at test
                time. Defaults to None.

        Returns:
            list[dict]: Predicted 3d boxes, one result dict per sample.
        """
        # extract_feat also returns segmentation masks, which are not
        # needed for box decoding here.
        x, _ = self.extract_feat(points, img_metas)
        outs = self.bbox_head(x)
        bbox_list = self.bbox_head.get_bboxes(*outs,
                                              img_metas,
                                              rescale=rescale)
        bbox_results = [
            bbox3d2result(bboxes, scores, labels)
            for bboxes, scores, labels in bbox_list
        ]
        return bbox_results
예제 #2
0
 def simple_test(self,
                 points,
                 img_metas,
                 imgs=None,
                 bev_seg_image=None,
                 rescale=False,
                 gt_bboxes_3d=None):
     """Test function without augmentation.

     Args:
         points (list[torch.Tensor]): Points of each sample.
         img_metas (list[dict]): Meta info of each sample.
         imgs (list[torch.Tensor], optional): Images of each sample;
             unused here. Defaults to None.
         bev_seg_image (optional): BEV segmentation input; unused here,
             kept for interface compatibility. Defaults to None.
         rescale (bool, optional): Whether to rescale the results.
             Defaults to False.
         gt_bboxes_3d (optional): Ground-truth boxes; not used at test
             time. Defaults to None.

     Returns:
         list[dict]: Predicted 3d boxes, one result dict per sample.
     """
     x = self.extract_feat(points, img_metas)
     outs = self.bbox_head(x)
     bbox_list = self.bbox_head.get_bboxes(*outs,
                                           img_metas,
                                           rescale=rescale)
     bbox_results = [
         bbox3d2result(bboxes, scores, labels)
         for bboxes, scores, labels in bbox_list
     ]
     return bbox_results
예제 #3
0
 def simple_test_pts(self, x, img_metas, rescale=False):
     """Test function of point cloud branch."""
     head_outs = self.pts_bbox_head(x)
     # One (bboxes, scores, labels) tuple per sample in the batch.
     detections = self.pts_bbox_head.get_bboxes(*head_outs,
                                                img_metas,
                                                rescale=rescale)
     results = []
     for det_bboxes, det_scores, det_labels in detections:
         results.append(bbox3d2result(det_bboxes, det_scores, det_labels))
     return results
예제 #4
0
 def simple_test(self, points, img_metas, imgs=None, rescale=False):
     """Test function without augmentation.

     Args:
         points (list[torch.Tensor]): Points of each sample.
         img_metas (list[dict]): Meta info of each sample.
         imgs (list[torch.Tensor], optional): Images of each sample;
             unused here. Defaults to None.
         rescale (bool, optional): Whether to rescale the results.
             Defaults to False.

     Returns:
         list[dict]: Predicted 3d boxes, one result dict per sample.
     """
     x = self.extract_feat(points, img_metas)
     outs = self.bbox_head(x)
     bbox_list = self.bbox_head.get_bboxes(
         *outs, img_metas, rescale=rescale)
     bbox_results = [
         bbox3d2result(bboxes, scores, labels)
         for bboxes, scores, labels in bbox_list
     ]
     return bbox_results
예제 #5
0
 def simple_test(self, points, img_metas, imgs=None, rescale=False):
     """Test function without augmentation.

     Args:
         points (list[torch.Tensor]): Points of each sample.
         img_metas (list[dict]): Meta info of each sample.
         imgs (list[torch.Tensor], optional): Images of each sample;
             unused here. Defaults to None.
         rescale (bool, optional): Unused by this head's decoding; kept
             for interface compatibility. Defaults to False.

     Returns:
         First element of the per-sample results (original note:
         "list of dicts").
     """
     x = self.extract_feat(points, img_metas)
     pred_dict = self.bbox_head(x)
     # This head's get_bboxes yields 4-tuples that also carry img_meta.
     bbox_list = self.bbox_head.get_bboxes(pred_dict, img_metas)
     bbox_results = [
         bbox3d2result(bboxes, scores, labels)
         for bboxes, scores, labels, img_meta in bbox_list
     ]
     # NOTE(review): only the first entry is returned, matching the
     # original behavior -- presumably a single-sample test convention.
     return bbox_results[0]  # list of dicts
예제 #6
0
 def simple_test_pts(self, x, img_metas, rescale=False):
     """Test function of point cloud branch.

     Args:
         x: Features of the point cloud branch.
         img_metas (list[dict]): Meta info of each sample.
         rescale (bool, optional): Whether to rescale the results.
             Defaults to False.

     Returns:
         list[dict]: Predicted 3d boxes, one result dict per sample.
     """
     outs = self.pts_bbox_head(x)
     bbox_list = self.pts_bbox_head.get_bboxes(*outs,
                                               img_metas,
                                               rescale=rescale)
     bbox_results = [
         bbox3d2result(bboxes, scores, labels)
         for bboxes, scores, labels in bbox_list
     ]
     return bbox_results
예제 #7
0
 def simple_test_pts(self, x, img_metas, rescale=False):
     """Test function of point cloud branch."""
     preds = self.pts_bbox_head(x)
     # One (bboxes, scores, labels) tuple per sample.
     decoded = self.pts_bbox_head.get_bboxes(*preds,
                                             img_metas,
                                             rescale=rescale)
     # Each result dict has keys (boxes_3d, scores_3d, labels_3d) with
     # values on cpu; the returned list has one dict per batch sample.
     results = []
     for sample_boxes, sample_scores, sample_labels in decoded:
         results.append(
             bbox3d2result(sample_boxes, sample_scores, sample_labels))
     return results
예제 #8
0
 def simple_test_pts(self, pts_feats, img_metas):
     """Test function of point cloud branch.

     Returns one result dict per test sample with keys
     (boxes_3d, scores_3d, labels_3d); values are moved to cpu.
     """
     head_outs = self.pts_bbox_head(pts_feats)
     # List of (bboxes, scores, labels) tuples; length == test batch size.
     # bboxes: LiDARInstance3DBoxes of tensor (N, 9);
     # scores: tensor (N, ); labels: tensor (N, ).
     per_sample = self.pts_bbox_head.get_bboxes(*head_outs, img_metas)
     results = []
     for sample_boxes, sample_scores, sample_labels in per_sample:
         results.append(
             bbox3d2result(sample_boxes, sample_scores, sample_labels))
     return results
예제 #9
0
    def simple_test(self, img, img_metas):
        """Test without augmentations.

        Args:
            img (torch.Tensor): Input images of shape (N, C_in, H, W).
            img_metas (list): Image metas.

        Returns:
            list[dict]: Predicted 3d boxes.
        """
        feats = self.extract_feat(img, img_metas)
        head_outs = self.bbox_head(feats)
        detections = self.bbox_head.get_bboxes(*head_outs, img_metas)
        results = []
        for boxes, scores, labels in detections:
            results.append(bbox3d2result(boxes, scores, labels))
        return results
예제 #10
0
    def simple_test(self, img, img_metas, rescale=False):
        """Test function without test time augmentation.

        Args:
            img (list[torch.Tensor]): List of multiple images.
            img_metas (list[dict]): List of image information.
            rescale (bool, optional): Whether to rescale the results.
                Defaults to False.

        Returns:
            list[dict]: One result dict per image with key ``img_bbox``
                (and ``img_bbox2d`` when the head also predicts 2d boxes).
        """
        x = self.extract_feat(img)
        outs = self.bbox_head(x)
        bbox_outputs = self.bbox_head.get_bboxes(*outs,
                                                 img_metas,
                                                 rescale=rescale)

        if self.bbox_head.pred_bbox2d:
            from mmdet.core import bbox2result
            bbox2d_img = [
                bbox2result(bboxes2d, labels, self.bbox_head.num_classes)
                for bboxes, scores, labels, attrs, bboxes2d in bbox_outputs
            ]
            # Strip the trailing 2d boxes from every sample's tuple. The
            # previous code kept only the first sample
            # (``[bbox_outputs[0][:-1]]``), silently dropping results for
            # batch sizes > 1.
            bbox_outputs = [bbox_output[:-1] for bbox_output in bbox_outputs]

        bbox_img = [
            bbox3d2result(bboxes, scores, labels, attrs)
            for bboxes, scores, labels, attrs in bbox_outputs
        ]

        bbox_list = [dict() for i in range(len(img_metas))]
        for result_dict, img_bbox in zip(bbox_list, bbox_img):
            result_dict['img_bbox'] = img_bbox
        if self.bbox_head.pred_bbox2d:
            for result_dict, img_bbox2d in zip(bbox_list, bbox2d_img):
                result_dict['img_bbox2d'] = img_bbox2d
        return bbox_list
예제 #11
0
    def simple_test(self, points, img_metas, imgs=None, rescale=False):
        """Forward of testing.

        Args:
            points (list[torch.Tensor]): Points of each sample.
            img_metas (list): Image metas.
            rescale (bool): Whether to rescale results.

        Returns:
            list: Predicted 3d boxes.
        """
        stacked_points = torch.stack(points)

        feats = self.extract_feat(stacked_points)
        predictions = self.bbox_head(feats, self.test_cfg.sample_mod)
        decoded = self.bbox_head.get_bboxes(stacked_points,
                                            predictions,
                                            img_metas,
                                            rescale=rescale)
        results = []
        for boxes, scores, labels in decoded:
            results.append(bbox3d2result(boxes, scores, labels))
        return results
예제 #12
0
    def simple_test(self, points, img_metas, imgs=None, rescale=False):
        """Test function without augmentation.

        Fuses image features and a reflectance-based classification score
        into the point-cloud head outputs before decoding boxes.

        Args:
            points (list[torch.Tensor]): Points of each sample.
            img_metas (list[dict]): Meta info of each sample.
            imgs (optional): Images fed to the image encoder. Defaults to
                None.
            rescale (bool, optional): Whether to rescale the results.
                Defaults to False.

        Returns:
            list[dict]: Predicted 3d boxes, one result dict per sample.
        """
        x, ref_map = self.extract_feat(points, img_metas)

        # NOTE(review): image features are passed through the same
        # bbox_head as the point features -- confirm this is intended
        # rather than a dedicated image head.
        img_feat = self.img_encoder(imgs)
        img_feat = self.bbox_head([img_feat])

        outs = self.bbox_head(x)
        # Compute the cls score from the reflectance map.
        ref_cls_score = self.ref_to_cls(ref_map)

        # Add the image-branch predictions onto each head output in place.
        for i in range(len(outs)):
            outs[i][0] += img_feat[i][0]
        # Weight the cls output with the reflectance score.
        outs[0][0] *= (ref_cls_score * 2 + 1)

        bbox_list = self.bbox_head.get_bboxes(*outs,
                                              img_metas,
                                              rescale=rescale)
        bbox_results = [
            bbox3d2result(bboxes, scores, labels)
            for bboxes, scores, labels in bbox_list
        ]
        return bbox_results
예제 #13
0
    def _format_bbox(self, results, jsonfile_prefix=None):
        """Convert the results to the standard format.

        Detections arrive per camera image; every group of ``CAM_NUM``
        consecutive results is merged into one frame-level prediction via
        3d NMS before being written to the nuScenes submission file.

        Args:
            results (list[dict]): Testing results of the dataset.
            jsonfile_prefix (str): The prefix of the output jsonfile.
                You can specify the output directory/filename by
                modifying the jsonfile_prefix. Default: None.

        Returns:
            str: Path of the output json file.
        """
        nusc_annos = {}
        mapped_class_names = self.CLASSES

        print('Start to convert detection format...')

        # nuScenes has 6 cameras per frame.
        CAM_NUM = 6

        for sample_id, det in enumerate(mmcv.track_iter_progress(results)):

            # Start a fresh accumulator at the first camera of each frame.
            if sample_id % CAM_NUM == 0:
                boxes_per_frame = []
                attrs_per_frame = []

            # need to merge results from images of the same sample
            annos = []
            boxes, attrs = output_to_nusc_box(det)
            # NOTE(review): the token of the current image is reused for
            # the whole frame -- assumes all 6 cameras of a frame share
            # one sample token; confirm against data_infos.
            sample_token = self.data_infos[sample_id]['token']
            boxes, attrs = cam_nusc_box_to_global(self.data_infos[sample_id],
                                                  boxes, attrs,
                                                  mapped_class_names,
                                                  self.eval_detection_configs,
                                                  self.eval_version)

            boxes_per_frame.extend(boxes)
            attrs_per_frame.extend(attrs)
            # Remove redundant predictions caused by overlap of images
            if (sample_id + 1) % CAM_NUM != 0:
                continue
            # Project the accumulated global boxes back into the frame's
            # first camera so NMS runs in a single coordinate system.
            boxes = global_nusc_box_to_cam(
                self.data_infos[sample_id + 1 - CAM_NUM], boxes_per_frame,
                mapped_class_names, self.eval_detection_configs,
                self.eval_version)
            cam_boxes3d, scores, labels = nusc_box_to_cam_box3d(boxes)
            # box nms 3d over 6 images in a frame
            # TODO: move this global setting into config
            nms_cfg = dict(use_rotate_nms=True,
                           nms_across_levels=False,
                           nms_pre=4096,
                           nms_thr=0.05,
                           score_thr=0.01,
                           min_bbox_size=0,
                           max_per_frame=500)
            from mmcv import Config
            nms_cfg = Config(nms_cfg)
            # NMS expects BEV boxes converted to corner (xyxyr) form.
            cam_boxes3d_for_nms = xywhr2xyxyr(cam_boxes3d.bev)
            boxes3d = cam_boxes3d.tensor
            # generate attr scores from attr labels
            attrs = labels.new_tensor([attr for attr in attrs_per_frame])
            boxes3d, scores, labels, attrs = box3d_multiclass_nms(
                boxes3d,
                cam_boxes3d_for_nms,
                scores,
                nms_cfg.score_thr,
                nms_cfg.max_per_frame,
                nms_cfg,
                mlvl_attr_scores=attrs)
            cam_boxes3d = CameraInstance3DBoxes(boxes3d, box_dim=9)
            det = bbox3d2result(cam_boxes3d, scores, labels, attrs)
            boxes, attrs = output_to_nusc_box(det)
            # Convert the merged, de-duplicated boxes back to global
            # coordinates for the submission file.
            boxes, attrs = cam_nusc_box_to_global(
                self.data_infos[sample_id + 1 - CAM_NUM], boxes, attrs,
                mapped_class_names, self.eval_detection_configs,
                self.eval_version)

            for i, box in enumerate(boxes):
                name = mapped_class_names[box.label]
                attr = self.get_attr_name(attrs[i], name)
                nusc_anno = dict(sample_token=sample_token,
                                 translation=box.center.tolist(),
                                 size=box.wlh.tolist(),
                                 rotation=box.orientation.elements.tolist(),
                                 velocity=box.velocity[:2].tolist(),
                                 detection_name=name,
                                 detection_score=box.score,
                                 attribute_name=attr)
                annos.append(nusc_anno)
            # other views results of the same frame should be concatenated
            if sample_token in nusc_annos:
                nusc_annos[sample_token].extend(annos)
            else:
                nusc_annos[sample_token] = annos

        nusc_submissions = {
            'meta': self.modality,
            'results': nusc_annos,
        }

        mmcv.mkdir_or_exist(jsonfile_prefix)
        res_path = osp.join(jsonfile_prefix, 'results_nusc.json')
        print('Results writes to', res_path)
        mmcv.dump(nusc_submissions, res_path)
        return res_path
예제 #14
0
    def simple_test(self,
                    points=None,
                    img_metas=None,
                    img=None,
                    bboxes_2d=None,
                    rescale=False,
                    **kwargs):
        """Test without augmentation, stage 2.

        Args:
            points (list[torch.Tensor], optional): Elements in the list
                should have a shape NxC, the list indicates all point-clouds
                in the batch. Defaults to None.
            img_metas (list[dict], optional): List indicates
                images in a batch. Defaults to None.
            img (torch.Tensor, optional): Should have a shape NxCxHxW,
                which contains all images in the batch. Defaults to None.
            bboxes_2d (list[torch.Tensor], optional):
                Provided 2d bboxes, not supported yet. Defaults to None.
            rescale (bool, optional): Whether or not rescale bboxes.
                Defaults to False.

        Returns:
            list[dict]: Predicted 3d boxes.
        """
        bboxes_2d = self.extract_bboxes_2d(
            img, img_metas, train=False, bboxes_2d=bboxes_2d, **kwargs)

        points = torch.stack(points)
        seeds_3d, seed_3d_features, seed_indices = \
            self.extract_pts_feat(points)

        # Fuse image cues onto the 3d seeds; masks marks which seeds got
        # valid image features.
        img_features, masks = self.fusion_layer(img, bboxes_2d, seeds_3d,
                                                img_metas)

        # Sample a fixed number of valid seeds, then gather image
        # features, seed coordinates, point features and indices with the
        # same sampled indices so all tensors stay aligned.
        inds = sample_valid_seeds(masks, self.num_sampled_seed)
        batch_size, img_feat_size = img_features.shape[:2]
        pts_feat_size = seed_3d_features.shape[1]
        inds_img = inds.view(batch_size, 1, -1).expand(-1, img_feat_size, -1)
        img_features = img_features.gather(-1, inds_img)
        # NOTE(review): presumably folds the indices back into the
        # per-sample seed range -- confirm against sample_valid_seeds.
        inds = inds % inds.shape[1]
        inds_seed_xyz = inds.view(batch_size, -1, 1).expand(-1, -1, 3)
        seeds_3d = seeds_3d.gather(1, inds_seed_xyz)
        inds_seed_feats = inds.view(batch_size, 1,
                                    -1).expand(-1, pts_feat_size, -1)
        seed_3d_features = seed_3d_features.gather(-1, inds_seed_feats)
        seed_indices = seed_indices.gather(1, inds)

        # Project image features before concatenating with point features.
        img_features = self.img_mlp(img_features)

        fused_features = torch.cat([seed_3d_features, img_features], dim=1)

        feat_dict = dict(
            seed_points=seeds_3d,
            seed_features=fused_features,
            seed_indices=seed_indices)
        bbox_preds = self.pts_bbox_head_joint(feat_dict,
                                              self.test_cfg.pts.sample_mod)
        bbox_list = self.pts_bbox_head_joint.get_bboxes(
            points, bbox_preds, img_metas, rescale=rescale)
        bbox_results = [
            bbox3d2result(bboxes, scores, labels)
            for bboxes, scores, labels in bbox_list
        ]
        return bbox_results
예제 #15
0
    def aug_test(self, imgs, img_metas, rescale=False):
        """Test function with test time augmentation.

        Runs the head on every augmented view, undoes horizontal flips on
        the raw predictions, averages the views, and decodes boxes once.

        Args:
            imgs (list[torch.Tensor]): Augmented views of the sample.
            img_metas (list): Metas of each augmented view.
            rescale (bool, optional): Whether to rescale the results.
                Defaults to False.

        Returns:
            list[dict]: Single-element list with the merged predictions.
        """
        feats = self.extract_feats(imgs)

        # only support aug_test for one sample
        outs_list = [self.bbox_head(x) for x in feats]
        for i, img_meta in enumerate(img_metas):
            if img_meta[0]['pcd_horizontal_flip']:
                # Flip the feature maps back so all views are aligned.
                for j in range(len(outs_list[i])):  # for each prediction
                    if outs_list[i][j][0] is None:
                        continue
                    for k in range(len(outs_list[i][j])):
                        # every stride of featmap
                        outs_list[i][j][k] = torch.flip(outs_list[i][j][k],
                                                        dims=[3])
                # Compensate the flip in the regression channels.
                reg = outs_list[i][1]
                for reg_feat in reg:
                    # offset_x
                    reg_feat[:, 0, :, :] = 1 - reg_feat[:, 0, :, :]
                    # velo_x
                    if self.bbox_head.pred_velo:
                        reg_feat[:, 7, :, :] = -reg_feat[:, 7, :, :]
                    # rotation
                    reg_feat[:, 6, :, :] = -reg_feat[:, 6, :, :] + np.pi

        # Average the aligned predictions across the augmented views.
        merged_outs = []
        for i in range(len(outs_list[0])):  # for each prediction
            merged_feats = []
            for j in range(len(outs_list[0][i])):
                if outs_list[0][i][0] is None:
                    merged_feats.append(None)
                    continue
                # for each stride of featmap
                avg_feats = torch.mean(torch.cat([x[i][j] for x in outs_list]),
                                       dim=0,
                                       keepdim=True)
                if i == 1:  # regression predictions
                    # rot/velo/2d det keeps the original
                    avg_feats[:, 6:, :, :] = \
                        outs_list[0][i][j][:, 6:, :, :]
                if i == 2:
                    # dir_cls keeps the original
                    avg_feats = outs_list[0][i][j]
                merged_feats.append(avg_feats)
            merged_outs.append(merged_feats)
        merged_outs = tuple(merged_outs)

        bbox_outputs = self.bbox_head.get_bboxes(*merged_outs,
                                                 img_metas[0],
                                                 rescale=rescale)
        if self.bbox_head.pred_bbox2d:
            from mmdet.core import bbox2result
            bbox2d_img = [
                bbox2result(bboxes2d, labels, self.bbox_head.num_classes)
                for bboxes, scores, labels, attrs, bboxes2d in bbox_outputs
            ]
            # Drop the trailing 2d boxes before 3d result packing.
            bbox_outputs = [bbox_outputs[0][:-1]]

        bbox_img = [
            bbox3d2result(bboxes, scores, labels, attrs)
            for bboxes, scores, labels, attrs in bbox_outputs
        ]

        bbox_list = dict()
        bbox_list.update(img_bbox=bbox_img[0])
        if self.bbox_head.pred_bbox2d:
            bbox_list.update(img_bbox2d=bbox2d_img[0])

        return [bbox_list]
예제 #16
0
    def simple_test_pts(self,
                        pts_feats,
                        img_metas,
                        rescale=False,
                        voxel_feats=None,
                        voxel_coors=None,
                        point_feats=None,
                        point_coors=None,
                        pts_res_dict=None,
                        ori_points=None):
        """Test function of point cloud branch.

        Predicts per-voxel semantic labels, maps them to points, and --
        when ``self.need_instance`` is set -- additionally predicts
        instance labels and 3d boxes from the instance head.

        Returns:
            tuple: (point_sem_labels, point_ins_labels, bbox_results);
                the latter two are lists of None when the instance branch
                is disabled or produces no boxes.
        """

        # Per-voxel semantic class logits from the semantic head.
        pred_v_semantic_cls = self.pts_bbox_head(pts_feats,
                                                 mode='semantic',
                                                 voxel_feat=voxel_feats,
                                                 voxel_coors=voxel_coors)
        pred_v_sem_labels = []

        # argmax over the class dimension gives hard per-voxel labels.
        for temp in pred_v_semantic_cls:
            pred_v_sem_labels.append(torch.argmax(temp, dim=1))

        # Scatter voxel labels back to the original points.
        point_sem_labels = self.pts_bbox_head.get_point_label_from_voxel(
            pred_v_sem_labels, voxel_coors, point_coors)

        point_ins_labels, bbox_results = [None for _ in point_sem_labels
                                          ], [None for _ in point_sem_labels]
        if self.need_instance:
            instance_fm = pts_res_dict["low_reso_fm"]

            voxel_sem_feats = None
            if self.instance_head.use_sem_feats:
                # Use softmaxed foreground-class scores (class 0 dropped)
                # as auxiliary features for the instance head.
                pred_v_semantic_cls = torch.cat(pred_v_semantic_cls, dim=0)
                voxel_sem_feats = pred_v_semantic_cls.detach()[:, 1:]
                voxel_sem_feats = F.softmax(voxel_sem_feats, dim=1)

            # filter: reserve thing class
            temp_v_sem_labels = torch.cat(pred_v_sem_labels, dim=0)
            thing2point_labels = torch.tensor(
                self.instance_head.thing2point_labels).type_as(
                    temp_v_sem_labels)
            # True for voxels whose semantic label is any "thing" class.
            pred_v_thing_mask = (
                temp_v_sem_labels[:,
                                  None] == thing2point_labels[None, :]).any(1)

            # Restrict the instance inputs to thing voxels only.
            pts_res_dict['ins_voxel_mean'] = pts_res_dict['ins_voxel_mean'][
                pred_v_thing_mask].view(-1,
                                        pts_res_dict['ins_voxel_mean'].size(1))
            pts_res_dict['ins_voxel_coors'] = pts_res_dict['ins_voxel_coors'][
                pred_v_thing_mask].view(
                    -1, pts_res_dict['ins_voxel_coors'].size(1))

            v_offset_pred_list, p_offset_pred_list, hm_preds, bbox_preds = \
                self.instance_head(instance_fm,
                    voxel_feat=voxel_feats[pred_v_thing_mask].view(-1, voxel_feats.size(1)),
                    voxel_coors=voxel_coors[pred_v_thing_mask].view(-1, voxel_coors.size(1)),
                    voxel_sem_feats=voxel_sem_feats[pred_v_thing_mask].view(-1, voxel_sem_feats.size(1)))

            pred_bboxes_list = [None for _ in point_sem_labels]
            if self.instance_head.need_bbox:
                bbox_preds_dict = self.instance_head.format_bbox_preds(
                    hm_preds, bbox_preds)
                pred_bboxes_list = self.instance_head.get_bboxes(
                    bbox_preds_dict, img_metas)
                bbox_results = [
                    bbox3d2result(bboxes, scores, labels)
                    for bboxes, scores, labels in pred_bboxes_list
                ]
            else:
                bbox_results = [None for _ in point_sem_labels]

            # Cluster shifted voxels into instances; may also refine the
            # semantic labels.
            pred_v_ins_labels, pred_v_sem_labels, debug_v_shift_xyz_list, debug_valid_v_mask_list = \
                self.instance_head.get_ins_label(
                    v_offset_pred_list[-1],
                    pts_res_dict['ins_voxel_mean'],
                    voxel_coors,
                    pred_bboxes_list,
                    pred_v_sem_labels,
                    pred_v_thing_mask)

            point_ins_labels = self.pts_bbox_head.get_point_label_from_voxel(
                pred_v_ins_labels, voxel_coors, point_coors)
            if self.instance_head.test_cfg['refine_sem_labels']:
                point_sem_labels = self.pts_bbox_head.get_point_label_from_voxel(
                    pred_v_sem_labels, voxel_coors, point_coors)

            # debug
            # NOTE(review): dumps intermediate shifted points/labels to
            # hard-coded local paths; disabled by default.
            need_debug = False
            if need_debug:
                debug_shift_point_xyz = self.pts_bbox_head.get_point_label_from_voxel(
                    debug_v_shift_xyz_list, voxel_coors, point_coors)
                debug_shift_point_mask = self.pts_bbox_head.get_point_label_from_voxel(
                    debug_valid_v_mask_list, voxel_coors, point_coors)
                debug_shift_point_xyz = debug_shift_point_xyz[0][
                    debug_shift_point_mask[0]]
                debug_shift_point_sem_label = point_sem_labels[0][
                    debug_shift_point_mask[0]]
                debug_shift_point_ins_label = point_ins_labels[0][
                    debug_shift_point_mask[0]]

                debug_pts = torch.cat(
                    [debug_shift_point_xyz, ori_points[0][:, :3]], dim=0)
                debug_pts = torch.cat([debug_pts, debug_pts[:, :2]], dim=-1)

                debug_sem_label = torch.cat([
                    debug_shift_point_sem_label,
                    debug_shift_point_sem_label.new_zeros(
                        ori_points[0].size(0))
                ])
                debug_ins_label = torch.cat([
                    debug_shift_point_ins_label,
                    debug_shift_point_ins_label.new_zeros(
                        ori_points[0].size(0))
                ])
                debug_label = torch.cat(
                    [debug_sem_label.view(-1, 1),
                     debug_ins_label.view(-1, 1)],
                    dim=-1)

                debug_pts = debug_pts.cpu().numpy()

                file_name = str(self.debug_global_id).zfill(6)

                bin_file_path = '/home/radmin/jk/code/seg/SelectiveSeg/work_dirs/debug/lidar'
                debug_pts.tofile(bin_file_path + '/' + file_name + '.bin')

                debug_label = debug_label.cpu().numpy()
                debug_label = np.array(debug_label, dtype=np.uint8)

                label_file_path = '/home/radmin/jk/code/seg/SelectiveSeg/work_dirs/debug/label'
                debug_label.tofile(label_file_path + '/' + file_name + '.bin')

                self.debug_global_id = self.debug_global_id + 1
                self.instance_head.debug_bbox_file_id = self.debug_global_id
        # Dead reference implementation kept as a no-op string statement.
        '''
        outs = self.pts_bbox_head(x)
        bbox_list = self.pts_bbox_head.get_bboxes(
            outs, img_metas, rescale=rescale)
        bbox_results = [
            bbox3d2result(bboxes, scores, labels)
            for bboxes, scores, labels in bbox_list
        ]
        '''
        return point_sem_labels, point_ins_labels, bbox_results