def test_points_in_boxes_gpu():
    """Check that points_in_boxes_gpu maps each point to its containing box
    index (-1 for points outside every box), on one and two CUDA devices."""
    if not torch.cuda.is_available():
        pytest.skip('test requires GPU and torch+cuda')

    # boxes (b, t, 7) with bottom center in lidar coordinate
    box_data = [
        [[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 0.3]],
        [[-10.0, 23.0, 16.0, 10, 20, 20, 0.5]],
    ]
    # points (b, m, 3) in lidar coordinate
    point_data = [
        [[1, 2, 3.3], [1.2, 2.5, 3.0], [0.8, 2.1, 3.5], [1.6, 2.6, 3.6],
         [0.8, 1.2, 3.9], [-9.2, 21.0, 18.2], [3.8, 7.9, 6.3],
         [4.7, 3.5, -12.2]],
        [[3.8, 7.6, -2], [-10.6, -12.9, -20], [-16, -18, 9], [-21.3, -52, -5],
         [0, 0, 0], [6, 7, 8], [-2, -3, -4], [6, 4, 9]],
    ]
    expected = [[0, 0, 0, 0, 0, -1, -1, -1],
                [-1, -1, -1, -1, -1, -1, -1, -1]]

    boxes = torch.tensor(box_data, dtype=torch.float32).cuda()
    pts = torch.tensor(point_data, dtype=torch.float32).cuda()
    expected_point_indices = torch.tensor(expected, dtype=torch.int32).cuda()

    point_indices = points_in_boxes_gpu(points=pts, boxes=boxes)
    assert point_indices.shape == torch.Size([2, 8])
    assert (point_indices == expected_point_indices).all()

    # Repeat the same check on a second device when one is available.
    if torch.cuda.device_count() > 1:
        pts = pts.to('cuda:1')
        boxes = boxes.to('cuda:1')
        expected_point_indices = expected_point_indices.to('cuda:1')
        point_indices = points_in_boxes_gpu(points=pts, boxes=boxes)
        assert point_indices.shape == torch.Size([2, 8])
        assert (point_indices == expected_point_indices).all()
def calc(model, data_loader):
    """Evaluate ``model`` on ``data_loader`` for point segmentation and for
    detection results converted to per-point labels.

    The model runs in eval mode without gradients and returns
    ``(seg_res, box_res)`` per batch.  Segmentation logits are argmax-ed and
    split back into per-sample chunks for ``seg_eval``; predicted 3D boxes
    paint per-point pseudo labels (points in no box keep label 4) that are
    scored by ``det_eval``.  Per-class tables and overall acc/iou are printed
    at the end.

    Args:
        model: Wrapped network; called as
            ``model(return_loss=False, rescale=True, **data)``.
        data_loader: Loader whose dataset exposes ``SEG_CLASSES``.
    """
    model.eval()
    dataset = data_loader.dataset
    prog_bar = mmcv.ProgressBar(len(dataset))
    seg_eval = SegEvaluator(class_names=dataset.SEG_CLASSES)
    # NOTE(review): detection quality is also measured with a SegEvaluator,
    # by turning boxes into per-point labels below — presumably intentional.
    det_eval = SegEvaluator(class_names=dataset.SEG_CLASSES)
    print('\nStart Test Loop')
    # print('batch_size:', data_loader.batch_size)  # 1
    for idx, data in enumerate(data_loader):
        # print(type(data['img'][0]))  # DataContainter
        with torch.no_grad():
            seg_res, box_res = model(return_loss=False, rescale=True, **data)
        # len(box_res) == batch_size == 1
        # box_res: [dict('pts_bbox'=dict(boxes_3d=bboxes, scores_3d=scores, labels_3d=labels))]

        # handle seg: slice the flat batch prediction into per-sample chunks
        # using the ground-truth label lengths.
        seg_label = data['seg_label'][0].data[0]  # list of tensor
        seg_pts_indices = data['seg_pts_indices'][0].data[0]  # list of tensor
        seg_points = data['seg_points'][0].data[0]
        seg_pred = seg_res.argmax(1).cpu().numpy()
        pred_list = []
        gt_list = []
        left_idx = 0
        for i in range(len(seg_label)):
            # num_points = len(seg_pts_indices[i])
            assert len(seg_label[i]) == len(seg_pts_indices[i])
            num_points = len(seg_label[i])
            right_idx = left_idx + num_points
            pred_list.append(seg_pred[left_idx:right_idx])
            gt_list.append(seg_label[i].numpy())
            left_idx = right_idx
        seg_eval.batch_update(pred_list, gt_list)

        # handle det: label each point with the class of the predicted box
        # that contains it.
        dic = box_res[0]['pts_bbox']
        tensor_boxes = dic['boxes_3d'].tensor[:, :7].cuda()  # keep 7 box params
        labels = dic['labels_3d']

        num_seg_pts = len(seg_points[0])
        num_pred_boxes = len(tensor_boxes)
        fake_labels = torch.tensor([4] * num_seg_pts)  # 4 == background — TODO confirm
        # box_idx[i] == -1: point i lies in no box; k >= 0: inside k-th box
        box_idx = points_in_boxes_gpu(seg_points[0].cuda().unsqueeze(0),
                                      tensor_boxes.unsqueeze(0)).squeeze(0)
        for i in range(num_pred_boxes):
            mask = box_idx == i  # select points in i_th box
            fake_labels[mask] = labels[i]
        # NOTE(review): fake_labels is converted to numpy but seg_label[0] is
        # still a torch tensor — verify SegEvaluator.update accepts the mix.
        det_eval.update(fake_labels.numpy(), seg_label[0])

        # progress bar
        batch_size = len(box_res)
        for _ in range(batch_size):
            prog_bar.update()

    print(seg_eval.print_table())
    print('overall_acc:', seg_eval.overall_acc)
    print('overall_iou:', seg_eval.overall_iou)
    print(det_eval.print_table())
    print('overall_acc:', det_eval.overall_acc)
    print('overall_iou:', det_eval.overall_iou)
Example #3
0
    def points_in_boxes(self, points):
        """Map each point to the index of the box that contains it.

        Args:
            points (torch.Tensor): Points in shape (N, 3).

        Returns:
            torch.Tensor: Per-point box index.
        """
        batched_points = points.unsqueeze(0)
        batched_boxes = self.tensor.unsqueeze(0).to(points.device)
        return points_in_boxes_gpu(batched_points, batched_boxes).squeeze(0)
Example #4
0
    def forward_target(
            self,
            img=None,
            seg_points=None,
            seg_pts_indices=None,
            seg_label=None,  # just for debug
            points=None,
            pts_indices=None,
            img_metas=None,
            **kwargs):
        """Target-domain forward pass: supervise the image segmentation head
        with pseudo labels painted from the detection head's predicted boxes.

        Args:
            img: Image batch, e.g. (4, 3, 225, 400).
            seg_points: list of tensor (N, 4); len == batch_size.
            seg_pts_indices: list of tensor (N, 2); len == batch_size.
            seg_label: list of tensor (N, ); len == batch_size (debug only).
            points: list of tensor (M, 4); len == batch_size.
            pts_indices: list of tensor (M, 2); len == batch_size.
            img_metas: per-sample image meta information.
            **kwargs: unused extra inputs.

        Returns:
            Target segmentation loss from ``self.img_seg_head.loss``
            (cross-entropy against the pseudo labels).
        """
        # no aug on target; if aug, pay attention to seg_points
        img_feats, pts_feats = self.extract_feat(points, pts_indices, img,
                                                 img_metas)
        seg_logits = self.img_seg_head(img_feats=img_feats,
                                       seg_pts=seg_points,
                                       seg_pts_indices=seg_pts_indices)
        # seg_logits.shape=(num_seg_pts_batch, num_classes=4+1)

        outs = self.pts_bbox_head(pts_feats)
        bbox_list = self.pts_bbox_head.get_bboxes(*outs, img_metas)
        # bbox_list: list of tuple; len(bbox_list) = target_batch_size
        # bbox_list[i]: (bboxes, scores, labels)
        # bboxes: LiDARInstance3DBoxes of tensor (num_pred, 9); scores: tensor (num_pred, ); labels: tensor (num_pred, )

        fake_labels = []
        for batch_id, (bboxes, scores, labels) in enumerate(bbox_list):
            seg_points_i = seg_points[batch_id]
            num_seg_pts = len(seg_points_i)
            num_pred_boxes = len(labels)
            tensor_boxes = bboxes.tensor[:, :
                                         7]  # len(tensor_boxes) == len(scores) == len(labels) == num_pred_boxes

            fake_labels_i = torch.tensor([self.img_seg_head.num_classes - 1] *
                                         num_seg_pts).to(seg_points[0].device)
            # fake_labels_i: tensor (num_seg_pts, ); filled with num_classes-1 (background == 4)
            # NOTE(review): seg_points_i is (N, 4) per the docstring, but the
            # op is used elsewhere with (b, m, 3) points — confirm the extra
            # column is accepted/ignored by points_in_boxes_gpu.
            box_idx = points_in_boxes_gpu(seg_points_i.unsqueeze(0),
                                          tensor_boxes.unsqueeze(0)).squeeze(0)
            # box_idx: tensor (num_seg_pts, ); box_idx[i]==-1: not in any bounding box; box_idx[i]=k>=0: in k_th box
            for i in range(num_pred_boxes):
                mask = box_idx == i  # select points in i_th box
                fake_labels_i[mask] = labels[i]

            fake_labels.append(fake_labels_i)

        target_seg_loss = self.img_seg_head.loss(seg_logits,
                                                 fake_labels)  # cross_entropy
        return target_seg_loss
Example #5
0
for idx in range(len(dataset)):
    data = dataset[idx]
    points = data['points'].data[:, :3]
    seg_points = data['seg_points'].data[:, :3]  # (N, 4) -> (N, 3)
    seg_labels = data['seg_label'].data  # (N, ) 
    gt_bboxes_3d = data['gt_bboxes_3d'].data  # tensor=(M, 9)
    gt_labels_3d = data['gt_labels_3d'].data  # (M, )


    # LiDARInstance3DBoxes points_in_boxes
    tensor_boxes = gt_bboxes_3d.tensor.cuda()
    pts1 = seg_points.cuda()
    start = time.time()
    tensor_boxes = tensor_boxes[:, :7]
    box_idx = points_in_boxes_gpu(pts1.unsqueeze(0), tensor_boxes.unsqueeze(0)).squeeze(0)
    # boxes = LiDARInstance3DBoxes(gt_bboxes_3d.tensor[:, :7])
    # box_idx = boxes.points_in_boxes(seg_points[:, :3].cuda())

    # 1.
    # fake_labels = torch.tensor([num_classes-1] * len(seg_labels))
    # for i in range(len(box_idx)):
    #     if box_idx[i] != -1:
    #         fake_labels[i] = gt_labels_3d[box_idx[i]]

    # 2.
    # fake_labels = torch.tensor(list(map(lambda x: gt_labels_3d[x] if x >= 0 else num_classes-1, box_idx)))

    # 3.
    fake_labels = torch.tensor([num_classes-1] * len(seg_labels))
    mask = box_idx != -1