Example #1
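All examples below are excerpted from mmdetection3d's data-pipeline tests and omit their imports. A representative header is sketched here; the exact module paths are an assumption based on mmdetection3d's layout and may differ between versions:

import mmcv
import numpy as np
import pytest
import torch

from mmdet3d.core.bbox import Coord3DMode, DepthInstance3DBoxes
from mmdet3d.core.points import CameraPoints, DepthPoints, LiDARPoints
from mmdet3d.datasets.pipelines import (
    GlobalAlignment, GlobalRotScaleTrans, IndoorPatchPointSample,
    MultiScaleFlipAug3D, NormalizePointsColor, PointSample,
    PointSegClassMapping, PointShuffle, PointsRangeFilter,
    RandomDropPointsColor, RandomJitterPoints)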
def test_indoor_sample():
    np.random.seed(0)
    scannet_sample_points = PointSample(5)
    scannet_results = dict()
    scannet_points = np.array([[1.0719866, -0.7870435, 0.8408122, 0.9196809],
                               [1.103661, 0.81065744, 2.6616862, 2.7405548],
                               [1.0276475, 1.5061463, 2.6174362, 2.6963048],
                               [-0.9709588, 0.6750515, 0.93901765, 1.0178864],
                               [1.0578915, 1.1693821, 0.87503505, 0.95390373],
                               [0.05560996, -1.5688863, 1.2440368, 1.3229055],
                               [-0.15731563, -1.7735453, 2.7535574, 2.832426],
                               [1.1188195, -0.99211365, 2.5551798, 2.6340485],
                               [-0.9186557, -1.7041215, 2.0562649, 2.1351335],
                               [-1.0128691, -1.3394243, 0.040936, 0.1198047]])
    scannet_results['points'] = DepthPoints(scannet_points,
                                            points_dim=4,
                                            attribute_dims=dict(height=3))
    scannet_pts_instance_mask = np.array(
        [15, 12, 11, 38, 0, 18, 17, 12, 17, 0])
    scannet_results['pts_instance_mask'] = scannet_pts_instance_mask
    scannet_pts_semantic_mask = np.array([38, 1, 1, 40, 0, 40, 1, 1, 1, 0])
    scannet_results['pts_semantic_mask'] = scannet_pts_semantic_mask
    scannet_results = scannet_sample_points(scannet_results)
    scannet_points_result = scannet_results['points'].tensor.numpy()
    scannet_instance_labels_result = scannet_results['pts_instance_mask']
    scannet_semantic_labels_result = scannet_results['pts_semantic_mask']
    scannet_choices = np.array([2, 8, 4, 9, 1])
    assert np.allclose(scannet_points[scannet_choices], scannet_points_result)
    assert np.all(scannet_pts_instance_mask[scannet_choices] ==
                  scannet_instance_labels_result)
    assert np.all(scannet_pts_semantic_mask[scannet_choices] ==
                  scannet_semantic_labels_result)

    np.random.seed(0)
    sunrgbd_sample_points = PointSample(5)
    sunrgbd_results = dict()
    sunrgbd_point_cloud = np.array(
        [[-1.8135729e-01, 1.4695230e+00, -1.2780589e+00, 7.8938007e-03],
         [1.2581362e-03, 2.0561588e+00, -1.0341064e+00, 2.5184631e-01],
         [6.8236995e-01, 3.3611867e+00, -9.2599887e-01, 3.5995382e-01],
         [-2.9432583e-01, 1.8714852e+00, -9.0929651e-01, 3.7665617e-01],
         [-0.5024875, 1.8032674, -1.1403012, 0.14565146],
         [-0.520559, 1.6324949, -0.9896099, 0.2963428],
         [0.95929825, 2.9402404, -0.8746674, 0.41128528],
         [-0.74624217, 1.5244724, -0.8678476, 0.41810507],
         [0.56485355, 1.5747732, -0.804522, 0.4814307],
         [-0.0913099, 1.3673826, -1.2800645, 0.00588822]])
    sunrgbd_results['points'] = DepthPoints(sunrgbd_point_cloud,
                                            points_dim=4,
                                            attribute_dims=dict(height=3))
    sunrgbd_results = sunrgbd_sample_points(sunrgbd_results)
    sunrgbd_choices = np.array([2, 8, 4, 9, 1])
    sunrgbd_points_result = sunrgbd_results['points'].tensor.numpy()
    repr_str = repr(sunrgbd_sample_points)
    expected_repr_str = 'PointSample(num_points=5, ' \
                        'sample_range=None, ' \
                        'replace=False)'
    assert repr_str == expected_repr_str
    assert np.allclose(sunrgbd_point_cloud[sunrgbd_choices],
                       sunrgbd_points_result)
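With a fixed seed, the assertions above pin PointSample down to a plain index draw. A minimal sketch of that behaviour (inferred from the test, not taken from the library source):

def sample_points(points, num_points, replace=False):
    # Draw num_points row indices; replace=False means no point is picked
    # twice, so it requires len(points) >= num_points.
    choices = np.random.choice(len(points), num_points, replace=replace)
    return points[choices]

With np.random.seed(0) and ten input points the draw is [2, 8, 4, 9, 1], which is exactly the scannet_choices and sunrgbd_choices arrays checked above.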
Example #2
def test_random_drop_points_color():
    # drop_ratio should be in [0, 1]
    with pytest.raises(AssertionError):
        random_drop_points_color = RandomDropPointsColor(drop_ratio=1.1)

    # 100% drop
    random_drop_points_color = RandomDropPointsColor(drop_ratio=1)

    points = np.fromfile('tests/data/scannet/points/scene0000_00.bin',
                         np.float32).reshape(-1, 6)
    depth_points = DepthPoints(points.copy(),
                               points_dim=6,
                               attribute_dims=dict(color=[3, 4, 5]))

    input_dict = dict(points=depth_points.clone())

    input_dict = random_drop_points_color(input_dict)
    trans_depth_points = input_dict['points']
    trans_color = trans_depth_points.color
    assert torch.all(trans_color == trans_color.new_zeros(trans_color.shape))

    # 0% drop
    random_drop_points_color = RandomDropPointsColor(drop_ratio=0)
    input_dict = dict(points=depth_points.clone())

    input_dict = random_drop_points_color(input_dict)
    trans_depth_points = input_dict['points']
    trans_color = trans_depth_points.color
    assert torch.allclose(trans_color, depth_points.tensor[:, 3:6])

    random_drop_points_color = RandomDropPointsColor(drop_ratio=0.5)
    repr_str = repr(random_drop_points_color)
    expected_repr_str = 'RandomDropPointsColor(drop_ratio=0.5)'
    assert repr_str == expected_repr_str
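The two assertions bound the behaviour completely: drop_ratio=1 zeroes every colour, drop_ratio=0 changes nothing. As a sketch (assumed behaviour, not the library code):

def drop_points_color(color, drop_ratio):
    # With probability drop_ratio, zero the (N, 3) colour channels in place.
    if np.random.rand() < drop_ratio:
        color[:] = 0
    return color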
Example #3
def test_global_alignment():
    np.random.seed(0)
    global_alignment = GlobalAlignment(rotation_axis=2)

    points = np.fromfile('tests/data/scannet/points/scene0000_00.bin',
                         np.float32).reshape(-1, 6)
    annos = mmcv.load('tests/data/scannet/scannet_infos.pkl')
    info = annos[0]
    axis_align_matrix = info['annos']['axis_align_matrix']

    depth_points = DepthPoints(points.copy(), points_dim=6)

    input_dict = dict(points=depth_points.clone(),
                      ann_info=dict(axis_align_matrix=axis_align_matrix))

    input_dict = global_alignment(input_dict)
    trans_depth_points = input_dict['points']

    # construct expected transformed points by affine transformation
    pts = np.ones((points.shape[0], 4))
    pts[:, :3] = points[:, :3]
    trans_pts = np.dot(pts, axis_align_matrix.T)
    expected_points = np.concatenate([trans_pts[:, :3], points[:, 3:]], axis=1)

    assert np.allclose(trans_depth_points.tensor.numpy(),
                       expected_points,
                       atol=1e-6)

    repr_str = repr(global_alignment)
    expected_repr_str = 'GlobalAlignment(rotation_axis=2)'
    assert repr_str == expected_repr_str
Example #4
def test_points_range_filter():
    pcd_range = [0.0, 0.0, 0.0, 3.0, 3.0, 3.0]
    points_range_filter = PointsRangeFilter(pcd_range)

    points = np.fromfile('tests/data/scannet/points/scene0000_00.bin',
                         np.float32).reshape(-1, 6)
    ins_mask = np.fromfile('tests/data/scannet/instance_mask/scene0000_00.bin',
                           np.int64)
    sem_mask = np.fromfile('tests/data/scannet/semantic_mask/scene0000_00.bin',
                           np.int64)

    points = DepthPoints(points.copy(),
                         points_dim=6,
                         attribute_dims=dict(color=[3, 4, 5]))
    input_dict = dict(points=points.clone(),
                      pts_instance_mask=ins_mask.copy(),
                      pts_semantic_mask=sem_mask.copy())
    results = points_range_filter(input_dict)
    filtered_pts = results['points']
    filtered_ins_mask = results['pts_instance_mask']
    filtered_sem_mask = results['pts_semantic_mask']

    select_idx = np.array(
        [5, 11, 22, 26, 27, 33, 46, 47, 56, 63, 74, 78, 79, 91])
    expected_pts = points.tensor.numpy()[select_idx]
    expected_ins_mask = ins_mask[select_idx]
    expected_sem_mask = sem_mask[select_idx]

    assert np.allclose(filtered_pts.tensor.numpy(), expected_pts)
    assert np.all(filtered_ins_mask == expected_ins_mask)
    assert np.all(filtered_sem_mask == expected_sem_mask)

    repr_str = repr(points_range_filter)
    expected_repr_str = f'PointsRangeFilter(point_cloud_range={pcd_range})'
    assert repr_str == expected_repr_str
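select_idx above encodes a simple box test on the coordinates. A sketch of the rule (assumption: strict bounds; whether the limits are inclusive is an implementation detail the test does not distinguish):

def in_range_mask(xyz, pcd_range):
    # xyz: (N, 3); pcd_range: [x_min, y_min, z_min, x_max, y_max, z_max]
    low = np.array(pcd_range[:3])
    high = np.array(pcd_range[3:])
    return np.all((xyz > low) & (xyz < high), axis=1)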
Example #5
def test_normalize_points_color():
    coord = np.array([[68.137, 3.358, 2.516], [67.697, 3.55, 2.501],
                      [67.649, 3.76, 2.5], [66.414, 3.901, 2.459],
                      [66.012, 4.085, 2.446], [65.834, 4.178, 2.44],
                      [65.841, 4.386, 2.44], [65.745, 4.587, 2.438],
                      [65.551, 4.78, 2.432], [65.486, 4.982, 2.43]])
    color = np.array([[131, 95, 138], [71, 185, 253], [169, 47, 41],
                      [174, 161, 88], [6, 158, 213], [6, 86, 78],
                      [118, 161, 78], [72, 195, 138], [180, 170, 32],
                      [197, 85, 27]])
    points = np.concatenate([coord, color], axis=1)
    points = DepthPoints(points,
                         points_dim=6,
                         attribute_dims=dict(color=[3, 4, 5]))
    input_dict = dict(points=points)

    color_mean = [100, 150, 200]
    points_color_normalizer = NormalizePointsColor(color_mean=color_mean)
    input_dict = points_color_normalizer(input_dict)
    points = input_dict['points']
    repr_str = repr(points_color_normalizer)
    expected_repr_str = f'NormalizePointsColor(color_mean={color_mean})'

    assert repr_str == expected_repr_str
    assert np.allclose(points.coord, coord)
    assert np.allclose(points.color,
                       (color - np.array(color_mean)[None, :]) / 255.0)
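The final assertion spells the normalisation formula out directly; as a standalone function (same math, hypothetical helper name) it is simply:

def normalize_color(color, color_mean):
    # Subtract the per-channel mean, then rescale from [0, 255] to [0, 1].
    return (color - np.array(color_mean)[None, :]) / 255.0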
Example #6
def test_random_jitter_points():
    # jitter_std should be a number or seq of numbers
    with pytest.raises(AssertionError):
        random_jitter_points = RandomJitterPoints(jitter_std='0.0')

    # clip_range should be a number or seq of numbers
    with pytest.raises(AssertionError):
        random_jitter_points = RandomJitterPoints(clip_range='0.0')

    random_jitter_points = RandomJitterPoints(jitter_std=0.01, clip_range=0.05)
    np.random.seed(0)
    points = np.fromfile('tests/data/scannet/points/scene0000_00.bin',
                         np.float32).reshape(-1, 6)[:10]
    depth_points = DepthPoints(points.copy(),
                               points_dim=6,
                               attribute_dims=dict(color=[3, 4, 5]))

    input_dict = dict(points=depth_points.clone())

    input_dict = random_jitter_points(input_dict)
    trans_depth_points = input_dict['points']

    jitter_noise = np.array([[0.01764052, 0.00400157, 0.00978738],
                             [0.02240893, 0.01867558, -0.00977278],
                             [0.00950088, -0.00151357, -0.00103219],
                             [0.00410598, 0.00144044, 0.01454273],
                             [0.00761038, 0.00121675, 0.00443863],
                             [0.00333674, 0.01494079, -0.00205158],
                             [0.00313068, -0.00854096, -0.0255299],
                             [0.00653619, 0.00864436, -0.00742165],
                             [0.02269755, -0.01454366, 0.00045759],
                             [-0.00187184, 0.01532779, 0.01469359]])

    trans_depth_points = trans_depth_points.tensor.numpy()
    expected_depth_points = points.copy()
    expected_depth_points[:, :3] += jitter_noise
    assert np.allclose(trans_depth_points, expected_depth_points)

    repr_str = repr(random_jitter_points)
    jitter_std = [0.01, 0.01, 0.01]
    clip_range = [-0.05, 0.05]
    expected_repr_str = f'RandomJitterPoints(jitter_std={jitter_std},' \
                        f' clip_range={clip_range})'
    assert repr_str == expected_repr_str

    # test clipping very large noise
    random_jitter_points = RandomJitterPoints(jitter_std=1.0, clip_range=0.05)
    input_dict = dict(points=depth_points.clone())

    input_dict = random_jitter_points(input_dict)
    trans_depth_points = input_dict['points']
    assert (trans_depth_points.tensor - depth_points.tensor).max().item() <= \
        0.05 + 1e-6
    assert (trans_depth_points.tensor - depth_points.tensor).min().item() >= \
        -0.05 - 1e-6
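The jitter_noise table and the clipping checks together fix the noise model: Gaussian noise with std jitter_std, clipped to [-clip_range, clip_range], and added to xyz only. A sketch (inferred from the test, not the library source):

def jitter_xyz(xyz, jitter_std=0.01, clip_range=0.05):
    noise = np.clip(jitter_std * np.random.randn(*xyz.shape),
                    -clip_range, clip_range)
    return xyz + noise

With np.random.seed(0) this reproduces the jitter_noise array above (e.g. 0.01 * 1.764052... = 0.01764052).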
Example #7
def test_multi_scale_flip_aug_3D():
    np.random.seed(0)
    transforms = [{
        'type': 'GlobalRotScaleTrans',
        'rot_range': [-0.1, 0.1],
        'scale_ratio_range': [0.9, 1.1],
        'translation_std': [0, 0, 0]
    }, {
        'type': 'RandomFlip3D',
        'sync_2d': False,
        'flip_ratio_bev_horizontal': 0.5
    }, {
        'type': 'PointSample',
        'num_points': 5
    }, {
        'type': 'DefaultFormatBundle3D',
        'class_names': ('bed', 'table', 'sofa', 'chair', 'toilet', 'desk',
                        'dresser', 'night_stand', 'bookshelf', 'bathtub'),
        'with_label': False
    }, {
        'type': 'Collect3D',
        'keys': ['points']
    }]
    img_scale = (1333, 800)
    pts_scale_ratio = 1
    multi_scale_flip_aug_3D = MultiScaleFlipAug3D(transforms, img_scale,
                                                  pts_scale_ratio)
    pts_file_name = 'tests/data/sunrgbd/points/000001.bin'
    sample_idx = 4
    file_name = 'tests/data/sunrgbd/points/000001.bin'
    bbox3d_fields = []
    points = np.array([[0.20397437, 1.4267826, -1.0503972, 0.16195858],
                       [-2.2095256, 3.3159535, -0.7706928, 0.4416629],
                       [1.5090443, 3.2764456, -1.1913797, 0.02097607],
                       [-1.373904, 3.8711405, 0.8524302, 2.064786],
                       [-1.8139812, 3.538856, -1.0056694, 0.20668638]])
    points = DepthPoints(points, points_dim=4, attribute_dims=dict(height=3))
    results = dict(points=points,
                   pts_file_name=pts_file_name,
                   sample_idx=sample_idx,
                   file_name=file_name,
                   bbox3d_fields=bbox3d_fields)
    results = multi_scale_flip_aug_3D(results)
    expected_points = torch.tensor(
        [[-2.2418, 3.2942, -0.7707, 0.4417], [-1.4116, 3.8575, 0.8524, 2.0648],
         [-1.8484, 3.5210, -1.0057, 0.2067], [0.1900, 1.4287, -1.0504, 0.1620],
         [1.4770, 3.2910, -1.1914, 0.0210]],
        dtype=torch.float32)

    assert torch.allclose(results['points'][0]._data,
                          expected_points,
                          atol=1e-4)
Example #8
def test_indoor_seg_sample():
    # test the train time behavior of IndoorPatchPointSample
    np.random.seed(0)
    scannet_patch_sample_points = IndoorPatchPointSample(5, 1.5, 1.0, 20, True)
    scannet_seg_class_mapping = \
        PointSegClassMapping((1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16,
                              24, 28, 33, 34, 36, 39))
    scannet_results = dict()
    scannet_points = np.fromfile(
        './tests/data/scannet/points/scene0000_00.bin',
        dtype=np.float32).reshape((-1, 6))
    scannet_results['points'] = DepthPoints(
        scannet_points, points_dim=6, attribute_dims=dict(color=[3, 4, 5]))

    scannet_pts_semantic_mask = np.fromfile(
        './tests/data/scannet/semantic_mask/scene0000_00.bin', dtype=np.int64)
    scannet_results['pts_semantic_mask'] = scannet_pts_semantic_mask

    scannet_results = scannet_seg_class_mapping(scannet_results)
    scannet_results = scannet_patch_sample_points(scannet_results)
    scannet_points_result = scannet_results['points']
    scannet_semantic_labels_result = scannet_results['pts_semantic_mask']

    # manually constructed sampled points
    scannet_choices = np.array([87, 34, 58, 9, 18])
    scannet_center = np.array([-2.1772466, -3.4789145, 1.242711])
    scannet_center[2] = 0.0
    scannet_coord_max = np.amax(scannet_points[:, :3], axis=0)
    scannet_input_points = np.concatenate([
        scannet_points[scannet_choices, :3] - scannet_center,
        scannet_points[scannet_choices, 3:],
        scannet_points[scannet_choices, :3] / scannet_coord_max
    ], axis=1)

    assert scannet_points_result.points_dim == 9
    assert scannet_points_result.attribute_dims == dict(
        color=[3, 4, 5], normalized_coord=[6, 7, 8])
    scannet_points_result = scannet_points_result.tensor.numpy()
    assert np.allclose(scannet_input_points, scannet_points_result, atol=1e-6)
    assert np.all(
        np.array([13, 13, 12, 2, 0]) == scannet_semantic_labels_result)

    repr_str = repr(scannet_patch_sample_points)
    expected_repr_str = 'IndoorPatchPointSample(num_points=5, ' \
                        'block_size=1.5, ' \
                        'sample_rate=1.0, ' \
                        'ignore_index=20, ' \
                        'use_normalized_coord=True, ' \
                        'num_try=10)'
    assert repr_str == expected_repr_str
Example #9
def test_point_shuffle():
    np.random.seed(0)
    torch.manual_seed(0)
    point_shuffle = PointShuffle()

    points = np.fromfile('tests/data/scannet/points/scene0000_00.bin',
                         np.float32).reshape(-1, 6)
    ins_mask = np.fromfile('tests/data/scannet/instance_mask/scene0000_00.bin',
                           np.int64)
    sem_mask = np.fromfile('tests/data/scannet/semantic_mask/scene0000_00.bin',
                           np.int64)

    points = DepthPoints(points.copy(),
                         points_dim=6,
                         attribute_dims=dict(color=[3, 4, 5]))
    input_dict = dict(points=points.clone(),
                      pts_instance_mask=ins_mask.copy(),
                      pts_semantic_mask=sem_mask.copy())
    results = point_shuffle(input_dict)

    shuffle_pts = results['points']
    shuffle_ins_mask = results['pts_instance_mask']
    shuffle_sem_mask = results['pts_semantic_mask']

    shuffle_idx = np.array([
        44, 19, 93, 90, 71, 69, 37, 95, 53, 91, 81, 42, 80, 85, 74, 56, 76, 63,
        82, 40, 26, 92, 57, 10, 16, 66, 89, 41, 97, 8, 31, 24, 35, 30, 65, 7,
        98, 23, 20, 29, 78, 61, 94, 15, 4, 52, 59, 5, 54, 46, 3, 28, 2, 70, 6,
        60, 49, 68, 55, 72, 79, 77, 45, 1, 32, 34, 11, 0, 22, 12, 87, 50, 25,
        47, 36, 96, 9, 83, 62, 84, 18, 17, 75, 67, 13, 48, 39, 21, 64, 88, 38,
        27, 14, 73, 33, 58, 86, 43, 99, 51
    ])
    expected_pts = points.tensor.numpy()[shuffle_idx]
    expected_ins_mask = ins_mask[shuffle_idx]
    expected_sem_mask = sem_mask[shuffle_idx]

    assert np.allclose(shuffle_pts.tensor.numpy(), expected_pts)
    assert np.all(shuffle_ins_mask == expected_ins_mask)
    assert np.all(shuffle_sem_mask == expected_sem_mask)

    repr_str = repr(point_shuffle)
    expected_repr_str = 'PointShuffle'
    assert repr_str == expected_repr_str
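shuffle_idx is just a seeded permutation applied uniformly to the points and both masks. A sketch (the test seeds both NumPy and Torch, so which RNG produces the permutation is version-dependent):

def shuffle(points, *masks):
    # One shared permutation reorders the points and every mask alike.
    idx = torch.randperm(len(points)).numpy()
    return (points[idx], ) + tuple(m[idx] for m in masks)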
Example #10
def test_global_rot_scale_trans():
    angle = 0.78539816
    scale = [0.95, 1.05]
    trans_std = 1.0

    # rot_range should be a number or seq of numbers
    with pytest.raises(AssertionError):
        global_rot_scale_trans = GlobalRotScaleTrans(rot_range='0.0')

    # scale_ratio_range should be seq of numbers
    with pytest.raises(AssertionError):
        global_rot_scale_trans = GlobalRotScaleTrans(scale_ratio_range=1.0)

    # translation_std should be a positive number or seq of positive numbers
    with pytest.raises(AssertionError):
        global_rot_scale_trans = GlobalRotScaleTrans(translation_std='0.0')
    with pytest.raises(AssertionError):
        global_rot_scale_trans = GlobalRotScaleTrans(translation_std=-1.0)

    global_rot_scale_trans = GlobalRotScaleTrans(rot_range=angle,
                                                 scale_ratio_range=scale,
                                                 translation_std=trans_std,
                                                 shift_height=False)

    np.random.seed(0)
    points = np.fromfile('tests/data/scannet/points/scene0000_00.bin',
                         np.float32).reshape(-1, 6)
    annos = mmcv.load('tests/data/scannet/scannet_infos.pkl')
    info = annos[0]
    gt_bboxes_3d = info['annos']['gt_boxes_upright_depth']

    depth_points = DepthPoints(points.copy(),
                               points_dim=6,
                               attribute_dims=dict(color=[3, 4, 5]))
    gt_bboxes_3d = DepthInstance3DBoxes(gt_bboxes_3d.copy(),
                                        box_dim=gt_bboxes_3d.shape[-1],
                                        with_yaw=False,
                                        origin=(0.5, 0.5, 0.5))

    input_dict = dict(points=depth_points.clone(),
                      bbox3d_fields=['gt_bboxes_3d'],
                      gt_bboxes_3d=gt_bboxes_3d.clone())

    input_dict = global_rot_scale_trans(input_dict)
    trans_depth_points = input_dict['points']
    trans_bboxes_3d = input_dict['gt_bboxes_3d']

    noise_rot = 0.07667607233534723
    scale_factor = 1.021518936637242
    trans_factor = np.array([0.97873798, 2.2408932, 1.86755799])

    true_depth_points = depth_points.clone()
    true_bboxes_3d = gt_bboxes_3d.clone()
    true_depth_points, noise_rot_mat_T = true_bboxes_3d.rotate(
        noise_rot, true_depth_points)
    true_bboxes_3d.scale(scale_factor)
    true_bboxes_3d.translate(trans_factor)
    true_depth_points.scale(scale_factor)
    true_depth_points.translate(trans_factor)

    assert torch.allclose(trans_depth_points.tensor,
                          true_depth_points.tensor,
                          atol=1e-6)
    assert torch.allclose(trans_bboxes_3d.tensor,
                          true_bboxes_3d.tensor,
                          atol=1e-6)
    assert input_dict['pcd_scale_factor'] == scale_factor
    assert torch.allclose(input_dict['pcd_rotation'],
                          noise_rot_mat_T,
                          atol=1e-6)
    assert np.allclose(input_dict['pcd_trans'], trans_factor)

    repr_str = repr(global_rot_scale_trans)
    expected_repr_str = f'GlobalRotScaleTrans(rot_range={[-angle, angle]},' \
                        f' scale_ratio_range={scale},' \
                        f' translation_std={[trans_std for _ in range(3)]},' \
                        f' shift_height=False)'
    assert repr_str == expected_repr_str

    # points with shift_height but no bbox
    global_rot_scale_trans = GlobalRotScaleTrans(rot_range=angle,
                                                 scale_ratio_range=scale,
                                                 translation_std=trans_std,
                                                 shift_height=True)

    # points should have height attribute when shift_height=True
    with pytest.raises(AssertionError):
        input_dict = global_rot_scale_trans(input_dict)

    np.random.seed(0)
    shift_height = points[:, 2:3] * 0.99
    points = np.concatenate([points, shift_height], axis=1)
    depth_points = DepthPoints(points.copy(),
                               points_dim=7,
                               attribute_dims=dict(color=[3, 4, 5], height=6))

    input_dict = dict(points=depth_points.clone(), bbox3d_fields=[])

    input_dict = global_rot_scale_trans(input_dict)
    trans_depth_points = input_dict['points']
    true_shift_height = shift_height * scale_factor

    assert np.allclose(
        trans_depth_points.tensor.numpy(),
        np.concatenate([true_depth_points.tensor.numpy(), true_shift_height],
                       axis=1),
        atol=1e-6)
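The true_* construction above fixes the order of operations: rotate, then scale, then translate. On raw coordinates the composed transform is, as a sketch (the rotation matrix is right-multiplied in transposed form, matching the pcd_rotation value checked by the test):

def rot_scale_trans(xyz, rot_mat_T, scale_factor, trans_vector):
    # Rotate, then scale, then translate - the order used to build
    # true_depth_points / true_bboxes_3d above.
    return (xyz @ rot_mat_T) * scale_factor + trans_vector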
Example #11
def test_indoor_seg_sample():
    # test the train time behavior of IndoorPatchPointSample
    np.random.seed(0)
    scannet_patch_sample_points = IndoorPatchPointSample(
        5, 1.5, ignore_index=20, use_normalized_coord=True)
    scannet_seg_class_mapping = \
        PointSegClassMapping((1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16,
                              24, 28, 33, 34, 36, 39), 40)
    scannet_results = dict()
    scannet_points = np.fromfile(
        './tests/data/scannet/points/scene0000_00.bin',
        dtype=np.float32).reshape((-1, 6))
    scannet_results['points'] = DepthPoints(
        scannet_points, points_dim=6, attribute_dims=dict(color=[3, 4, 5]))

    scannet_pts_semantic_mask = np.fromfile(
        './tests/data/scannet/semantic_mask/scene0000_00.bin', dtype=np.int64)
    scannet_results['pts_semantic_mask'] = scannet_pts_semantic_mask

    scannet_results = scannet_seg_class_mapping(scannet_results)
    scannet_results = scannet_patch_sample_points(scannet_results)
    scannet_points_result = scannet_results['points']
    scannet_semantic_labels_result = scannet_results['pts_semantic_mask']

    # manually constructed sampled points
    scannet_choices = np.array([87, 34, 58, 9, 18])
    scannet_center = np.array([-2.1772466, -3.4789145, 1.242711])
    scannet_center[2] = 0.0
    scannet_coord_max = np.amax(scannet_points[:, :3], axis=0)
    scannet_input_points = np.concatenate([
        scannet_points[scannet_choices, :3] - scannet_center,
        scannet_points[scannet_choices, 3:],
        scannet_points[scannet_choices, :3] / scannet_coord_max
    ], 1)

    assert scannet_points_result.points_dim == 9
    assert scannet_points_result.attribute_dims == dict(
        color=[3, 4, 5], normalized_coord=[6, 7, 8])
    scannet_points_result = scannet_points_result.tensor.numpy()
    assert np.allclose(scannet_input_points, scannet_points_result, atol=1e-6)
    assert np.all(
        np.array([13, 13, 12, 2, 0]) == scannet_semantic_labels_result)

    repr_str = repr(scannet_patch_sample_points)
    expected_repr_str = 'IndoorPatchPointSample(num_points=5, ' \
                        'block_size=1.5, ' \
                        'ignore_index=20, ' \
                        'use_normalized_coord=True, ' \
                        'num_try=10, ' \
                        'enlarge_size=0.2, ' \
                        'min_unique_num=None, ' \
                        'eps=0.01)'
    assert repr_str == expected_repr_str

    # when enlarge_size and min_unique_num are set
    np.random.seed(0)
    scannet_patch_sample_points = IndoorPatchPointSample(
        5,
        1.0,
        ignore_index=20,
        use_normalized_coord=False,
        num_try=1000,
        enlarge_size=None,
        min_unique_num=5)
    # this patch is within [0, 1] and has 5 unique points
    # it should be selected
    scannet_points = np.random.rand(5, 6)
    scannet_points[0, :3] = np.array([0.5, 0.5, 0.5])
    # generate points smaller than `min_unique_num` in local patches
    # they won't be sampled
    for i in range(2, 11, 2):
        scannet_points = np.concatenate(
            [scannet_points, np.random.rand(4, 6) + i], axis=0)
    scannet_results = dict(
        points=DepthPoints(scannet_points,
                           points_dim=6,
                           attribute_dims=dict(color=[3, 4, 5])),
        pts_semantic_mask=np.random.randint(0, 20,
                                            (scannet_points.shape[0], )))
    scannet_results = scannet_patch_sample_points(scannet_results)
    scannet_points_result = scannet_results['points']

    # manually constructed sampled points
    scannet_choices = np.array([2, 4, 3, 1, 0])
    scannet_center = np.array([0.56804454, 0.92559665, 0.07103606])
    scannet_center[2] = 0.0
    scannet_input_points = np.concatenate([
        scannet_points[scannet_choices, :3] - scannet_center,
        scannet_points[scannet_choices, 3:],
    ], 1)

    assert scannet_points_result.points_dim == 6
    assert scannet_points_result.attribute_dims == dict(color=[3, 4, 5])
    scannet_points_result = scannet_points_result.tensor.numpy()
    assert np.allclose(scannet_input_points, scannet_points_result, atol=1e-6)

    # test on S3DIS dataset
    np.random.seed(0)
    s3dis_patch_sample_points = IndoorPatchPointSample(
        5, 1.0, ignore_index=None, use_normalized_coord=True)
    s3dis_results = dict()
    s3dis_points = np.fromfile('./tests/data/s3dis/points/Area_1_office_2.bin',
                               dtype=np.float32).reshape((-1, 6))
    s3dis_results['points'] = DepthPoints(s3dis_points,
                                          points_dim=6,
                                          attribute_dims=dict(color=[3, 4, 5]))

    s3dis_pts_semantic_mask = np.fromfile(
        './tests/data/s3dis/semantic_mask/Area_1_office_2.bin', dtype=np.int64)
    s3dis_results['pts_semantic_mask'] = s3dis_pts_semantic_mask

    s3dis_results = s3dis_patch_sample_points(s3dis_results)
    s3dis_points_result = s3dis_results['points']
    s3dis_semantic_labels_result = s3dis_results['pts_semantic_mask']

    # manually constructed sampled points
    s3dis_choices = np.array([87, 37, 60, 18, 31])
    s3dis_center = np.array([2.691, 2.231, 3.172])
    s3dis_center[2] = 0.0
    s3dis_coord_max = np.amax(s3dis_points[:, :3], axis=0)
    s3dis_input_points = np.concatenate([
        s3dis_points[s3dis_choices, :3] - s3dis_center,
        s3dis_points[s3dis_choices, 3:],
        s3dis_points[s3dis_choices, :3] / s3dis_coord_max
    ], 1)

    assert s3dis_points_result.points_dim == 9
    assert s3dis_points_result.attribute_dims == dict(
        color=[3, 4, 5], normalized_coord=[6, 7, 8])
    s3dis_points_result = s3dis_points_result.tensor.numpy()
    assert np.allclose(s3dis_input_points, s3dis_points_result, atol=1e-6)
    assert np.all(np.array([0, 1, 0, 8, 0]) == s3dis_semantic_labels_result)
Example #12
def test_points_conversion():
    """Test the conversion of points between different modes."""
    points_np = np.array([[
        -5.24223238e+00, 4.00209696e+01, 2.97570381e-01, 0.6666, 0.1956,
        0.4974, 0.9409
    ],
                          [
                              -2.66751588e+01, 5.59499564e+00, -9.14345860e-01,
                              0.1502, 0.3707, 0.1086, 0.6297
                          ],
                          [
                              -5.80979675e+00, 3.54092357e+01, 2.00889888e-01,
                              0.6565, 0.6248, 0.6954, 0.2538
                          ],
                          [
                              -3.13086877e+01, 1.09007628e+00, -1.94612112e-01,
                              0.2803, 0.0258, 0.4896, 0.3269
                          ]],
                         dtype=np.float32)

    # test CAM to LIDAR and DEPTH
    cam_points = CameraPoints(points_np,
                              points_dim=7,
                              attribute_dims=dict(color=[3, 4, 5], height=6))

    convert_lidar_points = cam_points.convert_to(Coord3DMode.LIDAR)
    expected_tensor = torch.tensor([[
        2.9757e-01, 5.2422e+00, -4.0021e+01, 6.6660e-01, 1.9560e-01,
        4.9740e-01, 9.4090e-01
    ],
                                    [
                                        -9.1435e-01, 2.6675e+01, -5.5950e+00,
                                        1.5020e-01, 3.7070e-01, 1.0860e-01,
                                        6.2970e-01
                                    ],
                                    [
                                        2.0089e-01, 5.8098e+00, -3.5409e+01,
                                        6.5650e-01, 6.2480e-01, 6.9540e-01,
                                        2.5380e-01
                                    ],
                                    [
                                        -1.9461e-01, 3.1309e+01, -1.0901e+00,
                                        2.8030e-01, 2.5800e-02, 4.8960e-01,
                                        3.2690e-01
                                    ]])

    lidar_point_tensor = Coord3DMode.convert_point(cam_points.tensor,
                                                   Coord3DMode.CAM,
                                                   Coord3DMode.LIDAR)
    assert torch.allclose(expected_tensor, convert_lidar_points.tensor, 1e-4)
    assert torch.allclose(lidar_point_tensor, convert_lidar_points.tensor,
                          1e-4)

    convert_depth_points = cam_points.convert_to(Coord3DMode.DEPTH)
    expected_tensor = torch.tensor([[
        -5.2422e+00, 2.9757e-01, -4.0021e+01, 6.6660e-01, 1.9560e-01,
        4.9740e-01, 9.4090e-01
    ],
                                    [
                                        -2.6675e+01, -9.1435e-01, -5.5950e+00,
                                        1.5020e-01, 3.7070e-01, 1.0860e-01,
                                        6.2970e-01
                                    ],
                                    [
                                        -5.8098e+00, 2.0089e-01, -3.5409e+01,
                                        6.5650e-01, 6.2480e-01, 6.9540e-01,
                                        2.5380e-01
                                    ],
                                    [
                                        -3.1309e+01, -1.9461e-01, -1.0901e+00,
                                        2.8030e-01, 2.5800e-02, 4.8960e-01,
                                        3.2690e-01
                                    ]])

    depth_point_tensor = Coord3DMode.convert_point(cam_points.tensor,
                                                   Coord3DMode.CAM,
                                                   Coord3DMode.DEPTH)
    assert torch.allclose(expected_tensor, convert_depth_points.tensor, 1e-4)
    assert torch.allclose(depth_point_tensor, convert_depth_points.tensor,
                          1e-4)

    # test LIDAR to CAM and DEPTH
    lidar_points = LiDARPoints(points_np,
                               points_dim=7,
                               attribute_dims=dict(color=[3, 4, 5], height=6))

    convert_cam_points = lidar_points.convert_to(Coord3DMode.CAM)
    expected_tensor = torch.tensor([[
        -4.0021e+01, -2.9757e-01, -5.2422e+00, 6.6660e-01, 1.9560e-01,
        4.9740e-01, 9.4090e-01
    ],
                                    [
                                        -5.5950e+00, 9.1435e-01, -2.6675e+01,
                                        1.5020e-01, 3.7070e-01, 1.0860e-01,
                                        6.2970e-01
                                    ],
                                    [
                                        -3.5409e+01, -2.0089e-01, -5.8098e+00,
                                        6.5650e-01, 6.2480e-01, 6.9540e-01,
                                        2.5380e-01
                                    ],
                                    [
                                        -1.0901e+00, 1.9461e-01, -3.1309e+01,
                                        2.8030e-01, 2.5800e-02, 4.8960e-01,
                                        3.2690e-01
                                    ]])

    cam_point_tensor = Coord3DMode.convert_point(lidar_points.tensor,
                                                 Coord3DMode.LIDAR,
                                                 Coord3DMode.CAM)
    assert torch.allclose(expected_tensor, convert_cam_points.tensor, 1e-4)
    assert torch.allclose(cam_point_tensor, convert_cam_points.tensor, 1e-4)

    convert_depth_points = lidar_points.convert_to(Coord3DMode.DEPTH)
    expected_tensor = torch.tensor([[
        -4.0021e+01, -5.2422e+00, 2.9757e-01, 6.6660e-01, 1.9560e-01,
        4.9740e-01, 9.4090e-01
    ],
                                    [
                                        -5.5950e+00, -2.6675e+01, -9.1435e-01,
                                        1.5020e-01, 3.7070e-01, 1.0860e-01,
                                        6.2970e-01
                                    ],
                                    [
                                        -3.5409e+01, -5.8098e+00, 2.0089e-01,
                                        6.5650e-01, 6.2480e-01, 6.9540e-01,
                                        2.5380e-01
                                    ],
                                    [
                                        -1.0901e+00, -3.1309e+01, -1.9461e-01,
                                        2.8030e-01, 2.5800e-02, 4.8960e-01,
                                        3.2690e-01
                                    ]])

    depth_point_tensor = Coord3DMode.convert_point(lidar_points.tensor,
                                                   Coord3DMode.LIDAR,
                                                   Coord3DMode.DEPTH)
    assert torch.allclose(expected_tensor, convert_depth_points.tensor, 1e-4)
    assert torch.allclose(depth_point_tensor, convert_depth_points.tensor,
                          1e-4)

    # test DEPTH to CAM and LIDAR
    depth_points = DepthPoints(points_np,
                               points_dim=7,
                               attribute_dims=dict(color=[3, 4, 5], height=6))

    convert_cam_points = depth_points.convert_to(Coord3DMode.CAM)
    expected_tensor = torch.tensor([[
        -5.2422e+00, -2.9757e-01, 4.0021e+01, 6.6660e-01, 1.9560e-01,
        4.9740e-01, 9.4090e-01
    ],
                                    [
                                        -2.6675e+01, 9.1435e-01, 5.5950e+00,
                                        1.5020e-01, 3.7070e-01, 1.0860e-01,
                                        6.2970e-01
                                    ],
                                    [
                                        -5.8098e+00, -2.0089e-01, 3.5409e+01,
                                        6.5650e-01, 6.2480e-01, 6.9540e-01,
                                        2.5380e-01
                                    ],
                                    [
                                        -3.1309e+01, 1.9461e-01, 1.0901e+00,
                                        2.8030e-01, 2.5800e-02, 4.8960e-01,
                                        3.2690e-01
                                    ]])

    cam_point_tensor = Coord3DMode.convert_point(depth_points.tensor,
                                                 Coord3DMode.DEPTH,
                                                 Coord3DMode.CAM)
    assert torch.allclose(expected_tensor, convert_cam_points.tensor, 1e-4)
    assert torch.allclose(cam_point_tensor, convert_cam_points.tensor, 1e-4)

    rt_mat_provided = torch.tensor([[0.99789, -0.012698, -0.063678],
                                    [-0.012698, 0.92359, -0.38316],
                                    [0.063678, 0.38316, 0.92148]])

    depth_points_new = torch.cat([
        depth_points.tensor[:, :3] @ rt_mat_provided.t(),
        depth_points.tensor[:, 3:]
    ], dim=1)
    mat = rt_mat_provided.new_tensor([[1, 0, 0], [0, 0, -1], [0, 1, 0]])
    rt_mat_provided = mat @ rt_mat_provided.transpose(1, 0)
    cam_point_tensor_new = Coord3DMode.convert_point(depth_points_new,
                                                     Coord3DMode.DEPTH,
                                                     Coord3DMode.CAM,
                                                     rt_mat=rt_mat_provided)
    assert torch.allclose(expected_tensor, cam_point_tensor_new, 1e-4)

    convert_lidar_points = depth_points.convert_to(Coord3DMode.LIDAR)
    expected_tensor = torch.tensor([[
        4.0021e+01, 5.2422e+00, 2.9757e-01, 6.6660e-01, 1.9560e-01, 4.9740e-01,
        9.4090e-01
    ],
                                    [
                                        5.5950e+00, 2.6675e+01, -9.1435e-01,
                                        1.5020e-01, 3.7070e-01, 1.0860e-01,
                                        6.2970e-01
                                    ],
                                    [
                                        3.5409e+01, 5.8098e+00, 2.0089e-01,
                                        6.5650e-01, 6.2480e-01, 6.9540e-01,
                                        2.5380e-01
                                    ],
                                    [
                                        1.0901e+00, 3.1309e+01, -1.9461e-01,
                                        2.8030e-01, 2.5800e-02, 4.8960e-01,
                                        3.2690e-01
                                    ]])

    lidar_point_tensor = Coord3DMode.convert_point(depth_points.tensor,
                                                   Coord3DMode.DEPTH,
                                                   Coord3DMode.LIDAR)
    assert torch.allclose(expected_tensor, convert_lidar_points.tensor,
                          1e-4)
    assert torch.allclose(lidar_point_tensor, convert_lidar_points.tensor,
                          1e-4)
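Reading the expected tensors off against points_np gives the axis permutations the three frames imply (derived from the test data above, not from documentation):

# (x, y, z) in the source frame maps to, in the target frame:
#   CAM   -> LIDAR: ( z, -x, -y)      LIDAR -> CAM:   (-y, -z,  x)
#   CAM   -> DEPTH: ( x,  z, -y)      LIDAR -> DEPTH: (-y,  x,  z)
#   DEPTH -> CAM:   ( x, -z,  y)      DEPTH -> LIDAR: ( y, -x,  z)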
Example #13
def test_depth_points():
    # test empty initialization
    empty_points = []
    points = DepthPoints(empty_points)
    assert points.tensor.shape[0] == 0
    assert points.tensor.shape[1] == 3

    # Test init with origin
    points_np = np.array([[-5.24223238e+00, 4.00209696e+01, 2.97570381e-01],
                          [-2.66751588e+01, 5.59499564e+00, -9.14345860e-01],
                          [-5.80979675e+00, 3.54092357e+01, 2.00889888e-01],
                          [-3.13086877e+01, 1.09007628e+00, -1.94612112e-01]],
                         dtype=np.float32)
    depth_points = DepthPoints(points_np, points_dim=3)
    assert depth_points.tensor.shape[0] == 4

    # Test init with color and height
    points_np = np.array([[
        -5.24223238e+00, 4.00209696e+01, 2.97570381e-01, 0.6666, 0.1956,
        0.4974, 0.9409
    ],
                          [
                              -2.66751588e+01, 5.59499564e+00, -9.14345860e-01,
                              0.1502, 0.3707, 0.1086, 0.6297
                          ],
                          [
                              -5.80979675e+00, 3.54092357e+01, 2.00889888e-01,
                              0.6565, 0.6248, 0.6954, 0.2538
                          ],
                          [
                              -3.13086877e+01, 1.09007628e+00, -1.94612112e-01,
                              0.2803, 0.0258, 0.4896, 0.3269
                          ]],
                         dtype=np.float32)
    depth_points = DepthPoints(
        points_np,
        points_dim=7,
        attribute_dims=dict(color=[3, 4, 5], height=6))
    expected_tensor = torch.tensor([[
        -5.24223238e+00, 4.00209696e+01, 2.97570381e-01, 0.6666, 0.1956,
        0.4974, 0.9409
    ],
                                    [
                                        -2.66751588e+01, 5.59499564e+00,
                                        -9.14345860e-01, 0.1502, 0.3707,
                                        0.1086, 0.6297
                                    ],
                                    [
                                        -5.80979675e+00, 3.54092357e+01,
                                        2.00889888e-01, 0.6565, 0.6248, 0.6954,
                                        0.2538
                                    ],
                                    [
                                        -3.13086877e+01, 1.09007628e+00,
                                        -1.94612112e-01, 0.2803, 0.0258,
                                        0.4896, 0.3269
                                    ]])

    assert torch.allclose(expected_tensor, depth_points.tensor)
    assert torch.allclose(expected_tensor[:, :3], depth_points.coord)
    assert torch.allclose(expected_tensor[:, 3:6], depth_points.color)
    assert torch.allclose(expected_tensor[:, 6], depth_points.height)

    # test points clone
    new_depth_points = depth_points.clone()
    assert torch.allclose(new_depth_points.tensor, depth_points.tensor)

    # test points shuffle
    new_depth_points.shuffle()
    assert new_depth_points.tensor.shape == torch.Size([4, 7])

    # test points rotation
    rot_mat = torch.tensor([[0.93629336, -0.27509585, 0.21835066],
                            [0.28962948, 0.95642509, -0.03695701],
                            [-0.19866933, 0.0978434, 0.97517033]])
    depth_points.rotate(rot_mat)
    expected_tensor = torch.tensor([[
        6.6239e+00, 3.9748e+01, -2.3335e+00, 6.6660e-01, 1.9560e-01,
        4.9740e-01, 9.4090e-01
    ],
                                    [
                                        -2.3174e+01, 1.2600e+01, -6.9230e+00,
                                        1.5020e-01, 3.7070e-01, 1.0860e-01,
                                        6.2970e-01
                                    ],
                                    [
                                        4.7760e+00, 3.5484e+01, -2.3813e+00,
                                        6.5650e-01, 6.2480e-01, 6.9540e-01,
                                        2.5380e-01
                                    ],
                                    [
                                        -2.8960e+01, 9.6364e+00, -7.0663e+00,
                                        2.8030e-01, 2.5800e-02, 4.8960e-01,
                                        3.2690e-01
                                    ]])
    assert torch.allclose(expected_tensor, depth_points.tensor, 1e-3)

    new_depth_points = depth_points.clone()
    new_depth_points.rotate(0.1, axis=2)
    expected_tensor = torch.tensor([[
        2.6226e+00, 4.0211e+01, -2.3335e+00, 6.6660e-01, 1.9560e-01,
        4.9740e-01, 9.4090e-01
    ],
                                    [
                                        -2.4316e+01, 1.0224e+01, -6.9230e+00,
                                        1.5020e-01, 3.7070e-01, 1.0860e-01,
                                        6.2970e-01
                                    ],
                                    [
                                        1.2096e+00, 3.5784e+01, -2.3813e+00,
                                        6.5650e-01, 6.2480e-01, 6.9540e-01,
                                        2.5380e-01
                                    ],
                                    [
                                        -2.9777e+01, 6.6971e+00, -7.0663e+00,
                                        2.8030e-01, 2.5800e-02, 4.8960e-01,
                                        3.2690e-01
                                    ]])
    assert torch.allclose(expected_tensor, new_depth_points.tensor, 1e-3)

    # test points translation
    translation_vector = torch.tensor([0.93629336, -0.27509585, 0.21835066])
    depth_points.translate(translation_vector)
    expected_tensor = torch.tensor([[
        7.5602e+00, 3.9473e+01, -2.1152e+00, 6.6660e-01, 1.9560e-01,
        4.9740e-01, 9.4090e-01
    ],
                                    [
                                        -2.2237e+01, 1.2325e+01, -6.7046e+00,
                                        1.5020e-01, 3.7070e-01, 1.0860e-01,
                                        6.2970e-01
                                    ],
                                    [
                                        5.7123e+00, 3.5209e+01, -2.1629e+00,
                                        6.5650e-01, 6.2480e-01, 6.9540e-01,
                                        2.5380e-01
                                    ],
                                    [
                                        -2.8023e+01, 9.3613e+00, -6.8480e+00,
                                        2.8030e-01, 2.5800e-02, 4.8960e-01,
                                        3.2690e-01
                                    ]])
    assert torch.allclose(expected_tensor, depth_points.tensor, 1e-4)

    # test points filter
    point_range = [-10, -40, -10, 10, 40, 10]
    in_range_flags = depth_points.in_range_3d(point_range)
    expected_flags = torch.tensor([True, False, True, False])
    assert torch.all(in_range_flags == expected_flags)

    # test points scale
    depth_points.scale(1.2)
    expected_tensor = torch.tensor([[
        9.0722e+00, 4.7368e+01, -2.5382e+00, 6.6660e-01, 1.9560e-01,
        4.9740e-01, 9.4090e-01
    ],
                                    [
                                        -2.6685e+01, 1.4790e+01, -8.0455e+00,
                                        1.5020e-01, 3.7070e-01, 1.0860e-01,
                                        6.2970e-01
                                    ],
                                    [
                                        6.8547e+00, 4.2251e+01, -2.5955e+00,
                                        6.5650e-01, 6.2480e-01, 6.9540e-01,
                                        2.5380e-01
                                    ],
                                    [
                                        -3.3628e+01, 1.1234e+01, -8.2176e+00,
                                        2.8030e-01, 2.5800e-02, 4.8960e-01,
                                        3.2690e-01
                                    ]])
    assert torch.allclose(expected_tensor, depth_points.tensor, 1e-3)

    # test get_item
    expected_tensor = torch.tensor(
        [[-26.6848, 14.7898, -8.0455, 0.1502, 0.3707, 0.1086, 0.6297]])
    assert torch.allclose(expected_tensor, depth_points[1].tensor, 1e-4)
    expected_tensor = torch.tensor(
        [[-26.6848, 14.7898, -8.0455, 0.1502, 0.3707, 0.1086, 0.6297],
         [6.8547, 42.2509, -2.5955, 0.6565, 0.6248, 0.6954, 0.2538]])
    assert torch.allclose(expected_tensor, depth_points[1:3].tensor, 1e-4)
    mask = torch.tensor([True, False, True, False])
    expected_tensor = torch.tensor(
        [[9.0722, 47.3678, -2.5382, 0.6666, 0.1956, 0.4974, 0.9409],
         [6.8547, 42.2509, -2.5955, 0.6565, 0.6248, 0.6954, 0.2538]])
    assert torch.allclose(expected_tensor, depth_points[mask].tensor, 1e-4)

    # test length
    assert len(depth_points) == 4

    # test repr
    expected_repr = 'DepthPoints(\n    '\
        'tensor([[ 9.0722e+00,  4.7368e+01, -2.5382e+00,  '\
        '6.6660e-01,  1.9560e-01,\n          4.9740e-01,  '\
        '9.4090e-01],\n        '\
        '[-2.6685e+01,  1.4790e+01, -8.0455e+00,  1.5020e-01,  '\
        '3.7070e-01,\n          '\
        '1.0860e-01,  6.2970e-01],\n        '\
        '[ 6.8547e+00,  4.2251e+01, -2.5955e+00,  6.5650e-01,  '\
        '6.2480e-01,\n          '\
        '6.9540e-01,  2.5380e-01],\n        '\
        '[-3.3628e+01,  1.1234e+01, -8.2176e+00,  2.8030e-01,  '\
        '2.5800e-02,\n          '\
        '4.8960e-01,  3.2690e-01]]))'
    assert expected_repr == str(depth_points)

    # test concatenate
    depth_points_clone = depth_points.clone()
    cat_points = DepthPoints.cat([depth_points, depth_points_clone])
    assert torch.allclose(cat_points.tensor[:len(depth_points)],
                          depth_points.tensor)

    # test iteration
    for i, point in enumerate(depth_points):
        assert torch.allclose(point, depth_points.tensor[i])

    # test new_point
    new_points = depth_points.new_point([[1, 2, 3, 4, 5, 6, 7]])
    assert torch.allclose(
        new_points.tensor,
        torch.tensor([[1, 2, 3, 4, 5, 6, 7]], dtype=depth_points.tensor.dtype))

    # test in_range_bev
    point_bev_range = [-30, -40, 30, 40]
    in_range_flags = depth_points.in_range_bev(point_bev_range)
    expected_flags = torch.tensor([False, True, False, False])
    assert torch.all(in_range_flags == expected_flags)

    # test flip
    depth_points.flip(bev_direction='horizontal')
    expected_tensor = torch.tensor([[
        -9.0722e+00, 4.7368e+01, -2.5382e+00, 6.6660e-01, 1.9560e-01,
        4.9740e-01, 9.4090e-01
    ],
                                    [
                                        2.6685e+01, 1.4790e+01, -8.0455e+00,
                                        1.5020e-01, 3.7070e-01, 1.0860e-01,
                                        6.2970e-01
                                    ],
                                    [
                                        -6.8547e+00, 4.2251e+01, -2.5955e+00,
                                        6.5650e-01, 6.2480e-01, 6.9540e-01,
                                        2.5380e-01
                                    ],
                                    [
                                        3.3628e+01, 1.1234e+01, -8.2176e+00,
                                        2.8030e-01, 2.5800e-02, 4.8960e-01,
                                        3.2690e-01
                                    ]])
    assert torch.allclose(expected_tensor, depth_points.tensor, 1e-4)

    depth_points.flip(bev_direction='vertical')
    expected_tensor = torch.tensor([[
        -9.0722e+00, -4.7368e+01, -2.5382e+00, 6.6660e-01, 1.9560e-01,
        4.9740e-01, 9.4090e-01
    ],
                                    [
                                        2.6685e+01, -1.4790e+01, -8.0455e+00,
                                        1.5020e-01, 3.7070e-01, 1.0860e-01,
                                        6.2970e-01
                                    ],
                                    [
                                        -6.8547e+00, -4.2251e+01, -2.5955e+00,
                                        6.5650e-01, 6.2480e-01, 6.9540e-01,
                                        2.5380e-01
                                    ],
                                    [
                                        3.3628e+01, -1.1234e+01, -8.2176e+00,
                                        2.8030e-01, 2.5800e-02, 4.8960e-01,
                                        3.2690e-01
                                    ]])
    assert torch.allclose(expected_tensor, depth_points.tensor, 1e-4)
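Note on the two flips at the end: the expected tensors show that, for DepthPoints, bev_direction='horizontal' negates x and bev_direction='vertical' negates y, leaving all other channels untouched.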