import mmcv
import numpy as np
import pytest
import torch

from mmdet3d.core.bbox import DepthInstance3DBoxes
from mmdet3d.core.points import DepthPoints
from mmdet3d.datasets.pipelines import (GlobalRotScaleTrans,
                                        IndoorPatchPointSample,
                                        PointSegClassMapping)


def test_indoor_seg_sample():
    # test the train-time behavior of IndoorPatchPointSample
    np.random.seed(0)
    scannet_patch_sample_points = IndoorPatchPointSample(
        5, 1.5, ignore_index=20, use_normalized_coord=True)
    scannet_seg_class_mapping = PointSegClassMapping(
        (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36,
         39), 40)
    scannet_results = dict()
    scannet_points = np.fromfile(
        './tests/data/scannet/points/scene0000_00.bin',
        dtype=np.float32).reshape((-1, 6))
    scannet_results['points'] = DepthPoints(
        scannet_points, points_dim=6, attribute_dims=dict(color=[3, 4, 5]))

    scannet_pts_semantic_mask = np.fromfile(
        './tests/data/scannet/semantic_mask/scene0000_00.bin',
        dtype=np.int64)  # was `np.long`, which was removed in NumPy 1.24
    scannet_results['pts_semantic_mask'] = scannet_pts_semantic_mask

    scannet_results = scannet_seg_class_mapping(scannet_results)
    scannet_results = scannet_patch_sample_points(scannet_results)
    scannet_points_result = scannet_results['points']
    scannet_semantic_labels_result = scannet_results['pts_semantic_mask']

    # manually constructed sampled points
    scannet_choices = np.array([87, 34, 58, 9, 18])
    scannet_center = np.array([-2.1772466, -3.4789145, 1.242711])
    scannet_center[2] = 0.0
    scannet_coord_max = np.amax(scannet_points[:, :3], axis=0)
    scannet_input_points = np.concatenate([
        scannet_points[scannet_choices, :3] - scannet_center,
        scannet_points[scannet_choices, 3:],
        scannet_points[scannet_choices, :3] / scannet_coord_max
    ], 1)

    assert scannet_points_result.points_dim == 9
    assert scannet_points_result.attribute_dims == dict(
        color=[3, 4, 5], normalized_coord=[6, 7, 8])
    scannet_points_result = scannet_points_result.tensor.numpy()
    assert np.allclose(scannet_input_points, scannet_points_result, atol=1e-6)
    assert np.all(
        np.array([13, 13, 12, 2, 0]) == scannet_semantic_labels_result)

    repr_str = repr(scannet_patch_sample_points)
    expected_repr_str = 'IndoorPatchPointSample(num_points=5, ' \
                        'block_size=1.5, ' \
                        'ignore_index=20, ' \
                        'use_normalized_coord=True, ' \
                        'num_try=10, ' \
                        'enlarge_size=0.2, ' \
                        'min_unique_num=None)'
    assert repr_str == expected_repr_str

    # when enlarge_size and min_unique_num are set
    np.random.seed(0)
    scannet_patch_sample_points = IndoorPatchPointSample(
        5,
        1.0,
        ignore_index=20,
        use_normalized_coord=False,
        num_try=1000,
        enlarge_size=None,
        min_unique_num=5)
    # this patch lies within [0, 1] and has 5 unique points,
    # so it should be selected
    scannet_points = np.random.rand(5, 6)
    scannet_points[0, :3] = np.array([0.5, 0.5, 0.5])
    # generate local patches with fewer than `min_unique_num` unique points;
    # they won't be sampled
    for i in range(2, 11, 2):
        scannet_points = np.concatenate(
            [scannet_points, np.random.rand(4, 6) + i], axis=0)
    scannet_results = dict(
        points=DepthPoints(
            scannet_points, points_dim=6,
            attribute_dims=dict(color=[3, 4, 5])),
        pts_semantic_mask=np.random.randint(0, 20,
                                            (scannet_points.shape[0], )))
    scannet_results = scannet_patch_sample_points(scannet_results)
    scannet_points_result = scannet_results['points']

    # manually constructed sampled points
    scannet_choices = np.array([2, 4, 3, 1, 0])
    scannet_center = np.array([0.56804454, 0.92559665, 0.07103606])
    scannet_center[2] = 0.0
    scannet_input_points = np.concatenate([
        scannet_points[scannet_choices, :3] - scannet_center,
        scannet_points[scannet_choices, 3:],
    ], 1)

    assert scannet_points_result.points_dim == 6
    assert scannet_points_result.attribute_dims == dict(color=[3, 4, 5])
    scannet_points_result = scannet_points_result.tensor.numpy()
    assert np.allclose(scannet_input_points, scannet_points_result, atol=1e-6)

    # test on the S3DIS dataset
    np.random.seed(0)
    s3dis_patch_sample_points = IndoorPatchPointSample(
        5, 1.0, ignore_index=None, use_normalized_coord=True)
    s3dis_results = dict()
    s3dis_points = np.fromfile(
        './tests/data/s3dis/points/Area_1_office_2.bin',
        dtype=np.float32).reshape((-1, 6))
    s3dis_results['points'] = DepthPoints(
        s3dis_points, points_dim=6, attribute_dims=dict(color=[3, 4, 5]))

    s3dis_pts_semantic_mask = np.fromfile(
        './tests/data/s3dis/semantic_mask/Area_1_office_2.bin',
        dtype=np.int64)  # was `np.long`, which was removed in NumPy 1.24
    s3dis_results['pts_semantic_mask'] = s3dis_pts_semantic_mask

    s3dis_results = s3dis_patch_sample_points(s3dis_results)
    s3dis_points_result = s3dis_results['points']
    s3dis_semantic_labels_result = s3dis_results['pts_semantic_mask']

    # manually constructed sampled points
    s3dis_choices = np.array([87, 37, 60, 18, 31])
    s3dis_center = np.array([2.691, 2.231, 3.172])
    s3dis_center[2] = 0.0
    s3dis_coord_max = np.amax(s3dis_points[:, :3], axis=0)
    s3dis_input_points = np.concatenate([
        s3dis_points[s3dis_choices, :3] - s3dis_center,
        s3dis_points[s3dis_choices, 3:],
        s3dis_points[s3dis_choices, :3] / s3dis_coord_max
    ], 1)

    assert s3dis_points_result.points_dim == 9
    assert s3dis_points_result.attribute_dims == dict(
        color=[3, 4, 5], normalized_coord=[6, 7, 8])
    s3dis_points_result = s3dis_points_result.tensor.numpy()
    assert np.allclose(s3dis_input_points, s3dis_points_result, atol=1e-6)
    assert np.all(np.array([0, 1, 0, 8, 0]) == s3dis_semantic_labels_result)
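

# Illustrative sketch, not part of the original tests: in an actual
# mmdetection3d config, the transforms exercised above are usually declared
# as dicts in a dataset pipeline and built through the registry. The keyword
# names below are taken from the repr string asserted in
# `test_indoor_seg_sample`; treat the surrounding structure as an assumed
# example rather than a verified config.
_example_seg_pipeline = [
    dict(
        type='PointSegClassMapping',
        valid_cat_ids=(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28,
                       33, 34, 36, 39),
        max_cat_id=40),
    dict(
        type='IndoorPatchPointSample',
        num_points=5,
        block_size=1.5,
        ignore_index=20,
        use_normalized_coord=True),
]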


def test_depth_points():
    # test empty initialization
    empty_boxes = []
    points = DepthPoints(empty_boxes)
    assert points.tensor.shape[0] == 0
    assert points.tensor.shape[1] == 3

    # test init with xyz coordinates only
    points_np = np.array(
        [[-5.24223238e+00, 4.00209696e+01, 2.97570381e-01],
         [-2.66751588e+01, 5.59499564e+00, -9.14345860e-01],
         [-5.80979675e+00, 3.54092357e+01, 2.00889888e-01],
         [-3.13086877e+01, 1.09007628e+00, -1.94612112e-01]],
        dtype=np.float32)
    depth_points = DepthPoints(points_np, points_dim=3)
    assert depth_points.tensor.shape[0] == 4

    # test init with color and height
    points_np = np.array(
        [[-5.24223238e+00, 4.00209696e+01, 2.97570381e-01,
          0.6666, 0.1956, 0.4974, 0.9409],
         [-2.66751588e+01, 5.59499564e+00, -9.14345860e-01,
          0.1502, 0.3707, 0.1086, 0.6297],
         [-5.80979675e+00, 3.54092357e+01, 2.00889888e-01,
          0.6565, 0.6248, 0.6954, 0.2538],
         [-3.13086877e+01, 1.09007628e+00, -1.94612112e-01,
          0.2803, 0.0258, 0.4896, 0.3269]],
        dtype=np.float32)
    depth_points = DepthPoints(
        points_np,
        points_dim=7,
        attribute_dims=dict(color=[3, 4, 5], height=6))
    expected_tensor = torch.tensor(
        [[-5.24223238e+00, 4.00209696e+01, 2.97570381e-01,
          0.6666, 0.1956, 0.4974, 0.9409],
         [-2.66751588e+01, 5.59499564e+00, -9.14345860e-01,
          0.1502, 0.3707, 0.1086, 0.6297],
         [-5.80979675e+00, 3.54092357e+01, 2.00889888e-01,
          0.6565, 0.6248, 0.6954, 0.2538],
         [-3.13086877e+01, 1.09007628e+00, -1.94612112e-01,
          0.2803, 0.0258, 0.4896, 0.3269]])

    assert torch.allclose(expected_tensor, depth_points.tensor)
    assert torch.allclose(expected_tensor[:, :3], depth_points.coord)
    assert torch.allclose(expected_tensor[:, 3:6], depth_points.color)
    assert torch.allclose(expected_tensor[:, 6], depth_points.height)

    # test points clone
    new_depth_points = depth_points.clone()
    assert torch.allclose(new_depth_points.tensor, depth_points.tensor)

    # test points shuffle
    new_depth_points.shuffle()
    assert new_depth_points.tensor.shape == torch.Size([4, 7])

    # test points rotation
    rot_mat = torch.tensor([[0.93629336, -0.27509585, 0.21835066],
                            [0.28962948, 0.95642509, -0.03695701],
                            [-0.19866933, 0.0978434, 0.97517033]])
    depth_points.rotate(rot_mat)
    expected_tensor = torch.tensor(
        [[6.6239e+00, 3.9748e+01, -2.3335e+00,
          6.6660e-01, 1.9560e-01, 4.9740e-01, 9.4090e-01],
         [-2.3174e+01, 1.2600e+01, -6.9230e+00,
          1.5020e-01, 3.7070e-01, 1.0860e-01, 6.2970e-01],
         [4.7760e+00, 3.5484e+01, -2.3813e+00,
          6.5650e-01, 6.2480e-01, 6.9540e-01, 2.5380e-01],
         [-2.8960e+01, 9.6364e+00, -7.0663e+00,
          2.8030e-01, 2.5800e-02, 4.8960e-01, 3.2690e-01]])
    assert torch.allclose(expected_tensor, depth_points.tensor, 1e-3)

    new_depth_points = depth_points.clone()
    new_depth_points.rotate(0.1, axis=2)
    expected_tensor = torch.tensor(
        [[2.6226e+00, 4.0211e+01, -2.3335e+00,
          6.6660e-01, 1.9560e-01, 4.9740e-01, 9.4090e-01],
         [-2.4316e+01, 1.0224e+01, -6.9230e+00,
          1.5020e-01, 3.7070e-01, 1.0860e-01, 6.2970e-01],
         [1.2096e+00, 3.5784e+01, -2.3813e+00,
          6.5650e-01, 6.2480e-01, 6.9540e-01, 2.5380e-01],
         [-2.9777e+01, 6.6971e+00, -7.0663e+00,
          2.8030e-01, 2.5800e-02, 4.8960e-01, 3.2690e-01]])
    assert torch.allclose(expected_tensor, new_depth_points.tensor, 1e-3)

    # test points translation
    translation_vector = torch.tensor([0.93629336, -0.27509585, 0.21835066])
    depth_points.translate(translation_vector)
    expected_tensor = torch.tensor(
        [[7.5602e+00, 3.9473e+01, -2.1152e+00,
          6.6660e-01, 1.9560e-01, 4.9740e-01, 9.4090e-01],
         [-2.2237e+01, 1.2325e+01, -6.7046e+00,
          1.5020e-01, 3.7070e-01, 1.0860e-01, 6.2970e-01],
         [5.7123e+00, 3.5209e+01, -2.1629e+00,
          6.5650e-01, 6.2480e-01, 6.9540e-01, 2.5380e-01],
         [-2.8023e+01, 9.3613e+00, -6.8480e+00,
          2.8030e-01, 2.5800e-02, 4.8960e-01, 3.2690e-01]])
    assert torch.allclose(expected_tensor, depth_points.tensor, 1e-4)

    # test points filter
    point_range = [-10, -40, -10, 10, 40, 10]
    in_range_flags = depth_points.in_range_3d(point_range)
    expected_flags = torch.tensor([True, False, True, False])
    assert torch.all(in_range_flags == expected_flags)

    # test points scale
    depth_points.scale(1.2)
    expected_tensor = torch.tensor(
        [[9.0722e+00, 4.7368e+01, -2.5382e+00,
          6.6660e-01, 1.9560e-01, 4.9740e-01, 9.4090e-01],
         [-2.6685e+01, 1.4790e+01, -8.0455e+00,
          1.5020e-01, 3.7070e-01, 1.0860e-01, 6.2970e-01],
         [6.8547e+00, 4.2251e+01, -2.5955e+00,
          6.5650e-01, 6.2480e-01, 6.9540e-01, 2.5380e-01],
         [-3.3628e+01, 1.1234e+01, -8.2176e+00,
          2.8030e-01, 2.5800e-02, 4.8960e-01, 3.2690e-01]])
    assert torch.allclose(expected_tensor, depth_points.tensor, 1e-3)

    # test get_item
    expected_tensor = torch.tensor(
        [[-26.6848, 14.7898, -8.0455, 0.1502, 0.3707, 0.1086, 0.6297]])
    assert torch.allclose(expected_tensor, depth_points[1].tensor, 1e-4)
    expected_tensor = torch.tensor(
        [[-26.6848, 14.7898, -8.0455, 0.1502, 0.3707, 0.1086, 0.6297],
         [6.8547, 42.2509, -2.5955, 0.6565, 0.6248, 0.6954, 0.2538]])
    assert torch.allclose(expected_tensor, depth_points[1:3].tensor, 1e-4)
    mask = torch.tensor([True, False, True, False])
    expected_tensor = torch.tensor(
        [[9.0722, 47.3678, -2.5382, 0.6666, 0.1956, 0.4974, 0.9409],
         [6.8547, 42.2509, -2.5955, 0.6565, 0.6248, 0.6954, 0.2538]])
    assert torch.allclose(expected_tensor, depth_points[mask].tensor, 1e-4)
    expected_tensor = torch.tensor([[0.6666], [0.1502], [0.6565], [0.2803]])
    assert torch.allclose(expected_tensor, depth_points[:, 3].tensor, 1e-4)

    # test length
    assert len(depth_points) == 4

    # test repr; the expected string must match torch's default tensor
    # formatting, including the wrap indentation
    expected_repr = 'DepthPoints(\n    ' \
                    'tensor([[ 9.0722e+00,  4.7368e+01, -2.5382e+00,' \
                    '  6.6660e-01,  1.9560e-01,\n' \
                    '          4.9740e-01,  9.4090e-01],\n' \
                    '        [-2.6685e+01,  1.4790e+01, -8.0455e+00,' \
                    '  1.5020e-01,  3.7070e-01,\n' \
                    '          1.0860e-01,  6.2970e-01],\n' \
                    '        [ 6.8547e+00,  4.2251e+01, -2.5955e+00,' \
                    '  6.5650e-01,  6.2480e-01,\n' \
                    '          6.9540e-01,  2.5380e-01],\n' \
                    '        [-3.3628e+01,  1.1234e+01, -8.2176e+00,' \
                    '  2.8030e-01,  2.5800e-02,\n' \
                    '          4.8960e-01,  3.2690e-01]]))'
    assert expected_repr == str(depth_points)

    # test concatenate
    depth_points_clone = depth_points.clone()
    cat_points = DepthPoints.cat([depth_points, depth_points_clone])
    assert torch.allclose(cat_points.tensor[:len(depth_points)],
                          depth_points.tensor)

    # test iteration
    for i, point in enumerate(depth_points):
        assert torch.allclose(point, depth_points.tensor[i])

    # test new_point
    new_points = depth_points.new_point([[1, 2, 3, 4, 5, 6, 7]])
    assert torch.allclose(
        new_points.tensor,
        torch.tensor([[1, 2, 3, 4, 5, 6, 7]],
                     dtype=depth_points.tensor.dtype))

    # test in_range_bev
    point_bev_range = [-30, -40, 30, 40]
    in_range_flags = depth_points.in_range_bev(point_bev_range)
    expected_flags = torch.tensor([False, True, False, False])
    assert torch.all(in_range_flags == expected_flags)

    # test flip
    depth_points.flip(bev_direction='horizontal')
    expected_tensor = torch.tensor(
        [[-9.0722e+00, 4.7368e+01, -2.5382e+00,
          6.6660e-01, 1.9560e-01, 4.9740e-01, 9.4090e-01],
         [2.6685e+01, 1.4790e+01, -8.0455e+00,
          1.5020e-01, 3.7070e-01, 1.0860e-01, 6.2970e-01],
         [-6.8547e+00, 4.2251e+01, -2.5955e+00,
          6.5650e-01, 6.2480e-01, 6.9540e-01, 2.5380e-01],
         [3.3628e+01, 1.1234e+01, -8.2176e+00,
          2.8030e-01, 2.5800e-02, 4.8960e-01, 3.2690e-01]])
    assert torch.allclose(expected_tensor, depth_points.tensor, 1e-4)
    depth_points.flip(bev_direction='vertical')
    expected_tensor = torch.tensor(
        [[-9.0722e+00, -4.7368e+01, -2.5382e+00,
          6.6660e-01, 1.9560e-01, 4.9740e-01, 9.4090e-01],
         [2.6685e+01, -1.4790e+01, -8.0455e+00,
          1.5020e-01, 3.7070e-01, 1.0860e-01, 6.2970e-01],
         [-6.8547e+00, -4.2251e+01, -2.5955e+00,
          6.5650e-01, 6.2480e-01, 6.9540e-01, 2.5380e-01],
         [3.3628e+01, -1.1234e+01, -8.2176e+00,
          2.8030e-01, 2.5800e-02, 4.8960e-01, 3.2690e-01]])
    assert torch.allclose(expected_tensor, depth_points.tensor, 1e-4)
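

# Sanity sketch, not part of the original suite: the `axis=2` rotation
# asserted in `test_depth_points` follows the standard counter-clockwise
# rotation about the z-axis,
#     x' = x * cos(t) - y * sin(t),  y' = x * sin(t) + y * cos(t).
# The numbers below are copied from the expected tensors in that test; only
# the closed-form cross-check itself is new.
def test_yaw_rotation_convention_sketch():
    theta = 0.1
    # first point of `depth_points` before the `rotate(0.1, axis=2)` call
    x, y = 6.6239, 39.748
    x_rot = x * np.cos(theta) - y * np.sin(theta)
    y_rot = x * np.sin(theta) + y * np.cos(theta)
    # matches the first row of the expected tensor after the rotation
    assert np.allclose([x_rot, y_rot], [2.6226, 40.211], atol=1e-3)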


def test_global_rot_scale_trans():
    angle = 0.78539816
    scale = [0.95, 1.05]
    trans_std = 1.0

    # rot_range should be a number or a sequence of numbers
    with pytest.raises(AssertionError):
        global_rot_scale_trans = GlobalRotScaleTrans(rot_range='0.0')

    # scale_ratio_range should be a sequence of numbers
    with pytest.raises(AssertionError):
        global_rot_scale_trans = GlobalRotScaleTrans(scale_ratio_range=1.0)

    # translation_std should be a number or a sequence of numbers
    with pytest.raises(AssertionError):
        global_rot_scale_trans = GlobalRotScaleTrans(translation_std='0.0')

    global_rot_scale_trans = GlobalRotScaleTrans(
        rot_range=angle,
        scale_ratio_range=scale,
        translation_std=trans_std,
        shift_height=False)

    np.random.seed(0)
    points = np.fromfile('tests/data/scannet/points/scene0000_00.bin',
                         np.float32).reshape(-1, 6)
    annos = mmcv.load('tests/data/scannet/scannet_infos.pkl')
    info = annos[0]
    gt_bboxes_3d = info['annos']['gt_boxes_upright_depth']

    depth_points = DepthPoints(
        points.copy(), points_dim=6, attribute_dims=dict(color=[3, 4, 5]))
    gt_bboxes_3d = DepthInstance3DBoxes(
        gt_bboxes_3d.copy(),
        box_dim=gt_bboxes_3d.shape[-1],
        with_yaw=False,
        origin=(0.5, 0.5, 0.5))

    input_dict = dict(
        points=depth_points.clone(),
        bbox3d_fields=['gt_bboxes_3d'],
        gt_bboxes_3d=gt_bboxes_3d.clone())

    input_dict = global_rot_scale_trans(input_dict)
    trans_depth_points = input_dict['points']
    trans_bboxes_3d = input_dict['gt_bboxes_3d']

    noise_rot = 0.07667607233534723
    scale_factor = 1.021518936637242
    trans_factor = np.array([0.97873798, 2.2408932, 1.86755799])

    true_depth_points = depth_points.clone()
    true_bboxes_3d = gt_bboxes_3d.clone()
    true_depth_points, noise_rot_mat_T = true_bboxes_3d.rotate(
        noise_rot, true_depth_points)
    true_bboxes_3d.scale(scale_factor)
    true_bboxes_3d.translate(trans_factor)
    true_depth_points.scale(scale_factor)
    true_depth_points.translate(trans_factor)

    assert torch.allclose(
        trans_depth_points.tensor, true_depth_points.tensor, atol=1e-6)
    assert torch.allclose(
        trans_bboxes_3d.tensor, true_bboxes_3d.tensor, atol=1e-6)
    assert input_dict['pcd_scale_factor'] == scale_factor
    assert torch.allclose(
        input_dict['pcd_rotation'], noise_rot_mat_T, atol=1e-6)
    assert np.allclose(input_dict['pcd_trans'], trans_factor)

    repr_str = repr(global_rot_scale_trans)
    expected_repr_str = f'GlobalRotScaleTrans(rot_range={[-angle, angle]},' \
                        f' scale_ratio_range={scale},' \
                        f' translation_std={[trans_std for _ in range(3)]},' \
                        f' shift_height=False)'
    assert repr_str == expected_repr_str

    # points with shift_height but no bbox
    global_rot_scale_trans = GlobalRotScaleTrans(
        rot_range=angle,
        scale_ratio_range=scale,
        translation_std=trans_std,
        shift_height=True)

    # points should have a height attribute when shift_height=True
    with pytest.raises(AssertionError):
        input_dict = global_rot_scale_trans(input_dict)

    np.random.seed(0)
    shift_height = points[:, 2:3] * 0.99
    points = np.concatenate([points, shift_height], axis=1)
    depth_points = DepthPoints(
        points.copy(),
        points_dim=7,
        attribute_dims=dict(color=[3, 4, 5], height=6))

    input_dict = dict(points=depth_points.clone(), bbox3d_fields=[])
    input_dict = global_rot_scale_trans(input_dict)
    trans_depth_points = input_dict['points']
    true_shift_height = shift_height * scale_factor

    assert np.allclose(
        trans_depth_points.tensor.numpy(),
        np.concatenate([true_depth_points.tensor.numpy(), true_shift_height],
                       axis=1),
        atol=1e-6)
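

# Illustrative sketch, not part of the original tests: in an mmdetection3d
# dataset config, GlobalRotScaleTrans is normally declared as a pipeline
# dict. The keyword names are taken from the repr string asserted above;
# the surrounding structure is an assumed example, not a verified config.
_example_aug_pipeline = [
    dict(
        type='GlobalRotScaleTrans',
        rot_range=[-0.78539816, 0.78539816],
        scale_ratio_range=[0.95, 1.05],
        translation_std=[1.0, 1.0, 1.0],
        shift_height=True),
]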