def test_seg_show():
    """Test visualization of segmentation results on the ScanNet test data."""
    import mmcv
    import tempfile
    import torch
    from os import path as osp

    from mmdet3d.datasets import ScanNetSegDataset

    tmp_dir = tempfile.TemporaryDirectory()
    temp_dir = tmp_dir.name
    root_path = './tests/data/scannet'
    ann_file = './tests/data/scannet/scannet_infos.pkl'
    # build a minimal dataset on the bundled test scene
    scannet_dataset = ScanNetSegDataset(
        data_root=root_path, ann_file=ann_file, scene_idxs=[0])
    # fake per-point semantic predictions for the single test scene
    result = dict(
        semantic_mask=torch.tensor([
            13, 5, 1, 2, 6, 2, 13, 1, 14, 2, 0, 0, 5, 5, 3, 0, 1, 14, 0, 0, 0,
            18, 6, 15, 13, 0, 2, 4, 0, 3, 16, 6, 13, 5, 13, 0, 0, 0, 0, 1, 7,
            3, 19, 12, 8, 0, 11, 0, 0, 1, 2, 13, 17, 1, 1, 1, 6, 2, 13, 19, 4,
            17, 0, 14, 1, 7, 2, 1, 7, 2, 0, 5, 17, 5, 0, 0, 3, 6, 5, 11, 1,
            13, 13, 2, 3, 1, 0, 13, 19, 1, 14, 5, 3, 1, 13, 1, 2, 3, 2, 1
        ]).long())
    results = [result]
    # show() should dump points, ground truth and prediction as .obj files
    scannet_dataset.show(results, temp_dir, show=False)
    pts_file_path = osp.join(temp_dir, 'scene0000_00',
                             'scene0000_00_points.obj')
    gt_file_path = osp.join(temp_dir, 'scene0000_00', 'scene0000_00_gt.obj')
    pred_file_path = osp.join(temp_dir, 'scene0000_00',
                              'scene0000_00_pred.obj')
    mmcv.check_file_exist(pts_file_path)
    mmcv.check_file_exist(gt_file_path)
    mmcv.check_file_exist(pred_file_path)
    tmp_dir.cleanup()

    # test show with pipeline
    tmp_dir = tempfile.TemporaryDirectory()
    temp_dir = tmp_dir.name
    class_names = ('wall', 'floor', 'cabinet', 'bed', 'chair', 'sofa',
                   'table', 'door', 'window', 'bookshelf', 'picture',
                   'counter', 'desk', 'curtain', 'refrigerator',
                   'showercurtrain', 'toilet', 'sink', 'bathtub',
                   'otherfurniture')
    eval_pipeline = [
        dict(
            type='LoadPointsFromFile',
            coord_type='DEPTH',
            shift_height=False,
            use_color=True,
            load_dim=6,
            use_dim=[0, 1, 2, 3, 4, 5]),
        dict(
            type='LoadAnnotations3D',
            with_bbox_3d=False,
            with_label_3d=False,
            with_mask_3d=False,
            with_seg_3d=True),
        dict(
            type='PointSegClassMapping',
            valid_cat_ids=(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24,
                           28, 33, 34, 36, 39)),
        dict(type='DefaultFormatBundle3D', class_names=class_names),
        dict(type='Collect3D', keys=['points', 'pts_semantic_mask'])
    ]
    # passing an explicit eval pipeline should produce the same output files
    scannet_dataset.show(
        results, temp_dir, show=False, pipeline=eval_pipeline)
    pts_file_path = osp.join(temp_dir, 'scene0000_00',
                             'scene0000_00_points.obj')
    gt_file_path = osp.join(temp_dir, 'scene0000_00', 'scene0000_00_gt.obj')
    pred_file_path = osp.join(temp_dir, 'scene0000_00',
                              'scene0000_00_pred.obj')
    mmcv.check_file_exist(pts_file_path)
    mmcv.check_file_exist(gt_file_path)
    mmcv.check_file_exist(pred_file_path)
    tmp_dir.cleanup()