def test_body3d_semi_supervision_dataset():
    """Test Body3DSemiSupervisionDataset built from a labeled and an
    unlabeled H36M sub-dataset.

    Checks that the combined dataset exposes the labeled sub-dataset's
    name, that items carry the remapped ``unlabeled_input`` key, and that
    the combined length equals the unlabeled dataset's length.
    """
    dataset_info = Config.fromfile(
        'configs/_base_/datasets/h36m.py').dataset_info

    # Labeled dataset: subject S1 only, ground-truth 2D joints.
    labeled_data_cfg = dict(
        num_joints=17,
        seq_len=27,
        seq_frame_interval=1,
        # BUGFIX: was misspelled 'causall', so the non-causal setting was
        # never actually applied to the labeled data config.
        causal=False,
        temporal_padding=True,
        joint_2d_src='gt',
        subset=1,
        subjects=['S1'],
        need_camera_param=True,
        camera_param_file='tests/data/h36m/cameras.pkl')
    labeled_dataset_cfg = dict(
        type='Body3DH36MDataset',
        ann_file='tests/data/h36m/test_h36m_body3d.npz',
        img_prefix='tests/data/h36m',
        data_cfg=labeled_data_cfg,
        dataset_info=dataset_info,
        pipeline=[])

    # Unlabeled dataset: subjects S5/S7/S8; its pipeline remaps the 2D
    # input under the key 'unlabeled_input'.
    unlabeled_data_cfg = dict(
        num_joints=17,
        seq_len=27,
        seq_frame_interval=1,
        causal=False,
        temporal_padding=True,
        joint_2d_src='gt',
        subjects=['S5', 'S7', 'S8'],
        need_camera_param=True,
        camera_param_file='tests/data/h36m/cameras.pkl',
        need_2d_label=True)
    unlabeled_dataset_cfg = dict(
        type='Body3DH36MDataset',
        ann_file='tests/data/h36m/test_h36m_body3d.npz',
        img_prefix='tests/data/h36m',
        data_cfg=unlabeled_data_cfg,
        dataset_info=dataset_info,
        pipeline=[
            dict(
                type='Collect',
                keys=[('input_2d', 'unlabeled_input')],
                meta_name='metas',
                meta_keys=[])
        ])

    # Combine labeled and unlabeled datasets into the wrapper dataset.
    dataset = 'Body3DSemiSupervisionDataset'
    dataset_class = DATASETS.get(dataset)
    custom_dataset = dataset_class(labeled_dataset_cfg,
                                   unlabeled_dataset_cfg)
    item = custom_dataset[0]
    assert custom_dataset.labeled_dataset.dataset_name == 'h36m'
    assert 'unlabeled_input' in item.keys()

    # The wrapper's length is driven by the unlabeled sub-dataset.
    unlabeled_dataset = build_dataset(unlabeled_dataset_cfg)
    assert len(unlabeled_dataset) == len(custom_dataset)
def test_concat_dataset():
    """Exercise the three ways of constructing a ConcatDataset:
    an explicit cfg, a sequence of cfgs, and list-valued cfg fields.
    """
    dataset_info = Config.fromfile(
        'configs/_base_/datasets/coco.py').dataset_info

    all_channels = list(range(17))
    channel_cfg = dict(
        num_output_channels=17,
        dataset_joints=17,
        dataset_channel=[all_channels],
        inference_channel=all_channels)

    data_cfg = dict(
        image_size=[192, 256],
        heatmap_size=[48, 64],
        num_output_channels=channel_cfg['num_output_channels'],
        num_joints=channel_cfg['dataset_joints'],
        dataset_channel=channel_cfg['dataset_channel'],
        inference_channel=channel_cfg['inference_channel'],
        soft_nms=False,
        nms_thr=1.0,
        oks_thr=0.9,
        vis_thr=0.2,
        use_gt_bbox=True,
        det_bbox_thr=0.0,
        bbox_file='tests/data/coco/test_coco_det_AP_H_56.json',
    )

    single_cfg = dict(
        type='TopDownCocoDataset',
        ann_file='tests/data/coco/test_coco.json',
        img_prefix='tests/data/coco/',
        data_cfg=data_cfg,
        pipeline=[],
        dataset_info=dataset_info)
    base_dataset = build_dataset(single_cfg)
    expected_len = 2 * len(base_dataset)

    # Case 1: explicit ConcatDataset cfg.
    explicit_cfg = dict(
        type='ConcatDataset', datasets=[single_cfg, single_cfg])
    assert len(build_dataset(explicit_cfg)) == expected_len

    # Case 2: a plain sequence of dataset cfgs.
    assert len(build_dataset([single_cfg, single_cfg])) == expected_len

    # Case 3: list-valued fields inside a single cfg.
    seq_cfg = single_cfg.copy()
    for key in ['ann_file', 'type', 'img_prefix', 'dataset_info']:
        seq_cfg[key] = [seq_cfg[key]] * 2
    for key in ['num_joints', 'dataset_channel']:
        seq_cfg['data_cfg'][key] = [seq_cfg['data_cfg'][key]] * 2
    assert len(build_dataset(seq_cfg)) == expected_len
def __init__(self, labeled_dataset, unlabeled_dataset):
    """Build both sub-datasets from their configs.

    Args:
        labeled_dataset: cfg of the labeled sub-dataset.
        unlabeled_dataset: cfg of the unlabeled sub-dataset.
    """
    super().__init__()
    # Construct the unlabeled split first; it determines the length.
    self.unlabeled_dataset = build_dataset(unlabeled_dataset)
    self.labeled_dataset = build_dataset(labeled_dataset)
    self.length = len(self.unlabeled_dataset)
def __init__(self, train_dataset, adversarial_dataset):
    """Build the training and adversarial sub-datasets from their configs.

    Args:
        train_dataset: cfg of the main training sub-dataset.
        adversarial_dataset: cfg of the adversarial sub-dataset.
    """
    super().__init__()
    # Construct the training split first; it determines the length.
    self.train_dataset = build_dataset(train_dataset)
    self.adversarial_dataset = build_dataset(adversarial_dataset)
    self.length = len(self.train_dataset)