# Example #1
def test_deepfashion_dataset():
    """Smoke-test DeepFashionDataset: construction, indexing and evaluation."""
    dataset = 'DeepFashionDataset'
    dataset_info = Config.fromfile(
        'configs/_base_/datasets/deepfashion_full.py').dataset_info
    # test DeepFashion datasets
    dataset_class = DATASETS.get(dataset)
    # NOTE(review): these patch the *class*, not an instance, so the mocks
    # leak into every other test that uses DeepFashionDataset.
    dataset_class.load_annotations = MagicMock()
    dataset_class.coco = MagicMock()

    # 8 clothing landmarks, all used for both supervision and inference.
    channel_cfg = dict(num_output_channels=8,
                       dataset_joints=8,
                       dataset_channel=[
                           [0, 1, 2, 3, 4, 5, 6, 7],
                       ],
                       inference_channel=[0, 1, 2, 3, 4, 5, 6, 7])

    data_cfg = dict(image_size=[192, 256],
                    heatmap_size=[48, 64],
                    num_output_channels=channel_cfg['num_output_channels'],
                    num_joints=channel_cfg['dataset_joints'],
                    dataset_channel=channel_cfg['dataset_channel'],
                    inference_channel=channel_cfg['inference_channel'],
                    soft_nms=False,
                    nms_thr=1.0,
                    oks_thr=0.9,
                    vis_thr=0.2,
                    use_gt_bbox=True,
                    det_bbox_thr=0.0,
                    image_thr=0.0,
                    bbox_file='')

    # Test gt bbox
    custom_dataset = dataset_class(ann_file='tests/data/fld/test_fld.json',
                                   img_prefix='tests/data/fld/',
                                   subset='full',
                                   data_cfg=data_cfg,
                                   pipeline=[],
                                   dataset_info=dataset_info,
                                   test_mode=True)

    assert custom_dataset.test_mode is True
    assert custom_dataset.dataset_name == 'deepfashion_full'

    image_id = 128
    assert image_id in custom_dataset.img_ids
    assert len(custom_dataset.img_ids) == 2
    _ = custom_dataset[0]

    # Predictions copied from the ground truth should score (near-)perfectly.
    results = convert_db_to_output(custom_dataset.db)
    infos = custom_dataset.evaluate(results, metric=['PCK', 'EPE', 'AUC'])
    assert_almost_equal(infos['PCK'], 1.0)
    assert_almost_equal(infos['AUC'], 0.95)
    assert_almost_equal(infos['EPE'], 0.0)

    # Unsupported metrics must be rejected.
    with pytest.raises(KeyError):
        infos = custom_dataset.evaluate(results, metric='mAP')
# Example #2
def test_Panoptic2D_dataset():
    """Check construction and metric evaluation of PanopticDataset (2D hand)."""
    dataset_class = DATASETS.get('PanopticDataset')
    dataset_info = Config.fromfile(
        'configs/_base_/datasets/panoptic_hand2d.py').dataset_info

    # All 21 hand keypoints are used for both supervision and inference.
    channel_cfg = dict(
        num_output_channels=21,
        dataset_joints=21,
        dataset_channel=[list(range(21))],
        inference_channel=list(range(21)))

    data_cfg = dict(
        image_size=[256, 256],
        heatmap_size=[64, 64],
        num_output_channels=channel_cfg['num_output_channels'],
        num_joints=channel_cfg['dataset_joints'],
        dataset_channel=channel_cfg['dataset_channel'],
        inference_channel=channel_cfg['inference_channel'])

    # Test
    data_cfg_copy = copy.deepcopy(data_cfg)
    kwargs = dict(
        ann_file='tests/data/panoptic/test_panoptic.json',
        img_prefix='tests/data/panoptic/',
        data_cfg=data_cfg_copy,
        pipeline=[],
        dataset_info=dataset_info)

    # Construction in test mode must succeed.
    _ = dataset_class(test_mode=True, **kwargs)
    # The dataset under test is built in training mode.
    custom_dataset = dataset_class(test_mode=False, **kwargs)

    assert custom_dataset.dataset_name == 'panoptic_hand2d'
    assert custom_dataset.test_mode is False
    assert custom_dataset.num_images == 4
    _ = custom_dataset[0]

    # Predictions copied from the ground truth should score (near-)perfectly.
    results = convert_db_to_output(custom_dataset.db)
    infos = custom_dataset.evaluate(results, metric=['PCKh', 'EPE', 'AUC'])
    assert_almost_equal(infos['PCKh'], 1.0)
    assert_almost_equal(infos['AUC'], 0.95)
    assert_almost_equal(infos['EPE'], 0.0)

    # Unsupported metrics must be rejected.
    with pytest.raises(KeyError):
        infos = custom_dataset.evaluate(results, metric='mAP')
# Example #3
def test_animal_fly_dataset():
    """Check construction and PCK evaluation of AnimalFlyDataset."""
    dataset_class = DATASETS.get('AnimalFlyDataset')
    dataset_info = Config.fromfile(
        'configs/_base_/datasets/fly.py').dataset_info

    # All 32 fly keypoints are used for both supervision and inference.
    channel_cfg = dict(
        num_output_channels=32,
        dataset_joints=32,
        dataset_channel=[list(range(32))],
        inference_channel=list(range(32)))

    data_cfg = dict(
        image_size=[192, 192],
        heatmap_size=[48, 48],
        num_output_channels=channel_cfg['num_output_channels'],
        num_joints=channel_cfg['dataset_joints'],
        dataset_channel=channel_cfg['dataset_channel'],
        inference_channel=channel_cfg['inference_channel'])

    # Test
    data_cfg_copy = copy.deepcopy(data_cfg)
    kwargs = dict(
        ann_file='tests/data/fly/test_fly.json',
        img_prefix='tests/data/fly/',
        data_cfg=data_cfg_copy,
        dataset_info=dataset_info,
        pipeline=[])

    # Construction in test mode must succeed.
    _ = dataset_class(test_mode=True, **kwargs)
    # The dataset under test is built in training mode.
    custom_dataset = dataset_class(test_mode=False, **kwargs)

    assert custom_dataset.dataset_name == 'fly'
    assert custom_dataset.test_mode is False
    assert custom_dataset.num_images == 2
    _ = custom_dataset[0]

    outputs = convert_db_to_output(custom_dataset.db)
    with tempfile.TemporaryDirectory() as tmpdir:
        infos = custom_dataset.evaluate(outputs, tmpdir, ['PCK'])
        assert_almost_equal(infos['PCK'], 1.0)

        # Unsupported metrics must be rejected.
        with pytest.raises(KeyError):
            infos = custom_dataset.evaluate(outputs, tmpdir, 'mAP')
def test_top_down_Panoptic_dataset():
    """Check PanopticDataset built without an explicit `dataset_info`."""
    dataset_class = DATASETS.get('PanopticDataset')

    # All 21 hand keypoints are used for both supervision and inference.
    channel_cfg = dict(
        num_output_channels=21,
        dataset_joints=21,
        dataset_channel=[list(range(21))],
        inference_channel=list(range(21)))

    data_cfg = dict(
        image_size=[256, 256],
        heatmap_size=[64, 64],
        num_output_channels=channel_cfg['num_output_channels'],
        num_joints=channel_cfg['dataset_joints'],
        dataset_channel=channel_cfg['dataset_channel'],
        inference_channel=channel_cfg['inference_channel'])

    # Test
    data_cfg_copy = copy.deepcopy(data_cfg)
    kwargs = dict(
        ann_file='tests/data/panoptic/test_panoptic.json',
        img_prefix='tests/data/panoptic/',
        data_cfg=data_cfg_copy,
        pipeline=[])

    # Construction in test mode must succeed.
    _ = dataset_class(test_mode=True, **kwargs)
    # The dataset under test is built in training mode.
    custom_dataset = dataset_class(test_mode=False, **kwargs)

    assert custom_dataset.test_mode is False
    assert custom_dataset.num_images == 4
    _ = custom_dataset[0]

    # Predictions taken straight from the annotation file should be perfect.
    outputs = load_json_to_output('tests/data/panoptic/test_panoptic.json',
                                  'tests/data/panoptic/', 1, False)
    with tempfile.TemporaryDirectory() as tmpdir:
        infos = custom_dataset.evaluate(outputs, tmpdir,
                                        ['PCKh', 'EPE', 'AUC'])
        assert_almost_equal(infos['PCKh'], 1.0)
        assert_almost_equal(infos['AUC'], 0.95)
        assert_almost_equal(infos['EPE'], 0.0)

        # Unsupported metrics must be rejected.
        with pytest.raises(KeyError):
            infos = custom_dataset.evaluate(outputs, tmpdir, 'mAP')
# Example #5
def test_animal_Macaque_dataset():
    """Check construction and mAP evaluation of AnimalMacaqueDataset."""
    dataset_class = DATASETS.get('AnimalMacaqueDataset')

    # 17 COCO-style macaque keypoints, all active.
    channel_cfg = dict(
        num_output_channels=17,
        dataset_joints=17,
        dataset_channel=[list(range(17))],
        inference_channel=list(range(17)))

    data_cfg = dict(
        image_size=[192, 256],
        heatmap_size=[48, 64],
        num_output_channels=channel_cfg['num_output_channels'],
        num_joints=channel_cfg['dataset_joints'],
        dataset_channel=channel_cfg['dataset_channel'],
        inference_channel=channel_cfg['inference_channel'],
        # COCO-style evaluation settings.
        soft_nms=False,
        nms_thr=1.0,
        oks_thr=0.9,
        vis_thr=0.2,
        use_gt_bbox=True,
        det_bbox_thr=0.0,
        bbox_file='',
    )

    # Test
    data_cfg_copy = copy.deepcopy(data_cfg)
    kwargs = dict(
        ann_file='tests/data/macaque/test_macaque.json',
        img_prefix='tests/data/macaque/',
        data_cfg=data_cfg_copy,
        pipeline=[])

    # Construction in test mode must succeed.
    _ = dataset_class(test_mode=True, **kwargs)
    # The dataset under test is built in training mode.
    custom_dataset = dataset_class(test_mode=False, **kwargs)

    assert custom_dataset.test_mode is False
    assert custom_dataset.num_images == 2
    _ = custom_dataset[0]

    outputs = convert_db_to_output(custom_dataset.db)
    with tempfile.TemporaryDirectory() as tmpdir:
        infos = custom_dataset.evaluate(outputs, tmpdir, 'mAP')
        assert_almost_equal(infos['AP'], 1.0)

        # Unsupported metrics must be rejected.
        with pytest.raises(KeyError):
            infos = custom_dataset.evaluate(outputs, tmpdir, ['PCK'])
def test_bottom_up_AIC_dataset():
    """Smoke-test BottomUpAicDataset: construction, indexing, mAP evaluation."""
    dataset = 'BottomUpAicDataset'
    dataset_info = Config.fromfile(
        'configs/_base_/datasets/aic.py').dataset_info
    # test AIC datasets
    dataset_class = DATASETS.get(dataset)

    # 14 AIC keypoints, all used for both supervision and inference.
    channel_cfg = dict(
        num_output_channels=14,
        dataset_joints=14,
        dataset_channel=[
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13],
        ],
        inference_channel=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13])

    # Bottom-up config: single scale, 512 input, one 128-px heatmap level.
    data_cfg = dict(
        image_size=512,
        base_size=256,
        base_sigma=2,
        heatmap_size=[128],
        num_joints=channel_cfg['dataset_joints'],
        dataset_channel=channel_cfg['dataset_channel'],
        inference_channel=channel_cfg['inference_channel'],
        num_scales=1,
        scale_aware_sigma=False,
    )

    # Construction in training mode must succeed (result discarded).
    _ = dataset_class(ann_file='tests/data/aic/test_aic.json',
                      img_prefix='tests/data/aic/',
                      data_cfg=data_cfg,
                      pipeline=[],
                      dataset_info=dataset_info,
                      test_mode=False)

    custom_dataset = dataset_class(ann_file='tests/data/aic/test_aic.json',
                                   img_prefix='tests/data/aic/',
                                   data_cfg=data_cfg,
                                   pipeline=[],
                                   dataset_info=dataset_info,
                                   test_mode=True)

    assert custom_dataset.dataset_name == 'aic'

    image_id = 1
    assert image_id in custom_dataset.img_ids
    assert len(custom_dataset.img_ids) == 3
    _ = custom_dataset[0]

    # Predictions copied from the ground truth should score perfectly.
    outputs = convert_coco_to_output(custom_dataset.coco)
    with tempfile.TemporaryDirectory() as tmpdir:
        infos = custom_dataset.evaluate(outputs, tmpdir, 'mAP')
        assert_almost_equal(infos['AP'], 1.0)

        # Unsupported metrics must be rejected.
        with pytest.raises(KeyError):
            _ = custom_dataset.evaluate(outputs, tmpdir, 'PCK')
# Example #7
def test_deepfashion_dataset_compatibility():
    """DeepFashionDataset legacy path: constructing without `dataset_info`
    must still work but emit a DeprecationWarning.
    """
    dataset = 'DeepFashionDataset'
    # test DeepFashion datasets
    dataset_class = DATASETS.get(dataset)
    # NOTE(review): these patch the *class*, not an instance, so the mocks
    # leak into every other test that uses DeepFashionDataset.
    dataset_class.load_annotations = MagicMock()
    dataset_class.coco = MagicMock()

    # 8 clothing landmarks, all used for both supervision and inference.
    channel_cfg = dict(num_output_channels=8,
                       dataset_joints=8,
                       dataset_channel=[
                           [0, 1, 2, 3, 4, 5, 6, 7],
                       ],
                       inference_channel=[0, 1, 2, 3, 4, 5, 6, 7])

    data_cfg = dict(image_size=[192, 256],
                    heatmap_size=[48, 64],
                    num_output_channels=channel_cfg['num_output_channels'],
                    num_joints=channel_cfg['dataset_joints'],
                    dataset_channel=channel_cfg['dataset_channel'],
                    inference_channel=channel_cfg['inference_channel'],
                    soft_nms=False,
                    nms_thr=1.0,
                    oks_thr=0.9,
                    vis_thr=0.2,
                    use_gt_bbox=True,
                    det_bbox_thr=0.0,
                    image_thr=0.0,
                    bbox_file='')

    # Test gt bbox
    with pytest.warns(DeprecationWarning):
        custom_dataset = dataset_class(ann_file='tests/data/fld/test_fld.json',
                                       img_prefix='tests/data/fld/',
                                       subset='full',
                                       data_cfg=data_cfg,
                                       pipeline=[],
                                       test_mode=True)

    assert custom_dataset.test_mode is True
    assert custom_dataset.dataset_name == 'deepfashion_full'

    image_id = 128
    assert image_id in custom_dataset.img_ids
    assert len(custom_dataset.img_ids) == 2
    _ = custom_dataset[0]

    outputs = convert_db_to_output(custom_dataset.db)
    with tempfile.TemporaryDirectory() as tmpdir:
        infos = custom_dataset.evaluate(outputs, tmpdir, ['PCK', 'EPE', 'AUC'])
        assert_almost_equal(infos['PCK'], 1.0)
        assert_almost_equal(infos['AUC'], 0.95)
        assert_almost_equal(infos['EPE'], 0.0)

        # BUG FIX: this check previously ran *after* the TemporaryDirectory
        # context had exited, passing a path to an already-deleted directory
        # into evaluate().
        with pytest.raises(KeyError):
            infos = custom_dataset.evaluate(outputs, tmpdir, 'mAP')
def test_bottom_up_COCO_wholebody_dataset():
    """Check construction and mAP evaluation of BottomUpCocoWholeBodyDataset."""
    dataset_class = DATASETS.get('BottomUpCocoWholeBodyDataset')
    dataset_info = Config.fromfile(
        'configs/_base_/datasets/coco_wholebody.py').dataset_info
    # test COCO-wholebody datasets

    # 133 whole-body keypoints, all active.
    channel_cfg = dict(
        num_output_channels=133,
        dataset_joints=133,
        dataset_channel=[list(range(133))],
        inference_channel=list(range(133)))

    # Bottom-up config: two scales, 512 input, 128/256-px heatmap levels.
    data_cfg = dict(
        image_size=512,
        base_size=256,
        base_sigma=2,
        heatmap_size=[128, 256],
        num_joints=channel_cfg['dataset_joints'],
        dataset_channel=channel_cfg['dataset_channel'],
        inference_channel=channel_cfg['inference_channel'],
        num_scales=2,
        scale_aware_sigma=False,
    )

    kwargs = dict(
        ann_file='tests/data/coco/test_coco_wholebody.json',
        img_prefix='tests/data/coco/',
        data_cfg=data_cfg,
        pipeline=[],
        dataset_info=dataset_info)

    # Construction in training mode must succeed.
    _ = dataset_class(test_mode=False, **kwargs)
    # The dataset under test is built in test mode.
    custom_dataset = dataset_class(test_mode=True, **kwargs)

    assert custom_dataset.test_mode is True
    assert custom_dataset.dataset_name == 'coco_wholebody'

    image_id = 785
    assert image_id in custom_dataset.img_ids
    assert len(custom_dataset.img_ids) == 4
    _ = custom_dataset[0]

    # Predictions copied from the ground truth should score perfectly.
    results = convert_coco_to_output(custom_dataset.coco, is_wholebody=True)
    infos = custom_dataset.evaluate(results, metric='mAP')
    assert_almost_equal(infos['AP'], 1.0)

    # Unsupported metrics must be rejected.
    with pytest.raises(KeyError):
        _ = custom_dataset.evaluate(results, metric='PCK')
def test_bottom_up_MHP_dataset():
    """Check construction and mAP evaluation of BottomUpMhpDataset."""
    dataset_class = DATASETS.get('BottomUpMhpDataset')
    dataset_info = Config.fromfile(
        'configs/_base_/datasets/mhp.py').dataset_info
    # test MHP datasets

    # 16 MHP keypoints, all active.
    channel_cfg = dict(
        dataset_joints=16,
        dataset_channel=[list(range(16))],
        inference_channel=list(range(16)))

    # Bottom-up config: single scale, 512 input, one 128-px heatmap level.
    data_cfg = dict(
        image_size=512,
        base_size=256,
        base_sigma=2,
        heatmap_size=[128],
        num_joints=channel_cfg['dataset_joints'],
        dataset_channel=channel_cfg['dataset_channel'],
        inference_channel=channel_cfg['inference_channel'],
        num_scales=1,
        scale_aware_sigma=False,
    )

    kwargs = dict(
        ann_file='tests/data/mhp/test_mhp.json',
        img_prefix='tests/data/mhp/',
        data_cfg=data_cfg,
        pipeline=[],
        dataset_info=dataset_info)

    # Construction in training mode must succeed.
    _ = dataset_class(test_mode=False, **kwargs)
    # The dataset under test is built in test mode.
    custom_dataset = dataset_class(test_mode=True, **kwargs)

    assert custom_dataset.dataset_name == 'mhp'

    image_id = 2889
    assert image_id in custom_dataset.img_ids
    assert len(custom_dataset.img_ids) == 2
    _ = custom_dataset[0]

    # Predictions copied from the ground truth should score perfectly.
    results = convert_coco_to_output(custom_dataset.coco)
    infos = custom_dataset.evaluate(results, metric='mAP')
    assert_almost_equal(infos['AP'], 1.0)

    # Unsupported metrics must be rejected.
    with pytest.raises(KeyError):
        _ = custom_dataset.evaluate(results, metric='PCK')
def test_animal_locust_dataset_compatibility():
    """AnimalLocustDataset legacy path: no `dataset_info` → DeprecationWarning."""
    dataset_class = DATASETS.get('AnimalLocustDataset')

    # All 35 locust keypoints are active.
    channel_cfg = dict(
        num_output_channels=35,
        dataset_joints=35,
        dataset_channel=[list(range(35))],
        inference_channel=list(range(35)))

    data_cfg = dict(
        image_size=[160, 160],
        heatmap_size=[40, 40],
        num_output_channels=channel_cfg['num_output_channels'],
        num_joints=channel_cfg['dataset_joints'],
        dataset_channel=channel_cfg['dataset_channel'],
        inference_channel=channel_cfg['inference_channel'])

    # Test
    data_cfg_copy = copy.deepcopy(data_cfg)
    kwargs = dict(
        ann_file='tests/data/locust/test_locust.json',
        img_prefix='tests/data/locust/',
        data_cfg=data_cfg_copy,
        pipeline=[])

    # Building without dataset_info is deprecated and must warn.
    with pytest.warns(DeprecationWarning):
        _ = dataset_class(test_mode=True, **kwargs)

    with pytest.warns(DeprecationWarning):
        custom_dataset = dataset_class(test_mode=False, **kwargs)

    assert custom_dataset.test_mode is False
    assert custom_dataset.num_images == 2
    _ = custom_dataset[0]

    outputs = convert_db_to_output(custom_dataset.db)
    with tempfile.TemporaryDirectory() as tmpdir:
        infos = custom_dataset.evaluate(outputs, tmpdir, ['PCK'])
        assert_almost_equal(infos['PCK'], 1.0)

        # Unsupported metrics must be rejected.
        with pytest.raises(KeyError):
            infos = custom_dataset.evaluate(outputs, tmpdir, 'mAP')
def test_bottom_up_COCO_wholebody_dataset_compatibility():
    """BottomUpCocoWholeBodyDataset legacy path: no `dataset_info` → warning."""
    dataset_class = DATASETS.get('BottomUpCocoWholeBodyDataset')
    # test COCO-wholebody datasets

    # 133 whole-body keypoints, all active.
    channel_cfg = dict(
        num_output_channels=133,
        dataset_joints=133,
        dataset_channel=[list(range(133))],
        inference_channel=list(range(133)))

    # Bottom-up config: two scales, 512 input, 128/256-px heatmap levels.
    data_cfg = dict(
        image_size=512,
        base_size=256,
        base_sigma=2,
        heatmap_size=[128, 256],
        num_joints=channel_cfg['dataset_joints'],
        dataset_channel=channel_cfg['dataset_channel'],
        inference_channel=channel_cfg['inference_channel'],
        num_scales=2,
        scale_aware_sigma=False,
    )

    kwargs = dict(
        ann_file='tests/data/coco/test_coco_wholebody.json',
        img_prefix='tests/data/coco/',
        data_cfg=data_cfg,
        pipeline=[])

    # Building without dataset_info is deprecated and must warn.
    with pytest.warns(DeprecationWarning):
        _ = dataset_class(test_mode=False, **kwargs)

    with pytest.warns(DeprecationWarning):
        custom_dataset = dataset_class(test_mode=True, **kwargs)

    assert custom_dataset.test_mode is True
    assert custom_dataset.dataset_name == 'coco_wholebody'

    image_id = 785
    assert image_id in custom_dataset.img_ids
    assert len(custom_dataset.img_ids) == 4
    _ = custom_dataset[0]

    outputs = convert_coco_to_output(custom_dataset.coco, is_wholebody=True)
    with tempfile.TemporaryDirectory() as tmpdir:
        infos = custom_dataset.evaluate(outputs, tmpdir, 'mAP')
        assert_almost_equal(infos['AP'], 1.0)

        # Unsupported metrics must be rejected.
        with pytest.raises(KeyError):
            _ = custom_dataset.evaluate(outputs, tmpdir, 'PCK')
def test_top_down_OneHand10K_dataset_compatibility():
    """OneHand10KDataset legacy path: no `dataset_info` → DeprecationWarning."""
    dataset_class = DATASETS.get('OneHand10KDataset')

    # All 21 hand keypoints are active.
    channel_cfg = dict(
        num_output_channels=21,
        dataset_joints=21,
        dataset_channel=[list(range(21))],
        inference_channel=list(range(21)))

    data_cfg = dict(
        image_size=[256, 256],
        heatmap_size=[64, 64],
        num_output_channels=channel_cfg['num_output_channels'],
        num_joints=channel_cfg['dataset_joints'],
        dataset_channel=channel_cfg['dataset_channel'],
        inference_channel=channel_cfg['inference_channel'])
    # Test
    data_cfg_copy = copy.deepcopy(data_cfg)
    kwargs = dict(
        ann_file='tests/data/onehand10k/test_onehand10k.json',
        img_prefix='tests/data/onehand10k/',
        data_cfg=data_cfg_copy,
        pipeline=[])

    # Building without dataset_info is deprecated and must warn.
    with pytest.warns(DeprecationWarning):
        _ = dataset_class(test_mode=True, **kwargs)

    with pytest.warns(DeprecationWarning):
        custom_dataset = dataset_class(test_mode=False, **kwargs)

    assert custom_dataset.test_mode is False
    assert custom_dataset.num_images == 4
    _ = custom_dataset[0]

    outputs = convert_db_to_output(custom_dataset.db)
    with tempfile.TemporaryDirectory() as tmpdir:
        infos = custom_dataset.evaluate(outputs, tmpdir, ['PCK', 'EPE', 'AUC'])
        assert_almost_equal(infos['PCK'], 1.0)
        assert_almost_equal(infos['AUC'], 0.95)
        assert_almost_equal(infos['EPE'], 0.0)

        # Unsupported metrics must be rejected.
        with pytest.raises(KeyError):
            infos = custom_dataset.evaluate(outputs, tmpdir, 'mAP')
def test_bottom_up_MHP_dataset_compatibility():
    """BottomUpMhpDataset legacy path: no `dataset_info` → DeprecationWarning."""
    dataset_class = DATASETS.get('BottomUpMhpDataset')
    # test MHP datasets

    # 16 MHP keypoints, all active.
    channel_cfg = dict(
        dataset_joints=16,
        dataset_channel=[list(range(16))],
        inference_channel=list(range(16)))

    # Bottom-up config: single scale, 512 input, one 128-px heatmap level.
    data_cfg = dict(
        image_size=512,
        base_size=256,
        base_sigma=2,
        heatmap_size=[128],
        num_joints=channel_cfg['dataset_joints'],
        dataset_channel=channel_cfg['dataset_channel'],
        inference_channel=channel_cfg['inference_channel'],
        num_scales=1,
        scale_aware_sigma=False,
    )

    kwargs = dict(
        ann_file='tests/data/mhp/test_mhp.json',
        img_prefix='tests/data/mhp/',
        data_cfg=data_cfg,
        pipeline=[])

    # Building without dataset_info is deprecated and must warn.
    with pytest.warns(DeprecationWarning):
        _ = dataset_class(test_mode=False, **kwargs)

    with pytest.warns(DeprecationWarning):
        custom_dataset = dataset_class(test_mode=True, **kwargs)

    image_id = 2889
    assert image_id in custom_dataset.img_ids
    assert len(custom_dataset.img_ids) == 2
    _ = custom_dataset[0]
    assert custom_dataset.dataset_name == 'mhp'

    outputs = convert_coco_to_output(custom_dataset.coco)
    with tempfile.TemporaryDirectory() as tmpdir:
        infos = custom_dataset.evaluate(outputs, tmpdir, 'mAP')
        assert_almost_equal(infos['AP'], 1.0)

        # Unsupported metrics must be rejected.
        with pytest.raises(KeyError):
            _ = custom_dataset.evaluate(outputs, tmpdir, 'PCK')
def test_animal_horse10_dataset():
    """Check construction and PCK evaluation of AnimalHorse10Dataset."""
    dataset_class = DATASETS.get('AnimalHorse10Dataset')
    dataset_info = Config.fromfile(
        'configs/_base_/datasets/horse10.py').dataset_info

    # NOTE(review): 22 output channels are declared but keypoint index 20
    # is absent from the channel lists (only 21 ids) — confirm this is
    # intentional for the Horse-10 test data.
    channel_cfg = dict(
        num_output_channels=22,
        dataset_joints=22,
        dataset_channel=[list(range(20)) + [21]],
        inference_channel=list(range(20)) + [21])

    data_cfg = dict(
        image_size=[256, 256],
        heatmap_size=[64, 64],
        num_output_channels=channel_cfg['num_output_channels'],
        num_joints=channel_cfg['dataset_joints'],
        dataset_channel=channel_cfg['dataset_channel'],
        inference_channel=channel_cfg['inference_channel'])
    # Test
    data_cfg_copy = copy.deepcopy(data_cfg)
    kwargs = dict(
        ann_file='tests/data/horse10/test_horse10.json',
        img_prefix='tests/data/horse10/',
        data_cfg=data_cfg_copy,
        dataset_info=dataset_info,
        pipeline=[])

    # Construction in test mode must succeed.
    _ = dataset_class(test_mode=True, **kwargs)
    # The dataset under test is built in training mode.
    custom_dataset = dataset_class(test_mode=False, **kwargs)

    assert custom_dataset.dataset_name == 'horse10'
    assert custom_dataset.test_mode is False
    assert custom_dataset.num_images == 3
    _ = custom_dataset[0]

    # Predictions copied from the ground truth should score perfectly.
    results = convert_db_to_output(custom_dataset.db)
    infos = custom_dataset.evaluate(results, metric=['PCK'])
    assert_almost_equal(infos['PCK'], 1.0)

    # Unsupported metrics must be rejected.
    with pytest.raises(KeyError):
        infos = custom_dataset.evaluate(results, metric='mAP')
# Example #15
def test_face_WFLW_dataset_compatibility():
    """FaceWFLWDataset legacy path: no `dataset_info` → DeprecationWarning."""
    # test Face WFLW datasets
    dataset_class = DATASETS.get('FaceWFLWDataset')
    # NOTE(review): these patch the *class*, not an instance, so the mocks
    # leak into every other test that uses FaceWFLWDataset.
    dataset_class.load_annotations = MagicMock()
    dataset_class.coco = MagicMock()

    # All 98 WFLW face landmarks are active.
    channel_cfg = dict(
        num_output_channels=98,
        dataset_joints=98,
        dataset_channel=[list(range(98))],
        inference_channel=list(range(98)))

    data_cfg = dict(
        image_size=[256, 256],
        heatmap_size=[64, 64],
        num_output_channels=channel_cfg['num_output_channels'],
        num_joints=channel_cfg['dataset_joints'],
        dataset_channel=channel_cfg['dataset_channel'],
        inference_channel=channel_cfg['inference_channel'])
    # Test
    data_cfg_copy = copy.deepcopy(data_cfg)
    kwargs = dict(
        ann_file='tests/data/wflw/test_wflw.json',
        img_prefix='tests/data/wflw/',
        data_cfg=data_cfg_copy,
        pipeline=[])

    # Building without dataset_info is deprecated and must warn.
    with pytest.warns(DeprecationWarning):
        _ = dataset_class(test_mode=True, **kwargs)

    with pytest.warns(DeprecationWarning):
        custom_dataset = dataset_class(test_mode=False, **kwargs)

    assert custom_dataset.test_mode is False
    assert custom_dataset.num_images == 2
    _ = custom_dataset[0]

    outputs = convert_db_to_output(custom_dataset.db)

    with tempfile.TemporaryDirectory() as tmpdir:
        infos = custom_dataset.evaluate(outputs, tmpdir, ['NME'])
        assert_almost_equal(infos['NME'], 0.0)

        # Unsupported metrics must be rejected.
        with pytest.raises(KeyError):
            _ = custom_dataset.evaluate(outputs, tmpdir, 'mAP')
def test_animal_horse10_dataset():
    """Test AnimalHorse10Dataset loading and PCK evaluation.

    Fix: the channel lists declared 22 joints (``num_output_channels`` and
    ``dataset_joints``) but enumerated only 21 indices, silently skipping
    index 20. The lists now cover all 22 channels so the config is
    internally consistent.
    """
    dataset = 'AnimalHorse10Dataset'
    dataset_class = DATASETS.get(dataset)

    channel_cfg = dict(
        num_output_channels=22,
        dataset_joints=22,
        # All 22 joints are used both in the dataset and at inference.
        dataset_channel=[list(range(22))],
        inference_channel=list(range(22)))

    data_cfg = dict(
        image_size=[256, 256],
        heatmap_size=[64, 64],
        num_output_channels=channel_cfg['num_output_channels'],
        num_joints=channel_cfg['dataset_joints'],
        dataset_channel=channel_cfg['dataset_channel'],
        inference_channel=channel_cfg['inference_channel'])
    # Test-mode construction should succeed.
    data_cfg_copy = copy.deepcopy(data_cfg)
    _ = dataset_class(
        ann_file='tests/data/horse10/test_horse10.json',
        img_prefix='tests/data/horse10/',
        data_cfg=data_cfg_copy,
        pipeline=[],
        test_mode=True)

    custom_dataset = dataset_class(
        ann_file='tests/data/horse10/test_horse10.json',
        img_prefix='tests/data/horse10/',
        data_cfg=data_cfg_copy,
        pipeline=[],
        test_mode=False)

    assert custom_dataset.test_mode is False
    assert custom_dataset.num_images == 3
    _ = custom_dataset[0]

    # Predictions copied from ground truth achieve a perfect PCK.
    outputs = convert_db_to_output(custom_dataset.db)
    with tempfile.TemporaryDirectory() as tmpdir:
        infos = custom_dataset.evaluate(outputs, tmpdir, ['PCK'])
        assert_almost_equal(infos['PCK'], 1.0)

        # 'mAP' is not a supported metric for this dataset.
        with pytest.raises(KeyError):
            infos = custom_dataset.evaluate(outputs, tmpdir, 'mAP')
Beispiel #17
0
def test_body3d_semi_supervision_dataset_compatibility():
    """Smoke-test Body3DSemiSupervisionDataset via the deprecated
    (no ``dataset_info``) interface.

    Fix: the labeled data_cfg used a misspelled key ``causall``; it is now
    ``causal``, matching the unlabeled config below.
    """

    # Config for the labeled (supervised) split.
    labeled_data_cfg = dict(num_joints=17,
                            seq_len=27,
                            seq_frame_interval=1,
                            causal=False,
                            temporal_padding=True,
                            joint_2d_src='gt',
                            subset=1,
                            subjects=['S1'],
                            need_camera_param=True,
                            camera_param_file='tests/data/h36m/cameras.pkl')
    labeled_dataset = dict(type='Body3DH36MDataset',
                           ann_file='tests/data/h36m/test_h36m_body3d.npz',
                           img_prefix='tests/data/h36m',
                           data_cfg=labeled_data_cfg,
                           pipeline=[])

    # Config for the unlabeled split; it additionally exposes 2D inputs
    # under the 'unlabeled_input' key via the Collect pipeline step.
    unlabeled_data_cfg = dict(num_joints=17,
                              seq_len=27,
                              seq_frame_interval=1,
                              causal=False,
                              temporal_padding=True,
                              joint_2d_src='gt',
                              subjects=['S5', 'S7', 'S8'],
                              need_camera_param=True,
                              camera_param_file='tests/data/h36m/cameras.pkl',
                              need_2d_label=True)
    unlabeled_dataset = dict(type='Body3DH36MDataset',
                             ann_file='tests/data/h36m/test_h36m_body3d.npz',
                             img_prefix='tests/data/h36m',
                             data_cfg=unlabeled_data_cfg,
                             pipeline=[
                                 dict(type='Collect',
                                      keys=[('input_2d', 'unlabeled_input')],
                                      meta_name='metas',
                                      meta_keys=[])
                             ])

    # Combine labeled and unlabeled splits into a semi-supervised dataset;
    # building it without `dataset_info` must emit a deprecation warning.
    dataset = 'Body3DSemiSupervisionDataset'
    dataset_class = DATASETS.get(dataset)
    with pytest.warns(DeprecationWarning):
        custom_dataset = dataset_class(labeled_dataset, unlabeled_dataset)
    item = custom_dataset[0]
    assert 'unlabeled_input' in item.keys()

    # The combined dataset has the same length as the unlabeled one.
    unlabeled_dataset = build_dataset(unlabeled_dataset)
    assert len(unlabeled_dataset) == len(custom_dataset)
Beispiel #18
0
def test_bottom_up_COCO_dataset():
    """Test BottomUpCocoDataset loading and mAP evaluation.

    Fix: COCO has 17 keypoints (indices 0-16), but ``dataset_channel`` and
    ``inference_channel`` contained 18 entries including the out-of-range
    index 17; they now match ``dataset_joints``.
    """
    dataset = 'BottomUpCocoDataset'
    dataset_info = Config.fromfile(
        'configs/_base_/datasets/coco.py').dataset_info
    # test COCO datasets
    dataset_class = DATASETS.get(dataset)

    channel_cfg = dict(
        dataset_joints=17,
        dataset_channel=[list(range(17))],
        inference_channel=list(range(17)))

    data_cfg = dict(image_size=512,
                    base_size=256,
                    base_sigma=2,
                    heatmap_size=[128, 256],
                    num_joints=channel_cfg['dataset_joints'],
                    dataset_channel=channel_cfg['dataset_channel'],
                    inference_channel=channel_cfg['inference_channel'],
                    num_scales=2,
                    scale_aware_sigma=False,
                    use_nms=True)

    # Train-mode construction should also succeed.
    _ = dataset_class(ann_file='tests/data/coco/test_coco.json',
                      img_prefix='tests/data/coco/',
                      data_cfg=data_cfg,
                      pipeline=[],
                      dataset_info=dataset_info,
                      test_mode=False)

    custom_dataset = dataset_class(ann_file='tests/data/coco/test_coco.json',
                                   img_prefix='tests/data/coco/',
                                   data_cfg=data_cfg,
                                   pipeline=[],
                                   dataset_info=dataset_info,
                                   test_mode=True)

    assert custom_dataset.dataset_name == 'coco'
    assert custom_dataset.num_images == 4
    _ = custom_dataset[0]

    # Ground-truth-derived predictions must achieve a perfect AP.
    outputs = convert_coco_to_output(custom_dataset.coco)
    with tempfile.TemporaryDirectory() as tmpdir:
        infos = custom_dataset.evaluate(outputs, tmpdir, 'mAP')
        assert_almost_equal(infos['AP'], 1.0)

        # Only 'mAP' is supported for bottom-up COCO.
        with pytest.raises(KeyError):
            _ = custom_dataset.evaluate(outputs, tmpdir, 'PCK')
Beispiel #19
0
def test_mesh_Adversarial_dataset():
    """Test MeshAdversarialDataset built from a mixed train set and a
    Mosh adversarial set."""
    # Build the training set: two copies of the H36M mesh data mixed 60/40.
    mesh_cfg = dict(
        image_size=[256, 256],
        iuv_size=[64, 64],
        num_joints=24,
        use_IUV=True,
        uv_type='BF')
    h36m_source = dict(
        ann_file='tests/data/h36m/test_h36m.npz',
        img_prefix='tests/data/h36m',
        data_cfg=mesh_cfg,
        pipeline=[])
    mix_class = DATASETS.get('MeshMixDataset')
    train_dataset = mix_class(
        configs=[h36m_source, dict(h36m_source)],
        partition=[0.6, 0.4])

    # Build the adversarial dataset from Mosh parameters.
    mosh_class = DATASETS.get('MoshDataset')
    adversarial_dataset = mosh_class(
        ann_file='tests/data/mosh/test_mosh.npz', pipeline=[])

    # Pair them: each item should carry adversarial mosh parameters.
    adv_class = DATASETS.get('MeshAdversarialDataset')
    custom_dataset = adv_class(train_dataset, adversarial_dataset)
    item = custom_dataset[0]
    assert 'mosh_theta' in item.keys()
Beispiel #20
0
def test_bottom_up_CrowdPose_dataset():
    """Test BottomUpCrowdPoseDataset loading and mAP evaluation."""
    dataset_class = DATASETS.get('BottomUpCrowdPoseDataset')

    num_joints = 14
    channel_cfg = dict(
        num_output_channels=num_joints,
        dataset_joints=num_joints,
        dataset_channel=[list(range(num_joints))],
        inference_channel=list(range(num_joints)))

    data_cfg = dict(
        image_size=512,
        base_size=256,
        base_sigma=2,
        heatmap_size=[128, 256],
        num_joints=channel_cfg['dataset_joints'],
        dataset_channel=channel_cfg['dataset_channel'],
        inference_channel=channel_cfg['inference_channel'],
        num_scales=2,
        scale_aware_sigma=False)

    ann_file = 'tests/data/crowdpose/test_crowdpose.json'
    img_prefix = 'tests/data/crowdpose/'

    # Train-mode construction should also succeed.
    _ = dataset_class(
        ann_file=ann_file,
        img_prefix=img_prefix,
        data_cfg=data_cfg,
        pipeline=[],
        test_mode=False)

    custom_dataset = dataset_class(
        ann_file=ann_file,
        img_prefix=img_prefix,
        data_cfg=data_cfg,
        pipeline=[],
        test_mode=True)

    assert 103319 in custom_dataset.img_ids
    assert len(custom_dataset.img_ids) == 2
    _ = custom_dataset[0]
    assert custom_dataset.dataset_name == 'crowdpose'

    outputs = convert_coco_to_output(custom_dataset.coco)
    with tempfile.TemporaryDirectory() as tmpdir:
        # Ground-truth-derived predictions must score a perfect AP.
        infos = custom_dataset.evaluate(outputs, tmpdir, 'mAP')
        assert_almost_equal(infos['AP'], 1.0)

        # Only 'mAP' is supported for bottom-up CrowdPose.
        with pytest.raises(KeyError):
            _ = custom_dataset.evaluate(outputs, tmpdir, 'PCK')
Beispiel #21
0
def test_face_COFW_dataset():
    """Test FaceCOFWDataset loading and NME evaluation."""
    dataset_info = Config.fromfile(
        'configs/_base_/datasets/cofw.py').dataset_info
    dataset_class = DATASETS.get('FaceCOFWDataset')
    dataset_class.load_annotations = MagicMock()
    dataset_class.coco = MagicMock()

    num_joints = 29
    channel_cfg = dict(
        num_output_channels=num_joints,
        dataset_joints=num_joints,
        dataset_channel=[list(range(num_joints))],
        inference_channel=list(range(num_joints)))

    data_cfg = dict(
        image_size=[256, 256],
        heatmap_size=[64, 64],
        num_output_channels=channel_cfg['num_output_channels'],
        num_joints=channel_cfg['dataset_joints'],
        dataset_channel=channel_cfg['dataset_channel'],
        inference_channel=channel_cfg['inference_channel'])

    cfg = copy.deepcopy(data_cfg)
    ann_file = 'tests/data/cofw/test_cofw.json'
    img_prefix = 'tests/data/cofw/'

    # Test-mode construction should also succeed.
    _ = dataset_class(
        ann_file=ann_file,
        img_prefix=img_prefix,
        data_cfg=cfg,
        pipeline=[],
        dataset_info=dataset_info,
        test_mode=True)

    custom_dataset = dataset_class(
        ann_file=ann_file,
        img_prefix=img_prefix,
        data_cfg=cfg,
        pipeline=[],
        dataset_info=dataset_info,
        test_mode=False)

    assert custom_dataset.dataset_name == 'cofw'
    assert custom_dataset.test_mode is False
    assert custom_dataset.num_images == 2
    _ = custom_dataset[0]

    outputs = convert_db_to_output(custom_dataset.db)

    with tempfile.TemporaryDirectory() as tmpdir:
        # Predictions copied from ground truth give zero normalized error.
        infos = custom_dataset.evaluate(outputs, tmpdir, ['NME'])
        assert_almost_equal(infos['NME'], 0.0)

        # 'mAP' is not a supported metric for this dataset.
        with pytest.raises(KeyError):
            _ = custom_dataset.evaluate(outputs, tmpdir, 'mAP')
def test_NVGesture_dataset():
    """Test NVGestureDataset loading and per-modality AP evaluation."""
    dataset_info = Config.fromfile(
        'configs/_base_/datasets/nvgesture.py').dataset_info
    dataset_class = DATASETS.get('NVGestureDataset')

    data_cfg = dict(
        video_size=[320, 240],
        modality=['rgb', 'depth'],
        bbox_file='tests/data/nvgesture/bboxes.json',
    )

    cfg = copy.deepcopy(data_cfg)
    ann_file = 'tests/data/nvgesture/test_nvgesture.lst'
    vid_prefix = 'tests/data/nvgesture/'

    # Test-mode construction should also succeed.
    _ = dataset_class(
        ann_file=ann_file,
        vid_prefix=vid_prefix,
        data_cfg=cfg,
        pipeline=[],
        dataset_info=dataset_info,
        test_mode=True)

    custom_dataset = dataset_class(
        ann_file=ann_file,
        vid_prefix=vid_prefix,
        data_cfg=cfg,
        pipeline=[],
        dataset_info=dataset_info,
        test_mode=False)

    assert custom_dataset.dataset_name == 'nvgesture'
    assert custom_dataset.test_mode is False
    assert len(custom_dataset) == 1
    sample = custom_dataset[0]

    # Fake logits: rgb predicts the true class, depth a wrong one, so the
    # per-modality APs are 1.0 and 0.0 respectively (mean 0.5).
    sample['logits'] = {
        modal: torch.zeros(1, 25, 1)
        for modal in sample['modality']
    }
    sample['logits']['rgb'][:, sample['label']] = 1
    sample['logits']['depth'][:, (sample['label'] + 1) % 25] = 1
    sample['label'] = torch.tensor([sample['label']]).long()
    infos = custom_dataset.evaluate([sample], metric=['AP'])
    assert_almost_equal(infos['AP_rgb'], 1.0)
    assert_almost_equal(infos['AP_depth'], 0.0)
    assert_almost_equal(infos['AP_mean'], 0.5)

    # 'mAP' is not a supported metric for this dataset.
    with pytest.raises(KeyError):
        infos = custom_dataset.evaluate([sample], metric='mAP')
def test_top_down_InterHand2D_dataset():
    """Test InterHand2DDataset construction and annotation loading."""
    dataset_class = DATASETS.get('InterHand2DDataset')

    num_joints = 21
    channel_cfg = dict(
        num_output_channels=num_joints,
        dataset_joints=num_joints,
        dataset_channel=[list(range(num_joints))],
        inference_channel=list(range(num_joints)))

    data_cfg = dict(
        image_size=[256, 256],
        heatmap_size=[64, 64],
        num_output_channels=channel_cfg['num_output_channels'],
        num_joints=channel_cfg['dataset_joints'],
        dataset_channel=channel_cfg['dataset_channel'],
        inference_channel=channel_cfg['inference_channel'])

    cfg = copy.deepcopy(data_cfg)
    # InterHand needs annotation, camera and 3D-joint files.
    files = dict(
        ann_file='tests/data/interhand2d/test_interhand2d_data.json',
        camera_file='tests/data/interhand2d/test_interhand2d_camera.json',
        joint_file='tests/data/interhand2d/test_interhand2d_joint_3d.json',
        img_prefix='tests/data/interhand2d/')

    # Test-mode construction should also succeed.
    _ = dataset_class(data_cfg=cfg, pipeline=[], test_mode=True, **files)

    custom_dataset = dataset_class(
        data_cfg=cfg, pipeline=[], test_mode=False, **files)

    assert custom_dataset.test_mode is False
    assert custom_dataset.num_images == 4
    # The 4 images yield 6 single-hand samples.
    assert len(custom_dataset.db) == 6

    _ = custom_dataset[0]
def test_top_down_h36m_dataset():
    """Test TopDownH36MDataset loading and EPE evaluation."""
    dataset_info = Config.fromfile(
        'configs/_base_/datasets/h36m.py').dataset_info
    dataset_class = DATASETS.get('TopDownH36MDataset')
    dataset_class.load_annotations = MagicMock()
    dataset_class.coco = MagicMock()

    num_joints = 17
    channel_cfg = dict(
        num_output_channels=num_joints,
        dataset_joints=num_joints,
        dataset_channel=[list(range(num_joints))],
        inference_channel=list(range(num_joints)))

    data_cfg = dict(
        image_size=[256, 256],
        heatmap_size=[64, 64],
        num_output_channels=channel_cfg['num_output_channels'],
        num_joints=channel_cfg['dataset_joints'],
        dataset_channel=channel_cfg['dataset_channel'],
        inference_channel=channel_cfg['inference_channel'])

    # Use ground-truth bounding boxes.
    custom_dataset = dataset_class(
        ann_file='tests/data/h36m/h36m_coco.json',
        img_prefix='tests/data/h36m/',
        data_cfg=data_cfg,
        pipeline=[],
        dataset_info=dataset_info,
        test_mode=True)

    assert custom_dataset.test_mode is True
    assert custom_dataset.dataset_name == 'h36m'
    assert 1 in custom_dataset.img_ids
    _ = custom_dataset[0]

    # Ground-truth-derived predictions give zero end-point error.
    results = convert_db_to_output(custom_dataset.db)
    infos = custom_dataset.evaluate(results, metric='EPE')
    assert_almost_equal(infos['EPE'], 0.0)

    # 'AUC' is not a supported metric for this dataset.
    with pytest.raises(KeyError):
        _ = custom_dataset.evaluate(results, metric='AUC')
Beispiel #25
0
def test_face_coco_wholebody_dataset():
    """Test FaceCocoWholeBodyDataset loading and NME evaluation."""
    dataset_info = Config.fromfile(
        'configs/_base_/datasets/coco_wholebody_face.py').dataset_info
    dataset_class = DATASETS.get('FaceCocoWholeBodyDataset')
    dataset_class.load_annotations = MagicMock()
    dataset_class.coco = MagicMock()

    num_joints = 68
    channel_cfg = dict(
        num_output_channels=num_joints,
        dataset_joints=num_joints,
        dataset_channel=[list(range(num_joints))],
        inference_channel=list(range(num_joints)))

    data_cfg = dict(
        image_size=[256, 256],
        heatmap_size=[64, 64],
        num_output_channels=channel_cfg['num_output_channels'],
        num_joints=channel_cfg['dataset_joints'],
        dataset_channel=channel_cfg['dataset_channel'],
        inference_channel=channel_cfg['inference_channel'])

    cfg = copy.deepcopy(data_cfg)
    ann_file = 'tests/data/coco/test_coco_wholebody.json'
    img_prefix = 'tests/data/coco/'

    # Test-mode construction should also succeed.
    _ = dataset_class(
        ann_file=ann_file,
        img_prefix=img_prefix,
        data_cfg=cfg,
        pipeline=[],
        dataset_info=dataset_info,
        test_mode=True)

    custom_dataset = dataset_class(
        ann_file=ann_file,
        img_prefix=img_prefix,
        data_cfg=cfg,
        pipeline=[],
        dataset_info=dataset_info,
        test_mode=False)

    assert custom_dataset.test_mode is False
    assert custom_dataset.num_images == 4
    _ = custom_dataset[0]

    # Predictions copied from ground truth give zero normalized error.
    results = convert_db_to_output(custom_dataset.db)
    infos = custom_dataset.evaluate(results, metric=['NME'])
    assert_almost_equal(infos['NME'], 0.0)

    # 'mAP' is not a supported metric for this dataset.
    with pytest.raises(KeyError):
        _ = custom_dataset.evaluate(results, metric='mAP')
Beispiel #26
0
def test_animal_zebra_dataset():
    """Test AnimalZebraDataset loading and PCK evaluation."""
    dataset_class = DATASETS.get('AnimalZebraDataset')
    dataset_info = Config.fromfile(
        'configs/_base_/datasets/zebra.py').dataset_info

    num_joints = 9
    channel_cfg = dict(
        num_output_channels=num_joints,
        dataset_joints=num_joints,
        dataset_channel=[list(range(num_joints))],
        inference_channel=list(range(num_joints)))

    data_cfg = dict(
        image_size=[160, 160],
        heatmap_size=[40, 40],
        num_output_channels=channel_cfg['num_output_channels'],
        num_joints=channel_cfg['dataset_joints'],
        dataset_channel=channel_cfg['dataset_channel'],
        inference_channel=channel_cfg['inference_channel'])

    cfg = copy.deepcopy(data_cfg)
    ann_file = 'tests/data/zebra/test_zebra.json'
    img_prefix = 'tests/data/zebra/'

    # Test-mode construction should also succeed.
    _ = dataset_class(
        ann_file=ann_file,
        img_prefix=img_prefix,
        data_cfg=cfg,
        dataset_info=dataset_info,
        pipeline=[],
        test_mode=True)

    custom_dataset = dataset_class(
        ann_file=ann_file,
        img_prefix=img_prefix,
        data_cfg=cfg,
        dataset_info=dataset_info,
        pipeline=[],
        test_mode=False)

    assert custom_dataset.dataset_name == 'zebra'
    assert custom_dataset.test_mode is False
    assert custom_dataset.num_images == 2
    _ = custom_dataset[0]

    # Predictions copied from ground truth achieve a perfect PCK.
    outputs = convert_db_to_output(custom_dataset.db)
    with tempfile.TemporaryDirectory() as tmpdir:
        infos = custom_dataset.evaluate(outputs, tmpdir, ['PCK'])
        assert_almost_equal(infos['PCK'], 1.0)

        # 'mAP' is not a supported metric for this dataset.
        with pytest.raises(KeyError):
            infos = custom_dataset.evaluate(outputs, tmpdir, 'mAP')
Beispiel #27
0
def test_top_down_h36m_dataset_compatibility():
    """Smoke-test the deprecated (no ``dataset_info``) TopDownH36MDataset
    API."""
    dataset_class = DATASETS.get('TopDownH36MDataset')
    dataset_class.load_annotations = MagicMock()
    dataset_class.coco = MagicMock()

    num_joints = 17
    channel_cfg = dict(
        num_output_channels=num_joints,
        dataset_joints=num_joints,
        dataset_channel=[list(range(num_joints))],
        inference_channel=list(range(num_joints)))

    data_cfg = dict(
        image_size=[256, 256],
        heatmap_size=[64, 64],
        num_output_channels=channel_cfg['num_output_channels'],
        num_joints=channel_cfg['dataset_joints'],
        dataset_channel=channel_cfg['dataset_channel'],
        inference_channel=channel_cfg['inference_channel'])

    # Use ground-truth bounding boxes; constructing without `dataset_info`
    # must emit a deprecation warning.
    with pytest.warns(DeprecationWarning):
        custom_dataset = dataset_class(
            ann_file='tests/data/h36m/h36m_coco.json',
            img_prefix='tests/data/h36m/',
            data_cfg=data_cfg,
            pipeline=[],
            test_mode=True)

    assert custom_dataset.test_mode is True
    assert custom_dataset.dataset_name == 'h36m'
    assert 1 in custom_dataset.img_ids
    _ = custom_dataset[0]

    # Ground-truth-derived predictions give zero end-point error.
    outputs = convert_db_to_output(custom_dataset.db)
    with tempfile.TemporaryDirectory() as tmpdir:
        infos = custom_dataset.evaluate(outputs, tmpdir, 'EPE')
        assert_almost_equal(infos['EPE'], 0.0)

        # 'AUC' is not a supported metric for this dataset.
        with pytest.raises(KeyError):
            _ = custom_dataset.evaluate(outputs, tmpdir, 'AUC')
Beispiel #28
0
def test_top_down_OneHand10K_dataset():
    """Test TopDownOneHand10KDataset construction and sample access."""
    dataset_class = DATASETS.get('TopDownOneHand10KDataset')

    num_joints = 21
    channel_cfg = dict(
        num_output_channels=num_joints,
        dataset_joints=num_joints,
        dataset_channel=[list(range(num_joints))],
        inference_channel=list(range(num_joints)))

    data_cfg = dict(
        image_size=[256, 256],
        heatmap_size=[64, 64],
        num_output_channels=channel_cfg['num_output_channels'],
        num_joints=channel_cfg['dataset_joints'],
        dataset_channel=channel_cfg['dataset_channel'],
        inference_channel=channel_cfg['inference_channel'])

    cfg = copy.deepcopy(data_cfg)
    ann_file = 'tests/data/OneHand10K/test_onehand10k.json'
    img_prefix = 'tests/data/OneHand10K/'

    # Test-mode construction should also succeed.
    _ = dataset_class(
        ann_file=ann_file,
        img_prefix=img_prefix,
        data_cfg=cfg,
        pipeline=[],
        test_mode=True)

    custom_dataset = dataset_class(
        ann_file=ann_file,
        img_prefix=img_prefix,
        data_cfg=cfg,
        pipeline=[],
        test_mode=False)

    assert custom_dataset.test_mode is False
    assert custom_dataset.num_images == 4
    _ = custom_dataset[0]
def test_body3d_h36m_dataset():
    """Test Body3DH36MDataset loading and MPJPE-family evaluation."""
    dataset_class = DATASETS.get('Body3DH36MDataset')

    data_cfg = dict(
        num_joints=17,
        seq_len=1,
        seq_frame_interval=1,
        joint_2d_src='pipeline',
        joint_2d_det_file=None,
        causal=False,
        need_camera_param=True,
        camera_param_file='tests/data/h36m/cameras.pkl')

    ann_file = 'tests/data/h36m/test_h36m_body3d.npz'
    img_prefix = 'tests/data/h36m'

    # Train-mode construction should also succeed.
    _ = dataset_class(
        ann_file=ann_file,
        img_prefix=img_prefix,
        data_cfg=data_cfg,
        pipeline=[],
        test_mode=False)

    custom_dataset = dataset_class(
        ann_file=ann_file,
        img_prefix=img_prefix,
        data_cfg=data_cfg,
        pipeline=[],
        test_mode=True)

    assert custom_dataset.test_mode is True
    _ = custom_dataset[0]

    with tempfile.TemporaryDirectory() as tmpdir:
        # Use the ground-truth targets as predictions, so every error
        # metric evaluates to zero.
        outputs = [{
            'preds': sample['target'][None, ...],
            'target_image_paths': [sample['target_image_path']],
        } for sample in custom_dataset]

        infos = custom_dataset.evaluate(outputs, tmpdir,
                                        ['mpjpe', 'p-mpjpe', 'n-mpjpe'])

        np.testing.assert_almost_equal(infos['MPJPE'], 0.0)
        np.testing.assert_almost_equal(infos['P-MPJPE'], 0.0)
        np.testing.assert_almost_equal(infos['N-MPJPE'], 0.0)
Beispiel #30
0
def test_top_down_MPII_dataset():
    """Test TopDownMpiiDataset construction with detected bboxes.

    NOTE(review): this function may continue beyond the visible chunk;
    code is left byte-identical and only comments were added.
    """
    dataset = 'TopDownMpiiDataset'
    dataset_info = Config.fromfile(
        'configs/_base_/datasets/mpii.py').dataset_info
    # test MPII dataset (annotation loading is mocked out)
    dataset_class = DATASETS.get(dataset)
    dataset_class.load_annotations = MagicMock()
    dataset_class.coco = MagicMock()

    # MPII has 16 keypoints; all channels are used for both the dataset
    # and inference.
    channel_cfg = dict(
        num_output_channels=16,
        dataset_joints=16,
        dataset_channel=[
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15],
        ],
        inference_channel=[
            0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
        ])

    data_cfg = dict(
        image_size=[256, 256],
        heatmap_size=[64, 64],
        num_output_channels=channel_cfg['num_output_channels'],
        num_joints=channel_cfg['dataset_joints'],
        dataset_channel=channel_cfg['dataset_channel'],
        inference_channel=channel_cfg['inference_channel'],
    )

    # Test det bbox
    data_cfg_copy = copy.deepcopy(data_cfg)
    custom_dataset = dataset_class(
        ann_file='tests/data/mpii/test_mpii.json',
        img_prefix='tests/data/mpii/',
        data_cfg=data_cfg_copy,
        pipeline=[],
        dataset_info=dataset_info,
    )

    assert len(custom_dataset) == 5
    assert custom_dataset.dataset_name == 'mpii'
    _ = custom_dataset[0]