コード例 #1
0
def test_animal_ATRW_dataset():
    """Smoke-test AnimalATRWDataset: construction, indexing and mAP evaluation."""
    dataset_class = DATASETS.get('AnimalATRWDataset')
    dataset_info = Config.fromfile(
        'configs/_base_/datasets/atrw.py').dataset_info

    # ATRW annotates 15 tiger keypoints; all channels are used for inference.
    keypoints = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14]
    channel_cfg = dict(
        num_output_channels=15,
        dataset_joints=15,
        dataset_channel=[keypoints],
        inference_channel=keypoints)

    data_cfg = dict(
        image_size=[256, 256],
        heatmap_size=[64, 64],
        num_output_channels=channel_cfg['num_output_channels'],
        num_joints=channel_cfg['dataset_joints'],
        dataset_channel=channel_cfg['dataset_channel'],
        inference_channel=channel_cfg['inference_channel'],
        soft_nms=False,
        nms_thr=1.0,
        oks_thr=0.9,
        vis_thr=0.2,
        use_gt_bbox=True,
        det_bbox_thr=0.0,
        bbox_file='')

    ann_file = 'tests/data/atrw/test_atrw.json'
    img_prefix = 'tests/data/atrw/'
    cfg = copy.deepcopy(data_cfg)

    # Construction must succeed in test mode...
    _ = dataset_class(
        ann_file=ann_file,
        img_prefix=img_prefix,
        data_cfg=cfg,
        dataset_info=dataset_info,
        pipeline=[],
        test_mode=True)

    # ...and in train mode.
    custom_dataset = dataset_class(
        ann_file=ann_file,
        img_prefix=img_prefix,
        data_cfg=cfg,
        dataset_info=dataset_info,
        pipeline=[],
        test_mode=False)

    assert custom_dataset.dataset_name == 'atrw'
    assert custom_dataset.test_mode is False
    assert custom_dataset.num_images == 2
    _ = custom_dataset[0]

    outputs = convert_db_to_output(custom_dataset.db)
    with tempfile.TemporaryDirectory() as tmpdir:
        # Ground-truth predictions must score a perfect AP.
        infos = custom_dataset.evaluate(outputs, tmpdir, 'mAP')
        assert_almost_equal(infos['AP'], 1.0)

        # Unsupported metrics must raise KeyError.
        with pytest.raises(KeyError):
            infos = custom_dataset.evaluate(outputs, tmpdir, ['PCK'])
コード例 #2
0
ファイル: test_fashion_dataset.py プロジェクト: wusize/mmpose
def test_deepfashion_dataset():
    """Test DeepFashionDataset: gt-bbox loading and PCK/EPE/AUC evaluation.

    Fixes two issues in the original test:
    * the KeyError check called ``evaluate(outputs, tmpdir, 'mAP')`` *after*
      the ``tempfile.TemporaryDirectory`` context had exited, passing a path
      to a directory that no longer existed — all uses of ``tmpdir`` now stay
      inside the context;
    * a copy-pasted comment wrongly referred to JHMDB datasets.
    """
    dataset = 'DeepFashionDataset'
    dataset_info = Config.fromfile(
        'configs/_base_/datasets/deepfashion_full.py').dataset_info
    # test DeepFashion datasets
    dataset_class = DATASETS.get(dataset)
    dataset_class.load_annotations = MagicMock()
    dataset_class.coco = MagicMock()

    # DeepFashion 'full' subset uses 8 landmark channels.
    channel_cfg = dict(num_output_channels=8,
                       dataset_joints=8,
                       dataset_channel=[
                           [0, 1, 2, 3, 4, 5, 6, 7],
                       ],
                       inference_channel=[0, 1, 2, 3, 4, 5, 6, 7])

    data_cfg = dict(image_size=[192, 256],
                    heatmap_size=[48, 64],
                    num_output_channels=channel_cfg['num_output_channels'],
                    num_joints=channel_cfg['dataset_joints'],
                    dataset_channel=channel_cfg['dataset_channel'],
                    inference_channel=channel_cfg['inference_channel'],
                    soft_nms=False,
                    nms_thr=1.0,
                    oks_thr=0.9,
                    vis_thr=0.2,
                    use_gt_bbox=True,
                    det_bbox_thr=0.0,
                    image_thr=0.0,
                    bbox_file='')

    # Test gt bbox
    custom_dataset = dataset_class(ann_file='tests/data/fld/test_fld.json',
                                   img_prefix='tests/data/fld/',
                                   subset='full',
                                   data_cfg=data_cfg,
                                   pipeline=[],
                                   dataset_info=dataset_info,
                                   test_mode=True)

    assert custom_dataset.test_mode is True
    assert custom_dataset.dataset_name == 'deepfashion_full'

    image_id = 128
    assert image_id in custom_dataset.img_ids
    assert len(custom_dataset.img_ids) == 2
    _ = custom_dataset[0]

    outputs = convert_db_to_output(custom_dataset.db)
    with tempfile.TemporaryDirectory() as tmpdir:
        infos = custom_dataset.evaluate(outputs, tmpdir, ['PCK', 'EPE', 'AUC'])
        assert_almost_equal(infos['PCK'], 1.0)
        assert_almost_equal(infos['AUC'], 0.95)
        assert_almost_equal(infos['EPE'], 0.0)

        # Unsupported metric must raise KeyError; keep this inside the
        # TemporaryDirectory context so tmpdir still exists.
        with pytest.raises(KeyError):
            infos = custom_dataset.evaluate(outputs, tmpdir, 'mAP')
コード例 #3
0
def test_animal_Macaque_dataset_compatibility():
    """Check AnimalMacaqueDataset still works when dataset_info is omitted.

    Omitting ``dataset_info`` is the deprecated legacy interface, so every
    construction must emit a DeprecationWarning.
    """
    dataset_class = DATASETS.get('AnimalMacaqueDataset')

    # 17 macaque keypoints, all used at inference time.
    keypoints = list(range(17))
    channel_cfg = dict(
        num_output_channels=17,
        dataset_joints=17,
        dataset_channel=[keypoints],
        inference_channel=keypoints)

    data_cfg = dict(
        image_size=[192, 256],
        heatmap_size=[48, 64],
        num_output_channels=channel_cfg['num_output_channels'],
        num_joints=channel_cfg['dataset_joints'],
        dataset_channel=channel_cfg['dataset_channel'],
        inference_channel=channel_cfg['inference_channel'],
        soft_nms=False,
        nms_thr=1.0,
        oks_thr=0.9,
        vis_thr=0.2,
        use_gt_bbox=True,
        det_bbox_thr=0.0,
        bbox_file='')

    ann_file = 'tests/data/macaque/test_macaque.json'
    img_prefix = 'tests/data/macaque/'
    cfg = copy.deepcopy(data_cfg)

    with pytest.warns(DeprecationWarning):
        _ = dataset_class(
            ann_file=ann_file,
            img_prefix=img_prefix,
            data_cfg=cfg,
            pipeline=[],
            test_mode=True)

    with pytest.warns(DeprecationWarning):
        custom_dataset = dataset_class(
            ann_file=ann_file,
            img_prefix=img_prefix,
            data_cfg=cfg,
            pipeline=[],
            test_mode=False)

    assert custom_dataset.test_mode is False
    assert custom_dataset.num_images == 2
    _ = custom_dataset[0]

    outputs = convert_db_to_output(custom_dataset.db)
    with tempfile.TemporaryDirectory() as tmpdir:
        # Ground-truth predictions must score a perfect AP.
        infos = custom_dataset.evaluate(outputs, tmpdir, 'mAP')
        assert_almost_equal(infos['AP'], 1.0)

        # Unsupported metrics must raise KeyError.
        with pytest.raises(KeyError):
            infos = custom_dataset.evaluate(outputs, tmpdir, ['PCK'])
コード例 #4
0
def test_animal_locust_dataset():
    """Smoke-test AnimalLocustDataset: construction, indexing and PCK evaluation."""
    dataset_class = DATASETS.get('AnimalLocustDataset')
    dataset_info = Config.fromfile(
        'configs/_base_/datasets/locust.py').dataset_info

    # The locust dataset annotates 35 keypoints, all used at inference time.
    keypoints = list(range(35))
    channel_cfg = dict(
        num_output_channels=35,
        dataset_joints=35,
        dataset_channel=[keypoints],
        inference_channel=keypoints)

    data_cfg = dict(
        image_size=[160, 160],
        heatmap_size=[40, 40],
        num_output_channels=channel_cfg['num_output_channels'],
        num_joints=channel_cfg['dataset_joints'],
        dataset_channel=channel_cfg['dataset_channel'],
        inference_channel=channel_cfg['inference_channel'])

    ann_file = 'tests/data/locust/test_locust.json'
    img_prefix = 'tests/data/locust/'
    cfg = copy.deepcopy(data_cfg)

    # Construction must succeed in test mode...
    _ = dataset_class(
        ann_file=ann_file,
        img_prefix=img_prefix,
        data_cfg=cfg,
        dataset_info=dataset_info,
        pipeline=[],
        test_mode=True)

    # ...and in train mode.
    custom_dataset = dataset_class(
        ann_file=ann_file,
        img_prefix=img_prefix,
        data_cfg=cfg,
        dataset_info=dataset_info,
        pipeline=[],
        test_mode=False)

    assert custom_dataset.dataset_name == 'locust'
    assert custom_dataset.test_mode is False
    assert custom_dataset.num_images == 2
    _ = custom_dataset[0]

    outputs = convert_db_to_output(custom_dataset.db)
    with tempfile.TemporaryDirectory() as tmpdir:
        # Ground-truth predictions must score a perfect PCK.
        infos = custom_dataset.evaluate(outputs, tmpdir, ['PCK'])
        assert_almost_equal(infos['PCK'], 1.0)

        # mAP is not supported by this dataset.
        with pytest.raises(KeyError):
            infos = custom_dataset.evaluate(outputs, tmpdir, 'mAP')
コード例 #5
0
def test_Panoptic2D_dataset():
    """Smoke-test PanopticDataset (2D hand): construction and PCKh/EPE/AUC."""
    dataset_info = Config.fromfile(
        'configs/_base_/datasets/panoptic_hand2d.py').dataset_info

    dataset_class = DATASETS.get('PanopticDataset')

    # 21 hand keypoints, all used at inference time.
    keypoints = list(range(21))
    channel_cfg = dict(
        num_output_channels=21,
        dataset_joints=21,
        dataset_channel=[keypoints],
        inference_channel=keypoints)

    data_cfg = dict(
        image_size=[256, 256],
        heatmap_size=[64, 64],
        num_output_channels=channel_cfg['num_output_channels'],
        num_joints=channel_cfg['dataset_joints'],
        dataset_channel=channel_cfg['dataset_channel'],
        inference_channel=channel_cfg['inference_channel'])

    ann_file = 'tests/data/panoptic/test_panoptic.json'
    img_prefix = 'tests/data/panoptic/'
    cfg = copy.deepcopy(data_cfg)

    # Construction must succeed in test mode...
    _ = dataset_class(
        ann_file=ann_file,
        img_prefix=img_prefix,
        data_cfg=cfg,
        pipeline=[],
        dataset_info=dataset_info,
        test_mode=True)

    # ...and in train mode.
    custom_dataset = dataset_class(
        ann_file=ann_file,
        img_prefix=img_prefix,
        data_cfg=cfg,
        pipeline=[],
        dataset_info=dataset_info,
        test_mode=False)

    assert custom_dataset.dataset_name == 'panoptic_hand2d'
    assert custom_dataset.test_mode is False
    assert custom_dataset.num_images == 4
    _ = custom_dataset[0]

    results = convert_db_to_output(custom_dataset.db)

    # Ground-truth predictions must score perfectly on supported metrics.
    infos = custom_dataset.evaluate(results, metric=['PCKh', 'EPE', 'AUC'])
    assert_almost_equal(infos['PCKh'], 1.0)
    assert_almost_equal(infos['AUC'], 0.95)
    assert_almost_equal(infos['EPE'], 0.0)

    # mAP is not supported by this dataset.
    with pytest.raises(KeyError):
        infos = custom_dataset.evaluate(results, metric='mAP')
コード例 #6
0
def test_animal_fly_dataset():
    """Smoke-test AnimalFlyDataset: construction, indexing and PCK evaluation."""
    dataset_class = DATASETS.get('AnimalFlyDataset')
    dataset_info = Config.fromfile(
        'configs/_base_/datasets/fly.py').dataset_info

    # The fly dataset annotates 32 keypoints, all used at inference time.
    keypoints = list(range(32))
    channel_cfg = dict(
        num_output_channels=32,
        dataset_joints=32,
        dataset_channel=[keypoints],
        inference_channel=keypoints)

    data_cfg = dict(
        image_size=[192, 192],
        heatmap_size=[48, 48],
        num_output_channels=channel_cfg['num_output_channels'],
        num_joints=channel_cfg['dataset_joints'],
        dataset_channel=channel_cfg['dataset_channel'],
        inference_channel=channel_cfg['inference_channel'])

    ann_file = 'tests/data/fly/test_fly.json'
    img_prefix = 'tests/data/fly/'
    cfg = copy.deepcopy(data_cfg)

    # Construction must succeed in test mode...
    _ = dataset_class(
        ann_file=ann_file,
        img_prefix=img_prefix,
        data_cfg=cfg,
        dataset_info=dataset_info,
        pipeline=[],
        test_mode=True)

    # ...and in train mode.
    custom_dataset = dataset_class(
        ann_file=ann_file,
        img_prefix=img_prefix,
        data_cfg=cfg,
        dataset_info=dataset_info,
        pipeline=[],
        test_mode=False)

    assert custom_dataset.dataset_name == 'fly'
    assert custom_dataset.test_mode is False
    assert custom_dataset.num_images == 2
    _ = custom_dataset[0]

    results = convert_db_to_output(custom_dataset.db)

    # Ground-truth predictions must score a perfect PCK.
    infos = custom_dataset.evaluate(results, metric=['PCK'])
    assert_almost_equal(infos['PCK'], 1.0)

    # mAP is not supported by this dataset.
    with pytest.raises(KeyError):
        infos = custom_dataset.evaluate(results, metric='mAP')
コード例 #7
0
def test_top_down_Panoptic_dataset_compatibility():
    """Check PanopticDataset still works when dataset_info is omitted.

    Omitting ``dataset_info`` is the deprecated legacy interface, so every
    construction must emit a DeprecationWarning.
    """
    dataset_class = DATASETS.get('PanopticDataset')

    # 21 hand keypoints, all used at inference time.
    keypoints = list(range(21))
    channel_cfg = dict(
        num_output_channels=21,
        dataset_joints=21,
        dataset_channel=[keypoints],
        inference_channel=keypoints)

    data_cfg = dict(
        image_size=[256, 256],
        heatmap_size=[64, 64],
        num_output_channels=channel_cfg['num_output_channels'],
        num_joints=channel_cfg['dataset_joints'],
        dataset_channel=channel_cfg['dataset_channel'],
        inference_channel=channel_cfg['inference_channel'])

    ann_file = 'tests/data/panoptic/test_panoptic.json'
    img_prefix = 'tests/data/panoptic/'
    cfg = copy.deepcopy(data_cfg)

    with pytest.warns(DeprecationWarning):
        _ = dataset_class(
            ann_file=ann_file,
            img_prefix=img_prefix,
            data_cfg=cfg,
            pipeline=[],
            test_mode=True)

    with pytest.warns(DeprecationWarning):
        custom_dataset = dataset_class(
            ann_file=ann_file,
            img_prefix=img_prefix,
            data_cfg=cfg,
            pipeline=[],
            test_mode=False)

    assert custom_dataset.test_mode is False
    assert custom_dataset.num_images == 4
    _ = custom_dataset[0]

    outputs = convert_db_to_output(custom_dataset.db)
    with tempfile.TemporaryDirectory() as tmpdir:
        # Ground-truth predictions must score perfectly on supported metrics.
        infos = custom_dataset.evaluate(outputs, tmpdir,
                                        ['PCKh', 'EPE', 'AUC'])
        assert_almost_equal(infos['PCKh'], 1.0)
        assert_almost_equal(infos['AUC'], 0.95)
        assert_almost_equal(infos['EPE'], 0.0)

        # mAP is not supported by this dataset.
        with pytest.raises(KeyError):
            infos = custom_dataset.evaluate(outputs, tmpdir, 'mAP')
コード例 #8
0
def test_animal_fly_dataset_compatibility():
    """Check AnimalFlyDataset still works when dataset_info is omitted.

    Omitting ``dataset_info`` is the deprecated legacy interface, so every
    construction must emit a DeprecationWarning.
    """
    dataset_class = DATASETS.get('AnimalFlyDataset')

    # The fly dataset annotates 32 keypoints, all used at inference time.
    keypoints = list(range(32))
    channel_cfg = dict(
        num_output_channels=32,
        dataset_joints=32,
        dataset_channel=[keypoints],
        inference_channel=keypoints)

    data_cfg = dict(
        image_size=[192, 192],
        heatmap_size=[48, 48],
        num_output_channels=channel_cfg['num_output_channels'],
        num_joints=channel_cfg['dataset_joints'],
        dataset_channel=channel_cfg['dataset_channel'],
        inference_channel=channel_cfg['inference_channel'])

    ann_file = 'tests/data/fly/test_fly.json'
    img_prefix = 'tests/data/fly/'
    cfg = copy.deepcopy(data_cfg)

    with pytest.warns(DeprecationWarning):
        _ = dataset_class(
            ann_file=ann_file,
            img_prefix=img_prefix,
            data_cfg=cfg,
            pipeline=[],
            test_mode=True)

    with pytest.warns(DeprecationWarning):
        custom_dataset = dataset_class(
            ann_file=ann_file,
            img_prefix=img_prefix,
            data_cfg=cfg,
            pipeline=[],
            test_mode=False)

    assert custom_dataset.test_mode is False
    assert custom_dataset.num_images == 2
    _ = custom_dataset[0]

    outputs = convert_db_to_output(custom_dataset.db)
    with tempfile.TemporaryDirectory() as tmpdir:
        # Ground-truth predictions must score a perfect PCK.
        infos = custom_dataset.evaluate(outputs, tmpdir, ['PCK'])
        assert_almost_equal(infos['PCK'], 1.0)

        # mAP is not supported by this dataset.
        with pytest.raises(KeyError):
            infos = custom_dataset.evaluate(outputs, tmpdir, 'mAP')
コード例 #9
0
def test_face_WFLW_dataset_compatibility():
    """Check FaceWFLWDataset still works when dataset_info is omitted.

    Omitting ``dataset_info`` is the deprecated legacy interface, so every
    construction must emit a DeprecationWarning.
    """
    # test Face WFLW datasets
    dataset_class = DATASETS.get('FaceWFLWDataset')
    dataset_class.load_annotations = MagicMock()
    dataset_class.coco = MagicMock()

    # WFLW annotates 98 facial landmarks, all used at inference time.
    landmarks = list(range(98))
    channel_cfg = dict(
        num_output_channels=98,
        dataset_joints=98,
        dataset_channel=[landmarks],
        inference_channel=landmarks)

    data_cfg = dict(
        image_size=[256, 256],
        heatmap_size=[64, 64],
        num_output_channels=channel_cfg['num_output_channels'],
        num_joints=channel_cfg['dataset_joints'],
        dataset_channel=channel_cfg['dataset_channel'],
        inference_channel=channel_cfg['inference_channel'])

    ann_file = 'tests/data/wflw/test_wflw.json'
    img_prefix = 'tests/data/wflw/'
    cfg = copy.deepcopy(data_cfg)

    with pytest.warns(DeprecationWarning):
        _ = dataset_class(
            ann_file=ann_file,
            img_prefix=img_prefix,
            data_cfg=cfg,
            pipeline=[],
            test_mode=True)

    with pytest.warns(DeprecationWarning):
        custom_dataset = dataset_class(
            ann_file=ann_file,
            img_prefix=img_prefix,
            data_cfg=cfg,
            pipeline=[],
            test_mode=False)

    assert custom_dataset.test_mode is False
    assert custom_dataset.num_images == 2
    _ = custom_dataset[0]

    outputs = convert_db_to_output(custom_dataset.db)

    with tempfile.TemporaryDirectory() as tmpdir:
        # Ground-truth predictions must have zero normalized mean error.
        infos = custom_dataset.evaluate(outputs, tmpdir, ['NME'])
        assert_almost_equal(infos['NME'], 0.0)

        # mAP is not supported by this dataset.
        with pytest.raises(KeyError):
            _ = custom_dataset.evaluate(outputs, tmpdir, 'mAP')
コード例 #10
0
ファイル: test_face_dataset.py プロジェクト: wusize/mmpose
def test_face_COFW_dataset():
    """Smoke-test FaceCOFWDataset: construction, indexing and NME evaluation."""
    dataset_info = Config.fromfile(
        'configs/_base_/datasets/cofw.py').dataset_info
    # test Face COFW datasets
    dataset_class = DATASETS.get('FaceCOFWDataset')
    dataset_class.load_annotations = MagicMock()
    dataset_class.coco = MagicMock()

    # COFW annotates 29 facial landmarks, all used at inference time.
    landmarks = list(range(29))
    channel_cfg = dict(
        num_output_channels=29,
        dataset_joints=29,
        dataset_channel=[landmarks],
        inference_channel=landmarks)

    data_cfg = dict(
        image_size=[256, 256],
        heatmap_size=[64, 64],
        num_output_channels=channel_cfg['num_output_channels'],
        num_joints=channel_cfg['dataset_joints'],
        dataset_channel=channel_cfg['dataset_channel'],
        inference_channel=channel_cfg['inference_channel'])

    ann_file = 'tests/data/cofw/test_cofw.json'
    img_prefix = 'tests/data/cofw/'
    cfg = copy.deepcopy(data_cfg)

    # Construction must succeed in test mode...
    _ = dataset_class(
        ann_file=ann_file,
        img_prefix=img_prefix,
        data_cfg=cfg,
        pipeline=[],
        dataset_info=dataset_info,
        test_mode=True)

    # ...and in train mode.
    custom_dataset = dataset_class(
        ann_file=ann_file,
        img_prefix=img_prefix,
        data_cfg=cfg,
        pipeline=[],
        dataset_info=dataset_info,
        test_mode=False)

    assert custom_dataset.dataset_name == 'cofw'
    assert custom_dataset.test_mode is False
    assert custom_dataset.num_images == 2
    _ = custom_dataset[0]

    outputs = convert_db_to_output(custom_dataset.db)

    with tempfile.TemporaryDirectory() as tmpdir:
        # Ground-truth predictions must have zero normalized mean error.
        infos = custom_dataset.evaluate(outputs, tmpdir, ['NME'])
        assert_almost_equal(infos['NME'], 0.0)

        # mAP is not supported by this dataset.
        with pytest.raises(KeyError):
            _ = custom_dataset.evaluate(outputs, tmpdir, 'mAP')
コード例 #11
0
def test_face_coco_wholebody_dataset():
    """Smoke-test FaceCocoWholeBodyDataset: construction and NME evaluation."""
    dataset_info = Config.fromfile(
        'configs/_base_/datasets/coco_wholebody_face.py').dataset_info
    # test Face wholebody datasets
    dataset_class = DATASETS.get('FaceCocoWholeBodyDataset')
    dataset_class.load_annotations = MagicMock()
    dataset_class.coco = MagicMock()

    # COCO-WholeBody face annotates 68 landmarks, all used at inference time.
    landmarks = list(range(68))
    channel_cfg = dict(
        num_output_channels=68,
        dataset_joints=68,
        dataset_channel=[landmarks],
        inference_channel=landmarks)

    data_cfg = dict(
        image_size=[256, 256],
        heatmap_size=[64, 64],
        num_output_channels=channel_cfg['num_output_channels'],
        num_joints=channel_cfg['dataset_joints'],
        dataset_channel=channel_cfg['dataset_channel'],
        inference_channel=channel_cfg['inference_channel'])

    ann_file = 'tests/data/coco/test_coco_wholebody.json'
    img_prefix = 'tests/data/coco/'
    cfg = copy.deepcopy(data_cfg)

    # Construction must succeed in test mode...
    _ = dataset_class(
        ann_file=ann_file,
        img_prefix=img_prefix,
        data_cfg=cfg,
        pipeline=[],
        dataset_info=dataset_info,
        test_mode=True)

    # ...and in train mode.
    custom_dataset = dataset_class(
        ann_file=ann_file,
        img_prefix=img_prefix,
        data_cfg=cfg,
        pipeline=[],
        dataset_info=dataset_info,
        test_mode=False)

    assert custom_dataset.test_mode is False
    assert custom_dataset.num_images == 4
    _ = custom_dataset[0]

    results = convert_db_to_output(custom_dataset.db)

    # Ground-truth predictions must have zero normalized mean error.
    infos = custom_dataset.evaluate(results, metric=['NME'])
    assert_almost_equal(infos['NME'], 0.0)

    # mAP is not supported by this dataset.
    with pytest.raises(KeyError):
        _ = custom_dataset.evaluate(results, metric='mAP')
コード例 #12
0
def test_top_down_h36m_dataset():
    """Test TopDownH36MDataset: gt-bbox loading and EPE evaluation."""
    dataset = 'TopDownH36MDataset'
    dataset_info = Config.fromfile(
        'configs/_base_/datasets/h36m.py').dataset_info
    # test Human3.6M dataset (comment previously said "AIC" by copy-paste)
    dataset_class = DATASETS.get(dataset)
    dataset_class.load_annotations = MagicMock()
    dataset_class.coco = MagicMock()

    # 17 body keypoints, all used at inference time.
    channel_cfg = dict(
        num_output_channels=17,
        dataset_joints=17,
        dataset_channel=[
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16],
        ],
        inference_channel=[
            0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16
        ])

    data_cfg = dict(
        image_size=[256, 256],
        heatmap_size=[64, 64],
        num_output_channels=channel_cfg['num_output_channels'],
        num_joints=channel_cfg['dataset_joints'],
        dataset_channel=channel_cfg['dataset_channel'],
        inference_channel=channel_cfg['inference_channel'])

    # Test gt bbox
    custom_dataset = dataset_class(
        ann_file='tests/data/h36m/h36m_coco.json',
        img_prefix='tests/data/h36m/',
        data_cfg=data_cfg,
        pipeline=[],
        dataset_info=dataset_info,
        test_mode=True)

    assert custom_dataset.test_mode is True
    assert custom_dataset.dataset_name == 'h36m'

    image_id = 1
    assert image_id in custom_dataset.img_ids
    _ = custom_dataset[0]

    # Predictions built from ground truth must have zero end-point error.
    results = convert_db_to_output(custom_dataset.db)
    infos = custom_dataset.evaluate(results, metric='EPE')
    assert_almost_equal(infos['EPE'], 0.0)

    # AUC is not supported by this dataset.
    with pytest.raises(KeyError):
        _ = custom_dataset.evaluate(results, metric='AUC')
コード例 #13
0
def test_animal_zebra_dataset():
    """Smoke-test AnimalZebraDataset: construction, indexing and PCK evaluation."""
    dataset_class = DATASETS.get('AnimalZebraDataset')
    dataset_info = Config.fromfile(
        'configs/_base_/datasets/zebra.py').dataset_info

    # The zebra dataset annotates 9 keypoints, all used at inference time.
    keypoints = [0, 1, 2, 3, 4, 5, 6, 7, 8]
    channel_cfg = dict(
        num_output_channels=9,
        dataset_joints=9,
        dataset_channel=[keypoints],
        inference_channel=keypoints)

    data_cfg = dict(
        image_size=[160, 160],
        heatmap_size=[40, 40],
        num_output_channels=channel_cfg['num_output_channels'],
        num_joints=channel_cfg['dataset_joints'],
        dataset_channel=channel_cfg['dataset_channel'],
        inference_channel=channel_cfg['inference_channel'])

    ann_file = 'tests/data/zebra/test_zebra.json'
    img_prefix = 'tests/data/zebra/'
    cfg = copy.deepcopy(data_cfg)

    # Construction must succeed in test mode...
    _ = dataset_class(
        ann_file=ann_file,
        img_prefix=img_prefix,
        data_cfg=cfg,
        dataset_info=dataset_info,
        pipeline=[],
        test_mode=True)

    # ...and in train mode.
    custom_dataset = dataset_class(
        ann_file=ann_file,
        img_prefix=img_prefix,
        data_cfg=cfg,
        dataset_info=dataset_info,
        pipeline=[],
        test_mode=False)

    assert custom_dataset.dataset_name == 'zebra'
    assert custom_dataset.test_mode is False
    assert custom_dataset.num_images == 2
    _ = custom_dataset[0]

    outputs = convert_db_to_output(custom_dataset.db)
    with tempfile.TemporaryDirectory() as tmpdir:
        # Ground-truth predictions must score a perfect PCK.
        infos = custom_dataset.evaluate(outputs, tmpdir, ['PCK'])
        assert_almost_equal(infos['PCK'], 1.0)

        # mAP is not supported by this dataset.
        with pytest.raises(KeyError):
            infos = custom_dataset.evaluate(outputs, tmpdir, 'mAP')
コード例 #14
0
def test_top_down_h36m_dataset_compatibility():
    """Check TopDownH36MDataset still works when dataset_info is omitted.

    Omitting ``dataset_info`` is the deprecated legacy interface, so
    construction must emit a DeprecationWarning.
    """
    dataset = 'TopDownH36MDataset'
    # test Human3.6M dataset (comment previously said "AIC" by copy-paste)
    dataset_class = DATASETS.get(dataset)
    dataset_class.load_annotations = MagicMock()
    dataset_class.coco = MagicMock()

    # 17 body keypoints, all used at inference time.
    channel_cfg = dict(
        num_output_channels=17,
        dataset_joints=17,
        dataset_channel=[
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16],
        ],
        inference_channel=[
            0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16
        ])

    data_cfg = dict(image_size=[256, 256],
                    heatmap_size=[64, 64],
                    num_output_channels=channel_cfg['num_output_channels'],
                    num_joints=channel_cfg['dataset_joints'],
                    dataset_channel=channel_cfg['dataset_channel'],
                    inference_channel=channel_cfg['inference_channel'])

    # Test gt bbox
    with pytest.warns(DeprecationWarning):
        custom_dataset = dataset_class(
            ann_file='tests/data/h36m/h36m_coco.json',
            img_prefix='tests/data/h36m/',
            data_cfg=data_cfg,
            pipeline=[],
            test_mode=True)

    assert custom_dataset.test_mode is True
    assert custom_dataset.dataset_name == 'h36m'

    image_id = 1
    assert image_id in custom_dataset.img_ids
    _ = custom_dataset[0]

    outputs = convert_db_to_output(custom_dataset.db)
    with tempfile.TemporaryDirectory() as tmpdir:
        # Predictions built from ground truth must have zero end-point error.
        infos = custom_dataset.evaluate(outputs, tmpdir, 'EPE')
        assert_almost_equal(infos['EPE'], 0.0)

        # AUC is not supported by this dataset.
        with pytest.raises(KeyError):
            _ = custom_dataset.evaluate(outputs, tmpdir, 'AUC')
コード例 #15
0
def test_top_down_MHP_dataset():
    """Smoke-test TopDownMhpDataset: bbox handling and mAP evaluation."""
    dataset_info = Config.fromfile(
        'configs/_base_/datasets/mhp.py').dataset_info
    # test MHP datasets
    dataset_class = DATASETS.get('TopDownMhpDataset')
    dataset_class.load_annotations = MagicMock()
    dataset_class.coco = MagicMock()

    # MHP annotates 16 body keypoints, all used at inference time.
    keypoints = list(range(16))
    channel_cfg = dict(
        num_output_channels=16,
        dataset_joints=16,
        dataset_channel=[keypoints],
        inference_channel=keypoints)

    data_cfg = dict(
        image_size=[192, 256],
        heatmap_size=[48, 64],
        num_output_channels=channel_cfg['num_output_channels'],
        num_joints=channel_cfg['dataset_joints'],
        dataset_channel=channel_cfg['dataset_channel'],
        inference_channel=channel_cfg['inference_channel'],
        soft_nms=False,
        nms_thr=1.0,
        oks_thr=0.9,
        vis_thr=0.2,
        bbox_thr=1.0,
        use_gt_bbox=True,
        det_bbox_thr=0.0,
        bbox_file='')

    ann_file = 'tests/data/mhp/test_mhp.json'
    img_prefix = 'tests/data/mhp/'

    # Detected bboxes are not supported: the constructor must assert.
    with pytest.raises(AssertionError):
        cfg = copy.deepcopy(data_cfg)
        cfg['use_gt_bbox'] = False

        _ = dataset_class(
            ann_file=ann_file,
            img_prefix=img_prefix,
            data_cfg=cfg,
            pipeline=[],
            dataset_info=dataset_info,
            test_mode=True)

    # Ground-truth bboxes work in train mode...
    _ = dataset_class(
        ann_file=ann_file,
        img_prefix=img_prefix,
        data_cfg=data_cfg,
        pipeline=[],
        dataset_info=dataset_info,
        test_mode=False)

    # ...and in test mode.
    custom_dataset = dataset_class(
        ann_file=ann_file,
        img_prefix=img_prefix,
        data_cfg=data_cfg,
        pipeline=[],
        dataset_info=dataset_info,
        test_mode=True)

    assert custom_dataset.test_mode is True
    assert custom_dataset.dataset_name == 'mhp'

    image_id = 2889
    assert image_id in custom_dataset.img_ids
    assert len(custom_dataset.img_ids) == 2
    _ = custom_dataset[0]

    outputs = convert_db_to_output(custom_dataset.db)
    with tempfile.TemporaryDirectory() as tmpdir:
        # Ground-truth predictions must score a perfect AP.
        infos = custom_dataset.evaluate(outputs, tmpdir, 'mAP')
        assert_almost_equal(infos['AP'], 1.0)

        # PCK is not supported by this dataset.
        with pytest.raises(KeyError):
            _ = custom_dataset.evaluate(outputs, tmpdir, 'PCK')
コード例 #16
0
def test_top_down_halpe_dataset():
    """Test TopDownHalpeDataset: construction with det/gt bboxes and mAP
    evaluation on the bundled Halpe fixture annotations."""
    dataset = 'TopDownHalpeDataset'
    dataset_info = Config.fromfile(
        'configs/_base_/datasets/halpe.py').dataset_info
    # test Halpe datasets
    dataset_class = DATASETS.get(dataset)
    # NOTE(review): these mocks patch the dataset *class*, not an instance,
    # so they persist for any later test using the same class — confirm
    # this cross-test leakage is intended.
    dataset_class.load_annotations = MagicMock()
    dataset_class.coco = MagicMock()

    # Halpe uses 136 keypoint channels; all of them are used for inference.
    channel_cfg = dict(num_output_channels=136,
                       dataset_joints=136,
                       dataset_channel=[
                           list(range(136)),
                       ],
                       inference_channel=list(range(136)))

    data_cfg = dict(
        image_size=[192, 256],
        heatmap_size=[48, 64],
        num_output_channels=channel_cfg['num_output_channels'],
        num_joints=channel_cfg['dataset_joints'],
        dataset_channel=channel_cfg['dataset_channel'],
        inference_channel=channel_cfg['inference_channel'],
        soft_nms=False,
        nms_thr=1.0,
        oks_thr=0.9,
        vis_thr=0.2,
        use_gt_bbox=True,
        det_bbox_thr=0.0,
        bbox_file='tests/data/coco/test_coco_det_AP_H_56.json',
    )
    # Test det bbox: construction must succeed in both test and train mode.
    data_cfg_copy = copy.deepcopy(data_cfg)
    data_cfg_copy['use_gt_bbox'] = False
    _ = dataset_class(ann_file='tests/data/halpe/test_halpe.json',
                      img_prefix='tests/data/coco/',
                      data_cfg=data_cfg_copy,
                      pipeline=[],
                      dataset_info=dataset_info,
                      test_mode=True)

    _ = dataset_class(ann_file='tests/data/halpe/test_halpe.json',
                      img_prefix='tests/data/coco/',
                      data_cfg=data_cfg_copy,
                      pipeline=[],
                      dataset_info=dataset_info,
                      test_mode=False)

    # Test gt bbox
    custom_dataset = dataset_class(ann_file='tests/data/halpe/test_halpe.json',
                                   img_prefix='tests/data/coco/',
                                   data_cfg=data_cfg,
                                   pipeline=[],
                                   dataset_info=dataset_info,
                                   test_mode=True)

    assert custom_dataset.test_mode is True
    assert custom_dataset.dataset_name == 'halpe'

    image_id = 785
    assert image_id in custom_dataset.img_ids
    assert len(custom_dataset.img_ids) == 4
    _ = custom_dataset[0]

    # Predictions copied from the ground truth should score a perfect AP.
    outputs = convert_db_to_output(custom_dataset.db)
    with tempfile.TemporaryDirectory() as tmpdir:
        infos = custom_dataset.evaluate(outputs, tmpdir, 'mAP')
        assert_almost_equal(infos['AP'], 1.0)

        # PCK is not a supported metric for this dataset.
        with pytest.raises(KeyError):
            _ = custom_dataset.evaluate(outputs, tmpdir, 'PCK')
# Code example #17 (コード例 #17)
def test_top_down_JHMDB_dataset_compatibility():
    """Test TopDownJhmdbDataset in legacy (no ``dataset_info``) mode.

    Constructing the dataset without ``dataset_info`` must emit a
    DeprecationWarning; using det bboxes with an empty ``bbox_file``
    must raise an AssertionError; evaluation supports PCK/tPCK but
    rejects mAP with a KeyError.
    """
    dataset = 'TopDownJhmdbDataset'
    # test JHMDB datasets
    dataset_class = DATASETS.get(dataset)
    # NOTE(review): class-level mocks persist across tests sharing this class.
    dataset_class.load_annotations = MagicMock()
    dataset_class.coco = MagicMock()

    # JHMDB uses 15 keypoint channels; all of them are used for inference.
    channel_cfg = dict(
        num_output_channels=15,
        dataset_joints=15,
        dataset_channel=[
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14],
        ],
        inference_channel=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14])

    data_cfg = dict(image_size=[192, 256],
                    heatmap_size=[48, 64],
                    num_output_channels=channel_cfg['num_output_channels'],
                    num_joints=channel_cfg['dataset_joints'],
                    dataset_channel=channel_cfg['dataset_channel'],
                    inference_channel=channel_cfg['inference_channel'],
                    soft_nms=False,
                    nms_thr=1.0,
                    oks_thr=0.9,
                    vis_thr=0.2,
                    use_gt_bbox=True,
                    det_bbox_thr=0.0,
                    bbox_file='')

    # Test det bbox: with use_gt_bbox=False and an empty bbox_file the
    # construction must fail. The setup is hoisted out of the context so
    # pytest.raises wraps only the statement expected to raise.
    data_cfg_copy = copy.deepcopy(data_cfg)
    data_cfg_copy['use_gt_bbox'] = False
    with pytest.raises(AssertionError):
        with pytest.warns(DeprecationWarning):
            _ = dataset_class(ann_file='tests/data/jhmdb/test_jhmdb_sub1.json',
                              img_prefix='tests/data/jhmdb/',
                              data_cfg=data_cfg_copy,
                              pipeline=[],
                              test_mode=True)
    # NOTE(review): the original also constructed a test_mode=False dataset
    # after the raising call inside the same pytest.raises block; that
    # statement was unreachable dead code and has been removed.

    # Test gt bbox: legacy construction succeeds but must warn.
    with pytest.warns(DeprecationWarning):
        custom_dataset = dataset_class(
            ann_file='tests/data/jhmdb/test_jhmdb_sub1.json',
            img_prefix='tests/data/jhmdb/',
            data_cfg=data_cfg,
            pipeline=[],
            test_mode=True)

    assert custom_dataset.test_mode is True
    assert custom_dataset.dataset_name == 'jhmdb'

    image_id = 2290001
    assert image_id in custom_dataset.img_ids
    assert len(custom_dataset.img_ids) == 3
    _ = custom_dataset[0]

    # Predictions copied from the ground truth give perfect PCK/tPCK.
    outputs = convert_db_to_output(custom_dataset.db)
    with tempfile.TemporaryDirectory() as tmpdir:
        infos = custom_dataset.evaluate(outputs, tmpdir, ['PCK'])
        assert_almost_equal(infos['Mean PCK'], 1.0)

        infos = custom_dataset.evaluate(outputs, tmpdir, ['tPCK'])
        assert_almost_equal(infos['Mean tPCK'], 1.0)

        # mAP is not a supported metric for JHMDB.
        with pytest.raises(KeyError):
            _ = custom_dataset.evaluate(outputs, tmpdir, 'mAP')
# Code example #18 (コード例 #18)
def test_top_down_COCO_wholebody_dataset_compatibility():
    """Test TopDownCocoWholeBodyDataset in legacy (no ``dataset_info``)
    mode: every construction must emit a DeprecationWarning, and mAP
    evaluation on ground-truth-derived outputs must be perfect."""
    dataset = 'TopDownCocoWholeBodyDataset'
    # test COCO datasets
    dataset_class = DATASETS.get(dataset)
    # NOTE(review): class-level mocks persist across tests sharing this class.
    dataset_class.load_annotations = MagicMock()
    dataset_class.coco = MagicMock()

    # COCO-WholeBody uses 133 keypoint channels, all used for inference.
    channel_cfg = dict(num_output_channels=133,
                       dataset_joints=133,
                       dataset_channel=[
                           list(range(133)),
                       ],
                       inference_channel=list(range(133)))

    data_cfg = dict(
        image_size=[192, 256],
        heatmap_size=[48, 64],
        num_output_channels=channel_cfg['num_output_channels'],
        num_joints=channel_cfg['dataset_joints'],
        dataset_channel=channel_cfg['dataset_channel'],
        inference_channel=channel_cfg['inference_channel'],
        soft_nms=False,
        nms_thr=1.0,
        oks_thr=0.9,
        vis_thr=0.2,
        use_gt_bbox=True,
        det_bbox_thr=0.0,
        bbox_file='tests/data/coco/test_coco_det_AP_H_56.json',
    )
    # Test det bbox: legacy construction succeeds but must warn.
    data_cfg_copy = copy.deepcopy(data_cfg)
    data_cfg_copy['use_gt_bbox'] = False
    with pytest.warns(DeprecationWarning):
        _ = dataset_class(ann_file='tests/data/coco/test_coco_wholebody.json',
                          img_prefix='tests/data/coco/',
                          data_cfg=data_cfg_copy,
                          pipeline=[],
                          test_mode=True)

    with pytest.warns(DeprecationWarning):
        _ = dataset_class(ann_file='tests/data/coco/test_coco_wholebody.json',
                          img_prefix='tests/data/coco/',
                          data_cfg=data_cfg_copy,
                          pipeline=[],
                          test_mode=False)

    # Test gt bbox
    with pytest.warns(DeprecationWarning):
        custom_dataset = dataset_class(
            ann_file='tests/data/coco/test_coco_wholebody.json',
            img_prefix='tests/data/coco/',
            data_cfg=data_cfg,
            pipeline=[],
            test_mode=True)

    assert custom_dataset.test_mode is True
    assert custom_dataset.dataset_name == 'coco_wholebody'

    image_id = 785
    assert image_id in custom_dataset.img_ids
    assert len(custom_dataset.img_ids) == 4
    _ = custom_dataset[0]

    # Predictions copied from the ground truth should score a perfect AP.
    outputs = convert_db_to_output(custom_dataset.db)
    with tempfile.TemporaryDirectory() as tmpdir:
        infos = custom_dataset.evaluate(outputs, tmpdir, 'mAP')
        assert_almost_equal(infos['AP'], 1.0)

        # PCK is not a supported metric for this dataset.
        with pytest.raises(KeyError):
            _ = custom_dataset.evaluate(outputs, tmpdir, 'PCK')
# Code example #19 (コード例 #19)
def test_InterHand3D_dataset():
    """Exercise InterHand3DDataset: construction in test/train mode and
    evaluation of the 3D hand metrics on ground-truth-derived results."""
    dataset_class = DATASETS.get('InterHand3DDataset')
    dataset_info = Config.fromfile(
        'configs/_base_/datasets/interhand3d.py').dataset_info

    # All 42 keypoint channels are used for inference.
    joint_ids = list(range(42))
    channel_cfg = dict(
        num_output_channels=42,
        dataset_joints=42,
        dataset_channel=[joint_ids],
        inference_channel=joint_ids)

    data_cfg = dict(
        image_size=[256, 256],
        heatmap_size=[64, 64, 64],
        heatmap3d_depth_bound=400.0,
        heatmap_size_root=64,
        root_depth_bound=400.0,
        num_output_channels=channel_cfg['num_output_channels'],
        num_joints=channel_cfg['dataset_joints'],
        dataset_channel=channel_cfg['dataset_channel'],
        inference_channel=channel_cfg['inference_channel'])

    # Both constructions share the same (deep-copied) config object,
    # mirroring the original test.
    shared_kwargs = dict(
        ann_file='tests/data/interhand2.6m/test_interhand2.6m_data.json',
        camera_file='tests/data/interhand2.6m/test_interhand2.6m_camera.json',
        joint_file='tests/data/interhand2.6m/test_interhand2.6m_joint_3d.json',
        img_prefix='tests/data/interhand2.6m/',
        data_cfg=copy.deepcopy(data_cfg),
        pipeline=[],
        dataset_info=dataset_info)

    # Construction must succeed in both test and train mode.
    _ = dataset_class(test_mode=True, **shared_kwargs)
    custom_dataset = dataset_class(test_mode=False, **shared_kwargs)

    assert custom_dataset.dataset_name == 'interhand3d'
    assert custom_dataset.test_mode is False
    assert custom_dataset.num_images == 4
    assert len(custom_dataset.db) == 4

    _ = custom_dataset[0]

    # Feeding ground truth back as predictions yields zero pose error and
    # perfect handedness accuracy.
    results = convert_db_to_output(custom_dataset.db,
                                   keys=['rel_root_depth', 'hand_type'],
                                   is_3d=True)
    infos = custom_dataset.evaluate(
        results, metric=['MRRPE', 'MPJPE', 'Handedness_acc'])
    assert_almost_equal(infos['MRRPE'], 0.0, decimal=5)
    assert_almost_equal(infos['MPJPE_all'], 0.0, decimal=5)
    assert_almost_equal(infos['MPJPE_single'], 0.0, decimal=5)
    assert_almost_equal(infos['MPJPE_interacting'], 0.0, decimal=5)
    assert_almost_equal(infos['Handedness_acc'], 1.0)

    # mAP is not a supported metric for this dataset.
    with pytest.raises(KeyError):
        infos = custom_dataset.evaluate(results, metric='mAP')
# Code example #20 (コード例 #20)
def test_top_down_COCO_dataset():
    """Test TopDownCocoDataset: construction with det/gt bboxes, mAP
    evaluation (with and without RLE scoring), and the warning path when
    ground-truth annotations are missing."""
    dataset = 'TopDownCocoDataset'
    dataset_info = Config.fromfile(
        'configs/_base_/datasets/coco.py').dataset_info
    # test COCO datasets
    dataset_class = DATASETS.get(dataset)
    # NOTE(review): class-level mocks persist across tests sharing this class.
    dataset_class.load_annotations = MagicMock()
    dataset_class.coco = MagicMock()

    # COCO uses 17 keypoint channels, all used for inference.
    channel_cfg = dict(
        num_output_channels=17,
        dataset_joints=17,
        dataset_channel=[
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16],
        ],
        inference_channel=[
            0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16
        ])

    data_cfg = dict(
        image_size=[192, 256],
        heatmap_size=[48, 64],
        num_output_channels=channel_cfg['num_output_channels'],
        num_joints=channel_cfg['dataset_joints'],
        dataset_channel=channel_cfg['dataset_channel'],
        inference_channel=channel_cfg['inference_channel'],
        soft_nms=False,
        nms_thr=1.0,
        oks_thr=0.9,
        vis_thr=0.2,
        use_gt_bbox=True,
        det_bbox_thr=0.0,
        bbox_file='tests/data/coco/test_coco_det_AP_H_56.json',
    )
    # Test det bbox: construction must succeed in both test and train mode.
    data_cfg_copy = copy.deepcopy(data_cfg)
    data_cfg_copy['use_gt_bbox'] = False
    _ = dataset_class(
        ann_file='tests/data/coco/test_coco.json',
        img_prefix='tests/data/coco/',
        data_cfg=data_cfg_copy,
        pipeline=[],
        dataset_info=dataset_info,
        test_mode=True)

    _ = dataset_class(
        ann_file='tests/data/coco/test_coco.json',
        img_prefix='tests/data/coco/',
        data_cfg=data_cfg_copy,
        pipeline=[],
        dataset_info=dataset_info,
        test_mode=False)

    # Test gt bbox
    custom_dataset = dataset_class(
        ann_file='tests/data/coco/test_coco.json',
        img_prefix='tests/data/coco/',
        data_cfg=data_cfg,
        pipeline=[],
        dataset_info=dataset_info,
        test_mode=True)

    assert custom_dataset.test_mode is True
    assert custom_dataset.dataset_name == 'coco'

    image_id = 785
    assert image_id in custom_dataset.img_ids
    assert len(custom_dataset.img_ids) == 4
    _ = custom_dataset[0]

    # Predictions copied from the ground truth should score a perfect AP,
    # both with the default scoring and with RLE scoring enabled.
    results = convert_db_to_output(custom_dataset.db)
    infos = custom_dataset.evaluate(results, metric='mAP')
    assert_almost_equal(infos['AP'], 1.0)
    infos = custom_dataset.evaluate(results, metric='mAP', rle_score=True)
    assert_almost_equal(infos['AP'], 1.0)

    # PCK is not a supported metric for this dataset.
    with pytest.raises(KeyError):
        _ = custom_dataset.evaluate(results, metric='PCK')

    # Test when gt annotations are absent: evaluation should only warn.
    del custom_dataset.coco.dataset['annotations']
    with pytest.warns(UserWarning):
        _ = custom_dataset.evaluate(results, metric='mAP')
# Code example #21 (コード例 #21)
def test_top_down_PoseTrack18_dataset():
    """Test TopDownPoseTrack18Dataset: construction with det/gt bboxes and
    mAP evaluation in both modes (gt bboxes give perfect AP; det bboxes
    with zeroed keypoints give zero AP)."""
    dataset = 'TopDownPoseTrack18Dataset'
    dataset_info = Config.fromfile(
        'configs/_base_/datasets/posetrack18.py').dataset_info
    # test PoseTrack datasets
    dataset_class = DATASETS.get(dataset)
    # NOTE(review): class-level mocks persist across tests sharing this class.
    dataset_class.load_annotations = MagicMock()
    dataset_class.coco = MagicMock()

    # PoseTrack18 uses 17 keypoint channels, all used for inference.
    channel_cfg = dict(
        num_output_channels=17,
        dataset_joints=17,
        dataset_channel=[
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16],
        ],
        inference_channel=[
            0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16
        ])

    data_cfg = dict(
        image_size=[192, 256],
        heatmap_size=[48, 64],
        num_output_channels=channel_cfg['num_output_channels'],
        num_joints=channel_cfg['dataset_joints'],
        dataset_channel=channel_cfg['dataset_channel'],
        inference_channel=channel_cfg['inference_channel'],
        soft_nms=False,
        nms_thr=1.0,
        oks_thr=0.9,
        vis_thr=0.2,
        use_gt_bbox=True,
        det_bbox_thr=0.0,
        bbox_file='tests/data/posetrack18/annotations/'
        'test_posetrack18_human_detections.json',
    )
    # Test det bbox: construction must succeed in both test and train mode.
    data_cfg_copy = copy.deepcopy(data_cfg)
    data_cfg_copy['use_gt_bbox'] = False
    _ = dataset_class(
        ann_file='tests/data/posetrack18/annotations/'
        'test_posetrack18_val.json',
        img_prefix='tests/data/posetrack18/',
        data_cfg=data_cfg_copy,
        pipeline=[],
        dataset_info=dataset_info,
        test_mode=True)

    _ = dataset_class(
        ann_file='tests/data/posetrack18/annotations/'
        'test_posetrack18_val.json',
        img_prefix='tests/data/posetrack18/',
        data_cfg=data_cfg_copy,
        pipeline=[],
        dataset_info=dataset_info,
        test_mode=False)

    # Test gt bbox
    custom_dataset = dataset_class(
        ann_file='tests/data/posetrack18/annotations/'
        'test_posetrack18_val.json',
        img_prefix='tests/data/posetrack18/',
        data_cfg=data_cfg,
        pipeline=[],
        dataset_info=dataset_info,
        test_mode=True)

    assert custom_dataset.test_mode is True
    assert custom_dataset.dataset_name == 'posetrack18'

    image_id = 10128340000
    assert image_id in custom_dataset.img_ids
    assert len(custom_dataset.img_ids) == 3
    assert len(custom_dataset) == 14
    _ = custom_dataset[0]

    # Test evaluate function, use gt bbox
    results = convert_db_to_output(custom_dataset.db)
    infos = custom_dataset.evaluate(results, metric='mAP')
    assert_almost_equal(infos['Total AP'], 100)

    # PCK is not a supported metric for this dataset.
    with pytest.raises(KeyError):
        _ = custom_dataset.evaluate(results, metric='PCK')

    # Test evaluate function, use det bbox
    data_cfg_copy = copy.deepcopy(data_cfg)
    data_cfg_copy['use_gt_bbox'] = False

    custom_dataset = dataset_class(
        ann_file='tests/data/posetrack18/annotations/'
        'test_posetrack18_val.json',
        img_prefix='tests/data/posetrack18/',
        data_cfg=data_cfg_copy,
        pipeline=[],
        dataset_info=dataset_info,
        test_mode=True)

    assert len(custom_dataset) == 278

    results = convert_db_to_output(custom_dataset.db)
    infos = custom_dataset.evaluate(results, metric='mAP')
    # since the det box input assume each keypoint position to be (0,0)
    # the Total AP will be zero.
    assert_almost_equal(infos['Total AP'], 0.)

    with pytest.raises(KeyError):
        _ = custom_dataset.evaluate(results, metric='PCK')
# Code example #22 (コード例 #22)
def test_top_down_PoseTrack18Video_dataset():
    """Test TopDownPoseTrack18VideoDataset: frame-index handling, required
    ``dataset_info``, construction with det/gt bboxes, and mAP evaluation
    in both modes."""
    dataset = 'TopDownPoseTrack18VideoDataset'
    dataset_info = Config.fromfile(
        'configs/_base_/datasets/posetrack18.py').dataset_info
    # test PoseTrack18Video dataset
    dataset_class = DATASETS.get(dataset)
    # NOTE(review): class-level mocks persist across tests sharing this class.
    dataset_class.load_annotations = MagicMock()
    dataset_class.coco = MagicMock()

    # PoseTrack18 uses 17 keypoint channels, all used for inference.
    channel_cfg = dict(
        num_output_channels=17,
        dataset_joints=17,
        dataset_channel=[
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16],
        ],
        inference_channel=[
            0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16
        ])

    data_cfg = dict(
        image_size=[288, 384],
        heatmap_size=[72, 96],
        num_output_channels=channel_cfg['num_output_channels'],
        num_joints=channel_cfg['dataset_joints'],
        dataset_channel=channel_cfg['dataset_channel'],
        inference_channel=channel_cfg['inference_channel'],
        use_nms=True,
        soft_nms=False,
        nms_thr=1.0,
        oks_thr=0.9,
        vis_thr=0.2,
        use_gt_bbox=True,
        det_bbox_thr=0.0,
        bbox_file='tests/data/posetrack18/annotations/'
        'test_posetrack18_human_detections.json',
        # frame-related arguments
        frame_index_rand=True,
        frame_index_range=[-2, 2],
        num_adj_frames=1,
        frame_indices_test=[-2, 2, -1, 1, 0],
        frame_weight_train=(0.0, 1.0),
        frame_weight_test=(0.3, 0.1, 0.25, 0.25, 0.1),
    )

    # Test value of dataset_info: omitting it must raise a ValueError.
    with pytest.raises(ValueError):
        _ = dataset_class(
            ann_file='tests/data/posetrack18/annotations/'
            'test_posetrack18_val.json',
            img_prefix='tests/data/posetrack18/',
            data_cfg=data_cfg,
            pipeline=[],
            dataset_info=None,
            test_mode=False)

    # Test train mode (must use gt bbox)
    _ = dataset_class(
        ann_file='tests/data/posetrack18/annotations/'
        'test_posetrack18_val.json',
        img_prefix='tests/data/posetrack18/',
        data_cfg=data_cfg,
        pipeline=[],
        dataset_info=dataset_info,
        test_mode=False)

    # Test gt bbox + test mode
    custom_dataset = dataset_class(
        ann_file='tests/data/posetrack18/annotations/'
        'test_posetrack18_val.json',
        img_prefix='tests/data/posetrack18/',
        data_cfg=data_cfg,
        pipeline=[],
        dataset_info=dataset_info,
        test_mode=True)

    assert custom_dataset.test_mode is True
    assert custom_dataset.dataset_name == 'posetrack18'
    assert custom_dataset.ph_fill_len == 6

    image_id = 10128340000
    assert image_id in custom_dataset.img_ids
    assert len(custom_dataset.img_ids) == 3
    assert len(custom_dataset) == 14
    _ = custom_dataset[0]

    # Test det bbox + test mode
    data_cfg_copy = copy.deepcopy(data_cfg)
    data_cfg_copy['use_gt_bbox'] = False

    custom_dataset = dataset_class(
        ann_file='tests/data/posetrack18/annotations/'
        'test_posetrack18_val.json',
        img_prefix='tests/data/posetrack18/',
        data_cfg=data_cfg_copy,
        pipeline=[],
        dataset_info=dataset_info,
        test_mode=True)

    # Test-time frame indices are sorted by the dataset.
    assert custom_dataset.frame_indices_test == [-2, -1, 0, 1, 2]
    assert len(custom_dataset) == 278

    # Test non-random index: train-time frame indices are also sorted.
    data_cfg_copy = copy.deepcopy(data_cfg)
    data_cfg_copy['frame_index_rand'] = False
    data_cfg_copy['frame_indices_train'] = [0, -1]

    custom_dataset = dataset_class(
        ann_file='tests/data/posetrack18/annotations/'
        'test_posetrack18_val.json',
        img_prefix='tests/data/posetrack18/',
        data_cfg=data_cfg_copy,
        pipeline=[],
        dataset_info=dataset_info,
        test_mode=False)

    assert custom_dataset.frame_indices_train == [-1, 0]

    # Test evaluate function, use gt bbox
    results = convert_db_to_output(custom_dataset.db)
    infos = custom_dataset.evaluate(results, metric='mAP')
    assert_almost_equal(infos['Total AP'], 100)

    # PCK is not a supported metric for this dataset.
    with pytest.raises(KeyError):
        _ = custom_dataset.evaluate(results, metric='PCK')

    # Test evaluate function, use det bbox
    data_cfg_copy = copy.deepcopy(data_cfg)
    data_cfg_copy['use_gt_bbox'] = False
    custom_dataset = dataset_class(
        ann_file='tests/data/posetrack18/annotations/'
        'test_posetrack18_val.json',
        img_prefix='tests/data/posetrack18/',
        data_cfg=data_cfg_copy,
        pipeline=[],
        dataset_info=dataset_info,
        test_mode=True)

    results = convert_db_to_output(custom_dataset.db)
    infos = custom_dataset.evaluate(results, metric='mAP')
    # since the det box input assume each keypoint position to be (0,0),
    # the Total AP will be zero.
    assert_almost_equal(infos['Total AP'], 0)

    with pytest.raises(KeyError):
        _ = custom_dataset.evaluate(results, metric='PCK')
# Code example #23 (コード例 #23)
def test_top_down_AIC_dataset():
    """Test TopDownAicDataset: det-bbox construction with an empty
    ``bbox_file`` must fail, gt-bbox construction succeeds, and mAP
    evaluation on ground-truth-derived results is perfect."""
    dataset = 'TopDownAicDataset'
    dataset_info = Config.fromfile(
        'configs/_base_/datasets/aic.py').dataset_info
    # test AIC datasets
    dataset_class = DATASETS.get(dataset)
    # NOTE(review): class-level mocks persist across tests sharing this class.
    dataset_class.load_annotations = MagicMock()
    dataset_class.coco = MagicMock()

    # AIC uses 14 keypoint channels, all used for inference.
    channel_cfg = dict(
        num_output_channels=14,
        dataset_joints=14,
        dataset_channel=[
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13],
        ],
        inference_channel=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13])

    data_cfg = dict(
        image_size=[192, 256],
        heatmap_size=[48, 64],
        num_output_channels=channel_cfg['num_output_channels'],
        num_joints=channel_cfg['dataset_joints'],
        dataset_channel=channel_cfg['dataset_channel'],
        inference_channel=channel_cfg['inference_channel'],
        soft_nms=False,
        nms_thr=1.0,
        oks_thr=0.9,
        vis_thr=0.2,
        use_gt_bbox=True,
        det_bbox_thr=0.0,
        bbox_file='')

    # Test det bbox: with use_gt_bbox=False and an empty bbox_file the
    # construction must fail. The setup is hoisted out of the context so
    # pytest.raises wraps only the statement expected to raise.
    data_cfg_copy = copy.deepcopy(data_cfg)
    data_cfg_copy['use_gt_bbox'] = False
    with pytest.raises(AssertionError):
        _ = dataset_class(
            ann_file='tests/data/aic/test_aic.json',
            img_prefix='tests/data/aic/',
            data_cfg=data_cfg_copy,
            pipeline=[],
            dataset_info=dataset_info,
            test_mode=True)
    # NOTE(review): the original also constructed a test_mode=False dataset
    # after the raising call inside the same pytest.raises block; that
    # statement was unreachable dead code and has been removed.

    # Test gt bbox
    custom_dataset = dataset_class(
        ann_file='tests/data/aic/test_aic.json',
        img_prefix='tests/data/aic/',
        data_cfg=data_cfg,
        pipeline=[],
        dataset_info=dataset_info,
        test_mode=True)

    assert custom_dataset.test_mode is True
    assert custom_dataset.dataset_name == 'aic'

    image_id = 1
    assert image_id in custom_dataset.img_ids
    assert len(custom_dataset.img_ids) == 3
    _ = custom_dataset[0]

    # Predictions copied from the ground truth should score a perfect AP.
    results = convert_db_to_output(custom_dataset.db)
    infos = custom_dataset.evaluate(results, metric='mAP')
    assert_almost_equal(infos['AP'], 1.0)

    # PCK is not a supported metric for this dataset.
    with pytest.raises(KeyError):
        _ = custom_dataset.evaluate(results, metric='PCK')
# Code example #24 (コード例 #24)
def test_top_down_InterHand3D_dataset_compatibility():
    """Test InterHand3DDataset in legacy (no ``dataset_info``) mode: every
    construction must emit a DeprecationWarning, and the 3D hand metrics
    on ground-truth-derived outputs must be perfect."""
    dataset = 'InterHand3DDataset'
    dataset_class = DATASETS.get(dataset)

    # All 42 keypoint channels are used for inference.
    channel_cfg = dict(num_output_channels=42,
                       dataset_joints=42,
                       dataset_channel=[
                           [
                               0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
                               14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25,
                               26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37,
                               38, 39, 40, 41
                           ],
                       ],
                       inference_channel=[
                           0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
                           15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27,
                           28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
                           41
                       ])

    data_cfg = dict(image_size=[256, 256],
                    heatmap_size=[64, 64, 64],
                    heatmap3d_depth_bound=400.0,
                    heatmap_size_root=64,
                    root_depth_bound=400.0,
                    num_output_channels=channel_cfg['num_output_channels'],
                    num_joints=channel_cfg['dataset_joints'],
                    dataset_channel=channel_cfg['dataset_channel'],
                    inference_channel=channel_cfg['inference_channel'])
    # Test: legacy construction succeeds but must warn in both modes.
    data_cfg_copy = copy.deepcopy(data_cfg)
    with pytest.warns(DeprecationWarning):
        _ = dataset_class(
            ann_file='tests/data/interhand2.6m/test_interhand2.6m_data.json',
            camera_file='tests/data/interhand2.6m/'
            'test_interhand2.6m_camera.json',
            joint_file='tests/data/interhand2.6m/'
            'test_interhand2.6m_joint_3d.json',
            img_prefix='tests/data/interhand2.6m/',
            data_cfg=data_cfg_copy,
            pipeline=[],
            test_mode=True)

    with pytest.warns(DeprecationWarning):
        custom_dataset = dataset_class(
            ann_file='tests/data/interhand2.6m/test_interhand2.6m_data.json',
            camera_file='tests/data/interhand2.6m/'
            'test_interhand2.6m_camera.json',
            joint_file='tests/data/interhand2.6m/'
            'test_interhand2.6m_joint_3d.json',
            img_prefix='tests/data/interhand2.6m/',
            data_cfg=data_cfg_copy,
            pipeline=[],
            test_mode=False)

    assert custom_dataset.test_mode is False
    assert custom_dataset.num_images == 4
    assert len(custom_dataset.db) == 4

    _ = custom_dataset[0]

    # Feeding ground truth back as predictions yields zero pose error and
    # perfect handedness accuracy.
    outputs = convert_db_to_output(custom_dataset.db,
                                   keys=['rel_root_depth', 'hand_type'],
                                   is_3d=True)
    with tempfile.TemporaryDirectory() as tmpdir:
        infos = custom_dataset.evaluate(outputs, tmpdir,
                                        ['MRRPE', 'MPJPE', 'Handedness_acc'])
        assert_almost_equal(infos['MRRPE'], 0.0, decimal=5)
        assert_almost_equal(infos['MPJPE_all'], 0.0, decimal=5)
        assert_almost_equal(infos['MPJPE_single'], 0.0, decimal=5)
        assert_almost_equal(infos['MPJPE_interacting'], 0.0, decimal=5)
        assert_almost_equal(infos['Handedness_acc'], 1.0)

        # mAP is not a supported metric for this dataset.
        with pytest.raises(KeyError):
            infos = custom_dataset.evaluate(outputs, tmpdir, 'mAP')
# Code example #25 (コード例 #25)
def test_ap10k_dataset():
    """Exercise AnimalAP10KDataset: construction in test/train mode, mAP
    evaluation, and the warning path when gt annotations are missing."""
    dataset_class = DATASETS.get('AnimalAP10KDataset')
    dataset_info = Config.fromfile(
        'configs/_base_/datasets/ap10k.py').dataset_info

    # All 17 keypoint channels are used for inference.
    joint_ids = list(range(17))
    channel_cfg = dict(
        num_output_channels=17,
        dataset_joints=17,
        dataset_channel=[joint_ids],
        inference_channel=joint_ids)

    data_cfg = dict(
        image_size=[256, 256],
        heatmap_size=[64, 64],
        num_output_channels=channel_cfg['num_output_channels'],
        num_joints=channel_cfg['dataset_joints'],
        dataset_channel=channel_cfg['dataset_channel'],
        inference_channel=channel_cfg['inference_channel'],
        soft_nms=False,
        nms_thr=1.0,
        oks_thr=0.9,
        vis_thr=0.2,
        use_gt_bbox=True,
        det_bbox_thr=0.0,
        bbox_file='',
    )

    # Construction must succeed in both test and train mode; both calls
    # share the same (deep-copied) config object, as in the original test.
    cfg = copy.deepcopy(data_cfg)
    _ = dataset_class(ann_file='tests/data/ap10k/test_ap10k.json',
                      img_prefix='tests/data/ap10k/',
                      data_cfg=cfg,
                      dataset_info=dataset_info,
                      pipeline=[],
                      test_mode=True)

    custom_dataset = dataset_class(ann_file='tests/data/ap10k/test_ap10k.json',
                                   img_prefix='tests/data/ap10k/',
                                   data_cfg=cfg,
                                   dataset_info=dataset_info,
                                   pipeline=[],
                                   test_mode=False)

    assert custom_dataset.dataset_name == 'ap10k'
    assert custom_dataset.test_mode is False
    assert custom_dataset.num_images == 2
    _ = custom_dataset[0]

    results = convert_db_to_output(custom_dataset.db)

    # as there is only one box in each image for test, normalize every
    # bbox id to 0
    for output in results:
        output['bbox_ids'] = [0] * len(output['bbox_ids'])

    # Predictions copied from the ground truth should score a perfect AP.
    infos = custom_dataset.evaluate(results, metric='mAP')
    assert_almost_equal(infos['AP'], 1.0)

    # PCK is not a supported metric for this dataset.
    with pytest.raises(KeyError):
        infos = custom_dataset.evaluate(results, metric=['PCK'])

    # Test when gt annotations are absent: evaluation should only warn.
    del custom_dataset.coco.dataset['annotations']
    with pytest.warns(UserWarning):
        _ = custom_dataset.evaluate(results, metric='mAP')