Example 1
import os.path as osp
import tempfile

from mmdet.datasets import CocoDataset


def test_coco_ids_consistent_with_name():
    names1 = ('person', 'bicycle', 'car')
    names2 = ('bicycle', 'car', 'person')
    tmp_dir = tempfile.TemporaryDirectory()
    three_class_json_file = osp.join(tmp_dir.name, 'three_class.json')
    _create_three_class(three_class_json_file)
    cat_names1 = CocoDataset(
        ann_file=three_class_json_file, classes=names1, pipeline=[])
    cat_names2 = CocoDataset(
        ann_file=three_class_json_file, classes=names2, pipeline=[])
    assert cat_names1.cat_ids == [0, 1, 2]
    assert cat_names2.cat_ids == [1, 2, 0]
    tmp_dir.cleanup()
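The test assumes a helper that writes a three-category COCO file. A minimal sketch of what _create_three_class could look like (the real helper in the test suite may differ; category ids 0/1/2 are an assumption chosen to match the assertions above):

import json

def _create_three_class(json_name):
    # Hypothetical helper: one image plus one annotation per category;
    # category ids 0/1/2 line up with the cat_ids asserted in the test.
    image = dict(id=0, width=640, height=480, file_name='fake_name.jpg')
    annotations = [
        dict(id=i + 1, image_id=0, category_id=i,
             bbox=[10 * i, 10 * i, 20, 20], area=400, iscrowd=0)
        for i in range(3)
    ]
    categories = [
        dict(id=0, name='person', supercategory='none'),
        dict(id=1, name='bicycle', supercategory='none'),
        dict(id=2, name='car', supercategory='none'),
    ]
    fake_json = dict(
        images=[image], annotations=annotations, categories=categories)
    with open(json_name, 'w') as f:
        json.dump(fake_json, f)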
Example 2
import os.path as osp
import tempfile

import pytest

from mmdet.datasets import CocoDataset


def test_coco_annotation_ids_unique():
    tmp_dir = tempfile.TemporaryDirectory()
    fake_json_file = osp.join(tmp_dir.name, 'fake_data.json')
    _create_ids_error_coco_json(fake_json_file)

    # test annotation ids not unique error
    with pytest.raises(AssertionError):
        CocoDataset(ann_file=fake_json_file, classes=('car', ), pipeline=[])
    tmp_dir.cleanup()
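The dataset refuses to load a file whose annotation ids are not unique, which is what the AssertionError checks. A sketch of the hypothetical _create_ids_error_coco_json helper (the real one may differ): two annotations deliberately share the same id.

import json

def _create_ids_error_coco_json(json_name):
    # Two annotations share id=1, which should trip the uniqueness
    # assertion when the dataset is constructed.
    image = dict(id=0, width=640, height=480, file_name='fake_name.jpg')
    annotation_1 = dict(id=1, image_id=0, category_id=0,
                        bbox=[50, 60, 20, 20], area=400, iscrowd=0)
    annotation_2 = dict(id=1, image_id=0, category_id=0,
                        bbox=[10, 10, 20, 20], area=400, iscrowd=0)
    categories = [dict(id=0, name='car', supercategory='none')]
    fake_json = dict(
        images=[image],
        annotations=[annotation_1, annotation_2],
        categories=categories)
    with open(json_name, 'w') as f:
        json.dump(fake_json, f)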
Example 3
import os.path as osp
import tempfile

from mmdet.datasets import CocoDataset, CustomDataset, build_dataset


def test_dataset_evaluation():
    tmp_dir = tempfile.TemporaryDirectory()
    # create dummy data
    fake_json_file = osp.join(tmp_dir.name, 'fake_data.json')
    _create_dummy_coco_json(fake_json_file)

    # test single coco dataset evaluation
    coco_dataset = CocoDataset(
        ann_file=fake_json_file, classes=('car', ), pipeline=[])
    fake_results = _create_dummy_results()
    eval_results = coco_dataset.evaluate(fake_results, classwise=True)
    assert eval_results['bbox_mAP'] == 1
    assert eval_results['bbox_mAP_50'] == 1
    assert eval_results['bbox_mAP_75'] == 1

    # test concat dataset evaluation
    fake_concat_results = _create_dummy_results() + _create_dummy_results()

    # build concat dataset through two config dict
    coco_cfg = dict(
        type='CocoDataset',
        ann_file=fake_json_file,
        classes=('car', ),
        pipeline=[])
    concat_cfgs = [coco_cfg, coco_cfg]
    concat_dataset = build_dataset(concat_cfgs)
    eval_results = concat_dataset.evaluate(fake_concat_results)
    assert eval_results['0_bbox_mAP'] == 1
    assert eval_results['0_bbox_mAP_50'] == 1
    assert eval_results['0_bbox_mAP_75'] == 1
    assert eval_results['1_bbox_mAP'] == 1
    assert eval_results['1_bbox_mAP_50'] == 1
    assert eval_results['1_bbox_mAP_75'] == 1

    # build concat dataset through concatenated ann_file
    coco_cfg = dict(
        type='CocoDataset',
        ann_file=[fake_json_file, fake_json_file],
        classes=('car', ),
        pipeline=[])
    concat_dataset = build_dataset(coco_cfg)
    eval_results = concat_dataset.evaluate(fake_concat_results)
    assert eval_results['0_bbox_mAP'] == 1
    assert eval_results['0_bbox_mAP_50'] == 1
    assert eval_results['0_bbox_mAP_75'] == 1
    assert eval_results['1_bbox_mAP'] == 1
    assert eval_results['1_bbox_mAP_50'] == 1
    assert eval_results['1_bbox_mAP_75'] == 1

    # create dummy data
    fake_pkl_file = osp.join(tmp_dir.name, 'fake_data.pkl')
    _create_dummy_custom_pkl(fake_pkl_file)

    # test single custom dataset evaluation
    custom_dataset = CustomDataset(
        ann_file=fake_pkl_file, classes=('car', ), pipeline=[])
    fake_results = _create_dummy_results()
    eval_results = custom_dataset.evaluate(fake_results)
    assert eval_results['mAP'] == 1

    # test concat dataset evaluation
    fake_concat_results = _create_dummy_results() + _create_dummy_results()

    # build concat dataset through two config dict
    custom_cfg = dict(
        type='CustomDataset',
        ann_file=fake_pkl_file,
        classes=('car', ),
        pipeline=[])
    concat_cfgs = [custom_cfg, custom_cfg]
    concat_dataset = build_dataset(concat_cfgs)
    eval_results = concat_dataset.evaluate(fake_concat_results)
    assert eval_results['0_mAP'] == 1
    assert eval_results['1_mAP'] == 1

    # build concat dataset through concatenated ann_file
    concat_cfg = dict(
        type='CustomDataset',
        ann_file=[fake_pkl_file, fake_pkl_file],
        classes=('car', ),
        pipeline=[])
    concat_dataset = build_dataset(concat_cfg)
    eval_results = concat_dataset.evaluate(fake_concat_results)
    assert eval_results['0_mAP'] == 1
    assert eval_results['1_mAP'] == 1

    # build concat dataset through explicit type
    concat_cfg = dict(
        type='ConcatDataset',
        datasets=[custom_cfg, custom_cfg],
        separate_eval=False)
    concat_dataset = build_dataset(concat_cfg)
    eval_results = concat_dataset.evaluate(fake_concat_results, metric='mAP')
    assert eval_results['mAP'] == 1
    assert len(concat_dataset.datasets[0].data_infos) == \
        len(concat_dataset.datasets[1].data_infos)
    assert len(concat_dataset.datasets[0].data_infos) == 1
    tmp_dir.cleanup()
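The mAP == 1 assertions only hold if the fake results exactly reproduce the ground truth. A sketch of how two of the assumed helpers could fit together (the real _create_dummy_coco_json and _create_dummy_results may differ; the box values here are assumptions chosen so the detection matches the annotation exactly):

import json

import numpy as np

def _create_dummy_coco_json(json_name):
    # Hypothetical ground truth: one image with a single 'car' box at
    # [50, 60, 20, 20] in COCO xywh format.
    image = dict(id=0, width=640, height=480, file_name='fake_name.jpg')
    annotation = dict(id=1, image_id=0, category_id=0,
                      bbox=[50, 60, 20, 20], area=400, iscrowd=0)
    categories = [dict(id=0, name='car', supercategory='none')]
    fake_json = dict(
        images=[image], annotations=[annotation], categories=categories)
    with open(json_name, 'w') as f:
        json.dump(fake_json, f)

def _create_dummy_results():
    # Results are a list over images, each a list over classes holding an
    # (n, 5) array of [x1, y1, x2, y2, score]. The single detection is the
    # ground-truth box converted from xywh to xyxy, so AP is 1 at every
    # IoU threshold.
    boxes = [np.array([[50, 60, 70, 80, 1.0]], dtype=np.float32)]
    return [boxes]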
Example 4
parser.add_argument('--gpus', type=str, default='0', help='gpus, default is 0')
args = parser.parse_args()

args.model_save_path = os.path.join(
    config.model_save_path,
    time.strftime('%Y-%m-%d', time.localtime(time.time())))

if not os.path.exists(args.model_save_path):
    _logger.warning("{} does not exist, creating it".format(args.model_save_path))
    os.makedirs(args.model_save_path)
_set_file(os.path.join(args.model_save_path, 'log.log'))

# Build model
from ..models.fbnet_faster_rcnn import FBNetCustomFasterRCNN
from ..search.detection_searcher import DetectionSearcher
from mmdet.datasets import CocoDataset, build_dataloader
coco_dataset = CocoDataset(**data_cfg['train'])
coco_dataset = build_dataloader(coco_dataset,
                                imgs_per_gpu=config.imgs_per_gpu,
                                # assumes config defines workers_per_gpu;
                                # the original reused imgs_per_gpu here
                                workers_per_gpu=config.workers_per_gpu,
                                dist=False,
                                num_gpus=len(args.gpus.split(',')))

model = FBNetCustomFasterRCNN(cfg=mmcv_config(model_cfg),
                              train_cfg=mmcv_config(train_cfg),
                              test_cfg=mmcv_config(test_cfg),
                              channels=model_cfg['neck']['in_channels'])
model.speed_test(torch.randn((1, 3, 224, 224)),
                 verbose=False,
                 # first GPU id; split(',') keeps multi-digit ids intact
                 device='cuda:' + args.gpus.split(',')[0])

searcher = DetectionSearcher(model=model,