Example #1
def test_parse_ann_info(dataset):
    dataset_class = DATASETS.get(dataset)

    dataset = dataset_class(ann_file=DEMO_ANN_FILE,
                            classes=('car', 'person'),
                            pipeline=[])

    # image 1 has no gt or detected objects
    img_id = 1
    img_info = dataset.coco.load_imgs([img_id])[0]
    ann_ids = dataset.coco.get_ann_ids([img_id])
    ann_info = dataset.coco.loadAnns(ann_ids)
    ann = dataset._parse_ann_info(img_info, ann_info)
    assert ann['bboxes'].shape == (0, 4)
    assert ann['bboxes_ignore'].shape == (3, 4)

    # image 5 has 2 objects
    img_id = 5
    img_info = dataset.coco.load_imgs([img_id])[0]
    ann_ids = dataset.coco.get_ann_ids([img_id])
    ann_info = dataset.coco.loadAnns(ann_ids)
    ann = dataset._parse_ann_info(img_info, ann_info)
    assert ann['bboxes'].shape == (2, 4)
    assert ann['bboxes_ignore'].shape == (0, 4)

    # image 8 doesn't have objects
    img_id = 8
    img_info = dataset.coco.load_imgs([img_id])[0]
    ann_ids = dataset.coco.get_ann_ids([img_id])
    ann_info = dataset.coco.loadAnns(ann_ids)
    ann = dataset._parse_ann_info(img_info, ann_info)
    assert ann['bboxes'].shape == (0, 4)
    assert ann['bboxes_ignore'].shape == (0, 4)
Example #2
def test_sot_vot_evaluation():
    dataset_class = DATASETS.get('VOTDataset')
    dataset = dataset_class(
        ann_file=osp.join(LASOT_ANN_PATH, 'lasot_test_dummy.json'),
        pipeline=[])

    # convert (x, y, w, h) gt boxes into the 8-value polygon corners used by VOT
    for _, img_ann in dataset.coco.anns.items():
        x, y, w, h = img_ann['bbox']
        img_ann['bbox'] = [x, y, x + w, y, x + w, y + h, x, y + h]

    results = []
    for video_name in ['airplane-1', 'airplane-2']:
        results.extend(
            mmcv.list_from_file(
                osp.join(LASOT_ANN_PATH, video_name, 'vot_track_results.txt')))
    track_bboxes = []
    for result in results:
        result = result.split(',')
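        # a single value per line marks a special frame in the VOT protocol
        # (e.g. initialization or failure) rather than a bbox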
        if len(result) == 1:
            track_bboxes.append(np.array([float(result[0]), 0.]))
        else:
            track_bboxes.append(
                np.array([
                    float(result[0]),
                    float(result[1]),
                    float(result[2]),
                    float(result[3]), 0.
                ]))

    track_bboxes = dict(track_bboxes=track_bboxes)
    eval_results = dataset.evaluate(
        track_bboxes, interval=[1, 3], metric=['track'])
    assert abs(eval_results['eao'] - 0.6394) < 0.0001
    assert round(eval_results['accuracy'], 4) == 0.5431
    assert round(eval_results['robustness'], 4) == 6.0
Example #3
def test_format_results(dataset):
    dataset_class = DATASETS.get(dataset)
    dataset = dataset_class(
        ann_file=osp.join(LASOT_ANN_PATH, 'lasot_test_dummy.json'),
        pipeline=[])

    results = []
    for video_name in ['airplane-1', 'airplane-2']:
        results.extend(
            mmcv.list_from_file(
                osp.join(LASOT_ANN_PATH, video_name, 'track_results.txt')))

    track_bboxes = []
    for result in results:
        x1, y1, x2, y2 = result.split(',')
        track_bboxes.append(
            np.array([float(x1),
                      float(y1),
                      float(x2),
                      float(y2), 0.]))

    track_results = dict(track_bboxes=track_bboxes)

    tmp_dir = tempfile.TemporaryDirectory()
    dataset.format_results(track_results, resfile_path=tmp_dir.name)
    if osp.isdir(tmp_dir.name):
        tmp_dir.cleanup()
Example #4
def test_load_data_infos(dataset):
    dataset_class = DATASETS.get(dataset)

    dataset_class(**DATASET_INFOS[dataset],
                  pipeline=[],
                  split='train',
                  test_mode=False)
Example #5
def test_video_data_sampling(dataset):
    dataset_class = DATASETS.get(dataset)

    # key image sampling
    for interval in [4, 2, 1]:
        dataset = dataset_class(ann_file=DEMO_ANN_FILE,
                                load_as_video=True,
                                classes=['car', 'person'],
                                key_img_sampler=dict(interval=interval),
                                ref_img_sampler=dict(num_ref_imgs=1,
                                                     frame_range=3,
                                                     filter_key_img=True,
                                                     method='uniform'),
                                pipeline=[],
                                test_mode=True)
        assert len(dataset.data_infos) == 8 // interval

    # ref image sampling
    data = dataset.data_infos[3]
    sampler = dict(num_ref_imgs=1, frame_range=3, method='uniform')
    ref_data = dataset.ref_img_sampling(data, **sampler)[1]
    assert abs(ref_data['frame_id'] -
               data['frame_id']) <= sampler['frame_range']
    sampler = dict(num_ref_imgs=2, frame_range=3, method='bilateral_uniform')
    ref_data = dataset.ref_img_sampling(data, **sampler)
    assert len(ref_data) == 3
    ref_data = dataset.ref_img_sampling(data, **sampler, return_key_img=False)
    assert len(ref_data) == 2
    assert ref_data[0]['frame_id'] < data['frame_id']
    assert ref_data[1]['frame_id'] > data['frame_id']
    assert data['frame_id'] - ref_data[0]['frame_id'] <= sampler['frame_range']
    assert ref_data[1]['frame_id'] - data['frame_id'] <= sampler['frame_range']
Example #6
def test_sot_ope_evaluation():
    dataset_class = DATASETS.get('UAV123Dataset')
    dataset_object = dataset_class(**DATASET_INFOS['UAV123Dataset'],
                                   pipeline=[],
                                   split='test',
                                   test_mode=True)

    dataset_object.num_frames_per_video = [25, 25]
    results = []
    data_infos = []
    lasot_root = osp.join(SOT_DATA_PREFIX, 'lasot_full')
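    # collect per-video tracking results and build dummy data_infos
    # pointing at the corresponding ground-truth files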
    for video_name in ['airplane/airplane-1', 'basketball/basketball-2']:
        bboxes = np.loadtxt(osp.join(lasot_root, video_name,
                                     'track_results.txt'),
                            delimiter=',')
        scores = np.zeros((len(bboxes), 1))
        bboxes = np.concatenate((bboxes, scores), axis=-1)
        results.extend(bboxes)
        data_infos.append(
            dict(video_path=osp.join(lasot_root, video_name, 'img'),
                 ann_path=osp.join(lasot_root, video_name, 'gt_for_eval.txt'),
                 start_frame_id=1,
                 end_frame_id=25,
                 framename_template='%06d.jpg'))

    dataset_object.data_infos = data_infos
    track_results = dict(track_bboxes=results)
    eval_results = dataset_object.evaluate(track_results, metric=['track'])
    assert eval_results['success'] == 67.524
    assert eval_results['norm_precision'] == 70.0
    assert eval_results['precision'] == 50.0
Example #7
def test_coco_video_evaluation():
    classes = ('car', 'person')
    dataset_class = DATASETS.get('CocoVideoDataset')
    dataset = dataset_class(ann_file=DEMO_ANN_FILE,
                            classes=classes,
                            pipeline=[])
    results = _create_coco_gt_results(dataset)
    eval_results = dataset.evaluate(results, metric=['bbox', 'track'])
    assert eval_results['bbox_mAP'] == 1.0
    assert eval_results['bbox_mAP_50'] == 1.0
    assert eval_results['bbox_mAP_75'] == 1.0
    assert 'bbox_mAP_copypaste' in eval_results
    assert eval_results['MOTA'] == 1.0
    assert eval_results['IDF1'] == 1.0
    assert eval_results['MT'] == 2
    assert 'track_OVERALL_copypaste' in eval_results
    assert 'track_AVERAGE_copypaste' in eval_results

    classes = ('car', )
    dataset = dataset_class(ann_file=DEMO_ANN_FILE,
                            classes=classes,
                            pipeline=[])
    results = _create_coco_gt_results(dataset)
    eval_results = dataset.evaluate(results, metric=['bbox', 'track'])
    assert eval_results['bbox_mAP'] == 1.0
    assert eval_results['bbox_mAP_50'] == 1.0
    assert eval_results['bbox_mAP_75'] == 1.0
    assert 'bbox_mAP_copypaste' in eval_results
    assert eval_results['MOTA'] == 1.0
    assert eval_results['IDF1'] == 1.0
    assert eval_results['MT'] == 1
    assert 'track_OVERALL_copypaste' in eval_results
    assert 'track_AVERAGE_copypaste' in eval_results
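
The helper _create_coco_gt_results used in several of these examples is never shown. Below is a minimal, hypothetical sketch of what it might look like, assuming the dataset exposes data_infos, get_ann_info, CLASSES, and instance_ids in its parsed annotations (all of these are assumptions, not confirmed by the snippets): it packs the ground-truth boxes, with score 1.0, into per-class bbox_results / track_bboxes lists of the kind consumed by evaluate, so every metric should come out perfect.

from collections import defaultdict

import numpy as np


def _create_coco_gt_results(dataset):
    """Hypothetical sketch: turn GT annotations into perfect results."""
    num_classes = len(dataset.CLASSES)
    results = defaultdict(list)
    for img_info in dataset.data_infos:
        ann = dataset.get_ann_info(img_info)
        scores = np.ones((ann['bboxes'].shape[0], 1), dtype=np.float32)
        bboxes = np.concatenate((ann['bboxes'], scores), axis=1)
        ids = np.asarray(ann['instance_ids']).reshape(-1, 1)
        # detections: one (n, 5) array of [x1, y1, x2, y2, score] per class
        results['bbox_results'].append(
            [bboxes[ann['labels'] == i] for i in range(num_classes)])
        # tracks: one (n, 6) array of [instance_id, x1, y1, x2, y2, score]
        # per class
        results['track_bboxes'].append([
            np.concatenate(
                (ids[ann['labels'] == i], bboxes[ann['labels'] == i]),
                axis=1) for i in range(num_classes)
        ])
    return results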
Example #8
def test_format_results(dataset):
    dataset_class = DATASETS.get(dataset)

    dataset_object = dataset_class(**DATASET_INFOS[dataset],
                                   pipeline=[],
                                   split='train',
                                   test_mode=True)

    results = []
    for video_name in ['airplane-1', 'airplane-2']:
        results.extend(
            mmcv.list_from_file(
                osp.join(SOT_DATA_PREFIX, 'lasot', video_name,
                         'track_results.txt')))

    track_bboxes = []
    for result in results:
        x1, y1, x2, y2 = result.split(',')
        track_bboxes.append(
            np.array([float(x1),
                      float(y1),
                      float(x2),
                      float(y2), 0.]))

    track_results = dict(track_bboxes=track_bboxes)

    tmp_dir = tempfile.TemporaryDirectory()
    dataset_object.format_results(track_results, resfile_path=tmp_dir.name)
    if osp.isdir(tmp_dir.name):
        tmp_dir.cleanup()
Example #9
def test_sot_ope_evaluation():
    dataset_class = DATASETS.get('SOTTestDataset')
    dataset = dataset_class(ann_file=osp.join(LASOT_ANN_PATH,
                                              'lasot_test_dummy.json'),
                            pipeline=[])

    results = []
    for video_name in ['airplane-1', 'airplane-2']:
        results.extend(
            mmcv.list_from_file(
                osp.join(LASOT_ANN_PATH, video_name, 'track_results.txt')))
    track_results = []
    for result in results:
        x1, y1, x2, y2 = result.split(',')
        track_results.append(
            np.array([float(x1),
                      float(y1),
                      float(x2),
                      float(y2), 0.]))

    track_results = dict(track_results=track_results)
    eval_results = dataset.evaluate(track_results, metric=['track'])
    assert eval_results['success'] == 67.524
    assert eval_results['norm_precision'] == 70.0
    assert eval_results['precision'] == 50.0
Example #10
def test_get_ann_infos_from_video(dataset):
    dataset_class = DATASETS.get(dataset)

    dataset_object = dataset_class(**DATASET_INFOS[dataset],
                                   pipeline=[],
                                   split='train',
                                   test_mode=False)
    dataset_object.get_ann_infos_from_video(0)
Example #11
def test_prepare_train_data(dataset):
    dataset_class = DATASETS.get(dataset)

    dataset_object = dataset_class(**DATASET_INFOS[dataset],
                                   pipeline=[],
                                   split='train',
                                   test_mode=False)
    dataset_object.prepare_train_data(0)
Example #12
def test_tao_evaluation():
    dataset_class = DATASETS.get('TaoDataset')
    dataset_object = dataset_class(ann_file=DEMO_ANN_FILE,
                                   classes=['serving_dish', 'baby'],
                                   pipeline=[])
    results = _create_coco_gt_results(dataset_object)
    eval_results = dataset_object.evaluate(results, metric=['track', 'bbox'])
    assert eval_results['bbox_AP'] == 1
    assert eval_results['track_AP'] == 1
Example #13
def test_get_visibility_from_video(dataset):
    dataset_class = DATASETS.get(dataset)

    dataset_object = dataset_class(**DATASET_INFOS[dataset],
                                   pipeline=[],
                                   split='train',
                                   test_mode=False)
    visibility = dataset_object.get_visibility_from_video(0)
    assert len(visibility['visible']) == dataset_object.num_frames_per_video[0]
Example #14
def test_mot15_track_evaluation(dataset):
    tmp_dir = tempfile.TemporaryDirectory()
    videos = ['TUD-Campus', 'TUD-Stadtmitte']

    dataset_class = DATASETS.get(dataset)
    dataset_class.cat_ids = MagicMock()
    dataset_class.coco = MagicMock()

    dataset = dataset_class(ann_file=MagicMock(),
                            visibility_thr=-1,
                            pipeline=[])
    dataset.img_prefix = MOT_ANN_PATH
    dataset.vid_ids = [1, 2]
    vid_infos = [dict(name=_) for _ in videos]
    dataset.coco.load_vids = MagicMock(return_value=vid_infos)
    dataset.data_infos = []

    def _load_results(videos):
        track_bboxes, data_infos = [], []
        for video in videos:
            dets = mmcv.list_from_file(
                osp.join(MOT_ANN_PATH, 'results', f'{video}.txt'))
            track_bbox = defaultdict(list)
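            # each result line: frame_id, instance_id, x, y, w, h, conf, ...;
            # boxes are converted to [instance_id, x1, y1, x2, y2, conf] below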
            for det in dets:
                det = det.strip().split(',')
                frame_id, ins_id = map(int, det[:2])
                bbox = list(map(float, det[2:7]))
                track = [
                    ins_id, bbox[0], bbox[1], bbox[0] + bbox[2],
                    bbox[1] + bbox[3], bbox[4]
                ]
                track_bbox[frame_id].append(track)
            max_frame = max(track_bbox.keys())
            for i in range(1, max_frame + 1):
                track_bboxes.append(
                    [np.array(track_bbox[i], dtype=np.float32)])
                data_infos.append(dict(frame_id=i - 1))
        return track_bboxes, data_infos

    track_bboxes, data_infos = _load_results(videos)
    dataset.data_infos = data_infos

    eval_results = dataset.evaluate(dict(track_bboxes=track_bboxes),
                                    metric='track',
                                    logger=None,
                                    resfile_path=None,
                                    track_iou_thr=0.5)
    assert eval_results['IDF1'] == 0.624
    assert eval_results['IDP'] == 0.799
    assert eval_results['MOTA'] == 0.555
    assert eval_results['IDs'] == 14
    assert eval_results['HOTA'] == 0.400

    tmp_dir.cleanup()
Example #15
def test_mot17_bbox_evaluation():
    classes = ('car', 'person')
    dataset_class = DATASETS.get('MOTChallengeDataset')
    dataset = dataset_class(
        ann_file=DEMO_ANN_FILE, classes=classes, pipeline=[])
    results = _create_coco_gt_results(dataset)

    eval_results = dataset.evaluate(results, metric='bbox')
    assert eval_results['mAP'] == 1.0
    eval_results = dataset.evaluate(results['bbox_results'], metric='bbox')
    assert eval_results['mAP'] == 1.0
Example #16
def test_reid_evaluation(dataset):
    dataset_class = DATASETS.get(dataset)

    dataset = dataset_class(data_prefix='reid',
                            ann_file=REID_ANN_FILE,
                            pipeline=[])
    results = _create_reid_gt_results(dataset)
    eval_results = dataset.evaluate(results, metric=['mAP', 'CMC'])
    assert eval_results['mAP'] == 1
    assert eval_results['R1'] == 1
    assert eval_results['R5'] == 1
    assert eval_results['R10'] == 1
    assert eval_results['R20'] == 1
Example #17
def test_sot_train_dataset_parse_ann_info(dataset):
    dataset_class = DATASETS.get(dataset)

    dataset = dataset_class(ann_file=DEMO_ANN_FILE, pipeline=[])

    # image 5 has 2 objects; we only load the object with instance_id = 1
    img_id = 5
    instance_id = 1
    ann_ids = dataset.coco.get_ann_ids([img_id])
    ann_info = dataset.coco.loadAnns(ann_ids)
    ann = dataset._parse_ann_info(instance_id, ann_info)
    assert ann['bboxes'].shape == (1, 4)
    assert ann['labels'].shape == (1, ) and ann['labels'][0] == 0
Example #18
def test_get_bboxes_from_video(dataset):
    dataset_class = DATASETS.get(dataset)

    dataset_object = dataset_class(**DATASET_INFOS[dataset],
                                   pipeline=[],
                                   split='train',
                                   test_mode=False)

    bboxes = dataset_object.get_bboxes_from_video(0)
    assert bboxes.shape[0] == dataset_object.num_frames_per_video[0]
    if dataset == 'VOTDataset':
        assert bboxes.shape[1] == 8
    else:
        assert bboxes.shape[1] == 4
Example #19
def test_prepare_data(dataset):
    dataset_class = DATASETS.get(dataset)

    # train
    dataset = dataset_class(
        ann_file=DEMO_ANN_FILE,
        classes=['car', 'person'],
        ref_img_sampler=dict(
            num_ref_imgs=1,
            frame_range=1,
            filter_key_img=True,
            method='uniform'),
        pipeline=[],
        test_mode=False)
    assert len(dataset) == 7

    results = dataset.prepare_train_img(0)
    assert isinstance(results, list)
    assert len(results) == 2
    assert 'ann_info' in results[0]
    assert results[0].keys() == results[1].keys()

    dataset.ref_img_sampler = None
    results = dataset.prepare_train_img(0)
    assert isinstance(results, dict)
    assert 'ann_info' in results

    # test
    dataset = dataset_class(
        ann_file=DEMO_ANN_FILE,
        classes=['car', 'person'],
        ref_img_sampler=dict(
            num_ref_imgs=1,
            frame_range=1,
            filter_key_img=True,
            method='uniform'),
        pipeline=[],
        test_mode=True)
    assert len(dataset) == 8

    results = dataset.prepare_test_img(0)
    assert isinstance(results, list)
    assert len(results) == 2
    assert 'ann_info' not in results[0]
    assert results[0].keys() == results[1].keys()

    dataset.ref_img_sampler = None
    results = dataset.prepare_test_img(0)
    assert isinstance(results, dict)
    assert 'ann_info' not in results
Example #20
def test_lasot_dataset_parse_ann_info(dataset):
    dataset_class = DATASETS.get(dataset)

    dataset = dataset_class(ann_file=osp.join(LASOT_ANN_PATH,
                                              'lasot_test_dummy.json'),
                            pipeline=[])

    # image 5 has 1 object
    img_id = 5
    img_info = dataset.coco.load_imgs([img_id])[0]
    ann_ids = dataset.coco.get_ann_ids([img_id])
    ann_info = dataset.coco.loadAnns(ann_ids)
    ann = dataset._parse_ann_info(img_info, ann_info)
    assert ann['bboxes'].shape == (4, )
    assert ann['labels'] == 0
Example #21
def test_load_annotation():
    dataset_class = DATASETS.get('TaoDataset')
    dataset_object = dataset_class(ann_file=DEMO_ANN_FILE,
                                   classes=['serving_dish', 'baby'],
                                   pipeline=[])

    dataset_object.load_as_video = False
    data_infos = dataset_object.load_lvis_anns(DEMO_ANN_FILE)
    assert isinstance(data_infos, list)
    assert len(data_infos) == 2

    dataset_object.load_as_video = True
    data_infos = dataset_object.load_tao_anns(DEMO_ANN_FILE)
    assert isinstance(data_infos, list)
    assert len(data_infos) == 2
    assert len(dataset_object.vid_ids) == 1
Example #22
def test_sot_train_dataset_prepare_data(dataset):
    dataset_class = DATASETS.get(dataset)

    # train
    dataset = dataset_class(ann_file=DEMO_ANN_FILE,
                            ref_img_sampler=dict(frame_range=100,
                                                 pos_prob=0.8,
                                                 filter_key_img=False,
                                                 return_key_img=True),
                            pipeline=[],
                            test_mode=False)
    assert len(dataset) == 1

    results = dataset.prepare_train_img(0)
    assert isinstance(results, list)
    assert len(results) == 2
    assert 'ann_info' in results[0]
    assert results[0].keys() == results[1].keys()
Example #23
def test_sot_vot_evaluation():
    dataset_class = DATASETS.get('VOTDataset')
    dataset_object = dataset_class(**DATASET_INFOS['VOTDataset'],
                                   pipeline=[],
                                   split='test',
                                   test_mode=True)

    dataset_object.num_frames_per_video = [25, 25]
    data_infos = []
    results = []
    vot_root = osp.join(SOT_DATA_PREFIX, 'vot2018')
    for video_name in ['ants1', 'ants3']:
        results.extend(
            mmcv.list_from_file(
                osp.join(vot_root, video_name, 'track_results.txt')))
        data_infos.append(
            dict(video_path=osp.join(vot_root, video_name, 'color'),
                 ann_path=osp.join(vot_root, video_name, 'gt_for_eval.txt'),
                 start_frame_id=1,
                 end_frame_id=25,
                 framename_template='%08d.jpg'))
    dataset_object.data_infos = data_infos

    track_bboxes = []
    for result in results:
        result = result.split(',')
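        # a single value per line marks a special frame in the VOT protocol
        # (e.g. initialization or failure) rather than a bbox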
        if len(result) == 1:
            track_bboxes.append(np.array([float(result[0]), 0.]))
        else:
            track_bboxes.append(
                np.array([
                    float(result[0]),
                    float(result[1]),
                    float(result[2]),
                    float(result[3]), 0.
                ]))

    track_bboxes = dict(track_bboxes=track_bboxes)
    eval_results = dataset_object.evaluate(track_bboxes,
                                           interval=[1, 3],
                                           metric=['track'])
    assert abs(eval_results['eao'] - 0.6661) < 0.0001
    assert round(eval_results['accuracy'], 4) == 0.5826
    assert round(eval_results['robustness'], 4) == 6.0
Example #24
def test_reid_dataset_parse_ann_info(dataset):
    dataset_class = DATASETS.get(dataset)

    dataset = dataset_class(data_prefix='reid',
                            ann_file=REID_ANN_FILE,
                            pipeline=[])
    data_infos = dataset.load_annotations()
    img_id = 0
    # image 0 has 21 objects
    assert len([
        data_info for data_info in data_infos
        if data_info['gt_label'] == img_id
    ]) == 21
    img_id = 11
    # image 11 doesn't have objects
    assert len([
        data_info for data_info in data_infos
        if data_info['gt_label'] == img_id
    ]) == 0
Example #25
def test_load_detections(dataset):
    dataset_class = DATASETS.get(dataset)
    dataset = dataset_class(
        ann_file=DEMO_ANN_FILE,
        classes=('car', 'person'),
        pipeline=[],
        test_mode=True)

    tmp_dir = tempfile.TemporaryDirectory()
    det_file = osp.join(tmp_dir.name, 'det.pkl')
    outputs = _create_coco_gt_results(dataset)

    # case 1: detections dumped as a plain list of per-image bbox results
    mmcv.dump(outputs['bbox_results'], det_file)
    detections = dataset.load_detections(det_file)
    assert isinstance(detections, list)
    assert len(detections) == 8

    # case 2: detections dumped as the full results dict
    mmcv.dump(outputs, det_file)
    detections = dataset.load_detections(det_file)
    assert isinstance(detections, list)
    assert len(detections) == 8
    dataset.detections = detections
    i = np.random.randint(0, len(dataset.data_infos))
    results = dataset.prepare_results(dataset.data_infos[i])
    assert 'detections' in results
    for a, b in zip(results['detections'], outputs['bbox_results'][i]):
        assert (a == b).all()

    # case 3: detections dumped as a dict keyed by image file name
    out = dict()
    for i in range(len(dataset.data_infos)):
        out[dataset.data_infos[i]['file_name']] = outputs['bbox_results'][i]
    mmcv.dump(out, det_file)
    detections = dataset.load_detections(det_file)
    assert isinstance(detections, dict)
    assert len(detections) == 8
    dataset.detections = detections
    i = np.random.randint(0, len(dataset.data_infos))
    results = dataset.prepare_results(dataset.data_infos[i])
    assert 'detections' in results
    for a, b in zip(results['detections'], outputs['bbox_results'][i]):
        assert (a == b).all()

    tmp_dir.cleanup()
Example #26
def test_parse_ann_info(dataset):
    dataset_class = DATASETS.get(dataset)

    ann_file = osp.join(LASOT_ANN_PATH, 'lasot_test_dummy.json')
    dataset_object = dataset_class(ann_file=ann_file, pipeline=[])

    if dataset == 'VOTDataset':
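        # VOT stores boxes as 8-value polygon corners; convert from (x, y, w, h)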
        for _, img_ann in dataset_object.coco.anns.items():
            x, y, w, h = img_ann['bbox']
            img_ann['bbox'] = [x, y, x + w, y, x + w, y + h, x, y + h]

    # image 5 has 1 object
    img_id = 5
    img_info = dataset_object.coco.load_imgs([img_id])[0]
    ann_ids = dataset_object.coco.get_ann_ids([img_id])
    ann_info = dataset_object.coco.loadAnns(ann_ids)
    ann = dataset_object._parse_ann_info(img_info, ann_info)
    if dataset == 'VOTDataset':
        assert ann['bboxes'].shape == (8, )
    else:
        assert ann['bboxes'].shape == (4, )
    assert ann['labels'] == 0
Example #27
def test_reid_dataset_prepare_data(dataset):
    dataset_class = DATASETS.get(dataset)

    num_ids = 8
    ins_per_id = 4
    dataset = dataset_class(data_prefix='reid',
                            ann_file=REID_ANN_FILE,
                            triplet_sampler=dict(num_ids=num_ids,
                                                 ins_per_id=ins_per_id),
                            pipeline=[],
                            test_mode=False)
    assert len(dataset) == 704

    results = dataset.prepare_data(0)
    assert isinstance(results, list)
    assert len(results) == 32
    assert 'img_info' in results[0]
    assert 'gt_label' in results[0]
    assert results[0].keys() == results[1].keys()
    # triplet sampling: each group of ins_per_id consecutive samples shares one identity
    for idx in range(len(results) - 1):
        if (idx + 1) % ins_per_id != 0:
            assert results[idx]['gt_label'] == results[idx + 1]['gt_label']