def test_can_load_image(self):
    """The importer should parse the image-based dummy dataset into these items.

    NOTE(review): another ``test_can_load_image`` exists later in this file;
    presumably they belong to different TestCase classes — verify, since two
    same-named methods in one class would silently shadow each other.
    """
    # Expected 'train' items: two images with mixed shape annotations.
    train_items = [
        DatasetItem(id='img0', subset='train',
            image=np.ones((8, 8, 3)),
            annotations=[
                Bbox(0, 2, 4, 2, label=0, z_order=1,
                    attributes={'occluded': True, 'a1': True, 'a2': 'v3'}),
                PolyLine([1, 2, 3, 4, 5, 6, 7, 8],
                    attributes={'occluded': False}),
            ],
            attributes={'frame': 0}),
        DatasetItem(id='img1', subset='train',
            image=np.ones((10, 10, 3)),
            annotations=[
                Polygon([1, 2, 3, 4, 6, 5], z_order=1,
                    attributes={'occluded': False}),
                Points([1, 2, 3, 4, 5, 6], label=1, z_order=2,
                    attributes={'occluded': False}),
            ],
            attributes={'frame': 1}),
    ]
    # 'label1' declares two free attributes; 'label2' declares none.
    labels = LabelCategories.from_iterable([
        ['label1', '', {'a1', 'a2'}],
        ['label2'],
    ])
    expected_dataset = Dataset.from_iterable(train_items,
        categories={AnnotationType.label: labels})

    parsed_dataset = CvatImporter()(DUMMY_IMAGE_DATASET_DIR).make_dataset()

    compare_datasets(self, expected_dataset, parsed_dataset)
def test_can_load(self):
    """A generated dummy CVAT dataset should round-trip through the importer."""
    class SrcExtractor(Extractor):
        # Reference items matching what generate_dummy_cvat() writes.
        def __iter__(self):
            items = [
                DatasetItem(id=0, subset='train',
                    image=np.ones((8, 8, 3)),
                    annotations=[
                        Bbox(0, 2, 4, 2, label=0, attributes={
                            'occluded': True, 'z_order': 1,
                            'a1': True, 'a2': 'v3',
                        }),
                        PolyLine([1, 2, 3, 4, 5, 6, 7, 8],
                            attributes={'occluded': False, 'z_order': 0}),
                    ]),
                DatasetItem(id=1, subset='train',
                    image=np.ones((10, 10, 3)),
                    annotations=[
                        Polygon([1, 2, 3, 4, 6, 5],
                            attributes={'occluded': False, 'z_order': 1}),
                        Points([1, 2, 3, 4, 5, 6], label=1,
                            attributes={'occluded': False, 'z_order': 2}),
                    ]),
            ]
            return iter(items)

        def categories(self):
            labels = LabelCategories()
            labels.add('label1', attributes={'a1', 'a2'})
            labels.add('label2')
            return {AnnotationType.label: labels}

    with TestDir() as test_dir:
        self.generate_dummy_cvat(test_dir)

        parsed_dataset = CvatImporter()(test_dir).make_dataset()

        compare_datasets(self, SrcExtractor(), parsed_dataset)
def _test_save_and_load(self, source_dataset, converter, test_dir,
        target_dataset=None, importer_args=None):
    """Round-trip helper: convert, re-import, and compare datasets.

    Args:
        source_dataset: dataset fed to the converter.
        converter: callable writing the dataset into ``test_dir``.
        test_dir: directory used for the intermediate on-disk format.
        target_dataset: expected result; defaults to ``source_dataset``.
        importer_args: extra keyword arguments for the importer.
    """
    converter(source_dataset, test_dir)

    kwargs = {} if importer_args is None else importer_args
    parsed_dataset = CvatImporter()(test_dir, **kwargs).make_dataset()

    # When no separate expectation is given, the round trip must be lossless.
    expected = target_dataset if target_dataset is not None else source_dataset
    compare_datasets(self, expected=expected, actual=parsed_dataset)
def test_can_load_image(self):
    """Parse the image-based dummy dataset and compare with a reference extractor.

    NOTE(review): another ``test_can_load_image`` exists earlier in this file;
    presumably they belong to different TestCase classes — verify, since two
    same-named methods in one class would silently shadow each other.
    """
    class ExpectedExtractor(Extractor):
        def __iter__(self):
            items = [
                DatasetItem(id='img0', subset='train',
                    image=np.ones((8, 8, 3)),
                    annotations=[
                        Bbox(0, 2, 4, 2, label=0, z_order=1, attributes={
                            'occluded': True, 'a1': True, 'a2': 'v3',
                        }),
                        PolyLine([1, 2, 3, 4, 5, 6, 7, 8],
                            attributes={'occluded': False}),
                    ],
                    attributes={'frame': 0}),
                DatasetItem(id='img1', subset='train',
                    image=np.ones((10, 10, 3)),
                    annotations=[
                        Polygon([1, 2, 3, 4, 6, 5], z_order=1,
                            attributes={'occluded': False}),
                        Points([1, 2, 3, 4, 5, 6], label=1, z_order=2,
                            attributes={'occluded': False}),
                    ],
                    attributes={'frame': 1}),
            ]
            return iter(items)

        def categories(self):
            labels = LabelCategories()
            labels.add('label1', attributes={'a1', 'a2'})
            labels.add('label2')
            return {AnnotationType.label: labels}

    parsed_dataset = CvatImporter()(DUMMY_IMAGE_DATASET_DIR).make_dataset()

    compare_datasets(self, ExpectedExtractor(), parsed_dataset)
def test_can_detect(self):
    """detect() should recognize a freshly generated dummy CVAT dataset."""
    with TestDir() as test_dir:
        # NOTE(review): called without ``self.`` here, unlike test_can_load —
        # presumably a module-level helper; confirm against the full file.
        generate_dummy_cvat(test_dir)

        detected = CvatImporter.detect(test_dir)
        self.assertTrue(detected)
def test_can_load_video(self):
    """Parse the video-based dummy dataset (interpolation tracks) and compare.

    NOTE(review): another ``test_can_load_video`` exists later in this file;
    presumably they belong to different TestCase classes — verify, since two
    same-named methods in one class would silently shadow each other.
    """
    class ExpectedExtractor(Extractor):
        def __iter__(self):
            # Each track keeps a stable id; per-frame shape attributes carry
            # the CVAT track flags (occluded / outside / keyframe).
            items = [
                DatasetItem(id='frame_000010', subset='annotations',
                    image=np.ones((20, 25, 3)),
                    annotations=[
                        Bbox(3, 4, 7, 1, label=2, id=0, attributes={
                            'occluded': True, 'outside': False,
                            'keyframe': True, 'track_id': 0,
                        }),
                        Points([21.95, 8.00, 2.55, 15.09, 2.23, 3.16],
                            label=0, id=1, attributes={
                                'occluded': False, 'outside': False,
                                'keyframe': True, 'track_id': 1,
                                'hgl': 'hgkf',
                            }),
                    ],
                    attributes={'frame': 10}),
                DatasetItem(id='frame_000013', subset='annotations',
                    image=np.ones((20, 25, 3)),
                    annotations=[
                        Bbox(7, 6, 7, 2, label=2, id=0, attributes={
                            'occluded': False, 'outside': True,
                            'keyframe': True, 'track_id': 0,
                        }),
                        Points([21.95, 8.00, 9.55, 15.09, 5.23, 1.16],
                            label=0, id=1, attributes={
                                'occluded': False, 'outside': True,
                                'keyframe': True, 'track_id': 1,
                                'hgl': 'jk',
                            }),
                        PolyLine(
                            [7.85, 13.88, 3.50, 6.67, 15.90, 2.00, 13.31, 7.21],
                            label=2, id=2, attributes={
                                'occluded': False, 'outside': False,
                                'keyframe': True, 'track_id': 2,
                            }),
                    ],
                    attributes={'frame': 13}),
                DatasetItem(id='frame_000016', subset='annotations',
                    # This frame's image is referenced by path only.
                    image=Image(path='frame_0000016.png', size=(20, 25)),
                    annotations=[
                        Bbox(8, 7, 6, 10, label=2, id=0, attributes={
                            'occluded': False, 'outside': True,
                            'keyframe': True, 'track_id': 0,
                        }),
                        PolyLine(
                            [7.85, 13.88, 3.50, 6.67, 15.90, 2.00, 13.31, 7.21],
                            label=2, id=2, attributes={
                                'occluded': False, 'outside': True,
                                'keyframe': True, 'track_id': 2,
                            }),
                    ],
                    attributes={'frame': 16}),
            ]
            return iter(items)

        def categories(self):
            labels = LabelCategories()
            labels.add('klhg', attributes={'hgl'})
            labels.add('z U k')
            labels.add('II')
            return {AnnotationType.label: labels}

    parsed_dataset = CvatImporter()(DUMMY_VIDEO_DATASET_DIR).make_dataset()

    compare_datasets(self, ExpectedExtractor(), parsed_dataset)
def test_can_detect_video(self):
    """detect() should accept the bundled video-based dummy dataset."""
    detected = CvatImporter.detect(DUMMY_VIDEO_DATASET_DIR)
    self.assertTrue(detected)
def test_can_detect_image(self):
    """detect() should accept the bundled image-based dummy dataset."""
    detected = CvatImporter.detect(DUMMY_IMAGE_DATASET_DIR)
    self.assertTrue(detected)
def test_can_load_video(self):
    """The importer should parse the video-based dummy dataset into these items.

    NOTE(review): another ``test_can_load_video`` exists earlier in this file;
    presumably they belong to different TestCase classes — verify, since two
    same-named methods in one class would silently shadow each other.
    """
    # Each track keeps a stable id; per-frame shape attributes carry the
    # CVAT track flags (occluded / outside / keyframe).
    expected_items = [
        DatasetItem(id='frame_000010', subset='annotations',
            image=np.ones((20, 25, 3)),
            annotations=[
                Bbox(3, 4, 7, 1, label=2, id=0, attributes={
                    'occluded': True, 'outside': False,
                    'keyframe': True, 'track_id': 0,
                }),
                Points([21.95, 8.00, 2.55, 15.09, 2.23, 3.16],
                    label=0, id=1, attributes={
                        'occluded': False, 'outside': False,
                        'keyframe': True, 'track_id': 1,
                        'hgl': 'hgkf',
                    }),
            ],
            attributes={'frame': 10}),
        DatasetItem(id='frame_000013', subset='annotations',
            image=np.ones((20, 25, 3)),
            annotations=[
                Bbox(7, 6, 7, 2, label=2, id=0, attributes={
                    'occluded': False, 'outside': True,
                    'keyframe': True, 'track_id': 0,
                }),
                Points([21.95, 8.00, 9.55, 15.09, 5.23, 1.16],
                    label=0, id=1, attributes={
                        'occluded': False, 'outside': True,
                        'keyframe': True, 'track_id': 1,
                        'hgl': 'jk',
                    }),
                PolyLine(
                    [7.85, 13.88, 3.50, 6.67, 15.90, 2.00, 13.31, 7.21],
                    label=2, id=2, attributes={
                        'occluded': False, 'outside': False,
                        'keyframe': True, 'track_id': 2,
                    }),
            ],
            attributes={'frame': 13}),
        DatasetItem(id='frame_000016', subset='annotations',
            # This frame's image is referenced by path only.
            image=Image(path='frame_0000016.png', size=(20, 25)),
            annotations=[
                Bbox(8, 7, 6, 10, label=2, id=0, attributes={
                    'occluded': False, 'outside': True,
                    'keyframe': True, 'track_id': 0,
                }),
                PolyLine(
                    [7.85, 13.88, 3.50, 6.67, 15.90, 2.00, 13.31, 7.21],
                    label=2, id=2, attributes={
                        'occluded': False, 'outside': True,
                        'keyframe': True, 'track_id': 2,
                    }),
            ],
            attributes={'frame': 16}),
    ]
    labels = LabelCategories.from_iterable([
        ['klhg', '', {'hgl'}],
        ['z U k'],
        ['II'],
    ])
    expected_dataset = Dataset.from_iterable(expected_items,
        categories={AnnotationType.label: labels})

    parsed_dataset = CvatImporter()(DUMMY_VIDEO_DATASET_DIR).make_dataset()

    compare_datasets(self, expected_dataset, parsed_dataset)