def test_dataset_with_save_dataset_meta_file(self):
    """Converting with save_dataset_meta=True must write dataset_meta.json
    and still round-trip the dataset unchanged."""
    face_attrs = {
        'blur': '2', 'expression': '0', 'illumination': '0',
        'occluded': '0', 'pose': '2', 'invalid': '0',
    }
    expected = Dataset.from_iterable(
        [
            DatasetItem(id='a/b/1', image=np.ones((8, 8, 3)), subset='train',
                annotations=[
                    Bbox(0, 2, 4, 2, label=2),
                    Bbox(0, 1, 2, 3, label=1, attributes=face_attrs),
                ]),
        ],
        categories=['face', 'label_0', 'label_1'])

    with TestDir() as save_dir:
        WiderFaceConverter.convert(expected, save_dir,
            save_images=True, save_dataset_meta=True)
        actual = Dataset.import_from(save_dir, 'wider_face')

        # The meta file is the whole point of this test: it must exist.
        self.assertTrue(osp.isfile(osp.join(save_dir, 'dataset_meta.json')))
        compare_datasets(self, expected, actual)
def test_can_save_dataset_with_cyrillic_and_spaces_in_filename(self):
    """Item ids containing non-ASCII characters and spaces must survive a
    save/load round-trip, images included."""
    expected = Dataset.from_iterable(
        [
            DatasetItem(id='кириллица с пробелом', image=np.ones((8, 8, 3)),
                annotations=[
                    Bbox(0, 1, 2, 3, label=0, attributes={
                        'blur': '2', 'expression': '0', 'illumination': '0',
                        'occluded': '0', 'pose': '2', 'invalid': '0',
                    }),
                ]),
        ],
        categories=['face'])

    with TestDir() as save_dir:
        WiderFaceConverter.convert(expected, save_dir, save_images=True)
        actual = Dataset.import_from(save_dir, 'wider_face')

        compare_datasets(self, expected, actual, require_images=True)
def test_can_save_and_load_with_no_save_images(self):
    """Annotations must round-trip even when image data is not written
    (save_images=False); comparison does not require images."""
    expected = Dataset.from_iterable(
        [
            DatasetItem(id='1', subset='train', image=np.ones((8, 8, 3)),
                annotations=[
                    Bbox(0, 2, 4, 2, label=1),
                    Bbox(0, 1, 2, 3, label=0, attributes={
                        'blur': '2', 'expression': '0', 'illumination': '0',
                        'occluded': '0', 'pose': '2', 'invalid': '0',
                    }),
                    Label(1),
                ]),
        ],
        categories=['face', 'label_0'])

    with TestDir() as save_dir:
        WiderFaceConverter.convert(expected, save_dir, save_images=False)
        actual = Dataset.import_from(save_dir, 'wider_face')

        compare_datasets(self, expected, actual)
def test_can_save_dataset_with_no_subsets(self):
    """A dataset with no label categories and unlabeled boxes (integer
    attribute values) must round-trip through the wider_face format.

    NOTE(review): another test with this exact name appears later in the
    file — if both live in the same class, the later definition shadows
    this one and it never runs; confirm and rename one of them.
    """
    expected = Dataset.from_iterable(
        [
            DatasetItem(id='a/b/1', image=np.ones((8, 8, 3)),
                annotations=[
                    Bbox(0, 2, 4, 2),
                    Bbox(0, 1, 2, 3, attributes={
                        'blur': 2, 'expression': 0, 'illumination': 0,
                        'occluded': 0, 'pose': 2, 'invalid': 0,
                    }),
                ]),
        ],
        categories=[])

    with TestDir() as save_dir:
        WiderFaceConverter.convert(expected, save_dir, save_images=True)
        actual = Dataset.import_from(save_dir, 'wider_face')

        compare_datasets(self, expected, actual)
def test_can_save_dataset_with_no_subsets(self):
    """Round-trip a dataset that has labeled boxes but no subset names,
    with categories given as an explicit LabelCategories mapping.

    NOTE(review): an earlier test in the file has this exact name — if
    both live in the same class, this definition shadows the earlier one;
    confirm and rename one of them.
    """
    expected = Dataset.from_iterable(
        [
            DatasetItem(id='a/b/1', image=np.ones((8, 8, 3)),
                annotations=[
                    Bbox(0, 2, 4, 2, label=2),
                    Bbox(0, 1, 2, 3, label=1, attributes={
                        'blur': '2', 'expression': '0', 'illumination': '0',
                        'occluded': '0', 'pose': '2', 'invalid': '0',
                    }),
                ]),
        ],
        categories={
            AnnotationType.label: LabelCategories.from_iterable(
                'label_' + str(i) for i in range(3)),
        })

    with TestDir() as save_dir:
        WiderFaceConverter.convert(expected, save_dir, save_images=True)
        actual = Dataset.import_from(save_dir, 'wider_face')

        compare_datasets(self, expected, actual)
def test_can_save_dataset_with_non_widerface_attributes(self):
    """Attributes that are not part of the WIDER Face schema must be
    dropped on save, while the recognized ones are kept (and re-read as
    strings)."""
    source = Dataset.from_iterable(
        [
            DatasetItem(id='a/b/1', image=np.ones((8, 8, 3)),
                annotations=[
                    Bbox(0, 2, 4, 2, label=0),
                    # Mixes a foreign attribute with valid ones.
                    Bbox(0, 1, 2, 3, label=0, attributes={
                        'non-widerface attribute': '0',
                        'blur': 1, 'invalid': '1',
                    }),
                    # Only a foreign attribute: should come back bare.
                    Bbox(1, 1, 2, 2, label=0,
                        attributes={'non-widerface attribute': '0'}),
                ]),
        ],
        categories=['face'])

    expected = Dataset.from_iterable(
        [
            DatasetItem(id='a/b/1', image=np.ones((8, 8, 3)),
                annotations=[
                    Bbox(0, 2, 4, 2, label=0),
                    Bbox(0, 1, 2, 3, label=0,
                        attributes={'blur': '1', 'invalid': '1'}),
                    Bbox(1, 1, 2, 2, label=0),
                ]),
        ],
        categories=['face'])

    with TestDir() as save_dir:
        WiderFaceConverter.convert(source, save_dir, save_images=True)
        actual = Dataset.import_from(save_dir, 'wider_face')

        compare_datasets(self, expected, actual)
def test_can_save_and_load_image_with_arbitrary_extension(self):
    """Images whose paths carry non-default extensions (.JPEG, .bmp) must
    keep those extensions across a save/load round-trip."""
    expected = Dataset.from_iterable(
        [
            DatasetItem('q/1',
                image=Image(path='q/1.JPEG', data=np.zeros((4, 3, 3)))),
            DatasetItem('a/b/c/2',
                image=Image(path='a/b/c/2.bmp', data=np.zeros((3, 4, 3)))),
        ],
        categories=[])

    with TestDir() as save_dir:
        WiderFaceConverter.convert(expected, save_dir, save_images=True)
        actual = Dataset.import_from(save_dir, 'wider_face')

        compare_datasets(self, expected, actual, require_images=True)
def test_can_save_and_load(self):
    """Full round-trip over multiple subsets: labeled boxes with complete
    WIDER Face attribute sets, partial/boolean attribute sets, extra
    Label annotations, float coordinates, and an item with no annotations.

    NOTE(review): another test with this exact name appears later in the
    file — if both are in the same class, one shadows the other; confirm.
    """
    def wf_attrs(blur, expression, illumination, occluded, pose, invalid):
        # Builds a fresh, fully-populated WIDER Face attribute dict.
        return {
            'blur': blur, 'expression': expression,
            'illumination': illumination, 'occluded': occluded,
            'pose': pose, 'invalid': invalid,
        }

    expected = Dataset.from_iterable(
        [
            DatasetItem(id='1', subset='train', image=np.ones((8, 8, 3)),
                annotations=[
                    Bbox(0, 2, 4, 2, label=0),
                    Bbox(0, 1, 2, 3, label=0,
                        attributes=wf_attrs('2', '0', '0', '0', '2', '0')),
                    Label(1),
                ]),

            DatasetItem(id='2', subset='train', image=np.ones((10, 10, 3)),
                annotations=[
                    Bbox(0, 2, 4, 2, label=0,
                        attributes=wf_attrs('2', '0', '1', '0', '1', '0')),
                    Bbox(3, 3, 2, 3, label=0,
                        attributes=wf_attrs('0', '1', '0', '0', '2', '0')),
                    Bbox(2, 1, 2, 3, label=0,
                        attributes=wf_attrs('2', '0', '0', '0', '0', '1')),
                    Label(2),
                ]),

            DatasetItem(id='3', subset='val', image=np.ones((8, 8, 3)),
                annotations=[
                    # Float coordinates exercise non-integer box handling.
                    Bbox(0, 1.1, 5.3, 2.1, label=0,
                        attributes=wf_attrs('2', '1', '0', '0', '1', '0')),
                    # Boolean 'occluded' values, without the other attrs.
                    Bbox(0, 2, 3, 2, label=0,
                        attributes={'occluded': False}),
                    Bbox(0, 3, 4, 2, label=0,
                        attributes={'occluded': True}),
                    Bbox(0, 2, 4, 2, label=0),
                    Bbox(0, 7, 3, 2, label=0,
                        attributes=wf_attrs('2', '1', '0', '0', '1', '0')),
                ]),

            # An item with no annotations at all must also survive.
            DatasetItem(id='4', subset='val', image=np.ones((8, 8, 3))),
        ],
        categories=['face', 'label_0', 'label_1'])

    with TestDir() as save_dir:
        WiderFaceConverter.convert(expected, save_dir, save_images=True)
        actual = Dataset.import_from(save_dir, 'wider_face')

        compare_datasets(self, expected, actual, require_images=True)
def test_can_save_and_load(self):
    """Round-trip with unlabeled boxes and integer attribute values, using
    an explicit LabelCategories mapping for the categories.

    NOTE(review): an earlier test in the file has this exact name — if
    both are in the same class, this one shadows the earlier one; confirm.
    """
    def wf_attrs(blur, expression, illumination, occluded, pose, invalid):
        # Builds a fresh, fully-populated WIDER Face attribute dict.
        return {
            'blur': blur, 'expression': expression,
            'illumination': illumination, 'occluded': occluded,
            'pose': pose, 'invalid': invalid,
        }

    expected = Dataset.from_iterable(
        [
            DatasetItem(id='1', subset='train', image=np.ones((8, 8, 3)),
                annotations=[
                    Bbox(0, 2, 4, 2),
                    Bbox(0, 1, 2, 3,
                        attributes=wf_attrs(2, 0, 0, 0, 2, 0)),
                    Label(0),
                ]),

            DatasetItem(id='2', subset='train', image=np.ones((10, 10, 3)),
                annotations=[
                    Bbox(0, 2, 4, 2,
                        attributes=wf_attrs(2, 0, 1, 0, 1, 0)),
                    Bbox(3, 3, 2, 3,
                        attributes=wf_attrs(0, 1, 0, 0, 2, 0)),
                    Bbox(2, 1, 2, 3,
                        attributes=wf_attrs(2, 0, 0, 0, 0, 1)),
                    Label(1),
                ]),

            DatasetItem(id='3', subset='val', image=np.ones((8, 8, 3)),
                annotations=[
                    Bbox(0, 1, 5, 2,
                        attributes=wf_attrs(2, 1, 0, 0, 1, 0)),
                    Bbox(0, 2, 3, 2),
                    Bbox(0, 2, 4, 2),
                    Bbox(0, 7, 3, 2,
                        attributes=wf_attrs(2, 1, 0, 0, 1, 0)),
                ]),

            # An item with no annotations at all must also survive.
            DatasetItem(id='4', subset='val', image=np.ones((8, 8, 3))),
        ],
        categories={
            AnnotationType.label: LabelCategories.from_iterable(
                'label_' + str(i) for i in range(3)),
        })

    with TestDir() as save_dir:
        WiderFaceConverter.convert(expected, save_dir, save_images=True)
        actual = Dataset.import_from(save_dir, 'wider_face')

        compare_datasets(self, expected, actual)