def test_can_reiterate_sequence(self):
    """Iterating a LabelRandomSampler twice must produce identical datasets."""
    items = [
        DatasetItem('1', subset='a', annotations=[Label(0), Label(1)]),
        DatasetItem('2', subset='a', annotations=[Label(1)]),
        DatasetItem('3', subset='a', annotations=[Label(2)]),
        DatasetItem('4', subset='a', annotations=[Label(1), Label(2)]),
        DatasetItem('5', subset='b', annotations=[Label(0)]),
        DatasetItem('6', subset='b', annotations=[Label(0), Label(2)]),
        DatasetItem('7', subset='b', annotations=[Label(1), Label(2)]),
        DatasetItem('8', subset='b', annotations=[Label(2)]),
    ]
    source = Dataset.from_iterable(items, categories=['a', 'b', 'c'])

    sampler = LabelRandomSampler(source, count=2)

    # Materialize the sampler output twice and make sure both passes agree.
    first_pass = Dataset.from_extractors(sampler)
    first_pass.init_cache()
    second_pass = Dataset.from_extractors(sampler)
    second_pass.init_cache()

    compare_datasets_strict(self, first_pass, second_pass)
def _export(dst_file, instance_data, save_images=False):
    """Export instance_data as a YOLO dataset and zip it into dst_file."""
    extractor = GetCVATDataExtractor(instance_data, include_images=save_images)
    dataset = Dataset.from_extractors(extractor, env=dm_env)
    with TemporaryDirectory() as tmp_dir:
        dataset.export(tmp_dir, 'yolo', save_images=save_images)
        make_zip_archive(tmp_dir, dst_file)
def _export(dst_file, task_data, save_images=False):
    """Export task_data in MOT sequence GT format, zipped into dst_file."""
    source = CvatTaskDataExtractor(task_data, include_images=save_images)
    # Wrapping in Dataset applies any lazy transforms before conversion.
    dataset = Dataset.from_extractors(source)
    with TemporaryDirectory() as tmp_dir:
        converter = dm_env.converters.get('mot_seq_gt')
        converter.convert(dataset, save_dir=tmp_dir, save_images=save_images)
        make_zip_archive(tmp_dir, dst_file)
def _export(dst_file, task_data, save_images=False):
    """Export task_data in YOLO format, zipped into dst_file."""
    source = CvatTaskDataExtractor(task_data, include_images=save_images)
    # Wrapping in Dataset applies any lazy transforms before conversion.
    dataset = Dataset.from_extractors(source)
    with TemporaryDirectory() as tmp_dir:
        convert = dm_env.make_converter('yolo', save_images=save_images)
        convert(dataset, save_dir=tmp_dir)
        make_zip_archive(tmp_dir, dst_file)
def _export(dst_file, task_data, save_images=False):
    """Export task_data in TF Detection API format, zipped into dst_file."""
    extractor = CvatTaskDataExtractor(task_data, include_images=save_images)
    dataset = Dataset.from_extractors(extractor, env=dm_env)
    with TemporaryDirectory() as tmp_dir:
        dataset.export(tmp_dir, 'tf_detection_api', save_images=save_images)
        make_zip_archive(tmp_dir, dst_file)
def _export(dst_file, task_data, save_images=False):
    """Export task_data in MOT sequence GT format, zipped into dst_file.

    Item ids are derived from image names before conversion.
    """
    extractor = CvatTaskDataExtractor(task_data, include_images=save_images)
    transforms = dm_env.transforms
    extractor = extractor.transform(transforms.get('id_from_image_name'))
    # Wrapping in Dataset applies the lazy transform above.
    dataset = Dataset.from_extractors(extractor)
    with TemporaryDirectory() as tmp_dir:
        convert = dm_env.make_converter('mot_seq_gt', save_images=save_images)
        convert(dataset, save_dir=tmp_dir)
        make_zip_archive(tmp_dir, dst_file)
def __call__(self, extractor, save_dir):
    """Convert the given extractor to VOC format under save_dir."""
    from datumaro.components.project import Environment, Dataset

    env = Environment()
    extractor = extractor.transform(env.transforms.get('id_from_image_name'))
    # Wrapping in Dataset applies the lazy transform above.
    dataset = Dataset.from_extractors(extractor)
    convert = env.make_converter('voc',
        label_map='source', save_images=self._save_images)
    convert(dataset, save_dir=save_dir)
def _export(dst_file, task_data, save_images=False):
    """Export task_data in MOTS PNG format, zipped into dst_file."""
    extractor = CvatTaskDataExtractor(task_data, include_images=save_images)
    transforms = dm_env.transforms
    # MOTS can only represent tracks, so everything else is dropped first.
    extractor = extractor.transform(KeepTracks)
    for transform_name in ('polygons_to_masks', 'boxes_to_masks',
            'merge_instance_segments'):
        extractor = extractor.transform(transforms.get(transform_name))
    # Wrapping in Dataset applies the lazy transforms above.
    dataset = Dataset.from_extractors(extractor)
    with TemporaryDirectory() as tmp_dir:
        dm_env.converters.get('mots_png').convert(dataset,
            save_dir=tmp_dir, save_images=save_images)
        make_zip_archive(tmp_dir, dst_file)
def _export(dst_file, task_data, save_images=False):
    """Export task_data in VOC segmentation format, zipped into dst_file."""
    extractor = CvatTaskDataExtractor(task_data, include_images=save_images)
    transforms = dm_env.transforms
    # All shapes become instance masks before segmentation export.
    for transform_name in ('polygons_to_masks', 'boxes_to_masks',
            'merge_instance_segments'):
        extractor = extractor.transform(transforms.get(transform_name))
    # Wrapping in Dataset applies the lazy transforms above.
    dataset = Dataset.from_extractors(extractor)
    with TemporaryDirectory() as tmp_dir:
        convert = dm_env.make_converter('voc_segmentation',
            apply_colormap=True, label_map=make_colormap(task_data),
            save_images=save_images)
        convert(dataset, save_dir=tmp_dir)
        make_zip_archive(tmp_dir, dst_file)
def dump(file_object, annotations):
    """Write annotations to file_object as a zipped VOC dataset."""
    from tempfile import TemporaryDirectory

    from datumaro.components.project import Environment, Dataset

    from cvat.apps.dataset_manager.bindings import CvatAnnotationsExtractor
    from cvat.apps.dataset_manager.util import make_zip_archive

    env = Environment()
    extractor = CvatAnnotationsExtractor('', annotations)
    extractor = extractor.transform(env.transforms.get('id_from_image_name'))
    # Wrapping in Dataset applies the lazy transform above.
    dataset = Dataset.from_extractors(extractor)
    convert = env.make_converter('voc', label_map='source')
    with TemporaryDirectory() as tmp_dir:
        convert(dataset, save_dir=tmp_dir)
        make_zip_archive(tmp_dir, file_object)
def _export(dst_file, task_data, save_images=False):
    """Export task_data in CamVid format, zipped into dst_file."""
    extractor = CvatTaskDataExtractor(task_data, include_images=save_images)
    transforms = dm_env.transforms
    # All shapes become instance masks before segmentation export.
    for transform_name in ('polygons_to_masks', 'boxes_to_masks',
            'merge_instance_segments'):
        extractor = extractor.transform(transforms.get(transform_name))
    # Wrapping in Dataset applies the lazy transforms above.
    dataset = Dataset.from_extractors(extractor)
    colormap = make_colormap(task_data)
    with TemporaryDirectory() as tmp_dir:
        dm_env.converters.get('camvid').convert(dataset,
            save_dir=tmp_dir, save_images=save_images, apply_colormap=True,
            # CamVid expects label -> color; keep only the first map entry.
            label_map={name: colormap[name][0] for name in colormap})
        make_zip_archive(tmp_dir, dst_file)
def test_create_from_extractors(self):
    """Merging two extractors must union items and their annotations by id/subset."""

    class FirstSource(Extractor):
        def __iter__(self):
            yield DatasetItem(id=1, subset='train', annotations=[
                Bbox(1, 2, 3, 4),
                Label(4),
            ])
            yield DatasetItem(id=1, subset='val', annotations=[
                Label(4),
            ])

    class SecondSource(Extractor):
        def __iter__(self):
            yield DatasetItem(id=1, subset='val', annotations=[
                Label(5),
            ])

    class Expected(Extractor):
        def __iter__(self):
            yield DatasetItem(id=1, subset='train', annotations=[
                Bbox(1, 2, 3, 4),
                Label(4),
            ])
            # The 'val' item appears in both sources: annotations are merged.
            yield DatasetItem(id=1, subset='val', annotations=[
                Label(4),
                Label(5),
            ])

    merged = Dataset.from_extractors(FirstSource(), SecondSource())

    compare_datasets(self, Expected(), merged)
def __call__(self, extractor, save_dir):
    """Convert the given extractor to VOC segmentation format under save_dir."""
    from datumaro.components.project import Environment, Dataset

    env = Environment()
    # Turn all shapes into merged instance masks, then rename items by image.
    for transform_name in ('polygons_to_masks', 'boxes_to_masks',
            'merge_instance_segments', 'id_from_image_name'):
        extractor = extractor.transform(env.transforms.get(transform_name))
    # Wrapping in Dataset applies the lazy transforms above.
    dataset = Dataset.from_extractors(extractor)
    convert = env.make_converter('voc_segmentation',
        apply_colormap=True, label_map='source',
        save_images=self._save_images)
    convert(dataset, save_dir=save_dir)
def test_can_load(self):
    # Checks that LabelMeExtractor parses DUMMY_DATASET_DIR into exactly
    # the items, annotations, and label categories built below.

    class DstExtractor(Extractor):
        # Hand-built expected dataset: one image with polygon, mask, and
        # bbox annotations mirroring the dummy LabelMe files on disk.
        def __iter__(self):
            # White 77x102 image with a black rectangle region.
            img1 = np.ones((77, 102, 3)) * 255
            img1[6:32, 7:41] = 0

            # Small rectangular binary mask near the bottom of the image.
            mask1 = np.zeros((77, 102), dtype=int)
            mask1[67:69, 58:63] = 1

            # Irregular blob mask, written out explicitly row by row.
            mask2 = np.zeros((77, 102), dtype=int)
            mask2[13:25, 54:71] = [
                [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
                [0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
                [0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0],
                [0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0],
                [0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0],
                [0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0],
                [0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0],
                [0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0],
                [0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0],
                [0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            ]
            return iter([
                DatasetItem(id='img1', image=img1, annotations=[
                    Polygon([43, 34, 45, 34, 45, 37, 43, 37],
                        label=0, id=0,
                        attributes={
                            'occluded': False,
                            'username': '******'
                        }),
                    Mask(mask1, label=1, id=1,
                        attributes={
                            'occluded': False,
                            'username': '******'
                        }),
                    # Annotations with the same `group` form one instance.
                    Polygon(
                        [
                            30, 12, 42, 21, 24, 26,
                            15, 22, 18, 14, 22, 12, 27, 12
                        ],
                        label=2, group=2, id=2,
                        attributes={
                            'a1': True,
                            'occluded': True,
                            'username': '******'
                        }),
                    Polygon(
                        [
                            35, 21, 43, 22, 40, 28,
                            28, 31, 31, 22, 32, 25
                        ],
                        label=3, group=2, id=3,
                        attributes={
                            'kj': True,
                            'occluded': False,
                            'username': '******'
                        }),
                    Bbox(13, 19, 10, 11, label=4, group=2, id=4,
                        attributes={
                            'hg': True,
                            'occluded': True,
                            'username': '******'
                        }),
                    Mask(mask2, label=5, group=1, id=5,
                        attributes={
                            'd': True,
                            'occluded': False,
                            'username': '******'
                        }),
                    Polygon(
                        [
                            64, 21, 74, 24, 72, 32,
                            62, 34, 60, 27, 62, 22
                        ],
                        label=6, group=1, id=6,
                        attributes={
                            'gfd lkj lkj hi': True,
                            'occluded': False,
                            'username': '******'
                        }),
                ]),
            ])

        def categories(self):
            # Label indices in the annotations above refer to this order.
            label_cat = LabelCategories()
            label_cat.add('window')
            label_cat.add('license plate')
            label_cat.add('o1')
            label_cat.add('q1')
            label_cat.add('b1')
            label_cat.add('m1')
            label_cat.add('hg')
            return {
                AnnotationType.label: label_cat,
            }

    parsed = Dataset.from_extractors(LabelMeExtractor(DUMMY_DATASET_DIR))
    compare_datasets(self, expected=DstExtractor(), actual=parsed)