Example #1
    def test_polygons_to_masks(self):
        """PolygonsToMasks must rasterize every Polygon annotation into a
        Mask covering the same region of the item's image.
        """
        source_dataset = Dataset.from_iterable([
            DatasetItem(id=1, image=np.zeros((5, 10, 3)), annotations=[
                Polygon([0, 0, 4, 0, 4, 4]),
                Polygon([5, 0, 9, 0, 5, 5]),
            ]),
        ])

        # Expected rasterizations of the two triangles above.
        target_dataset = Dataset.from_iterable([
            DatasetItem(id=1, image=np.zeros((5, 10, 3)), annotations=[
                Mask(np.array([
                        [0, 0, 0, 0, 0, 1, 1, 1, 1, 0],
                        [0, 0, 0, 0, 0, 1, 1, 1, 0, 0],
                        [0, 0, 0, 0, 0, 1, 1, 0, 0, 0],
                        [0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    ]),
                ),
                Mask(np.array([
                        [0, 1, 1, 1, 0, 0, 0, 0, 0, 0],
                        [0, 0, 1, 1, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    ]),
                ),
            ]),
        ])

        actual = transforms.PolygonsToMasks(source_dataset)
        compare_datasets(self, target_dataset, actual)
Example #2
    def test_crop_covered_segments(self):
        """CropCoveredSegments must subtract the area of higher z-order
        shapes from the masks below them, leaving other shapes intact.
        """
        source_dataset = Dataset.from_iterable([
            DatasetItem(id=1, image=np.zeros((5, 5, 3)), annotations=[
                # The mask is partially covered by the polygon
                Mask(np.array([
                        [0, 0, 1, 1, 1],
                        [0, 0, 1, 1, 1],
                        [1, 1, 1, 1, 1],
                        [1, 1, 1, 0, 0],
                        [1, 1, 1, 0, 0]],
                    ), z_order=0),
                Polygon([1, 1, 4, 1, 4, 4, 1, 4], z_order=1),
            ]),
        ])

        # The mask loses the pixels inside the covering polygon;
        # the polygon itself is unchanged.
        target_dataset = Dataset.from_iterable([
            DatasetItem(id=1, image=np.zeros((5, 5, 3)), annotations=[
                Mask(np.array([
                        [0, 0, 1, 1, 1],
                        [0, 0, 0, 0, 1],
                        [1, 0, 0, 0, 1],
                        [1, 0, 0, 0, 0],
                        [1, 1, 1, 0, 0]],
                    ), z_order=0),
                Polygon([1, 1, 4, 1, 4, 4, 1, 4], z_order=1),
            ]),
        ])

        actual = transforms.CropCoveredSegments(source_dataset)
        compare_datasets(self, target_dataset, actual)
    def test_can_import_v2_0_panoptic_with_keeping_category_ids(self):
        """With keep_original_category_ids=True the importer must keep label
        ids at their original dataset indices, padding the unused slots of
        the label table with placeholder names.
        """
        # 101 placeholder labels; only indices 1, 10 and 100 are real.
        labels = [f'class-{i}' for i in range(101)]
        labels[1] = ('animal--bird', 'animal')
        labels[10] = ('construction--barrier--separator', 'construction')
        labels[100] = ('object--vehicle--bicycle', 'object')

        label_cat = LabelCategories.from_iterable(labels)
        mask_cat = MaskCategories({
            1: (165, 42, 42),
            10: (128, 128, 128),
            100: (119, 11, 32)
        })

        expected_dataset = Dataset.from_iterable([
            DatasetItem(id='0', subset='val', annotations=[
                Mask(image=np.array([[1, 1, 1, 0, 0]] * 5), id=1, group=1,
                    label=1, attributes={'is_crowd': True}),
                Mask(image=np.array([[0, 0, 0, 1, 1]] * 5), id=2, group=2,
                    label=10, attributes={'is_crowd': False}),
                Polygon(points=[0, 0, 1, 0, 2, 0, 2, 4, 0, 4], label=1),
                Polygon(points=[3, 0, 4, 0, 4, 1, 4, 4, 3, 4], label=10),
            ], image=np.ones((5, 5, 3))),
            DatasetItem(id='1', subset='val', annotations=[
                Mask(image=np.array([[1, 1, 0, 0, 0]] * 5), id=1, group=1,
                    label=100, attributes={'is_crowd': False}),
                Mask(image=np.array([[0, 0, 1, 0, 0]] * 5), id=2, group=2,
                    label=10, attributes={'is_crowd': False}),
                Mask(image=np.array([[0, 0, 0, 1, 1]] * 5), id=3, group=3,
                    label=100, attributes={'is_crowd': True}),
                Polygon(points=[2, 0, 2, 1, 2, 2, 2, 3, 2, 4], label=10),
                Polygon(points=[0, 0, 1, 0, 1, 4, 4, 0, 0, 0], label=100),
                Polygon(points=[3, 0, 4, 0, 4, 4, 3, 4, 3, 0], label=100),
            ], image=np.ones((5, 5, 3))),
            DatasetItem(id='2', subset='train', annotations=[
                Mask(image=np.array([[1, 0, 0, 0, 0]] * 5), id=1, group=1,
                    label=1, attributes={'is_crowd': False}),
                Mask(image=np.array([[0, 1, 0, 0, 0]] * 5), id=2, group=2,
                    label=10, attributes={'is_crowd': False}),
                Mask(image=np.array([[0, 0, 1, 0, 0]] * 5), id=3, group=3,
                    label=1, attributes={'is_crowd': False}),
                Mask(image=np.array([[0, 0, 0, 1, 0]] * 5), id=4, group=4,
                    label=10, attributes={'is_crowd': False}),
                Mask(image=np.array([[0, 0, 0, 0, 1]] * 5), id=5, group=5,
                    label=1, attributes={'is_crowd': False}),
                Polygon(points=[0, 0, 0, 1, 0, 2, 0, 3, 0, 4], label=1),
                Polygon(points=[2, 0, 2, 1, 2, 2, 2, 3, 2, 4], label=1),
                Polygon(points=[4, 0, 4, 1, 4, 2, 4, 3, 4, 4], label=1),
                Polygon(points=[1, 0, 1, 1, 1, 2, 1, 3, 1, 4], label=10),
                Polygon(points=[3, 0, 3, 1, 3, 2, 3, 3, 3, 4], label=10),
            ], image=np.ones((5, 5, 3))),
        ], categories={
            AnnotationType.label: label_cat,
            AnnotationType.mask: mask_cat
        })

        imported_dataset = Dataset.import_from(DUMMY_DATASET_V2_0,
            'mapillary_vistas_panoptic', keep_original_category_ids=True)

        compare_datasets(self, expected_dataset, imported_dataset,
            require_images=True)
Example #4
    def test_can_save_and_load_bboxes(self):
        """ICDAR text-localization data must survive a save/load round trip,
        keeping bbox/polygon shapes and their 'text' attributes, with images.
        """
        expected_dataset = Dataset.from_iterable([
            DatasetItem(id='a/b/1',
                        subset='train',
                        image=np.ones((10, 15, 3)),
                        annotations=[
                            Bbox(1, 3, 6, 10),
                            Bbox(0, 1, 3, 5, attributes={'text': 'word 0'}),
                        ]),
            DatasetItem(id=2,
                        subset='train',
                        image=np.ones((10, 15, 3)),
                        annotations=[
                            Polygon([0, 0, 3, 0, 4, 7, 1, 8],
                                    attributes={'text': 'word 1'}),
                            Polygon([1, 2, 5, 3, 6, 8, 0, 7]),
                        ]),
            DatasetItem(id=3,
                        subset='train',
                        image=np.ones((10, 15, 3)),
                        annotations=[
                            Polygon([2, 2, 8, 3, 7, 10, 2, 9],
                                    attributes={'text': 'word_2'}),
                            Bbox(0, 2, 5, 9, attributes={'text': 'word_3'}),
                        ]),
        ])

        with TestDir() as test_dir:
            self._test_save_and_load(
                expected_dataset,
                partial(IcdarTextLocalizationConverter.convert,
                        save_images=True), test_dir, 'icdar_text_localization')
Example #5
    def test_shapes_to_boxes(self):
        """ShapesToBoxes must replace masks, polygons, polylines and points
        with their bounding boxes, preserving annotation ids.
        """
        source_dataset = Dataset.from_iterable([
            DatasetItem(id=1, image=np.zeros((5, 5, 3)),
                annotations=[
                    Mask(np.array([
                            [0, 0, 1, 1, 1],
                            [0, 0, 0, 0, 1],
                            [1, 0, 0, 0, 1],
                            [1, 0, 0, 0, 0],
                            [1, 1, 1, 0, 0]],
                        ), id=1),
                    Polygon([1, 1, 4, 1, 4, 4, 1, 4], id=2),
                    PolyLine([1, 1, 2, 1, 2, 2, 1, 2], id=3),
                    Points([2, 2, 4, 2, 4, 4, 2, 4], id=4),
                ]
            ),
        ])

        # One Bbox per source shape, matched by id.
        target_dataset = Dataset.from_iterable([
            DatasetItem(id=1, image=np.zeros((5, 5, 3)),
                annotations=[
                    Bbox(0, 0, 4, 4, id=1),
                    Bbox(1, 1, 3, 3, id=2),
                    Bbox(1, 1, 1, 1, id=3),
                    Bbox(2, 2, 2, 2, id=4),
                ]
            ),
        ])

        actual = transforms.ShapesToBoxes(source_dataset)
        compare_datasets(self, target_dataset, actual)
Example #6
    def _load_polygons(self, items):
        """Parse polygon annotation JSON files and merge them into *items*.

        Walks ``<annotations_dir>/<POLYGON_DIR>`` recursively for ``*.json``
        files; a file's path relative to the polygons dir (sans extension) is
        the item id. Known items get the parsed polygons appended (and their
        image size refined when available); unknown ids produce new
        DatasetItem entries in *items*.
        """
        polygons_dir = osp.join(self._annotations_dir,
                                MapillaryVistasPath.POLYGON_DIR)
        for item_path in glob.glob(osp.join(polygons_dir, '**', '*.json'),
                recursive=True):
            item_id = osp.splitext(osp.relpath(item_path, polygons_dir))[0]
            item = items.get(item_id)
            item_info = parse_json_file(item_path)

            # Bug fix: the size refinement used to dereference `item` before
            # the `item is None` check below, crashing with AttributeError
            # whenever a polygon file had no matching item.
            image_size = self._get_image_size(item_info)
            if item is not None and image_size and item.has_image:
                item.image = Image(path=item.image.path, size=image_size)

            annotations = []
            for polygon in item_info['objects']:
                label = polygon['label']
                # Register previously unseen labels on the fly.
                label_id = self._categories[AnnotationType.label].find(label)[0]
                if label_id is None:
                    label_id = self._categories[AnnotationType.label].add(label)

                # Flatten [[x, y], ...] into [x, y, x, y, ...].
                points = [coord for point in polygon['polygon'] for coord in point]
                annotations.append(Polygon(label=label_id, points=points))

            if item is None:
                items[item_id] = DatasetItem(id=item_id, subset=self._subset,
                    annotations=annotations)
            else:
                item.annotations.extend(annotations)
    def test_can_import_with_meta_file(self):
        """ADE20k 2020 data accompanied by a dataset meta file must be
        imported with the label schema taken from that meta file.
        """
        expected_dataset = Dataset.from_iterable(
            [
                DatasetItem(id='street/1', subset='training',
                    image=np.ones((5, 5, 3)),
                    annotations=[
                        Polygon([1, 0, 1, 1, 1, 2, 1, 3, 1, 4],
                            group=1, z_order=0, id=1, label=1,
                            attributes={'walkin': True}),
                        Mask(image=np.array([[0, 0, 1, 1, 1]] * 5), label=0,
                            group=401, z_order=0, id=401),
                        Mask(image=np.array([[0, 1, 0, 0, 0]] * 5), label=1,
                            group=1831, z_order=0, id=1831),
                        Mask(image=np.array([[0, 0, 0, 1, 1]] * 5), label=2,
                            id=774, group=774, z_order=1),
                        Mask(image=np.array([[0, 0, 1, 1, 1]] * 5), label=0,
                            group=0, z_order=0, id=0),
                        Mask(image=np.array([[0, 1, 0, 0, 0]] * 5), label=1,
                            group=1, z_order=0, id=1,
                            attributes={'walkin': True}),
                        Mask(image=np.array([[0, 0, 0, 1, 1]] * 5), label=2,
                            group=2, z_order=1, id=2),
                    ])
            ], categories={AnnotationType.label: LabelCategories.from_iterable([
                    'car', 'person', 'door', 'rim'])
                }
        )

        imported_dataset = Dataset.import_from(DUMMY_DATASET_DIR_META_FILE, 'ade20k2020')
        compare_datasets(self, expected_dataset, imported_dataset,
            require_images=True)
Example #8
    def test_can_load_image(self):
        """A CVAT image annotation file must be parsed into items carrying
        shapes with z-order, per-shape attributes, and the 'frame' item
        attribute.
        """
        expected_dataset = Dataset.from_iterable([
            DatasetItem(id='img0', subset='train',
                image=np.ones((8, 8, 3)),
                annotations=[
                    Bbox(0, 2, 4, 2, label=0, z_order=1,
                        attributes={
                            'occluded': True,
                            'a1': True, 'a2': 'v3', 'a3': '0003', 'a4': 2.4,
                        }),
                    PolyLine([1, 2, 3, 4, 5, 6, 7, 8],
                        attributes={'occluded': False}),
                ], attributes={'frame': 0}),
            DatasetItem(id='img1', subset='train',
                image=np.ones((10, 10, 3)),
                annotations=[
                    Polygon([1, 2, 3, 4, 6, 5], z_order=1,
                        attributes={'occluded': False}),
                    Points([1, 2, 3, 4, 5, 6], label=1, z_order=2,
                        attributes={'occluded': False}),
                ], attributes={'frame': 1}),
        ], categories={
            # label1 declares attributes a1..a4; label2 declares none.
            AnnotationType.label: LabelCategories.from_iterable([
                ['label1', '', {'a1', 'a2', 'a3', 'a4'}],
                ['label2'],
            ])
        })

        parsed_dataset = Dataset.import_from(DUMMY_IMAGE_DATASET_DIR, 'cvat')

        compare_datasets(self, expected_dataset, parsed_dataset)
Example #9
    def test_transform_to_labels(self):
        """AnnsToLabels must collapse every labeled annotation into a plain
        Label; unlabeled annotations contribute nothing.
        """
        input_dataset = Dataset.from_iterable([
            DatasetItem(id=1, annotations=[
                Label(1),
                Bbox(1, 2, 3, 4, label=2),
                Bbox(1, 3, 3, 3),  # no label: produces no Label annotation
                Mask(image=np.array([1]), label=3),
                Polygon([1, 1, 2, 2, 3, 4], label=4),
                PolyLine([1, 3, 4, 2, 5, 6], label=5)
            ])
        ], categories=['label%s' % i for i in range(6)])

        expected = Dataset.from_iterable([
            DatasetItem(id=1, annotations=[
                Label(label_idx) for label_idx in range(1, 6)
            ]),
        ], categories=['label%s' % i for i in range(6)])

        actual = transforms.AnnsToLabels(input_dataset)

        compare_datasets(self, expected, actual)
    def test_can_import_v2_0_instances(self):
        """Mapillary Vistas v2.0 instance annotations must be imported as
        per-label instance masks (ids counted per label) plus polygons.
        """
        label_cat = LabelCategories.from_iterable(['animal--bird',
            'construction--barrier--separator', 'object--vehicle--bicycle'])

        mask_cat = MaskCategories({
            0: (165, 42, 42),
            1: (128, 128, 128),
            2: (119, 11, 32)
        })

        expected_dataset = Dataset.from_iterable([
            DatasetItem(id='0', subset='val', annotations=[
                Mask(image=np.array([[1, 1, 1, 0, 0]] * 5), id=0, label=0),
                Mask(image=np.array([[0, 0, 0, 1, 1]] * 5), id=0, label=1),
                Polygon(points=[0, 0, 1, 0, 2, 0, 2, 4, 0, 4], label=0),
                Polygon(points=[3, 0, 4, 0, 4, 1, 4, 4, 3, 4], label=1),
            ], image=np.ones((5, 5, 3))),
            DatasetItem(id='1', subset='val', annotations=[
                Mask(image=np.array([[0, 0, 1, 0, 0]] * 5), id=0, label=1),
                Mask(image=np.array([[1, 1, 0, 0, 0]] * 5), id=0, label=2),
                Mask(image=np.array([[0, 0, 0, 1, 1]] * 5), id=1, label=2),
                Polygon(points=[2, 0, 2, 1, 2, 2, 2, 3, 2, 4], label=1),
                Polygon(points=[0, 0, 1, 0, 1, 4, 4, 0, 0, 0], label=2),
                Polygon(points=[3, 0, 4, 0, 4, 4, 3, 4, 3, 0], label=2),
            ], image=np.ones((5, 5, 3))),
            DatasetItem(id='2', subset='train', annotations=[
                Mask(image=np.array([[1, 0, 0, 0, 0]] * 5), id=0, label=0),
                Mask(image=np.array([[0, 0, 1, 0, 0]] * 5), id=1, label=0),
                Mask(image=np.array([[0, 0, 0, 0, 1]] * 5), id=2, label=0),
                Mask(image=np.array([[0, 1, 0, 0, 0]] * 5), id=0, label=1),
                Mask(image=np.array([[0, 0, 0, 1, 0]] * 5), id=1, label=1),
                Polygon(points=[0, 0, 0, 1, 0, 2, 0, 3, 0, 4], label=0),
                Polygon(points=[2, 0, 2, 1, 2, 2, 2, 3, 2, 4], label=0),
                Polygon(points=[4, 0, 4, 1, 4, 2, 4, 3, 4, 4], label=0),
                Polygon(points=[1, 0, 1, 1, 1, 2, 1, 3, 1, 4], label=1),
                Polygon(points=[3, 0, 3, 1, 3, 2, 3, 3, 3, 4], label=1),
            ], image=np.ones((5, 5, 3))),
        ], categories={
            AnnotationType.label: label_cat,
            AnnotationType.mask: mask_cat
        })

        imported_dataset = Dataset.import_from(DUMMY_DATASET_V2_0,
            'mapillary_vistas_instances')

        compare_datasets(self, expected_dataset, imported_dataset,
            require_images=True)
Example #11
    def test_merge_instance_segments(self):
        """MergeInstanceSegments(include_polygons=True) must merge grouped
        shapes into one mask per instance; ungrouped shapes are rasterized
        separately.
        """
        source_dataset = Dataset.from_iterable([
            DatasetItem(id=1, image=np.zeros((5, 5, 3)),
                annotations=[
                    Mask(np.array([
                            [0, 0, 1, 1, 1],
                            [0, 0, 0, 0, 1],
                            [1, 0, 0, 0, 1],
                            [1, 0, 0, 0, 0],
                            [1, 1, 1, 0, 0]],
                        ),
                        z_order=0, group=1),
                    Polygon([1, 1, 4, 1, 4, 4, 1, 4],
                        z_order=1, group=1),
                    Polygon([0, 0, 0, 2, 2, 2, 2, 0],
                        z_order=1),
                ]
            ),
        ])

        # First mask: union of the group-1 mask and polygon;
        # second mask: the ungrouped polygon rasterized on its own.
        target_dataset = Dataset.from_iterable([
            DatasetItem(id=1, image=np.zeros((5, 5, 3)),
                annotations=[
                    Mask(np.array([
                            [0, 0, 1, 1, 1],
                            [0, 1, 1, 1, 1],
                            [1, 1, 1, 1, 1],
                            [1, 1, 1, 1, 0],
                            [1, 1, 1, 0, 0]],
                        ),
                        z_order=0, group=1),
                    Mask(np.array([
                            [1, 1, 0, 0, 0],
                            [1, 1, 0, 0, 0],
                            [0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0]],
                        ),
                        z_order=1),
                ]
            ),
        ])

        actual = transforms.MergeInstanceSegments(source_dataset,
            include_polygons=True)
        compare_datasets(self, target_dataset, actual)
    def test_can_import_v2_0_panoptic_wo_images(self):
        """Panoptic import must still work when the images directory has
        been removed from the dataset.
        """
        label_cat = LabelCategories.from_iterable([
            ('animal--bird', 'animal'),
            ('construction--barrier--separator', 'construction'),
            ('object--vehicle--bicycle', 'object')
        ])

        mask_cat = MaskCategories({
            0: (165, 42, 42),
            1: (128, 128, 128),
            2: (119, 11, 32)
        })

        # Expected items carry no images, matching the stripped copy below.
        expected_dataset = Dataset.from_iterable([
            DatasetItem(id='2', subset='dataset', annotations=[
                Mask(image=np.array([[1, 0, 0, 0, 0]] * 5), id=1, group=1,
                    label=0, attributes={'is_crowd': False}),
                Mask(image=np.array([[0, 1, 0, 0, 0]] * 5), id=2, group=2,
                    label=1, attributes={'is_crowd': False}),
                Mask(image=np.array([[0, 0, 1, 0, 0]] * 5), id=3, group=3,
                    label=0, attributes={'is_crowd': False}),
                Mask(image=np.array([[0, 0, 0, 1, 0]] * 5), id=4, group=4,
                    label=1, attributes={'is_crowd': False}),
                Mask(image=np.array([[0, 0, 0, 0, 1]] * 5), id=5, group=5,
                    label=0, attributes={'is_crowd': False}),
                Polygon(points=[0, 0, 0, 1, 0, 2, 0, 3, 0, 4], label=0),
                Polygon(points=[2, 0, 2, 1, 2, 2, 2, 3, 2, 4], label=0),
                Polygon(points=[4, 0, 4, 1, 4, 2, 4, 3, 4, 4], label=0),
                Polygon(points=[1, 0, 1, 1, 1, 2, 1, 3, 1, 4], label=1),
                Polygon(points=[3, 0, 3, 1, 3, 2, 3, 3, 3, 4], label=1),
            ])
        ], categories={
            AnnotationType.label: label_cat,
            AnnotationType.mask: mask_cat
        })

        with TestDir() as test_dir:
            # Copy the dummy dataset and delete its images subdirectory.
            dataset_path = osp.join(test_dir, 'dataset')
            shutil.copytree(osp.join(DUMMY_DATASET_V2_0, 'train'), dataset_path)
            shutil.rmtree(osp.join(dataset_path, 'images'))

            imported_dataset = Dataset.import_from(dataset_path,
                'mapillary_vistas_panoptic')

            compare_datasets(self, expected_dataset, imported_dataset,
                require_images=True)
Example #13
    def test_can_resize(self):
        """ResizeTransform must scale images and all annotation geometry
        both up (4x4 -> 8x8) and down (8x8 -> 4x4).
        """
        small_dataset = Dataset.from_iterable([
            DatasetItem(id=i, image=np.ones((4, 4)) * i, annotations=[
                Label(1),
                Bbox(1, 1, 2, 2, label=2),
                Polygon([1, 1, 1, 2, 2, 2, 2, 1], label=1),
                PolyLine([1, 1, 1, 2, 2, 2, 2, 1], label=2),
                Points([1, 1, 1, 2, 2, 2, 2, 1], label=2),
                Mask(np.array([
                    [0, 0, 1, 1],
                    [1, 0, 0, 1],
                    [0, 1, 1, 0],
                    [1, 1, 0, 0],
                ]))
            ]) for i in range(3)
        ], categories=['a', 'b', 'c'])

        # The same items with every coordinate doubled and the mask
        # upsampled 2x in both dimensions.
        big_dataset = Dataset.from_iterable([
            DatasetItem(id=i, image=np.ones((8, 8)) * i, annotations=[
                Label(1),
                Bbox(2, 2, 4, 4, label=2),
                Polygon([2, 2, 2, 4, 4, 4, 4, 2], label=1),
                PolyLine([2, 2, 2, 4, 4, 4, 4, 2], label=2),
                Points([2, 2, 2, 4, 4, 4, 4, 2], label=2),
                Mask(np.array([
                    [0, 0, 0, 0, 1, 1, 1, 1],
                    [0, 0, 0, 0, 1, 1, 1, 1],
                    [1, 1, 0, 0, 0, 0, 1, 1],
                    [1, 1, 0, 0, 0, 0, 1, 1],
                    [0, 0, 1, 1, 1, 1, 0, 0],
                    [0, 0, 1, 1, 1, 1, 0, 0],
                    [1, 1, 1, 1, 0, 0, 0, 0],
                    [1, 1, 1, 1, 0, 0, 0, 0],
                ]))
            ]) for i in range(3)
        ], categories=['a', 'b', 'c'])

        with self.subTest('upscale'):
            actual = transforms.ResizeTransform(small_dataset, width=8, height=8)
            compare_datasets(self, big_dataset, actual)

        with self.subTest('downscale'):
            actual = transforms.ResizeTransform(big_dataset, width=4, height=4)
            compare_datasets(self, small_dataset, actual)
Example #14
 def append_polygon_voc(annotations, **kwargs):
     """Append a VOC-style object/part polygon pair with matching labels.

     Adds four annotations: the object polygon (id shifted by one), its
     Label, a part polygon with the label offset by 3, and that part's
     Label. Expects 'label_id', 'ann_id' and 'attributes' in kwargs.
     """
     label_id = kwargs["label_id"]
     ann_id = kwargs["ann_id"]
     attributes = kwargs["attributes"]

     # object polygon + its label
     annotations.append(Polygon(
         [0, 0, 1, 0, 1, 2, 0, 2],
         label=label_id,
         id=ann_id + 1,
         attributes=attributes,
         group=ann_id,
     ))
     annotations.append(Label(label_id, attributes=attributes))

     # part polygon (label offset by 3) + its label
     annotations.append(Polygon(
         [0, 0, 1, 0, 1, 2, 0, 2],
         label=label_id + 3,
         group=ann_id,
     ))
     annotations.append(Label(label_id + 3, attributes=attributes))
Example #15
 def append_polygon_labelme(annotations, **kwargs):
     """Append one LabelMe-style polygon and its companion Label.

     Expects 'label_id', 'ann_id' and 'attributes' in kwargs.
     """
     label_id = kwargs["label_id"]
     attributes = kwargs["attributes"]

     polygon = Polygon(
         [0, 0, 1, 0, 1, 2, 0, 2],
         label=label_id,
         id=kwargs["ann_id"],
         attributes=attributes,
     )
     annotations.append(polygon)
     annotations.append(Label(label_id, attributes=attributes))
Example #16
    def test_remap_labels(self):
        """RemapLabels must rename/merge labels, delete annotations of
        labels mapped to '', keep unmapped labels (default='keep'), and
        update label, mask and points categories consistently.
        """
        src_dataset = Dataset.from_iterable([
            DatasetItem(id=1, annotations=[
                # Should be remapped
                Label(1),
                Bbox(1, 2, 3, 4, label=2),
                Mask(image=np.array([1]), label=3),

                # Should be deleted
                Polygon([1, 1, 2, 2, 3, 4], label=4),

                # Should be kept
                PolyLine([1, 3, 4, 2, 5, 6]),
                Bbox(4, 3, 2, 1, label=5),
            ])
        ], categories={
            AnnotationType.label: LabelCategories.from_iterable(
                f'label{i}' for i in range(6)),
            AnnotationType.mask: MaskCategories(
                colormap=mask_tools.generate_colormap(6)),
            AnnotationType.points: PointsCategories.from_iterable(
                [(i, [str(i)]) for i in range(6)])
        })

        dst_dataset = Dataset.from_iterable([
            DatasetItem(id=1, annotations=[
                Label(1),
                Bbox(1, 2, 3, 4, label=0),
                Mask(image=np.array([1]), label=1),

                PolyLine([1, 3, 4, 2, 5, 6], label=None),
                Bbox(4, 3, 2, 1, label=2),
            ]),
        ], categories={
            AnnotationType.label: LabelCategories.from_iterable(
                ['label0', 'label9', 'label5']),
            # Colors of the surviving source labels 0, 1 and 5, reindexed.
            AnnotationType.mask: MaskCategories(colormap={
                i: v for i, v in enumerate({
                    k: v for k, v in mask_tools.generate_colormap(6).items()
                    if k in { 0, 1, 5 }
                }.values())
            }),
            AnnotationType.points: PointsCategories.from_iterable(
                [(0, ['0']), (1, ['1']), (2, ['5'])])
        })

        actual = transforms.RemapLabels(src_dataset, mapping={
            'label1': 'label9', # rename & join with new label9 (from label3)
            'label2': 'label0', # rename & join with existing label0
            'label3': 'label9', # rename & join with new label9 (from label1)
            'label4': '', # delete the label and associated annotations
            # 'label5' - unchanged
        }, default='keep')

        compare_datasets(self, dst_dataset, actual)
Example #17
    def convert_mask(mask):
        """Convert a mask annotation into equivalent Polygon annotations.

        Each region produced by mask_tools.mask_to_polygons becomes one
        Polygon; label, z-order, id, attributes and group are carried over
        unchanged from the source mask.
        """
        converted = []
        for contour in mask_tools.mask_to_polygons(mask.image):
            converted.append(Polygon(
                points=contour,
                label=mask.label,
                z_order=mask.z_order,
                id=mask.id,
                attributes=mask.attributes,
                group=mask.group,
            ))
        return converted
Example #18
    def test_mask_to_polygons(self):
        """MasksToPolygons must vectorize each connected mask region into
        its own polygon annotation.
        """
        source = Dataset.from_iterable([
            DatasetItem(id=1, image=np.zeros((5, 10, 3)), annotations=[
                # Two disjoint triangles in a single mask.
                Mask(np.array([
                        [0, 1, 1, 1, 0, 1, 1, 1, 1, 0],
                        [0, 0, 1, 1, 0, 1, 1, 1, 0, 0],
                        [0, 0, 0, 1, 0, 1, 1, 0, 0, 0],
                        [0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    ]),
                ),
            ]),
        ])

        expected = Dataset.from_iterable([
            DatasetItem(id=1, image=np.zeros((5, 10, 3)), annotations=[
                Polygon([1, 0, 3, 2, 3, 0, 1, 0]),
                Polygon([5, 0, 5, 3, 8, 0, 5, 0]),
            ]),
        ])

        actual = transforms.MasksToPolygons(source)
        compare_datasets(self, expected, actual)
    def test_can_save_dataset_with_cyrillic_and_spaces_in_filename(self):
        """LabelMe export must handle item ids containing non-ASCII
        (Cyrillic) characters and spaces.
        """
        source_dataset = Dataset.from_iterable([
            DatasetItem(id='кириллица с пробелом', subset='train',
                image=np.ones((16, 16, 3)),
                annotations=[ Polygon([0, 4, 4, 4, 5, 6], label=3) ]
            ),
        ], categories=['label_' + str(label) for label in range(10)])

        # After the round trip only the used label remains (reindexed to 0)
        # and LabelMe's default attributes are added.
        target_dataset = Dataset.from_iterable([
            DatasetItem(id='кириллица с пробелом', subset='train',
                image=np.ones((16, 16, 3)),
                annotations=[
                    Polygon([0, 4, 4, 4, 5, 6], label=0, id=0,
                        attributes={ 'occluded': False, 'username': '' }
                    ),
                ]
            ),
        ], categories=['label_3'])

        with TestDir() as test_dir:
            self._test_save_and_load(
                source_dataset,
                partial(LabelMeConverter.convert, save_images=True),
                test_dir, target_dataset=target_dataset, require_images=True)
Example #20
    def test_can_save_and_load_bboxes_with_no_save_images(self):
        """The ICDAR round trip must preserve annotations even when images
        are not saved (save_images=False).
        """
        expected_dataset = Dataset.from_iterable([
            DatasetItem(id=3,
                        subset='train',
                        image=np.ones((10, 15, 3)),
                        annotations=[
                            Polygon([2, 2, 8, 3, 7, 10, 2, 9],
                                    attributes={'text': 'word_2'}),
                            Bbox(0, 2, 5, 9, attributes={'text': 'word_3'}),
                        ]),
        ])

        with TestDir() as test_dir:
            self._test_save_and_load(
                expected_dataset,
                partial(IcdarTextLocalizationConverter.convert,
                        save_images=False), test_dir,
                'icdar_text_localization')
Example #21
    def crop_segments(cls, segment_anns, img_width, img_height):
        """Crop overlapping segments so higher z-order shapes are subtracted
        from the ones beneath, returning new annotations.

        Polygons are passed as point lists and masks as RLE to
        mask_tools.crop_covered_segments; results come back as polygons
        (possibly several per input) or mask arrays.

        NOTE(review): the zip below relies on crop_covered_segments
        returning one result per input in the same (z-order sorted)
        order — confirm that contract before restructuring.
        """
        # Process in z-order so covering segments come last.
        segment_anns = sorted(segment_anns, key=lambda x: x.z_order)

        segments = []
        for s in segment_anns:
            if s.type == AnnotationType.polygon:
                segments.append(s.points)
            elif s.type == AnnotationType.mask:
                # Masks are converted to RLE; RleMask already stores one.
                if isinstance(s, RleMask):
                    rle = s.rle
                else:
                    rle = mask_tools.mask_to_rle(s.image)
                segments.append(rle)

        segments = mask_tools.crop_covered_segments(segments, img_width,
                                                    img_height)

        new_anns = []
        for ann, new_segment in zip(segment_anns, segments):
            # Carry over the identifying fields from the source annotation.
            fields = {
                'z_order': ann.z_order,
                'label': ann.label,
                'id': ann.id,
                'group': ann.group,
                'attributes': ann.attributes
            }
            if ann.type == AnnotationType.polygon:
                # NOTE(review): assumes ungrouped annotations have
                # group=None here — verify against the annotation model,
                # which may use 0 for "no group".
                if fields['group'] is None:
                    fields['group'] = cls._make_group_id(
                        segment_anns + new_anns, fields['id'])
                for polygon in new_segment:
                    new_anns.append(Polygon(points=polygon, **fields))
            else:
                # Re-encode the cropped mask as COCO-style RLE.
                rle = mask_tools.mask_to_rle(new_segment)
                rle = mask_utils.frPyObjects(rle, *rle['size'])
                new_anns.append(RleMask(rle=rle, **fields))

        return new_anns
Example #22
    def test_can_import_bboxes(self):
        """ICDAR text-localization import must read polygons and bboxes
        together with their 'text' attributes.
        """
        expected_dataset = Dataset.from_iterable([
            DatasetItem(id='img_1',
                        subset='train',
                        image=np.ones((10, 15, 3)),
                        annotations=[
                            Polygon([0, 0, 3, 1, 4, 6, 1, 7],
                                    attributes={'text': 'FOOD'}),
                        ]),
            DatasetItem(id='img_2',
                        subset='train',
                        image=np.ones((10, 15, 3)),
                        annotations=[
                            Bbox(0, 0, 2, 3, attributes={'text': 'RED'}),
                            Bbox(3, 3, 2, 3, attributes={'text': 'LION'}),
                        ]),
        ])

        dataset = Dataset.import_from(
            osp.join(DUMMY_DATASET_DIR, 'text_localization'),
            'icdar_text_localization')

        compare_datasets(self, expected_dataset, dataset)
Example #23
    def test_can_match_shapes(self):
        """IntersectMerge should match overlapping shapes across three
        sources, keep unmatched (unique) annotations, merge keypoint
        instances by group, and report NoMatchingAnnError for annotations
        missing from some sources."""
        source0 = Dataset.from_iterable([
            DatasetItem(1, annotations=[
                # unique
                Bbox(1, 2, 3, 4, label=1),

                # common
                Mask(label=2, z_order=2, image=np.array([
                    [0, 0, 0, 0],
                    [0, 0, 0, 0],
                    [1, 1, 1, 0],
                    [1, 1, 1, 0],
                ])),
                Polygon([1, 0, 3, 2, 1, 2]),

                # an instance with keypoints
                Bbox(4, 5, 2, 4, label=2, z_order=1, group=1),
                Points([5, 6], label=0, group=1),
                Points([6, 8], label=1, group=1),

                PolyLine([1, 1, 2, 1, 3, 1]),
            ]),
        ], categories=['a', 'b', 'c'])

        source1 = Dataset.from_iterable([
            DatasetItem(1, annotations=[
                # common
                Mask(label=2, image=np.array([
                    [0, 0, 0, 0],
                    [0, 1, 1, 1],
                    [0, 1, 1, 1],
                    [0, 1, 1, 1],
                ])),
                Polygon([0, 2, 2, 0, 2, 1]),

                # an instance with keypoints
                Bbox(4, 4, 2, 5, label=2, z_order=1, group=2),
                Points([5.5, 6.5], label=0, group=2),
                Points([6, 8], label=1, group=2),

                PolyLine([1, 1.5, 2, 1.5]),
            ]),
        ], categories=['a', 'b', 'c'])

        source2 = Dataset.from_iterable([
            DatasetItem(1, annotations=[
                # common
                Mask(label=2, z_order=3, image=np.array([
                    [0, 0, 1, 1],
                    [0, 1, 1, 1],
                    [1, 1, 1, 1],
                    [1, 1, 1, 0],
                ])),
                Polygon([3, 1, 2, 2, 0, 1]),

                # an instance with keypoints, one is missing
                Bbox(3, 6, 2, 3, label=2, z_order=4, group=3),
                Points([4.5, 5.5], label=0, group=3),

                PolyLine([1, 1.25, 3, 1, 4, 2]),
            ]),
        ], categories=['a', 'b', 'c'])

        expected = Dataset.from_iterable([
            DatasetItem(1, annotations=[
                # unique
                Bbox(1, 2, 3, 4, label=1),

                # common
                # nearest to mean bbox
                Mask(label=2, z_order=3, image=np.array([
                    [0, 0, 0, 0],
                    [0, 1, 1, 1],
                    [0, 1, 1, 1],
                    [0, 1, 1, 1],
                ])),
                Polygon([1, 0, 3, 2, 1, 2]),

                # an instance with keypoints
                Bbox(4, 5, 2, 4, label=2, z_order=4, group=1),
                Points([5, 6], label=0, group=1),
                Points([6, 8], label=1, group=1),

                PolyLine([1, 1.25, 3, 1, 4, 2]),
            ]),
        ], categories=['a', 'b', 'c'])

        merger = IntersectMerge(conf={'quorum': 1, 'pairwise_dist': 0.1})
        merged = merger([source0, source1, source2])

        compare_datasets(self, expected, merged, ignored_attrs={'score'})
        # The polyline of source0 has no counterpart in source2, and its
        # unique bbox is missing from sources 1 and 2 - both must be
        # reported as NoMatchingAnnError (sorted by source count here
        # to make the comparison order-independent).
        self.assertEqual(
            [
                NoMatchingAnnError(item_id=('1', DEFAULT_SUBSET_NAME),
                    sources={2}, ann=source0.get('1').annotations[5]),
                NoMatchingAnnError(item_id=('1', DEFAULT_SUBSET_NAME),
                    sources={1, 2}, ann=source0.get('1').annotations[0]),
            ],
            sorted((e for e in merger.errors
                    if isinstance(e, NoMatchingAnnError)),
                key=lambda e: len(e.sources))
        )
Пример #24
0
    def _load_annotations(item):
        """Deserialize the 'annotations' list of a parsed JSON item into
        Datumaro annotation objects.

        Raises NotImplementedError for unknown annotation types.
        """
        annotations = []

        for entry in item['annotations']:
            ann_type = AnnotationType[entry['type']]

            # Fields shared by every annotation kind
            common = {
                'id': entry.get('id'),
                'attributes': entry.get('attributes'),
                'group': entry.get('group'),
            }
            label_id = entry.get('label_id')
            z_order = entry.get('z_order')
            points = entry.get('points')

            if ann_type == AnnotationType.label:
                annotations.append(Label(label=label_id, **common))

            elif ann_type == AnnotationType.mask:
                rle = entry['rle']
                # JSON keeps RLE counts as text; the mask code expects bytes
                rle['counts'] = rle['counts'].encode('ascii')
                annotations.append(RleMask(rle=rle, label=label_id,
                    z_order=z_order, **common))

            elif ann_type == AnnotationType.polyline:
                annotations.append(PolyLine(points, label=label_id,
                    z_order=z_order, **common))

            elif ann_type == AnnotationType.polygon:
                annotations.append(Polygon(points, label=label_id,
                    z_order=z_order, **common))

            elif ann_type == AnnotationType.bbox:
                x, y, w, h = entry['bbox']
                annotations.append(Bbox(x, y, w, h, label=label_id,
                    z_order=z_order, **common))

            elif ann_type == AnnotationType.points:
                annotations.append(Points(points, label=label_id,
                    z_order=z_order, **common))

            elif ann_type == AnnotationType.caption:
                annotations.append(Caption(entry.get('caption'), **common))

            elif ann_type == AnnotationType.cuboid_3d:
                annotations.append(Cuboid3d(entry.get('position'),
                    entry.get('rotation'),
                    entry.get('scale'),
                    label=label_id, **common))

            else:
                raise NotImplementedError()

        return annotations
Пример #25
0
    def _load_annotations(self, ann, image_info=None, parsed_annotations=None):
        """Convert one COCO annotation record into Datumaro annotations.

        Appends the results to *parsed_annotations* (a new list is created
        when None is passed) and returns that list. *image_info* is needed
        for the instances/keypoints/stuff tasks to decode segmentation
        masks against the image size.
        """
        if parsed_annotations is None:
            parsed_annotations = []

        ann_id = ann['id']

        attributes = ann.get('attributes', {})
        if 'score' in ann:
            attributes['score'] = ann['score']

        group = ann_id  # make sure all tasks' annotations are merged

        if self._task is CocoTask.instances or \
                self._task is CocoTask.person_keypoints or \
                self._task is CocoTask.stuff:
            label_id = self._get_label_id(ann)

            attributes['is_crowd'] = bool(ann['iscrowd'])

            if self._task is CocoTask.person_keypoints:
                # COCO stores keypoints as a flat [x, y, v, x, y, v, ...]
                # list; split coordinates from the visibility flags.
                keypoints = ann['keypoints']
                points = []
                visibility = []
                for x, y, v in take_by(keypoints, 3):
                    points.append(x)
                    points.append(y)
                    visibility.append(v)

                parsed_annotations.append(
                    Points(points,
                           visibility,
                           label=label_id,
                           id=ann_id,
                           attributes=attributes,
                           group=group))

            segmentation = ann['segmentation']
            if segmentation and segmentation != [[]]:
                rle = None

                if isinstance(segmentation, list):
                    if not self._merge_instance_polygons:
                        # polygon - a single object can consist of multiple parts
                        for polygon_points in segmentation:
                            parsed_annotations.append(
                                Polygon(points=polygon_points,
                                        label=label_id,
                                        id=ann_id,
                                        attributes=attributes,
                                        group=group))
                    else:
                        # merge all parts into a single mask RLE
                        rle = self._lazy_merged_mask(segmentation,
                                                     image_info['height'],
                                                     image_info['width'])
                elif isinstance(segmentation['counts'], list):
                    # uncompressed RLE; only usable if its size matches
                    # the image, otherwise the annotation is dropped
                    img_h = image_info['height']
                    img_w = image_info['width']
                    mask_h, mask_w = segmentation['size']
                    if img_h == mask_h and img_w == mask_w:
                        rle = self._lazy_merged_mask([segmentation], mask_h,
                                                     mask_w)
                    else:
                        log.warning(
                            "item #%s: mask #%s "
                            "does not match image size: %s vs. %s. "
                            "Skipping this annotation.", image_info['id'],
                            ann_id, (mask_h, mask_w), (img_h, img_w))
                else:
                    # compressed RLE
                    rle = segmentation

                if rle:
                    parsed_annotations.append(
                        RleMask(rle=rle,
                                label=label_id,
                                id=ann_id,
                                attributes=attributes,
                                group=group))
            else:
                # no usable segmentation - fall back to the bounding box
                x, y, w, h = ann['bbox']
                parsed_annotations.append(
                    Bbox(x,
                         y,
                         w,
                         h,
                         label=label_id,
                         id=ann_id,
                         attributes=attributes,
                         group=group))
        elif self._task is CocoTask.labels:
            label_id = self._get_label_id(ann)
            parsed_annotations.append(
                Label(label=label_id,
                      id=ann_id,
                      attributes=attributes,
                      group=group))
        elif self._task is CocoTask.captions:
            caption = ann['caption']
            parsed_annotations.append(
                Caption(caption, id=ann_id, attributes=attributes,
                        group=group))
        else:
            raise NotImplementedError()

        return parsed_annotations
    def test_can_import(self):
        """The LabelMe dummy dataset should be parsed into polygons, masks
        and boxes with their LabelMe attributes ('occluded', 'username' and
        user-defined flags) preserved, and grouped parts kept together."""
        img1 = np.ones((77, 102, 3)) * 255
        img1[6:32, 7:41] = 0

        # expected decoded region of the first mask file
        mask1 = np.zeros((77, 102), dtype=int)
        mask1[67:69, 58:63] = 1

        # expected decoded region of the second mask file
        mask2 = np.zeros((77, 102), dtype=int)
        mask2[13:25, 54:71] = [
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
            [0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
            [0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0],
            [0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0],
            [0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0],
            [0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0],
            [0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0],
            [0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0],
            [0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0],
            [0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        ]

        target_dataset = Dataset.from_iterable([
            DatasetItem(id='example_folder/img1', image=img1,
                annotations=[
                    Polygon([43, 34, 45, 34, 45, 37, 43, 37],
                        label=0, id=0,
                        attributes={
                            'occluded': False,
                            'username': '******'
                        }
                    ),
                    Mask(mask1, label=1, id=1,
                        attributes={
                            'occluded': False,
                            'username': '******'
                        }
                    ),
                    Polygon([30, 12, 42, 21, 24, 26, 15, 22, 18, 14, 22, 12, 27, 12],
                        label=2, group=2, id=2,
                        attributes={
                            'a1': True,
                            'occluded': True,
                            'username': '******'
                        }
                    ),
                    Polygon([35, 21, 43, 22, 40, 28, 28, 31, 31, 22, 32, 25],
                        label=3, group=2, id=3,
                        attributes={
                            'kj': True,
                            'occluded': False,
                            'username': '******'
                        }
                    ),
                    Bbox(13, 19, 10, 11, label=4, group=2, id=4,
                        attributes={
                            'hg': True,
                            'occluded': True,
                            'username': '******'
                        }
                    ),
                    Mask(mask2, label=5, group=1, id=5,
                        attributes={
                            'd': True,
                            'occluded': False,
                            'username': '******'
                        }
                    ),
                    Polygon([64, 21, 74, 24, 72, 32, 62, 34, 60, 27, 62, 22],
                        label=6, group=1, id=6,
                        attributes={
                            'gfd lkj lkj hi': True,
                            'occluded': False,
                            'username': '******'
                        }
                    ),
                ]
            ),
        ], categories=[
            'window', 'license plate', 'o1', 'q1', 'b1', 'm1', 'hg',
        ])

        parsed = Dataset.import_from(DUMMY_DATASET_DIR, 'label_me')
        compare_datasets(self, expected=target_dataset, actual=parsed)
    def test_can_save_and_load(self):
        """Round-trip through the LabelMe converter: ids are assigned
        sequentially, 'occluded'/'username' attributes get defaults, group
        numbers may be remapped, and only the labels actually used remain
        in the categories."""
        source_dataset = Dataset.from_iterable([
            DatasetItem(id='dir1/1', subset='train',
                image=np.ones((16, 16, 3)),
                annotations=[
                    Bbox(0, 4, 4, 8, label=2, group=2),
                    Polygon([0, 4, 4, 4, 5, 6], label=3, attributes={
                        'occluded': True,
                        'a1': 'qwe',
                        'a2': True,
                        'a3': 123,
                        'a4': '42', # must be escaped and recognized as string
                        'escaped': 'a,b. = \\= \\\\ " \\" \\, \\',
                    }),
                    Mask(np.array([[0, 1], [1, 0], [1, 1]]), group=2,
                        attributes={ 'username': '******' }),
                    Bbox(1, 2, 3, 4, group=3),
                    Mask(np.array([[0, 0], [0, 0], [1, 1]]), group=3,
                        attributes={ 'occluded': True }
                    ),
                ]
            ),
        ], categories=['label_' + str(label) for label in range(10)])

        # after the round trip: labels are renumbered to the used subset,
        # default attribute values are filled in
        target_dataset = Dataset.from_iterable([
            DatasetItem(id='dir1/1', subset='train',
                image=np.ones((16, 16, 3)),
                annotations=[
                    Bbox(0, 4, 4, 8, label=0, group=2, id=0,
                        attributes={
                            'occluded': False, 'username': '',
                        }
                    ),
                    Polygon([0, 4, 4, 4, 5, 6], label=1, id=1,
                        attributes={
                            'occluded': True, 'username': '',
                            'a1': 'qwe',
                            'a2': True,
                            'a3': 123,
                            'a4': '42',
                            'escaped': 'a,b. = \\= \\\\ " \\" \\, \\',
                        }
                    ),
                    Mask(np.array([[0, 1], [1, 0], [1, 1]]), group=2,
                        id=2, attributes={
                            'occluded': False, 'username': '******'
                        }
                    ),
                    Bbox(1, 2, 3, 4, group=1, id=3, attributes={
                        'occluded': False, 'username': '',
                    }),
                    Mask(np.array([[0, 0], [0, 0], [1, 1]]), group=1,
                        id=4, attributes={
                            'occluded': True, 'username': ''
                        }
                    ),
                ]
            ),
        ], categories=['label_2', 'label_3'])

        with TestDir() as test_dir:
            self._test_save_and_load(
                source_dataset,
                partial(LabelMeConverter.convert, save_images=True),
                test_dir, target_dataset=target_dataset, require_images=True)
Пример #28
0
    def test_can_save_and_load(self):
        """Round-trip through the CVAT converter: declared per-label
        attributes survive (values become strings), unknown attributes are
        dropped, unlabeled shapes are skipped, and every item gains a
        'frame' attribute."""
        src_label_cat = LabelCategories(attributes={'occluded', 'common'})
        for i in range(10):
            src_label_cat.add(str(i))
        src_label_cat.items[2].attributes.update(['a1', 'a2', 'empty'])

        source_dataset = Dataset.from_iterable([
            DatasetItem(id=0, subset='s1', image=np.zeros((5, 10, 3)),
                annotations=[
                    Polygon([0, 0, 4, 0, 4, 4],
                        label=1, group=4,
                        attributes={ 'occluded': True, 'common': 't' }),
                    Points([1, 1, 3, 2, 2, 3],
                        label=2,
                        attributes={ 'a1': 'x', 'a2': 42, 'empty': '',
                            'unknown': 'bar' }),
                    Label(1),
                    Label(2, attributes={ 'a1': 'y', 'a2': 44 }),
                ]
            ),
            DatasetItem(id=1, subset='s1',
                annotations=[
                    PolyLine([0, 0, 4, 0, 4, 4],
                        label=3, id=4, group=4),
                    Bbox(5, 0, 1, 9,
                        label=3, id=4, group=4),
                ]
            ),

            DatasetItem(id=2, subset='s2', image=np.ones((5, 10, 3)),
                annotations=[
                    Polygon([0, 0, 4, 0, 4, 4], z_order=1,
                        label=3, group=4,
                        attributes={ 'occluded': False }),
                    PolyLine([5, 0, 9, 0, 5, 5]), # will be skipped as no label
                ]
            ),

            DatasetItem(id=3, subset='s3', image=Image(
                path='3.jpg', size=(2, 4))),
        ], categories={ AnnotationType.label: src_label_cat })

        target_label_cat = LabelCategories(
            attributes={'occluded'}) # unable to represent a common attribute
        for i in range(10):
            target_label_cat.add(str(i), attributes={'common'})
        target_label_cat.items[2].attributes.update(['a1', 'a2', 'empty', 'common'])
        target_dataset = Dataset.from_iterable([
            DatasetItem(id=0, subset='s1', image=np.zeros((5, 10, 3)),
                annotations=[
                    Polygon([0, 0, 4, 0, 4, 4],
                        label=1, group=4,
                        attributes={ 'occluded': True, 'common': 't' }),
                    Points([1, 1, 3, 2, 2, 3],
                        label=2,
                        attributes={ 'occluded': False, 'empty': '',
                            'a1': 'x', 'a2': '42' }),
                    Label(1),
                    Label(2, attributes={ 'a1': 'y', 'a2': '44' }),
                ], attributes={'frame': 0}
            ),
            DatasetItem(id=1, subset='s1',
                annotations=[
                    PolyLine([0, 0, 4, 0, 4, 4],
                        label=3, group=4,
                        attributes={ 'occluded': False }),
                    Bbox(5, 0, 1, 9,
                        label=3, group=4,
                        attributes={ 'occluded': False }),
                ], attributes={'frame': 1}
            ),

            DatasetItem(id=2, subset='s2', image=np.ones((5, 10, 3)),
                annotations=[
                    Polygon([0, 0, 4, 0, 4, 4], z_order=1,
                        label=3, group=4,
                        attributes={ 'occluded': False }),
                ], attributes={'frame': 0}
            ),

            DatasetItem(id=3, subset='s3', image=Image(
                    path='3.jpg', size=(2, 4)),
                attributes={'frame': 0}),
        ], categories={ AnnotationType.label: target_label_cat })

        with TestDir() as test_dir:
            self._test_save_and_load(source_dataset,
                partial(CvatConverter.convert, save_images=True), test_dir,
                target_dataset=target_dataset)
Пример #29
0
    def _load_localization_items(self):
        """Parse ICDAR text-localization ground-truth files into items.

        Every '*.txt' file under the extractor path describes one image;
        a 'gt_' filename prefix is stripped to recover the image id. Each
        line holds either 8 point coordinates (a quadrilateral -> Polygon)
        or 4 corner coordinates (-> Bbox), optionally followed by the
        transcription, which may be quoted.

        Returns a dict of item id -> DatasetItem.
        """
        items = {}

        image_dir = osp.join(self._path, IcdarPath.IMAGES_DIR)
        if osp.isdir(image_dir):
            images = {
                osp.splitext(osp.relpath(p, image_dir))[0].replace('\\', '/'): p
                for p in find_images(image_dir, recursive=True)
            }
        else:
            images = {}

        for path in glob.iglob(
                osp.join(self._path, '**', '*.txt'), recursive=True):
            item_id = osp.splitext(osp.relpath(path, self._path))[0]
            # annotation files are named 'gt_<image id>.txt'
            if osp.basename(item_id).startswith('gt_'):
                item_id = osp.join(osp.dirname(item_id), osp.basename(item_id)[3:])
            item_id = item_id.replace('\\', '/')

            if item_id not in items:
                items[item_id] = DatasetItem(item_id, subset=self._subset,
                    image=images.get(item_id))
            annotations = items[item_id].annotations

            with open(path, encoding='utf-8') as f:
                for line in f:
                    line = line.strip()
                    # a quoted transcription splits the line into exactly
                    # 3 parts: coordinates, text, trailing remainder
                    objects = line.split('\"')
                    if 1 < len(objects):
                        if len(objects) == 3:
                            text = objects[1]
                        else:
                            raise Exception("Line %s: unexpected number "
                                "of quotes in filename" % line)
                    else:
                        text = ''
                    # coordinates may be space- or comma-separated
                    objects = objects[0].split()
                    if len(objects) == 1:
                        objects = objects[0].split(',')

                    if 8 <= len(objects):
                        # 8 values: four corner points of a quadrilateral
                        points = [float(p) for p in objects[:8]]

                        attributes = {}
                        if 0 < len(text):
                            attributes['text'] = text
                        elif len(objects) == 9:
                            # unquoted transcription as the 9th field
                            text = objects[8]
                            attributes['text'] = text

                        annotations.append(
                            Polygon(points, attributes=attributes))
                    elif 4 <= len(objects):
                        # 4 values: two opposite corners (x1, y1, x2, y2)
                        x = float(objects[0])
                        y = float(objects[1])
                        w = float(objects[2]) - x
                        h = float(objects[3]) - y

                        attributes = {}
                        if 0 < len(text):
                            attributes['text'] = text
                        elif len(objects) == 5:
                            # unquoted transcription as the 5th field
                            text = objects[4]
                            attributes['text'] = text

                        annotations.append(
                            Bbox(x, y, w, h, attributes=attributes))
        return items
Пример #30
0
    def _load_items(self, subset):
        """Populate self._items with one DatasetItem per image in *subset*.

        For every image this loads:
          * per-part-level class masks ('<image>_seg.png' for level 0,
            '<image>_parts_N.png' for level N) - one Mask per
            (class, part level) pair;
          * per-object instance masks - one Mask, plus a Polygon outline
            when valid polygon points are present.

        Label names discovered in the item info are registered in the
        dataset's label categories on the fly.
        """
        labels = self._categories.setdefault(AnnotationType.label,
                                             LabelCategories())
        path = osp.join(self._path, subset)

        images = list(find_images(path, recursive=True))

        for image_path in sorted(images):
            item_id = osp.splitext(osp.relpath(image_path, path))[0]

            # mask files live next to the images - skip them as items
            if Ade20k2020Path.MASK_PATTERN.fullmatch(osp.basename(item_id)):
                continue

            item_annotations = []
            item_info = self._load_item_info(image_path)

            # register labels that were not seen before
            for item in item_info:
                label_idx = labels.find(item['label_name'])[0]
                if label_idx is None:
                    labels.add(item['label_name'])

            mask_path = osp.splitext(image_path)[0] + '_seg.png'
            # default=0 keeps an image with empty item info from raising
            # ValueError on max() of an empty sequence
            max_part_level = max(
                (p['part_level'] for p in item_info), default=0)
            for part_level in range(max_part_level + 1):
                if not osp.exists(mask_path):
                    # lazy %-style args avoid formatting when the level
                    # is filtered out
                    log.warning("Can't find part level %s mask for %s",
                        part_level, image_path)
                    continue

                mask = lazy_image(mask_path, loader=self._load_class_mask)
                mask = CompiledMask(instance_mask=mask)

                classes = {(v['class_idx'], v['label_name'])
                           for v in item_info if v['part_level'] == part_level}

                for class_idx, label_name in classes:
                    label_id = labels.find(label_name)[0]
                    item_annotations.append(
                        Mask(label=label_id,
                             id=class_idx,
                             image=mask.lazy_extract(class_idx),
                             group=class_idx,
                             z_order=part_level))

                mask_path = osp.splitext(image_path)[0] \
                    + '_parts_%s.png' % (part_level + 1)

            # per-object instance masks and polygon outlines
            for item in item_info:
                instance_path = osp.join(osp.dirname(image_path),
                                         item['instance_mask'])
                if not osp.isfile(instance_path):
                    log.warning("Can't find instance mask: %s", instance_path)
                    continue

                mask = lazy_image(instance_path,
                                  loader=self._load_instance_mask)
                mask = CompiledMask(instance_mask=mask)

                label_id = labels.find(item['label_name'])[0]
                instance_id = item['id']
                attributes = {k: True for k in item['attributes']}
                polygon_points = item['polygon_points']

                item_annotations.append(
                    Mask(label=label_id,
                         image=mask.lazy_extract(1),
                         id=instance_id,
                         attributes=attributes,
                         z_order=item['part_level'],
                         group=instance_id))

                # only emit a polygon when the point list is well-formed:
                # an even number of coordinates and at least 3 vertices
                if (len(polygon_points) % 2 == 0
                        and 3 <= len(polygon_points) // 2):
                    item_annotations.append(
                        Polygon(polygon_points,
                                label=label_id,
                                attributes=attributes,
                                id=instance_id,
                                z_order=item['part_level'],
                                group=instance_id))

            self._items.append(
                DatasetItem(item_id,
                            subset=subset,
                            image=image_path,
                            annotations=item_annotations))