Ejemplo n.º 1
0
    def test_can_save_dataset_with_cyrillic_and_spaces_in_filename(self):
        """LabelMe round-trip must tolerate Cyrillic letters and spaces in ids.

        NOTE(review): the target differs from the source because, presumably,
        the LabelMe converter keeps only the labels actually used (so label=3
        becomes index 0 of ['label_3']) and adds an empty 'username'
        attribute -- confirm against the converter implementation.
        """
        source_dataset = Dataset.from_iterable(
            [
                DatasetItem(id='кириллица с пробелом',
                            subset='train',
                            image=np.ones((16, 16, 3)),
                            annotations=[
                                Polygon(
                                    [0, 4, 4, 4, 5, 6],
                                    label=3,
                                    attributes={
                                        'occluded': True,
                                        'a1': 'qwe',
                                        'a2': True,
                                        'a3': 123,
                                    }),
                            ]),
            ],
            categories={
                AnnotationType.label:
                LabelCategories.from_iterable('label_' + str(label)
                                              for label in range(10)),
            })

        # Expected state after save/load: remapped label, id=0, extra
        # 'username' attribute; all user attributes preserved.
        target_dataset = Dataset.from_iterable(
            [
                DatasetItem(id='кириллица с пробелом',
                            subset='train',
                            image=np.ones((16, 16, 3)),
                            annotations=[
                                Polygon(
                                    [0, 4, 4, 4, 5, 6],
                                    label=0,
                                    id=0,
                                    attributes={
                                        'occluded': True,
                                        'username': '',
                                        'a1': 'qwe',
                                        'a2': True,
                                        'a3': 123,
                                    }),
                            ]),
            ],
            categories={
                AnnotationType.label:
                LabelCategories.from_iterable(['label_3']),
            })

        with TestDir() as test_dir:
            self._test_save_and_load(source_dataset,
                                     partial(LabelMeConverter.convert,
                                             save_images=True),
                                     test_dir,
                                     target_dataset=target_dataset,
                                     require_images=True)
Ejemplo n.º 2
0
    def test_id_from_image(self):
        """IdFromImageName should rename items after their image file name,
        leaving items without an image untouched."""
        items_in = [
            DatasetItem(id=1, image='path.jpg'),
            DatasetItem(id=2),
        ]
        items_out = [
            DatasetItem(id='path', image='path.jpg'),
            DatasetItem(id=2),
        ]
        source_dataset = Dataset.from_iterable(items_in)
        target_dataset = Dataset.from_iterable(items_out)

        actual = transforms.IdFromImageName(source_dataset)
        compare_datasets(self, target_dataset, actual)
Ejemplo n.º 3
0
    def test_inplace_save_writes_only_updated_data_with_direct_changes(self):
        """In-place VOC save should rewrite only data changed via put/remove.

        The annotation files for all three items are deleted after the first
        export; a subsequent save() must recreate only '2.xml' (the item
        updated via put), while untouched and removed items stay absent.
        """
        expected = Dataset.from_iterable([
            DatasetItem(1, subset='a', image=np.ones((1, 2, 3)),
                annotations=[
                    # Bbox(0, 0, 0, 0, label=1) # won't find removed anns
                ]),

            DatasetItem(2, subset='b', image=np.ones((3, 2, 3)),
                annotations=[
                    Bbox(0, 0, 0, 0, label=4, id=1, group=1, attributes={
                        'truncated': False,
                        'difficult': False,
                        'occluded': False,
                    })
                ]),
        ], categories={
            AnnotationType.label: LabelCategories.from_iterable(
                ['background', 'a', 'b', 'c', 'd']),
            AnnotationType.mask: MaskCategories(
                colormap=VOC.generate_colormap(5)),
        })

        dataset = Dataset.from_iterable([
            DatasetItem(1, subset='a', image=np.ones((1, 2, 3)),
                annotations=[Bbox(0, 0, 0, 0, label=1)]),
            DatasetItem(2, subset='b',
                annotations=[Bbox(0, 0, 0, 0, label=2)]),
            DatasetItem(3, subset='c', image=np.ones((2, 2, 3)),
                annotations=[
                    Bbox(0, 0, 0, 0, label=3),
                    Mask(np.ones((2, 2)), label=1)
                ]),
        ], categories=['a', 'b', 'c', 'd'])

        with TestDir() as path:
            dataset.export(path, 'voc', save_images=True)
            # Simulate external deletion of all annotation files.
            os.unlink(osp.join(path, 'Annotations', '1.xml'))
            os.unlink(osp.join(path, 'Annotations', '2.xml'))
            os.unlink(osp.join(path, 'Annotations', '3.xml'))

            # Direct changes: item 2 moves to subset 'a', item 3 is removed.
            dataset.put(DatasetItem(2, subset='b', image=np.ones((3, 2, 3)),
                annotations=[Bbox(0, 0, 0, 0, label=3)]))
            dataset.remove(3, 'c')
            dataset.save(save_images=True)

            self.assertEqual({'2.xml'}, # '1.xml' won't be touched
                set(os.listdir(osp.join(path, 'Annotations'))))
            self.assertEqual({'1.jpg', '2.jpg'},
                set(os.listdir(osp.join(path, 'JPEGImages'))))
            self.assertEqual({'a.txt', 'b.txt'},
                set(os.listdir(osp.join(path, 'ImageSets', 'Main'))))
            compare_datasets(self, expected, Dataset.import_from(path, 'voc'),
                require_images=True)
Ejemplo n.º 4
0
    def test_can_save_dataset_with_non_widerface_attributes(self):
        """WiderFace save/load should drop attributes unknown to the format.

        The 'non-widerface attribute' entries are expected to disappear,
        while format-native attributes ('blur', 'invalid') survive --
        note the target stores them as strings.
        """
        source_dataset = Dataset.from_iterable([
            DatasetItem(id='a/b/1',
                        image=np.ones((8, 8, 3)),
                        annotations=[
                            Bbox(0, 2, 4, 2, label=0),
                            Bbox(0,
                                 1,
                                 2,
                                 3,
                                 label=0,
                                 attributes={
                                     'non-widerface attribute': '0',
                                     'blur': 1,
                                     'invalid': '1'
                                 }),
                            Bbox(1,
                                 1,
                                 2,
                                 2,
                                 label=0,
                                 attributes={'non-widerface attribute': '0'}),
                        ]),
        ],
                                               categories=['face'])

        target_dataset = Dataset.from_iterable([
            DatasetItem(id='a/b/1',
                        image=np.ones((8, 8, 3)),
                        annotations=[
                            Bbox(0, 2, 4, 2, label=0),
                            Bbox(0,
                                 1,
                                 2,
                                 3,
                                 label=0,
                                 attributes={
                                     'blur': '1',
                                     'invalid': '1'
                                 }),
                            Bbox(1, 1, 2, 2, label=0),
                        ]),
        ],
                                               categories=['face'])

        with TestDir() as test_dir:
            WiderFaceConverter.convert(source_dataset,
                                       test_dir,
                                       save_images=True)
            parsed_dataset = Dataset.import_from(test_dir, 'wider_face')

            compare_datasets(self, target_dataset, parsed_dataset)
Ejemplo n.º 5
0
    def test_inplace_save_writes_only_updated_data(self):
        """In-place Camvid save should rewrite only data changed via put/remove.

        After the initial export, item 2 is moved into subset 'a' and item 3
        is removed; the saved directory must contain only the 'a' subset
        files plus the label colors file, with no leftovers from subset 'c'.
        """
        src_mask_cat = MaskCategories.generate(3, include_background=False)

        expected = Dataset.from_iterable(
            [
                DatasetItem(1,
                            subset='a',
                            image=np.ones((2, 1, 3)),
                            annotations=[Mask(np.ones((2, 1)), label=2)]),
                DatasetItem(2, subset='a', image=np.ones((3, 2, 3))),
                DatasetItem(2, subset='b'),
            ],
            categories=Camvid.make_camvid_categories(
                OrderedDict([
                    ('background', (0, 0, 0)),
                    ('a', src_mask_cat.colormap[0]),
                    ('b', src_mask_cat.colormap[1]),
                ])))

        with TestDir() as path:
            dataset = Dataset.from_iterable(
                [
                    DatasetItem(1,
                                subset='a',
                                image=np.ones((2, 1, 3)),
                                annotations=[Mask(np.ones((2, 1)), label=1)]),
                    DatasetItem(2, subset='b'),
                    DatasetItem(3,
                                subset='c',
                                image=np.ones((2, 2, 3)),
                                annotations=[Mask(np.ones((2, 2)), label=0)]),
                ],
                categories={
                    AnnotationType.label:
                    LabelCategories.from_iterable(['a', 'b']),
                    AnnotationType.mask: src_mask_cat
                })
            dataset.export(path, 'camvid', save_images=True)

            # In-place updates: item 2 gains an image in subset 'a',
            # item 3 (subset 'c') is dropped entirely.
            dataset.put(DatasetItem(2, subset='a', image=np.ones((3, 2, 3))))
            dataset.remove(3, 'c')
            dataset.save(save_images=True)

            # 'a' holds images, 'aannot' the Camvid annotation masks.
            self.assertEqual(
                {'a', 'aannot', 'a.txt', 'b.txt', 'label_colors.txt'},
                set(os.listdir(path)))
            self.assertEqual({'1.jpg', '2.jpg'},
                             set(os.listdir(osp.join(path, 'a'))))
            compare_datasets(self,
                             expected,
                             Dataset.import_from(path, 'camvid'),
                             require_images=True)
Ejemplo n.º 6
0
    def test_can_remove_items_by_ids(self):
        """RemoveItems should drop exactly the (id, subset) pairs requested."""
        dataset = Dataset.from_iterable([
            DatasetItem(id='1', subset='train'),
            DatasetItem(id='2', subset='train')
        ])

        expected = Dataset.from_iterable([
            DatasetItem(id='1', subset='train')
        ])

        actual = transforms.RemoveItems(dataset, ids=[('2', 'train')])

        compare_datasets(self, expected, actual)
Ejemplo n.º 7
0
    def test_transform_fails_on_inplace_update_without_overwrite(self):
        """An in-place `transform` without --overwrite must exit with code 1."""
        with TestDir() as test_dir:
            source = Dataset.from_iterable(
                [DatasetItem(id=1, annotations=[Bbox(1, 2, 3, 4, label=1)])],
                categories=['a', 'b'])
            source.export(test_dir, 'coco')

            run(self, 'transform', '-t', 'random_split',
                test_dir + ':coco', expected_code=1)
Ejemplo n.º 8
0
    def test_can_remove_annotations_in_dataset(self):
        """RemoveAnnotations should strip every annotation but keep the items
        and categories intact."""
        dataset = Dataset.from_iterable([
            DatasetItem(id='1', subset='test', annotations=[Label(0)]),
            DatasetItem(id='2', subset='test', annotations=[Label(1)]),
        ], categories=['a', 'b'])

        expected = Dataset.from_iterable([
            DatasetItem(id='1', subset='test'),
            DatasetItem(id='2', subset='test'),
        ], categories=['a', 'b'])

        actual = transforms.RemoveAnnotations(dataset)

        compare_datasets(self, expected, actual)
Ejemplo n.º 9
0
    def test_inplace_save_writes_only_updated_data(self):
        """In-place CIFAR save should rewrite only data changed via put/remove.

        Item 2 is added to subset 'a' and item 3 (subset 'c') is removed;
        the saved directory must then contain only batches for 'a' and 'b'
        plus 'batches.meta', with the 'c' batch gone.
        """
        expected = Dataset.from_iterable([
            DatasetItem(1,
                        subset='a',
                        image=np.ones((2, 1, 3)),
                        annotations=[Label(0)]),
            DatasetItem(2,
                        subset='a',
                        image=np.ones((3, 2, 3)),
                        annotations=[Label(1)]),
            DatasetItem(2,
                        subset='b',
                        image=np.ones((2, 2, 3)),
                        annotations=[Label(1)]),
        ],
                                         categories=['a', 'b', 'c', 'd'])

        dataset = Dataset.from_iterable([
            DatasetItem(1,
                        subset='a',
                        image=np.ones((2, 1, 3)),
                        annotations=[Label(0)]),
            DatasetItem(2,
                        subset='b',
                        image=np.ones((2, 2, 3)),
                        annotations=[Label(1)]),
            DatasetItem(3,
                        subset='c',
                        image=np.ones((2, 3, 3)),
                        annotations=[Label(2)]),
        ],
                                        categories=['a', 'b', 'c', 'd'])

        with TestDir() as path:
            dataset.export(path, 'cifar', save_images=True)

            # In-place updates after the initial export.
            dataset.put(
                DatasetItem(2,
                            subset='a',
                            image=np.ones((3, 2, 3)),
                            annotations=[Label(1)]))
            dataset.remove(3, 'c')
            dataset.save(save_images=True)

            self.assertEqual({'a', 'b', 'batches.meta'}, set(os.listdir(path)))
            compare_datasets(self,
                             expected,
                             Dataset.import_from(path, 'cifar'),
                             require_images=True)
Ejemplo n.º 10
0
    def test_can_merge_categories(self):
        """IntersectMerge should union label/points/mask categories by name.

        Shared label 'b' must not be duplicated; the new label 'c' from
        source1 is appended with its points/mask entries re-indexed to 2.
        The merged item carries a Label for each source's label 0
        ('a' -> 0, 'c' -> 2); per-annotation 'score' attributes are ignored
        in the comparison.
        """
        source0 = Dataset.from_iterable([
            DatasetItem(1, annotations=[ Label(0), ]),
        ], categories={
            AnnotationType.label: LabelCategories.from_iterable(['a', 'b']),
            AnnotationType.points: PointsCategories.from_iterable([
                (0, ['l0', 'l1']),
                (1, ['l2', 'l3']),
            ]),
            AnnotationType.mask: MaskCategories({
                0: (0, 1, 2),
                1: (1, 2, 3),
            }),
        })

        source1 = Dataset.from_iterable([
            DatasetItem(1, annotations=[ Label(0), ]),
        ], categories={
            AnnotationType.label: LabelCategories.from_iterable(['c', 'b']),
            AnnotationType.points: PointsCategories.from_iterable([
                (0, []),
                (1, ['l2', 'l3']),
            ]),
            AnnotationType.mask: MaskCategories({
                0: (0, 2, 4),
                1: (1, 2, 3),
            }),
        })

        expected = Dataset.from_iterable([
            DatasetItem(1, annotations=[ Label(0), Label(2), ]),
        ], categories={
            AnnotationType.label: LabelCategories.from_iterable(['a', 'b', 'c']),
            AnnotationType.points: PointsCategories.from_iterable([
                (0, ['l0', 'l1']),
                (1, ['l2', 'l3']),
                (2, []),
            ]),
            AnnotationType.mask: MaskCategories({
                0: (0, 1, 2),
                1: (1, 2, 3),
                2: (0, 2, 4),
            }),
        })

        merger = IntersectMerge()
        merged = merger([source0, source1])

        compare_datasets(self, expected, merged, ignored_attrs={'score'})
Ejemplo n.º 11
0
    def test_can_crop_covered_segments(self):
        """COCO export with crop_covered=True should cut overlapped mask area.

        The polygon (z_order=1) covers the center of the mask (z_order=0);
        the expected mask has the covered pixels zeroed out. Pixel values in
        the target mask literal are exact and must not be altered.
        """
        source_dataset = Dataset.from_iterable([
            DatasetItem(
                id=1,
                image=np.zeros((5, 5, 3)),
                annotations=[
                    Mask(np.array(
                        [[0, 0, 1, 1, 1], [0, 0, 1, 1, 1], [1, 1, 0, 1, 1],
                         [1, 1, 1, 0, 0], [1, 1, 1, 0, 0]], ),
                         label=2,
                         id=1,
                         z_order=0),
                    Polygon([1, 1, 4, 1, 4, 4, 1, 4], label=1, id=2,
                            z_order=1),
                ]),
        ],
                                               categories=[
                                                   str(i) for i in range(10)
                                               ])

        target_dataset = Dataset.from_iterable([
            DatasetItem(id=1,
                        image=np.zeros((5, 5, 3)),
                        annotations=[
                            Mask(np.array([[0, 0, 1, 1, 1], [0, 0, 0, 0, 1],
                                           [1, 0, 0, 0, 1], [1, 0, 0, 0, 0],
                                           [1, 1, 1, 0, 0]], ),
                                 attributes={'is_crowd': True},
                                 label=2,
                                 id=1,
                                 group=1),
                            Polygon([1, 1, 4, 1, 4, 4, 1, 4],
                                    label=1,
                                    id=2,
                                    group=2,
                                    attributes={'is_crowd': False}),
                        ],
                        attributes={'id': 1}),
        ],
                                               categories=[
                                                   str(i) for i in range(10)
                                               ])

        with TestDir() as test_dir:
            self._test_save_and_load(source_dataset,
                                     partial(CocoInstancesConverter.convert,
                                             crop_covered=True),
                                     test_dir,
                                     target_dataset=target_dataset)
Ejemplo n.º 12
0
    def test_reindex(self):
        """CvatConverter with reindex=True should renumber frames from 0."""
        src = Dataset.from_iterable([
            DatasetItem(id='some/name1', image=np.ones((4, 2, 3)),
                        attributes={'frame': 40}),
        ])

        expected = Dataset.from_iterable([
            DatasetItem(id='some/name1', image=np.ones((4, 2, 3)),
                        attributes={'frame': 0}),
        ], categories=[])

        with TestDir() as test_dir:
            converter = partial(CvatConverter.convert, reindex=True)
            self._test_save_and_load(src, converter, test_dir,
                                     target_dataset=expected)
Ejemplo n.º 13
0
    def test_reindex(self):
        """transforms.Reindex should assign consecutive ids starting at 5."""
        source = Dataset.from_iterable(
            [DatasetItem(id=10),
             DatasetItem(id=10, subset='train'),
             DatasetItem(id='a', subset='val')])

        expected = Dataset.from_iterable(
            [DatasetItem(id=5),
             DatasetItem(id=6, subset='train'),
             DatasetItem(id=7, subset='val')])

        actual = transforms.Reindex(source, start=5)
        compare_datasets(self, expected, actual)
Ejemplo n.º 14
0
    def test_can_save_and_load_captions(self):
        """COCO caption annotations should survive a save/load round-trip."""
        expected_dataset = Dataset.from_iterable([
            DatasetItem(id=1, subset='train',
                annotations=[
                    Caption('hello', id=1, group=1),
                    Caption('world', id=2, group=2),
                ],
                attributes={'id': 1}),
            DatasetItem(id=2, subset='train',
                annotations=[Caption('test', id=3, group=3)],
                attributes={'id': 2}),
            DatasetItem(id=3, subset='val',
                annotations=[Caption('word', id=1, group=1)],
                attributes={'id': 1}),
        ])

        with TestDir() as test_dir:
            self._test_save_and_load(
                expected_dataset, CocoCaptionsConverter.convert, test_dir)
Ejemplo n.º 15
0
    def test_can_convert_voc_to_yolo(self):
        """`datum convert` from the VOC asset to YOLO should yield this item."""
        target_dataset = Dataset.from_iterable(
            [
                DatasetItem(id='2007_000001',
                            subset='train',
                            image=np.ones((10, 20, 3)),
                            annotations=[
                                Bbox(1.0, 2.0, 2.0, 2.0, label=8),
                                Bbox(4.0, 5.0, 2.0, 2.0, label=15),
                                Bbox(5.5, 6, 2, 2, label=22),
                            ])
            ],
            categories=[
                label.name
                for label in VOC.make_voc_categories()[AnnotationType.label]
            ])

        with TestDir() as test_dir:
            # Locate the repository's 'tests/assets' dir relative to this file.
            voc_dir = osp.join(
                __file__[:__file__.rfind(osp.join('tests', ''))], 'tests',
                'assets', 'voc_dataset', 'voc_dataset1')
            yolo_dir = osp.join(test_dir, 'yolo_dir')

            run(self, 'convert', '-if', 'voc', '-i', voc_dir, '-f', 'yolo',
                '-o', yolo_dir, '--', '--save-images')

            parsed_dataset = Dataset.import_from(yolo_dir, format='yolo')
            compare_datasets(self,
                             target_dataset,
                             parsed_dataset,
                             require_images=True)
Ejemplo n.º 16
0
    def test_can_save_and_load_yolo_dataset(self):
        """Importing the YOLO asset into a project and re-exporting it as
        YOLO should reproduce the expected dataset."""
        target_dataset = Dataset.from_iterable(
            [
                DatasetItem(id='1',
                            subset='train',
                            image=np.ones((10, 15, 3)),
                            annotations=[
                                Bbox(3.0, 3.0, 2.0, 3.0, label=4),
                                Bbox(0.0, 2.0, 4.0, 2.0, label=2)
                            ])
            ],
            categories=['label_' + str(i) for i in range(10)])

        with TestDir() as test_dir:
            # Locate the repository's 'tests/assets' dir relative to this file.
            yolo_dir = osp.join(
                __file__[:__file__.rfind(osp.join('tests', ''))], 'tests',
                'assets', 'yolo_dataset')

            run(self, 'create', '-o', test_dir)
            run(self, 'import', '-p', test_dir, '-f', 'yolo', yolo_dir)

            export_dir = osp.join(test_dir, 'export_dir')
            run(self, 'export', '-p', test_dir, '-o', export_dir, '-f', 'yolo',
                '--', '--save-images')

            parsed_dataset = Dataset.import_from(export_dir, format='yolo')
            compare_datasets(self, target_dataset, parsed_dataset)
Ejemplo n.º 17
0
    def test_can_delete_labels_from_yolo_dataset(self):
        """Filter + remap_labels should leave only 'label_2' in the export.

        Imports the YOLO asset into a project, keeps only items/annotations
        with 'label_2', deletes every other label via remap_labels, then
        re-exports and verifies the single remaining annotation.
        """
        target_dataset = Dataset.from_iterable([
            DatasetItem(id='1',
                        subset='train',
                        image=np.ones((10, 15, 3)),
                        annotations=[Bbox(0.0, 2.0, 4.0, 2.0, label=0)])
        ],
                                               categories=['label_2'])

        with TestDir() as test_dir:
            # Locate the repository's 'tests/assets' dir relative to this file.
            yolo_dir = osp.join(
                __file__[:__file__.rfind(osp.join('tests', ''))], 'tests',
                'assets', 'yolo_dataset')

            run(self, 'create', '-o', test_dir)
            run(self, 'import', '-p', test_dir, '-f', 'yolo', yolo_dir)

            # '-m i+a': match on items and annotations.
            run(self, 'filter', '-p', test_dir, '-m', 'i+a', '-e',
                "/item/annotation[label='label_2']")

            # Keep 'label_2' as-is; '--default delete' drops all other labels.
            run(self, 'transform', '-p', test_dir, '-t', 'remap_labels', '--',
                '-l', 'label_2:label_2', '--default', 'delete')

            export_dir = osp.join(test_dir, 'export')
            # Use the canonical '--save-images' flag, matching the sibling
            # tests; the previous '--save-image' only worked through argparse
            # long-option prefix matching.
            run(self, 'export', '-p', test_dir, '-o', export_dir, '-f', 'yolo',
                '--', '--save-images')

            parsed_dataset = Dataset.import_from(export_dir, format='yolo')
            compare_datasets(self, target_dataset, parsed_dataset)
Ejemplo n.º 18
0
    def test_can_save_dataset_with_cyrillic_and_spaces_in_filename(self):
        """WiderFace round-trip must tolerate Cyrillic and spaces in item ids."""
        source_dataset = Dataset.from_iterable([
            DatasetItem(id='кириллица с пробелом',
                        image=np.ones((8, 8, 3)),
                        annotations=[
                            Bbox(0,
                                 1,
                                 2,
                                 3,
                                 label=0,
                                 attributes={
                                     'blur': '2',
                                     'expression': '0',
                                     'illumination': '0',
                                     'occluded': '0',
                                     'pose': '2',
                                     'invalid': '0'
                                 }),
                        ]),
        ],
                                               categories=['face'])

        with TestDir() as test_dir:
            WiderFaceConverter.convert(source_dataset,
                                       test_dir,
                                       save_images=True)
            parsed_dataset = Dataset.import_from(test_dir, 'wider_face')

            # Round-trip should be lossless, including the image content.
            compare_datasets(self,
                             source_dataset,
                             parsed_dataset,
                             require_images=True)
Ejemplo n.º 19
0
    def test_convert_from_voc_format(self):
        """
        <b>Description:</b>
        Ensure that the dataset can be converted from VOC format with
        command `datum convert`.

        <b>Expected results:</b>
        An ImageNet dataset that matches the expected dataset.

        <b>Steps:</b>
        1. Get path to the source dataset from assets.
        2. Convert source dataset to ImageNet format, using the `convert`
           command.
        3. Verify that resulting dataset is equal to the expected dataset.
        """

        # Odd-valued VOC labels only; each labeled item's id becomes
        # '<label>/<image name>' in ImageNet layout.
        labels = sorted([l.name for l in VOC.VocLabel if l.value % 2 == 1])

        expected_dataset = Dataset.from_iterable([
            DatasetItem(id='/'.join([label, '2007_000001']),
                subset='default', annotations=[Label(i)])
                for i, label in enumerate(labels)
            ] + [DatasetItem(id='no_label/2007_000002', subset='default',
                   image=np.ones((10, 20, 3)))
            ], categories=labels
        )

        voc_dir = osp.join(DUMMY_DATASETS_DIR, 'voc_dataset1')
        with TestDir() as test_dir:
            imagenet_dir = osp.join(test_dir, 'imagenet')
            # Use the canonical '--save-images' flag; '--save-image' only
            # worked through argparse long-option prefix matching.
            run(self, 'convert', '-if', 'voc', '-i', voc_dir,
                '-f', 'imagenet', '-o', imagenet_dir, '--', '--save-images')

            target_dataset = Dataset.import_from(imagenet_dir, format='imagenet')
            compare_datasets(self, expected_dataset, target_dataset,
                require_images=True)
Ejemplo n.º 20
0
    def test_can_save_and_load_with_no_save_images(self):
        """WiderFace round-trip with save_images=False should keep all
        annotations; images are not compared here."""
        source_dataset = Dataset.from_iterable([
            DatasetItem(id='1',
                        subset='train',
                        image=np.ones((8, 8, 3)),
                        annotations=[
                            Bbox(0, 2, 4, 2, label=1),
                            Bbox(0,
                                 1,
                                 2,
                                 3,
                                 label=0,
                                 attributes={
                                     'blur': '2',
                                     'expression': '0',
                                     'illumination': '0',
                                     'occluded': '0',
                                     'pose': '2',
                                     'invalid': '0'
                                 }),
                            Label(1),
                        ])
        ],
                                               categories=['face', 'label_0'])

        with TestDir() as test_dir:
            WiderFaceConverter.convert(source_dataset,
                                       test_dir,
                                       save_images=False)
            parsed_dataset = Dataset.import_from(test_dir, 'wider_face')

            compare_datasets(self, source_dataset, parsed_dataset)
Ejemplo n.º 21
0
    def test_transform_fails_on_inplace_update_of_stage(self):
        """Transforming a committed revision target must fail.

        Without --overwrite the command exits with code 1; with --overwrite
        it raises ReadonlyDatasetError, since a 'HEAD:source-1' stage cannot
        be updated in place.
        """
        with TestDir() as test_dir:
            dataset_url = osp.join(test_dir, 'dataset')
            dataset = Dataset.from_iterable([
                DatasetItem(id=1, annotations=[Bbox(1, 2, 3, 4, label=1)]),
            ],
                                            categories=['a', 'b'])
            dataset.export(dataset_url, 'coco', save_images=True)

            project_dir = osp.join(test_dir, 'proj')
            with Project.init(project_dir) as project:
                project.import_source('source-1',
                                      dataset_url,
                                      'coco',
                                      no_cache=True)
                project.commit('first commit')

            with self.subTest('without overwrite'):
                run(self,
                    'transform',
                    '-p',
                    project_dir,
                    '-t',
                    'random_split',
                    'HEAD:source-1',
                    expected_code=1)

            with self.subTest('with overwrite'):
                with self.assertRaises(ReadonlyDatasetError):
                    run(self, 'transform', '-p', project_dir, '--overwrite',
                        '-t', 'random_split', 'HEAD:source-1')
Ejemplo n.º 22
0
    def test_can_save_and_load_with_meta_file(self):
        """OpenImages export with save_dataset_meta=True should write
        'dataset_meta.json' and still round-trip the data losslessly."""
        dataset = Dataset.from_iterable([
            DatasetItem(id='a',
                        image=np.ones((5, 5, 3)),
                        annotations=[
                            Bbox(1,
                                 2,
                                 3,
                                 4,
                                 label=0,
                                 group=1,
                                 attributes={'score': 1.0}),
                            Mask(label=1,
                                 group=0,
                                 image=np.ones((5, 5)),
                                 attributes={'box_id': '00000000'})
                        ])
        ],
                                        categories=['label_0', 'label_1'])

        with TestDir() as test_dir:
            OpenImagesConverter.convert(dataset,
                                        test_dir,
                                        save_images=True,
                                        save_dataset_meta=True)

            parsed_dataset = Dataset.import_from(test_dir, 'open_images')

            self.assertTrue(osp.isfile(osp.join(test_dir,
                                                'dataset_meta.json')))
            compare_datasets(self,
                             dataset,
                             parsed_dataset,
                             require_images=True)
Ejemplo n.º 23
0
    def test_can_import_captions(self):
        """Importing the dummy COCO captions asset should yield these items."""
        expected_dataset = Dataset.from_iterable([
            DatasetItem(id=1, subset='train', attributes={'id': 1},
                annotations=[
                    Caption('hello', id=1, group=1),
                    Caption('world', id=2, group=2),
                ]),
            DatasetItem(id=2, subset='train', attributes={'id': 2},
                annotations=[Caption('test', id=3, group=3)]),
            DatasetItem(id=3, subset='val', attributes={'id': 1},
                annotations=[Caption('word', id=1, group=1)]),
        ])

        dataset = Dataset.import_from(
            osp.join(DUMMY_DATASET_DIR, 'coco_captions'), 'coco')

        compare_datasets(self, expected_dataset, dataset)
Ejemplo n.º 24
0
    def test_can_save_and_load_voc_segmentation_dataset(self):
        """The VOC asset should load identically as 'voc_segmentation' or
        'voc', for the whole dataset and for the 'train' subset alone."""
        expected_dataset = Dataset.from_iterable([
            DatasetItem(id='2007_000001', subset='train',
                image=np.ones((10, 20, 3)),
                annotations=[
                    Mask(image=np.ones([10, 20]), label=2, group=1)
                ]),

            DatasetItem(id='2007_000002', subset='test',
                image=np.ones((10, 20, 3))),
        ], categories=VOC.make_voc_categories())

        dataset_dir = osp.join(DUMMY_DATASETS_DIR, 'voc_dataset1')
        rpath = osp.join('ImageSets', 'Segmentation', 'train.txt')
        # (format, subset, expected result path) combinations to exercise.
        matrix = [
            ('voc_segmentation', '', ''),
            ('voc_segmentation', 'train', rpath),
            ('voc', 'train', rpath),
        ]
        for format, subset, path in matrix:
            with self.subTest(format=format, subset=subset, path=path):
                if subset:
                    expected = expected_dataset.get_subset(subset)
                else:
                    expected = expected_dataset

                with TestDir() as test_dir:
                    self._test_can_save_and_load(test_dir, dataset_dir,
                        expected, format, result_path=path, label_map='voc')
# Example No. 25 (snippet separator from the original scrape)
    def test_can_import_instances(self):
        """COCO instances import: a polygon and a crowd mask with their
        attributes must match the dummy dataset on disk."""
        # 10x5 crowd mask: top half [1,0,0,1,0], bottom half [1,1,1,1,0]
        crowd_mask = np.array([[1, 0, 0, 1, 0]] * 5 +
                              [[1, 1, 1, 1, 0]] * 5)
        expected = Dataset.from_iterable(
            [
                DatasetItem(
                    id='000000000001',
                    image=np.ones((10, 5, 3)),
                    subset='val',
                    attributes={'id': 1},
                    annotations=[
                        Polygon([0, 0, 1, 0, 1, 2, 0, 2],
                                label=0, id=1, group=1,
                                attributes={
                                    'is_crowd': False,
                                    'x': 1,
                                    'y': 'hello',
                                }),
                        Mask(crowd_mask, label=0, id=2, group=2,
                             attributes={'is_crowd': True}),
                    ]),
            ],
            categories=['TEST'])

        actual = Dataset.import_from(
            osp.join(DUMMY_DATASET_DIR, 'coco_instances'), 'coco')

        compare_datasets(self, expected, actual)
# Example No. 26 (snippet separator from the original scrape)
    def test_wider_face_dataset_with_save_dataset_meta_file(self):
        """WiderFace: converting with save_dataset_meta=True must write
        dataset_meta.json and round-trip the dataset unchanged.

        NOTE(review): renamed from test_dataset_with_save_dataset_meta_file;
        a later method in this file has that same name, so this definition
        was shadowed and never collected by the test runner.
        """
        source_dataset = Dataset.from_iterable(
            [
                DatasetItem(id='a/b/1',
                            image=np.ones((8, 8, 3)),
                            subset='train',
                            annotations=[
                                Bbox(0, 2, 4, 2, label=2),
                                Bbox(0, 1, 2, 3, label=1,
                                     attributes={
                                         'blur': '2',
                                         'expression': '0',
                                         'illumination': '0',
                                         'occluded': '0',
                                         'pose': '2',
                                         'invalid': '0'
                                     }),
                            ]),
            ],
            categories=['face', 'label_0', 'label_1'])

        with TestDir() as test_dir:
            WiderFaceConverter.convert(source_dataset,
                                       test_dir,
                                       save_images=True,
                                       save_dataset_meta=True)
            parsed_dataset = Dataset.import_from(test_dir, 'wider_face')

            # The meta file must exist alongside the converted data
            self.assertTrue(osp.isfile(osp.join(test_dir,
                                                'dataset_meta.json')))
            compare_datasets(self, source_dataset, parsed_dataset)
# Example No. 27 (snippet separator from the original scrape)
    def test_can_save_and_load_image_with_arbitrary_extension(self):
        """MOT seq-gt export must preserve images that use non-default
        extensions (.JPEG, .bmp) when save_images=True."""
        expected = Dataset.from_iterable(
            [
                DatasetItem(
                    '1',
                    image=Image(path='1.JPEG', data=np.zeros((4, 3, 3))),
                    annotations=[
                        Bbox(0, 4, 4, 8, label=0,
                             attributes={
                                 'occluded': True,
                                 'visibility': 0.0,
                                 'ignored': False,
                             }),
                    ]),
                DatasetItem(
                    '2',
                    image=Image(path='2.bmp', data=np.zeros((3, 4, 3))),
                ),
            ],
            categories=['a'])

        with TestDir() as test_dir:
            convert = partial(MotSeqGtConverter.convert, save_images=True)
            self._test_save_and_load(expected, convert, test_dir,
                                     require_images=True)
# Example No. 28 (snippet separator from the original scrape)
    def test_dataset_with_save_dataset_meta_file(self):
        """Cityscapes: converting with save_dataset_meta=True must write
        dataset_meta.json and produce the expected target dataset."""
        source_dataset = Dataset.from_iterable([
            DatasetItem(id=1, image=np.ones((1, 5, 3)), annotations=[
                Mask(np.array([[1, 0, 0, 1, 1]]), label=0),
                Mask(np.array([[0, 1, 1, 0, 0]]), label=1),
            ]),
        ], categories=['a', 'b'])

        class DstExtractor(TestExtractorBase):
            # The expected result after a round trip: instance ids and
            # 'is_crowd' attributes are filled in by the converter.
            def __iter__(self):
                yield DatasetItem(
                    id=1, image=np.ones((1, 5, 3)),
                    annotations=[
                        Mask(np.array([[1, 0, 0, 1, 1]]), id=1,
                            attributes={'is_crowd': False},
                            label=self._label('a')),
                        Mask(np.array([[0, 1, 1, 0, 0]]), id=2,
                            attributes={'is_crowd': False},
                            label=self._label('b')),
                    ])

            def categories(self):
                label_map = OrderedDict(
                    [('background', None), ('a', None), ('b', None)])
                return Cityscapes.make_cityscapes_categories(label_map)

        with TestDir() as test_dir:
            convert = partial(CityscapesConverter.convert,
                label_map='source', save_images=True,
                save_dataset_meta=True)
            self._test_save_and_load(source_dataset, convert, test_dir,
                target_dataset=DstExtractor())
            self.assertTrue(
                osp.isfile(osp.join(test_dir, 'dataset_meta.json')))
# Example No. 29 (snippet separator from the original scrape)
    def test_inplace_save_writes_only_updated_data(self):
        """COCO in-place save: dataset.save() must rewrite only the
        subsets that changed since the previous export."""

        def ann_file(root, subset):
            # Path of a subset's annotation file inside an export dir
            return osp.join(root, 'annotations',
                            'image_info_%s.json' % subset)

        def img_file(root, subset, name):
            # Path of an exported image inside an export dir
            return osp.join(root, 'images', subset, name)

        with TestDir() as path:
            # Generate and export the initial dataset
            dataset = Dataset.from_iterable([
                DatasetItem(1, subset='a'),
                DatasetItem(2, subset='b'),
                DatasetItem(3, subset='c', image=np.ones((2, 2, 3))),
            ])
            dataset.export(path, 'coco', save_images=True)

            # Remove the annotation files so we can tell which subsets
            # the subsequent save() writes back
            for subset in ('a', 'b', 'c'):
                os.unlink(ann_file(path, subset))
            self.assertFalse(osp.isfile(img_file(path, 'b', '2.jpg')))
            self.assertTrue(osp.isfile(img_file(path, 'c', '3.jpg')))

            # Touch subsets 'a' and 'c' only
            dataset.put(DatasetItem(2, subset='a', image=np.ones((3, 2, 3))))
            dataset.remove(3, 'c')
            dataset.save(save_images=True)

            # Only the modified subsets were rewritten
            self.assertTrue(osp.isfile(ann_file(path, 'a')))
            self.assertFalse(osp.isfile(ann_file(path, 'b')))
            self.assertFalse(osp.isfile(ann_file(path, 'c')))
            self.assertTrue(osp.isfile(img_file(path, 'a', '2.jpg')))
            self.assertFalse(osp.isfile(img_file(path, 'c', '3.jpg')))
# Example No. 30 (snippet separator from the original scrape)
    def test_can_import(self):
        """MOT seq import: one item with a single bbox carrying the MOT
        attributes must match the dummy dataset on disk."""
        labels = LabelCategories.from_iterable(
            'label_%s' % i for i in range(10))
        expected = Dataset.from_iterable(
            [
                DatasetItem(
                    id=1,
                    image=np.ones((16, 16, 3)),
                    annotations=[
                        Bbox(0, 4, 4, 8, label=2,
                             attributes={
                                 'occluded': False,
                                 'visibility': 1.0,
                                 'ignored': False,
                             }),
                    ]),
            ],
            categories={AnnotationType.label: labels})

        actual = Dataset.import_from(DUMMY_DATASET_DIR, 'mot_seq')

        compare_datasets(self, expected, actual)