Example #1
    def test_can_catch_pickle_exception(self):
        with TestDir() as test_dir:
            anno_file = osp.join(test_dir, 'test')
            with open(anno_file, 'wb') as file:
                # a pickle containing a global reference (enumerate) should be
                # rejected by the restricted unpickler used for CIFAR files
                pickle.dump(enumerate([1, 2, 3]), file)
            with self.assertRaisesRegex(pickle.UnpicklingError, "Global"):
                Dataset.import_from(test_dir, 'cifar')
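Example #2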
    def test_can_convert(self):
        source_dataset = Dataset.import_from(DUMMY_DATASET_DIR, 'label_me')
        with TestDir() as test_dir:
            LabelMeConverter.convert(source_dataset, test_dir, save_images=True)
            parsed_dataset = Dataset.import_from(test_dir, 'label_me')

            compare_datasets(self, source_dataset, parsed_dataset,
                require_images=True)
Example #3
def _import(src_file, task_data):
    with TemporaryDirectory() as tmp_dir:
        zipfile.ZipFile(src_file).extractall(tmp_dir)
        if glob(osp.join(tmp_dir, '*.txt')):
            dataset = Dataset.import_from(tmp_dir, 'imagenet_txt', env=dm_env)
        else:
            dataset = Dataset.import_from(tmp_dir, 'imagenet', env=dm_env)
        import_dm_annotations(dataset, task_data)
Example #4
def _import(src_file, instance_data, load_data_callback=None):
    with TemporaryDirectory() as tmp_dir:
        zipfile.ZipFile(src_file).extractall(tmp_dir)
        if glob(osp.join(tmp_dir, '*.txt')):
            dataset = Dataset.import_from(tmp_dir, 'imagenet_txt', env=dm_env)
        else:
            dataset = Dataset.import_from(tmp_dir, 'imagenet', env=dm_env)
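            # note: the media-loading callback is only invoked for the
            # image-directory ('imagenet') layout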
            if load_data_callback is not None:
                load_data_callback(dataset, instance_data)
        import_dm_annotations(dataset, instance_data)
Example #5
def _import(src_file, task_data):
    if zipfile.is_zipfile(src_file):
        with TemporaryDirectory() as tmp_dir:
            zipfile.ZipFile(src_file).extractall(tmp_dir)

            dataset = Dataset.import_from(tmp_dir, 'kitti_raw', env=dm_env)
            import_dm_annotations(dataset, task_data)
    else:
        dataset = Dataset.import_from(src_file.name, 'kitti_raw', env=dm_env)
        import_dm_annotations(dataset, task_data)
Example #6
def _import(src_file, instance_data):
    if zipfile.is_zipfile(src_file):
        with TemporaryDirectory() as tmp_dir:
            zipfile.ZipFile(src_file).extractall(tmp_dir)

            dataset = Dataset.import_from(tmp_dir, 'coco', env=dm_env)
            import_dm_annotations(dataset, instance_data)
    else:
        dataset = Dataset.import_from(src_file.name,
            'coco_instances', env=dm_env)
        import_dm_annotations(dataset, instance_data)
Example #7
def _import(src_file, instance_data, load_data_callback=None):
    with TemporaryDirectory() as tmp_dir:
        if zipfile.is_zipfile(src_file):
            zipfile.ZipFile(src_file).extractall(tmp_dir)

            dataset = Dataset.import_from(tmp_dir, 'sly_pointcloud', env=dm_env)
        else:
            dataset = Dataset.import_from(src_file.name,
                                          'sly_pointcloud', env=dm_env)
        if load_data_callback is not None:
            load_data_callback(dataset, instance_data)
        import_dm_annotations(dataset, instance_data)
Example #8
def _import(src_file, instance_data):
    with TemporaryDirectory() as tmp_dir:
        zipfile.ZipFile(src_file).extractall(tmp_dir)

        dataset = Dataset.import_from(tmp_dir, 'vgg_face2', env=dm_env)
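        # strip the optional leading directory from item ids ('dir/name' -> 'name')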
        dataset.transform('rename', r"|([^/]+/)?(.+)|\2|")
        import_dm_annotations(dataset, instance_data)
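Example #9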
    def test_can_import_10(self):
        expected_dataset = Dataset.from_iterable(
            [
                DatasetItem(id='image_1',
                            subset='data_batch_1',
                            image=np.ones((32, 32, 3)),
                            annotations=[Label(0)]),
                DatasetItem(id='image_2',
                            subset='test_batch',
                            image=np.ones((32, 32, 3)),
                            annotations=[Label(1)]),
                DatasetItem(id='image_3',
                            subset='test_batch',
                            image=np.ones((32, 32, 3)),
                            annotations=[Label(3)]),
                DatasetItem(id='image_4',
                            subset='test_batch',
                            image=np.ones((32, 32, 3)),
                            annotations=[Label(2)]),
                DatasetItem(id='image_5',
                            subset='test_batch',
                            image=np.array([[[1, 2, 3], [4, 5, 6]],
                                            [[1, 2, 3], [4, 5, 6]]]),
                            annotations=[Label(3)])
            ],
            categories=['airplane', 'automobile', 'bird', 'cat'])

        dataset = Dataset.import_from(DUMMY_10_DATASET_DIR, 'cifar')

        compare_datasets(self, expected_dataset, dataset, require_images=True)
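Example #10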
    def test_can_save_and_load_with_multiple_labels(self):
        source_dataset = Dataset.from_iterable([
            DatasetItem(id='1',
                image=np.ones((8, 8, 3)),
                annotations=[Label(0), Label(1)]
            ),
            DatasetItem(id='2',
                image=np.ones((8, 8, 3))
            ),
        ], categories={
            AnnotationType.label: LabelCategories.from_iterable(
                'label_' + str(label) for label in range(2)),
        })

        expected_dataset = Dataset.from_iterable([
            DatasetItem(id='label_0/1',
                image=np.ones((8, 8, 3)),
                annotations=[Label(0)]
            ),
            DatasetItem(id='label_1/1',
                image=np.ones((8, 8, 3)),
                annotations=[Label(1)]
            ),
            DatasetItem(id='no_label/2',
                image=np.ones((8, 8, 3))
            ),
        ], categories=['label_0', 'label_1'])

        with TestDir() as test_dir:
            ImagenetConverter.convert(source_dataset, test_dir, save_images=True)

            parsed_dataset = Dataset.import_from(test_dir, 'imagenet')

            compare_datasets(self, expected_dataset, parsed_dataset,
                require_images=True)
Example #11
def _import(src_file, instance_data, load_data_callback=None):
    with TemporaryDirectory() as tmp_dir:
        Archive(src_file.name).extractall(tmp_dir)

        # put label map from the task if not present
        labelmap_file = osp.join(tmp_dir, 'labelmap.txt')
        if not osp.isfile(labelmap_file):
            labels_meta = instance_data.meta['project']['labels'] \
                if isinstance(instance_data, ProjectData) else instance_data.meta['task']['labels']
            labels = (label['name'] + ':::' for _, label in labels_meta)
            with open(labelmap_file, 'w') as f:
                f.write('\n'.join(labels))

        # support flat archive layout
        anno_dir = osp.join(tmp_dir, 'Annotations')
        if not osp.isdir(anno_dir):
            anno_files = glob(osp.join(tmp_dir, '**', '*.xml'), recursive=True)
            subsets_dir = osp.join(tmp_dir, 'ImageSets', 'Main')
            os.makedirs(subsets_dir, exist_ok=True)
            with open(osp.join(subsets_dir, 'train.txt'), 'w') as subset_file:
                for f in anno_files:
                    subset_file.write(osp.splitext(osp.basename(f))[0] + '\n')

            os.makedirs(anno_dir, exist_ok=True)
            for f in anno_files:
                shutil.move(f, anno_dir)

        dataset = Dataset.import_from(tmp_dir, 'voc', env=dm_env)
        dataset.transform('masks_to_polygons')
        if load_data_callback is not None:
            load_data_callback(dataset, instance_data)
        import_dm_annotations(dataset, instance_data)
Example #12
    def test_inplace_save_writes_only_updated_data(self):
        expected = Dataset.from_iterable([
            DatasetItem(1, subset='train', image=np.ones((2, 4, 3))),
            DatasetItem(2, subset='train', image=np.ones((3, 2, 3))),
        ], categories=[])

        with TestDir() as path:
            dataset = Dataset.from_iterable([
                DatasetItem(1, subset='train', image=np.ones((2, 4, 3))),
                DatasetItem(
                    2, subset='train', image=Image(path='2.jpg', size=(3, 2))),
                DatasetItem(3, subset='valid', image=np.ones((2, 2, 3))),
            ], categories=[])
            dataset.export(path, 'wider_face', save_images=True)

            dataset.put(
                DatasetItem(2, subset='train', image=np.ones((3, 2, 3))))
            dataset.remove(3, 'valid')
            dataset.save(save_images=True)

            self.assertEqual({'1.jpg', '2.jpg'},
                             set(
                                 os.listdir(
                                     osp.join(path, 'WIDER_train', 'images',
                                              'no_label'))))
            self.assertEqual({'wider_face_train_bbx_gt.txt'},
                             set(os.listdir(osp.join(path,
                                                     'wider_face_split'))))
            compare_datasets(self,
                             expected,
                             Dataset.import_from(path, 'wider_face'),
                             require_images=True,
                             ignored_attrs=IGNORE_ALL)
Example #13
    def test_can_save_and_load_with_landmarks(self):
        source_dataset = Dataset.from_iterable([
            DatasetItem(id='name0/name0_0001',
                subset='test', image=np.ones((2, 5, 3)),
                attributes={
                    'positive_pairs': ['name0/name0_0002'],
                    'negative_pairs': []
                },
                annotations=[
                    Points([0, 4, 3, 3, 2, 2, 1, 0, 3, 0]),
                ]
            ),
            DatasetItem(id='name0/name0_0002',
                subset='test', image=np.ones((2, 5, 3)),
                attributes={
                    'positive_pairs': [],
                    'negative_pairs': []
                },
                annotations=[
                    Points([0, 5, 3, 5, 2, 2, 1, 0, 3, 0]),
                ]
            ),
        ])

        with TestDir() as test_dir:
            LfwConverter.convert(source_dataset, test_dir, save_images=True)
            parsed_dataset = Dataset.import_from(test_dir, 'lfw')

            compare_datasets(self, source_dataset, parsed_dataset)
Example #14
    def test_dataset_with_save_dataset_meta_file(self):
        source_dataset = Dataset.from_iterable(
            [
                DatasetItem(id='a/b/1',
                            image=np.ones((8, 8, 3)),
                            subset='train',
                            annotations=[
                                Bbox(0, 2, 4, 2, label=2),
                                Bbox(0,
                                     1,
                                     2,
                                     3,
                                     label=1,
                                     attributes={
                                         'blur': '2',
                                         'expression': '0',
                                         'illumination': '0',
                                         'occluded': '0',
                                         'pose': '2',
                                         'invalid': '0'
                                     }),
                            ]),
            ],
            categories=['face', 'label_0', 'label_1'])

        with TestDir() as test_dir:
            WiderFaceConverter.convert(source_dataset,
                                       test_dir,
                                       save_images=True,
                                       save_dataset_meta=True)
            parsed_dataset = Dataset.import_from(test_dir, 'wider_face')

            self.assertTrue(osp.isfile(osp.join(test_dir,
                                                'dataset_meta.json')))
            compare_datasets(self, source_dataset, parsed_dataset)
Example #15
def _import(src_file, instance_data):
    with TemporaryDirectory() as tmp_dir:
        Archive(src_file.name).extractall(tmp_dir)

        dataset = Dataset.import_from(tmp_dir, 'label_me', env=dm_env)
        dataset.transform('masks_to_polygons')
        import_dm_annotations(dataset, instance_data)
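Example #16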
    def test_convert_from_voc_format(self):
        """
        <b>Description:</b>
        Ensure that the dataset can be converted from VOC format with
        command `datum convert`.

        <b>Expected results:</b>
        An ImageNet dataset that matches the expected dataset.

        <b>Steps:</b>
        1. Get the path to the source dataset from assets.
        2. Convert the source dataset to ImageNet format using the `convert` command.
        3. Verify that the resulting dataset is equal to the expected dataset.
        """

        labels = sorted([l.name for l in VOC.VocLabel if l.value % 2 == 1])

        expected_dataset = Dataset.from_iterable([
            DatasetItem(id='/'.join([label, '2007_000001']),
                subset='default', annotations=[Label(i)])
                for i, label in enumerate(labels)
            ] + [DatasetItem(id='no_label/2007_000002', subset='default',
                   image=np.ones((10, 20, 3)))
            ], categories=labels
        )

        voc_dir = osp.join(DUMMY_DATASETS_DIR, 'voc_dataset1')
        with TestDir() as test_dir:
            imagenet_dir = osp.join(test_dir, 'imagenet')
            run(self, 'convert', '-if', 'voc', '-i', voc_dir,
                '-f', 'imagenet', '-o', imagenet_dir, '--', '--save-image')

            target_dataset = Dataset.import_from(imagenet_dir, format='imagenet')
            compare_datasets(self, expected_dataset, target_dataset,
                require_images=True)
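Example #17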
    def test_can_save_and_load_with_meta_file(self):
        source_dataset = Dataset.from_iterable(
            [
                DatasetItem(id='image_2',
                            subset='test',
                            image=np.ones((32, 32, 3)),
                            annotations=[Label(0)]),
                DatasetItem(
                    id='image_3', subset='test', image=np.ones((32, 32, 3))),
                DatasetItem(id='image_4',
                            subset='test',
                            image=np.ones((32, 32, 3)),
                            annotations=[Label(1)])
            ],
            categories=['label_0', 'label_1'])

        with TestDir() as test_dir:
            CifarConverter.convert(source_dataset,
                                   test_dir,
                                   save_images=True,
                                   save_dataset_meta=True)
            parsed_dataset = Dataset.import_from(test_dir, 'cifar')

            self.assertTrue(osp.isfile(osp.join(test_dir,
                                                'dataset_meta.json')))
            compare_datasets(self,
                             source_dataset,
                             parsed_dataset,
                             require_images=True)
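Example #18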
    def test_can_save_and_load_cifar100(self):
        source_dataset = Dataset.from_iterable(
            [
                DatasetItem(id='image_2',
                            subset='test',
                            image=np.ones((32, 32, 3)),
                            annotations=[Label(0)]),
                DatasetItem(
                    id='image_3', subset='test', image=np.ones((32, 32, 3))),
                DatasetItem(id='image_4',
                            subset='test',
                            image=np.ones((32, 32, 3)),
                            annotations=[Label(1)])
            ],
            categories=[['class_0', 'superclass_0'],
                        ['class_1', 'superclass_0']])

        with TestDir() as test_dir:
            CifarConverter.convert(source_dataset, test_dir, save_images=True)
            parsed_dataset = Dataset.import_from(test_dir, 'cifar')

            compare_datasets(self,
                             source_dataset,
                             parsed_dataset,
                             require_images=True)
Example #19
    def test_can_import(self):
        expected_dataset = Dataset.from_iterable(
            [
                DatasetItem(id=1,
                            image=np.ones((16, 16, 3)),
                            annotations=[
                                Bbox(0,
                                     4,
                                     4,
                                     8,
                                     label=2,
                                     attributes={
                                         'occluded': False,
                                         'visibility': 1.0,
                                         'ignored': False,
                                     }),
                            ]),
            ],
            categories={
                AnnotationType.label:
                LabelCategories.from_iterable('label_' + str(label)
                                              for label in range(10)),
            })

        dataset = Dataset.import_from(DUMMY_DATASET_DIR, 'mot_seq')

        compare_datasets(self, expected_dataset, dataset)
Example #20
def split_video_command(args):
    src_path = osp.abspath(args.src_path)

    dst_dir = args.dst_dir
    if dst_dir:
        if not args.overwrite and osp.isdir(dst_dir) and os.listdir(dst_dir):
            raise CliException("Directory '%s' already exists "
                               "(pass --overwrite to overwrite)" % dst_dir)
    else:
        dst_dir = generate_next_file_name('%s-frames' % osp.basename(src_path))
    dst_dir = osp.abspath(dst_dir)

    log.info("Exporting frames...")

    dataset = Dataset.import_from(src_path,
                                  'video_frames',
                                  name_pattern=args.name_pattern,
                                  step=args.step,
                                  start_frame=args.start_frame,
                                  end_frame=args.end_frame)

    dataset.export(format='image_dir',
                   save_dir=dst_dir,
                   image_ext=args.image_ext)

    log.info("Frames are exported into '%s'" % dst_dir)

    return 0
Example #21
    def test_can_save_and_load_with_no_save_images(self):
        source_dataset = Dataset.from_iterable([
            DatasetItem(id='1',
                        subset='train',
                        image=np.ones((8, 8, 3)),
                        annotations=[
                            Bbox(0, 2, 4, 2, label=1),
                            Bbox(0,
                                 1,
                                 2,
                                 3,
                                 label=0,
                                 attributes={
                                     'blur': '2',
                                     'expression': '0',
                                     'illumination': '0',
                                     'occluded': '0',
                                     'pose': '2',
                                     'invalid': '0'
                                 }),
                            Label(1),
                        ])
        ], categories=['face', 'label_0'])

        with TestDir() as test_dir:
            WiderFaceConverter.convert(source_dataset,
                                       test_dir,
                                       save_images=False)
            parsed_dataset = Dataset.import_from(test_dir, 'wider_face')

            compare_datasets(self, source_dataset, parsed_dataset)
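Example #22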
    def test_can_import_with_colored_masks(self):
        expected_dataset = Dataset.from_iterable(
            [
                DatasetItem(
                    id='Stereo_Left/Omni_F/000000',
                    image=np.ones((1, 5, 3)),
                    annotations=[
                        Mask(np.array([[1, 1, 0, 0, 0]]), label=1),
                        Mask(np.array([[0, 0, 1, 1, 0]]), label=2),
                        Mask(np.array([[0, 0, 0, 0, 1]]), label=3),
                    ],
                ),
                DatasetItem(
                    id='Stereo_Left/Omni_F/000001',
                    image=np.ones((1, 5, 3)),
                    annotations=[
                        Mask(np.array([[1, 0, 0, 0, 0]]), label=1),
                        Mask(np.array([[0, 1, 0, 0, 0]]), label=2),
                        Mask(np.array([[0, 0, 1, 1, 0]]), label=15),
                        Mask(np.array([[0, 0, 0, 0, 1]]), label=3),
                    ],
                )
            ],
            categories=Synthia.make_categories())

        dataset = Dataset.import_from(DUMMY_COLOR_SEGM_DATASET_DIR, 'synthia')

        compare_datasets(self, expected_dataset, dataset, require_images=True)
Example #23
    def test_can_save_dataset_with_cyrillic_and_spaces_in_filename(self):
        source_dataset = Dataset.from_iterable([
            DatasetItem(id='кириллица с пробелом',
                        image=np.ones((8, 8, 3)),
                        annotations=[
                            Bbox(0,
                                 1,
                                 2,
                                 3,
                                 label=0,
                                 attributes={
                                     'blur': '2',
                                     'expression': '0',
                                     'illumination': '0',
                                     'occluded': '0',
                                     'pose': '2',
                                     'invalid': '0'
                                 }),
                        ]),
        ], categories=['face'])

        with TestDir() as test_dir:
            WiderFaceConverter.convert(source_dataset,
                                       test_dir,
                                       save_images=True)
            parsed_dataset = Dataset.import_from(test_dir, 'wider_face')

            compare_datasets(self,
                             source_dataset,
                             parsed_dataset,
                             require_images=True)
Example #24
    def test_can_save_and_load_without_saving_images(self):
        dataset = Dataset.from_iterable([
            DatasetItem(id='a',
                        image=np.ones((5, 5, 3)),
                        annotations=[
                            Bbox(1,
                                 2,
                                 3,
                                 4,
                                 label=0,
                                 group=1,
                                 attributes={'score': 1.0}),
                            Mask(label=1,
                                 group=0,
                                 image=np.ones((5, 5)),
                                 attributes={'box_id': '00000000'})
                        ])
        ], categories=['label_0', 'label_1'])

        with TestDir() as test_dir:
            OpenImagesConverter.convert(dataset, test_dir)

            parsed_dataset = Dataset.import_from(test_dir, 'open_images')

            compare_datasets(self, dataset, parsed_dataset)
Example #25
    def test_can_delete_labels_from_yolo_dataset(self):
        target_dataset = Dataset.from_iterable([
            DatasetItem(id='1',
                        subset='train',
                        image=np.ones((10, 15, 3)),
                        annotations=[Bbox(0.0, 2.0, 4.0, 2.0, label=0)])
        ], categories=['label_2'])

        with TestDir() as test_dir:
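            # build the path to tests/assets/yolo_dataset from this file's location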
            yolo_dir = osp.join(
                __file__[:__file__.rfind(osp.join('tests', ''))], 'tests',
                'assets', 'yolo_dataset')

            run(self, 'create', '-o', test_dir)
            run(self, 'import', '-p', test_dir, '-f', 'yolo', yolo_dir)

            run(self, 'filter', '-p', test_dir, '-m', 'i+a', '-e',
                "/item/annotation[label='label_2']")

            run(self, 'transform', '-p', test_dir, '-t', 'remap_labels', '--',
                '-l', 'label_2:label_2', '--default', 'delete')

            export_dir = osp.join(test_dir, 'export')
            run(self, 'export', '-p', test_dir, '-o', export_dir, '-f', 'yolo',
                '--', '--save-image')

            parsed_dataset = Dataset.import_from(export_dir, format='yolo')
            compare_datasets(self, target_dataset, parsed_dataset)
Example #26
    def test_can_save_and_load_with_meta_file(self):
        dataset = Dataset.from_iterable([
            DatasetItem(id='a',
                        image=np.ones((5, 5, 3)),
                        annotations=[
                            Bbox(1,
                                 2,
                                 3,
                                 4,
                                 label=0,
                                 group=1,
                                 attributes={'score': 1.0}),
                            Mask(label=1,
                                 group=0,
                                 image=np.ones((5, 5)),
                                 attributes={'box_id': '00000000'})
                        ])
        ], categories=['label_0', 'label_1'])

        with TestDir() as test_dir:
            OpenImagesConverter.convert(dataset,
                                        test_dir,
                                        save_images=True,
                                        save_dataset_meta=True)

            parsed_dataset = Dataset.import_from(test_dir, 'open_images')

            self.assertTrue(osp.isfile(osp.join(test_dir,
                                                'dataset_meta.json')))
            compare_datasets(self,
                             dataset,
                             parsed_dataset,
                             require_images=True)