Example #1
    def _load_instances_items(self):
        items = {}

        instances_dir = osp.join(self._annotations_dir, MapillaryVistasPath.INSTANCES_DIR)
        for instance_path in find_images(instances_dir, recursive=True):
            item_id = osp.splitext(osp.relpath(instance_path, instances_dir))[0]

            mask = load_image(instance_path, dtype=np.uint32)

            annotations = []
            for uval in np.unique(mask):
                label_id, instance_id = uval >> 8, uval & 255
                annotations.append(
                    Mask(
                        image=self._lazy_extract_mask(mask, uval),
                        label=label_id, id=instance_id
                    )
                )

            items[item_id] = DatasetItem(id=item_id, subset=self._subset,
                annotations=annotations)

        from PIL import Image as PILImage

        class_dir = osp.join(self._annotations_dir, MapillaryVistasPath.CLASS_DIR)
        for class_path in find_images(class_dir, recursive=True):
            item_id = osp.splitext(osp.relpath(class_path, class_dir))[0]
            if item_id in items:
                continue

            class_mask = np.array(PILImage.open(class_path))
            classes = np.unique(class_mask)
            classes = np.unique(class_mask)

            annotations = []
            for label_id in classes:
                annotations.append(Mask(label=label_id,
                    image=self._lazy_extract_mask(class_mask, label_id))
                )

            items[item_id] = DatasetItem(id=item_id, subset=self._subset,
                annotations=annotations)

        for image_path in find_images(self._images_dir, recursive=True):
            item_id = osp.splitext(osp.relpath(image_path, self._images_dir))[0]
            image = Image(path=image_path)
            if item_id in items:
                items[item_id].image = image
            else:
                items[item_id] = DatasetItem(id=item_id, subset=self._subset,
                    image=image)

        self._load_polygons(items)
        return items.values()
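
Note on Example #1: the `uval >> 8` / `uval & 255` split assumes each pixel of the instance mask packs the label id into the high bits and the instance index into the low byte. A minimal standalone sketch of that decoding, with made-up pixel values:

import numpy as np

# Made-up instance mask: pixel value = (label_id << 8) | instance_id
mask = np.array([[(7 << 8) | 1, (7 << 8) | 1],
                 [(2 << 8) | 0, (7 << 8) | 2]], dtype=np.uint32)

for uval in np.unique(mask):
    label_id, instance_id = uval >> 8, uval & 255
    binary_mask = mask == uval  # boolean mask of this instance, as _lazy_extract_mask would produce
    print(int(label_id), int(instance_id), int(binary_mask.sum()))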
Example #2
    def _load_items(self, rootdir):
        items = {}

        paths = []
        anno_file = osp.join(
            rootdir, Market1501Path.LIST_PREFIX + self._subset + '.txt')
        if osp.isfile(anno_file):
            with open(anno_file, encoding='utf-8') as f:
                for line in f:
                    paths.append(osp.join(rootdir, line.strip()))
        else:
            paths = list(
                chain(
                    find_images(osp.join(rootdir, Market1501Path.QUERY_DIR),
                                recursive=True),
                    find_images(osp.join(
                        rootdir, Market1501Path.BBOX_DIR + self._subset),
                                recursive=True),
                ))

        for image_path in paths:
            item_id = osp.splitext(osp.normpath(image_path))[0]
            if osp.isabs(image_path):
                item_id = osp.relpath(item_id, rootdir)
            subdir, item_id = item_id.split(os.sep, maxsplit=1)

            pid = Market1501Path.UNKNOWN_ID
            camid = Market1501Path.UNKNOWN_ID
            search = Market1501Path.PATTERN.search(osp.basename(item_id))
            if search:
                pid, camid = map(int, search.groups()[0:2])
                camid -= 1  # make ids 0-based
                custom_name = search.groups()[2]
                if custom_name:
                    item_id = osp.join(osp.dirname(item_id), custom_name)

            item = items.get(item_id)
            if item is None:
                item = DatasetItem(id=item_id,
                                   subset=self._subset,
                                   image=image_path)
                items[item_id] = item

            if pid != Market1501Path.UNKNOWN_ID or \
                    camid != Market1501Path.UNKNOWN_ID:
                attributes = item.attributes
                attributes['query'] = subdir == Market1501Path.QUERY_DIR
                attributes['person_id'] = pid
                attributes['camera_id'] = camid
        return items
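
Note on Example #2: `Market1501Path.PATTERN` is not shown here; Market-1501 file names follow the `0001_c1s1_000151_00.jpg` convention (person id, then camera/sequence). A rough sketch of the person/camera extraction with a simplified, hypothetical regex (the project's actual pattern also captures an optional custom name):

import re

# Hypothetical, simplified pattern; the real Market1501Path.PATTERN may differ.
PATTERN = re.compile(r'^(-?\d+)_c(\d+)')

search = PATTERN.search('0001_c1s1_000151_00')
if search:
    pid, camid = map(int, search.groups()[0:2])
    camid -= 1  # make ids 0-based, as in the loader above
    print(pid, camid)  # -> 1 0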
Example #3
    def _load_items(self, parsed):
        for frame_id, frame_desc in parsed.items():
            pcd_name = frame_desc['name']
            name = osp.splitext(pcd_name)[0]
            pcd_path = osp.join(self._rootdir, PointCloudPath.BASE_DIR,
                                PointCloudPath.POINT_CLOUD_DIR, pcd_name)
            assert pcd_path.endswith('.pcd'), pcd_path

            related_images_dir = osp.join(self._rootdir,
                                          PointCloudPath.BASE_DIR,
                                          PointCloudPath.RELATED_IMAGES_DIR,
                                          name + '_pcd')
            related_images = None
            if osp.isdir(related_images_dir):
                related_images = find_images(related_images_dir)

            parsed[frame_id] = DatasetItem(
                id=name,
                subset=self._subset,
                point_cloud=pcd_path,
                related_images=related_images,
                annotations=frame_desc.get('annotations'),
                attributes={
                    'frame': int(frame_id),
                    **frame_desc['attributes']
                })

        return parsed
Example #4
    def _load_items(self, subset, path):
        items = []
        for label_cat in self._categories[AnnotationType.label]:
            label = label_cat.name
            label_id = self._categories[AnnotationType.label].find(label)[0]
            for image_path in find_images(osp.join(path, label)):
                image_name = osp.basename(image_path)
                item_id = osp.splitext(image_name)[0]
                pedestrian_id = image_name[0:4]

                if not fnmatch.fnmatch(image_name,
                                       label + MarsPath.IMAGE_NAME_POSTFIX):
                    items.append(DatasetItem(id=item_id, image=image_path))
                    continue

                if pedestrian_id != label:
                    log.warning(f'The image {image_path} will be skipped '
                                'because its pedestrian id does not match '
                                f'the directory name: {label}')
                    continue

                items.append(
                    DatasetItem(id=item_id,
                                image=image_path,
                                subset=subset,
                                annotations=[Label(label=label_id)],
                                attributes={
                                    'person_id': pedestrian_id,
                                    'camera_id': int(image_name[5]),
                                    'track_id': int(image_name[7:11]),
                                    'frame_id': int(image_name[12:15])
                                }))

        return items
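
Note on Example #4: the fixed slices assume MARS-style image names such as `0001C1T0001F001.jpg`, i.e. a 4-digit person id, `C` plus a camera digit, `T` plus a 4-digit track id and `F` plus a 3-digit frame id (the name below is made up):

# Made-up MARS-style name: <person:4>C<camera:1>T<track:4>F<frame:3>.jpg
image_name = '0001C1T0001F001.jpg'
print(image_name[0:4],         # person id -> '0001'
      int(image_name[5]),      # camera id -> 1
      int(image_name[7:11]),   # track id  -> 1
      int(image_name[12:15]))  # frame id  -> 1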
Example #5
    def patch(cls, dataset, patch, save_dir, **kwargs):
        conv = cls(patch.as_dataset(dataset), save_dir=save_dir, **kwargs)
        conv.apply()

        pcd_dir = osp.abspath(osp.join(save_dir, KittiRawPath.PCD_DIR))
        for (item_id, subset), status in patch.updated_items.items():
            if status != ItemStatus.removed:
                item = patch.data.get(item_id, subset)
            else:
                item = DatasetItem(item_id, subset=subset)

            if status != ItemStatus.removed and item.has_point_cloud:
                continue

            pcd_path = osp.join(pcd_dir, conv._make_pcd_filename(item))
            if osp.isfile(pcd_path):
                os.unlink(pcd_path)

            for d in os.listdir(save_dir):
                image_dir = osp.join(save_dir, d, 'data', osp.dirname(item.id))
                if d.startswith(KittiRawPath.IMG_DIR_PREFIX) and \
                        osp.isdir(image_dir):
                    for p in find_images(image_dir):
                        if osp.splitext(osp.basename(p))[0] == \
                                osp.basename(item.id):
                            os.unlink(p)
Example #6
    def _load_items(self, subset):
        labels = self._categories.setdefault(AnnotationType.label,
                                             LabelCategories())
        path = osp.join(self._path, subset)

        images = list(find_images(path, recursive=True))

        for image_path in sorted(images):
            item_id = osp.splitext(osp.relpath(image_path, path))[0]

            if Ade20k2017Path.MASK_PATTERN.fullmatch(osp.basename(item_id)):
                continue

            item_annotations = []

            item_info = self._load_item_info(image_path)
            for item in item_info:
                label_idx = labels.find(item['label_name'])[0]
                if label_idx is None:
                    labels.add(item['label_name'])

            mask_path = osp.splitext(image_path)[0] + '_seg.png'
            if not osp.isfile(mask_path):
                log.warning("Can't find mask for image: %s" % image_path)

            part_level = 0
            max_part_level = max([p['part_level'] for p in item_info])
            for part_level in range(max_part_level + 1):
                if not osp.exists(mask_path):
                    log.warning("Can't find part level %s mask for %s"
                        % (part_level, image_path))
                    continue

                mask = lazy_image(mask_path, loader=self._load_instance_mask)
                mask = CompiledMask(instance_mask=mask)

                for v in item_info:
                    if v['part_level'] != part_level:
                        continue

                    label_id = labels.find(v['label_name'])[0]
                    instance_id = v['id']
                    attributes = {k: True for k in v['attributes']}

                    item_annotations.append(
                        Mask(label=label_id,
                             image=mask.lazy_extract(instance_id),
                             id=instance_id,
                             attributes=attributes,
                             z_order=part_level,
                             group=instance_id))

                mask_path = osp.splitext(image_path)[0] \
                    + '_parts_%s.png' % (part_level + 1)

            self._items.append(
                DatasetItem(item_id,
                            subset=subset,
                            image=image_path,
                            annotations=item_annotations))
Example #7
    def _load_items(self):
        items = {}
        image_path_by_id = {}

        if self._images_dir:
            image_path_by_id = {
                self._get_id_from_image_path(p): p
                for p in find_images(self._images_dir, recursive=True)
            }

        masks = glob.glob(osp.join(
            self._gt_anns_dir, '**',
            f'*{CityscapesPath.LABEL_TRAIN_IDS_SUFFIX}'),
                          recursive=True)
        mask_suffix = CityscapesPath.LABEL_TRAIN_IDS_SUFFIX
        if not masks:
            masks = glob.glob(osp.join(
                self._gt_anns_dir, '**',
                f'*{CityscapesPath.GT_INSTANCE_MASK_SUFFIX}'),
                              recursive=True)
            mask_suffix = CityscapesPath.GT_INSTANCE_MASK_SUFFIX
        for mask_path in masks:
            item_id = self._get_id_from_mask_path(mask_path, mask_suffix)

            anns = []
            instances_mask = load_image(mask_path, dtype=np.int32)
            segm_ids = np.unique(instances_mask)
            for segm_id in segm_ids:
                # either is_crowd or ann_id should be set
                if segm_id < 1000:
                    label_id = segm_id
                    is_crowd = True
                    ann_id = None
                else:
                    label_id = segm_id // 1000
                    is_crowd = False
                    ann_id = segm_id % 1000
                anns.append(
                    Mask(image=self._lazy_extract_mask(instances_mask,
                                                       segm_id),
                         label=label_id,
                         id=ann_id,
                         attributes={'is_crowd': is_crowd}))

            items[item_id] = DatasetItem(id=item_id,
                                         subset=self._subset,
                                         image=image_path_by_id.pop(
                                             item_id, None),
                                         annotations=anns)

        for item_id, path in image_path_by_id.items():
            items[item_id] = DatasetItem(id=item_id,
                                         subset=self._subset,
                                         image=path)

        self._categories = self._load_categories(self._path,
            use_train_label_map= \
                mask_suffix == CityscapesPath.LABEL_TRAIN_IDS_SUFFIX)
        return items
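
Note on Example #7: the `segm_id < 1000` branch follows the Cityscapes instance-id convention, where stuff/crowd regions store the bare label id and instances store `label_id * 1000 + instance_index`. A quick illustration with made-up ids:

# Made-up segment ids from a Cityscapes-style *_instanceIds.png mask
for segm_id in (7, 26002):
    if segm_id < 1000:
        label_id, is_crowd, ann_id = segm_id, True, None
    else:
        label_id, is_crowd, ann_id = segm_id // 1000, False, segm_id % 1000
    print(segm_id, label_id, is_crowd, ann_id)
# 7     -> label 7, is_crowd=True, no instance id
# 26002 -> label 26, is_crowd=False, instance 2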
Example #8
    def __init__(self, url, subset=None):
        super().__init__(subset=subset)

        assert osp.isdir(url), url

        for path in find_images(url, recursive=True):
            item_id = osp.relpath(osp.splitext(path)[0], url)
            self._items.append(
                DatasetItem(id=item_id, subset=self._subset, image=path))
Example #9
    def __iter__(self):
        image_dir = osp.join(self._dataset_dir, VocPath.IMAGES_DIR)
        if osp.isdir(image_dir):
            images = { osp.splitext(osp.relpath(p, image_dir))[0]: p
                for p in find_images(image_dir, recursive=True) }
        else:
            images = {}

        for item_id in self._items:
            log.debug("Reading item '%s'" % item_id)
            anns = self._load_annotations(item_id)
            yield DatasetItem(id=item_id, subset=self._subset,
                image=images.get(item_id), annotations=anns)
Example #10
    def _load_items(self):
        images_dir = osp.join(self._dataset_dir, OpenImagesPath.IMAGES_DIR)

        self._image_paths_by_id = {
            # the first component of `path_parts` is the subset name
            '/'.join(path_parts[1:]): path
            for path in find_images(images_dir, recursive=True)
            for path_parts in [split_path(
                osp.splitext(osp.relpath(path, images_dir))[0],
            )]
            if 1 < len(path_parts)
        }

        items_by_id = {}

        def load_from(annotation_name):
            with self._open_csv_annotation(annotation_name) as image_reader:
                for image_description in image_reader:
                    image_id = image_description['ImageID']
                    if image_id in items_by_id:
                        raise RepeatedItemError(item_id=image_id)

                    subset = image_description['Subset']

                    if _RE_INVALID_PATH_COMPONENT.fullmatch(subset):
                        raise UnsupportedSubsetNameError(
                            item_id=image_id, subset=subset)

                    items_by_id[image_id] = self._add_item(image_id, subset)

        # It's preferable to load the combined image description file,
        # because it contains descriptions for training images without human-annotated labels
        # (the file specific to the training set doesn't).
        # However, if it's missing, we'll try loading subset-specific files instead, so that
        # this extractor can be used on individual subsets of the dataset.
        try:
            load_from(OpenImagesPath.FULL_IMAGE_DESCRIPTION_FILE_NAME)
        except FileNotFoundError:
            for pattern in OpenImagesPath.SUBSET_IMAGE_DESCRIPTION_FILE_PATTERNS:
                for path in self._glob_annotations(pattern):
                    load_from(path)

        self._load_labels(items_by_id)
        normalized_coords = self._load_bboxes(items_by_id)
        self._load_masks(items_by_id, normalized_coords)
Example #11
    def __iter__(self):
        image_dir = osp.join(self._dataset_dir, VocPath.IMAGES_DIR)
        if osp.isdir(image_dir):
            images = {
                osp.splitext(osp.relpath(p, image_dir))[0].replace('\\', '/'):
                p
                for p in find_images(image_dir, recursive=True)
            }
        else:
            images = {}

        anno_dir = osp.join(self._dataset_dir, VocPath.ANNOTATIONS_DIR)

        for item_id in self._ctx.progress_reporter.iter(
                self._items, desc=f"Parsing boxes in '{self._subset}'"):
            log.debug("Reading item '%s'" % item_id)
            size = None
            image = None

            try:
                anns = []
                ann_file = osp.join(anno_dir, item_id + '.xml')
                if osp.isfile(ann_file):
                    root_elem = ElementTree.parse(ann_file)
                    height = root_elem.find('size/height')
                    if height is not None:
                        height = int(height.text)
                    width = root_elem.find('size/width')
                    if width is not None:
                        width = int(width.text)
                    if height and width:
                        size = (height, width)
                    filename_elem = root_elem.find('filename')
                    if filename_elem is not None:
                        image = osp.join(image_dir, filename_elem.text)
                    anns = self._parse_annotations(root_elem)
                else:
                    image = images.pop(item_id, None)

                if image or size:
                    image = Image(path=image, size=size)

                yield DatasetItem(id=item_id,
                                  subset=self._subset,
                                  image=image,
                                  annotations=anns)
            except Exception as e:
                self._report_item_error(e, item_id=(item_id, self._subset))
Example #12
    def _parse_items(self):
        items = []

        image_dir = self._images_dir
        if osp.isdir(image_dir):
            images = {
                osp.splitext(osp.relpath(p, image_dir))[0]: p
                for p in find_images(image_dir, recursive=True)
            }
        else:
            images = {}

        for p in sorted(iglob(self._anno_dir + '/**/*.png', recursive=True)):
            item_id = osp.splitext(osp.relpath(p, self._anno_dir))[0]
            items.append(
                DatasetItem(id=item_id,
                            subset=self._subset,
                            image=images.get(item_id),
                            annotations=self._parse_annotations(p)))
        return items
Example #13
    def __iter__(self):
        annotations = self._load_annotations()

        image_dir = osp.join(self._dataset_dir, VocPath.IMAGES_DIR)
        if osp.isdir(image_dir):
            images = {
                osp.splitext(osp.relpath(p, image_dir))[0].replace('\\', '/'):
                p
                for p in find_images(image_dir, recursive=True)
            }
        else:
            images = {}

        for item_id in self._ctx.progress_reporter.iter(
                self._items, desc=f"Parsing labels in '{self._subset}'"):
            log.debug("Reading item '%s'" % item_id)
            yield DatasetItem(id=item_id,
                              subset=self._subset,
                              image=images.get(item_id),
                              annotations=annotations.get(item_id))
Example #14
    def _load_items(self, path):
        items = {}

        for image_path in find_images(path, recursive=True, max_depth=1):
            label = osp.basename(osp.dirname(image_path))
            image_name = osp.splitext(osp.basename(image_path))[0]

            item_id = osp.join(label, image_name)
            item = items.get(item_id)
            if item is None:
                item = DatasetItem(id=item_id, subset=self._subset,
                    image=image_path)
                items[item_id] = item
            annotations = item.annotations

            if label != ImagenetPath.IMAGE_DIR_NO_LABEL:
                label = self._categories[AnnotationType.label].find(label)[0]
                annotations.append(Label(label=label))

        return items
Example #15
    def _load_items(self, subset, subset_path):
        items = {}

        paths = []
        if osp.isfile(subset_path):
            with open(subset_path, encoding='utf-8') as f:
                for line in f:
                    paths.append(osp.join(self._path, line.strip()))
        else:
            paths = list(find_images(subset_path, recursive=True))

        for image_path in sorted(paths):
            item_id = osp.splitext(osp.normpath(image_path))[0]
            if osp.isabs(image_path):
                item_id = osp.relpath(item_id, self._path)
            item_id = item_id.split(osp.sep, maxsplit=1)[1]

            attributes = {}
            search = Market1501Path.PATTERN.search(osp.basename(item_id))
            if search:
                attribute_values = search.groups()[0:5]
                attributes = {
                    'person_id': attribute_values[0],
                    'camera_id': int(attribute_values[1]) - 1,
                    'track_id': int(attribute_values[2]),
                    'frame_id': int(attribute_values[3]),
                    'bbox_id': int(attribute_values[4]),
                    'query': subset == Market1501Path.QUERY_DIR
                }

                custom_name = search.groups()[5]
                if custom_name:
                    item_id = osp.join(osp.dirname(item_id), custom_name)

            item = items.get(item_id)
            if item is None:
                item = DatasetItem(id=item_id, subset=subset, image=image_path,
                    attributes=attributes)
                items[item_id] = item

        return items
Example #16
    def _load_items(self, path):
        items = {}

        image_dir = self.image_dir
        if osp.isdir(image_dir):
            images = { osp.splitext(osp.relpath(p, image_dir))[0]: p
                for p in find_images(image_dir, recursive=True) }
        else:
            images = {}

        with open(path, encoding='utf-8') as f:
            for line in f:
                item = line.split('\"')
                if 1 < len(item):
                    if len(item) == 3:
                        item_id = item[1]
                        label_ids = [int(id) for id in item[2].split()]
                    else:
                        raise Exception("Line %s: unexpected number "
                            "of quotes in filename" % line)
                else:
                    item = line.split()
                    item_id = item[0]
                    label_ids = [int(id) for id in item[1:]]

                anno = []
                for label in label_ids:
                    assert 0 <= label and \
                        label < len(self._categories[AnnotationType.label]), \
                        "Image '%s': unknown label id '%s'" % (item_id, label)
                    anno.append(Label(label))

                items[item_id] = DatasetItem(id=item_id, subset=self._subset,
                    image=images.get(item_id), annotations=anno)

        return items
Example #17
    def _load_items(self, parsed):
        images = {}
        for d in os.listdir(self._rootdir):
            image_dir = osp.join(self._rootdir, d, 'data')
            if not (d.lower().startswith(KittiRawPath.IMG_DIR_PREFIX) and \
                    osp.isdir(image_dir)):
                continue

            for p in find_images(image_dir, recursive=True):
                image_name = osp.splitext(osp.relpath(p, image_dir))[0]
                images.setdefault(image_name, []).append(p)

        name_mapping = self._parse_name_mapping(
            osp.join(self._rootdir, KittiRawPath.NAME_MAPPING_FILE))

        items = {}
        for frame_id, item_desc in parsed.items():
            name = name_mapping.get(frame_id, '%010d' % int(frame_id))
            items[frame_id] = DatasetItem(id=name, subset=self._subset,
                point_cloud=osp.join(self._rootdir,
                    KittiRawPath.PCD_DIR, name + '.pcd'),
                related_images=sorted(images.get(name, [])),
                annotations=item_desc.get('annotations'),
                attributes={'frame': int(frame_id)})

        for frame_id, name in name_mapping.items():
            if frame_id in items:
                continue

            items[frame_id] = DatasetItem(id=name, subset=self._subset,
                point_cloud=osp.join(self._rootdir,
                    KittiRawPath.PCD_DIR, name + '.pcd'),
                related_images=sorted(images.get(name, [])),
                attributes={'frame': int(frame_id)})

        return items
Example #18
    def _load_items(self, path):
        items = {}
        label_categories = self._categories.get(AnnotationType.label)

        if osp.isdir(self._images_dir):
            images = {
                osp.splitext(osp.relpath(p, self._images_dir))[0].replace(
                    '\\', '/'): p
                for p in find_images(self._images_dir, recursive=True)
            }
        else:
            images = {}

        with open(path, encoding='utf-8') as f:

            def get_label_id(label_name):
                if not label_name:
                    return None
                label_id = label_categories.find(label_name)[0]
                if label_id is None:
                    label_id = label_categories.add(label_name)
                return label_id

            for line in f:
                pair = line.strip().split('\t')
                if len(pair) == 1 and pair[0] != '':
                    annotations = []
                    image = pair[0]
                    item_id = pair[0]
                    objects = item_id.split('/')
                    if 1 < len(objects):
                        label_name = objects[0]
                        label = get_label_id(label_name)
                        if label is not None:
                            annotations.append(Label(label))
                            item_id = item_id[len(label_name) + 1:]
                    if item_id not in items:
                        items[item_id] = DatasetItem(id=item_id,
                                                     subset=self._subset,
                                                     image=images.get(image),
                                                     annotations=annotations)
                elif len(pair) == 3:
                    image1, id1 = self.get_image_name(pair[0], pair[1])
                    image2, id2 = self.get_image_name(pair[0], pair[2])
                    label = get_label_id(pair[0])

                    if id1 not in items:
                        annotations = []
                        annotations.append(Label(label))
                        items[id1] = DatasetItem(id=id1,
                                                 subset=self._subset,
                                                 image=images.get(image1),
                                                 annotations=annotations)
                    if id2 not in items:
                        annotations = []
                        annotations.append(Label(label))
                        items[id2] = DatasetItem(id=id2,
                                                 subset=self._subset,
                                                 image=images.get(image2),
                                                 annotations=annotations)

                    # pairs form a directed graph
                    if not items[id1].annotations[0].attributes.get(
                            'positive_pairs'):
                        items[id1].annotations[0].attributes[
                            'positive_pairs'] = []
                    items[id1].annotations[0].attributes[
                        'positive_pairs'].append(image2)

                elif len(pair) == 4:
                    image1, id1 = self.get_image_name(pair[0], pair[1])
                    if pair[2] == '-':
                        image2 = pair[3]
                        id2 = pair[3]
                    else:
                        image2, id2 = self.get_image_name(pair[2], pair[3])
                    if id1 not in items:
                        annotations = []
                        label = get_label_id(pair[0])
                        annotations.append(Label(label))
                        items[id1] = DatasetItem(id=id1,
                                                 subset=self._subset,
                                                 image=images.get(image1),
                                                 annotations=annotations)
                    if id2 not in items:
                        annotations = []
                        if pair[2] != '-':
                            label = get_label_id(pair[2])
                            annotations.append(Label(label))
                        items[id2] = DatasetItem(id=id2,
                                                 subset=self._subset,
                                                 image=images.get(image2),
                                                 annotations=annotations)

                    # pairs form a directed graph
                    if not items[id1].annotations[0].attributes.get(
                            'negative_pairs'):
                        items[id1].annotations[0].attributes[
                            'negative_pairs'] = []
                    items[id1].annotations[0].attributes[
                        'negative_pairs'].append(image2)

        landmarks_file = osp.join(self._annotations_dir,
                                  LfwPath.LANDMARKS_FILE)
        if osp.isfile(landmarks_file):
            with open(landmarks_file, encoding='utf-8') as f:
                for line in f:
                    line = line.split('\t')

                    item_id = osp.splitext(line[0])[0]
                    label = None
                    objects = item_id.split('/')
                    if 1 < len(objects):
                        label_name = objects[0]
                        label = get_label_id(label_name)
                        if label is not None:
                            item_id = item_id[len(label_name) + 1:]
                    if item_id not in items:
                        items[item_id] = DatasetItem(id=item_id,
                                                     subset=self._subset,
                                                     image=osp.join(
                                                         self._images_dir,
                                                         line[0]))

                    annotations = items[item_id].annotations
                    annotations.append(
                        Points([float(p) for p in line[1:]], label=label))

        return items
Example #19
    def _load_items(self, root_dir):
        items = {}

        image_dir = osp.join(root_dir, CelebaPath.IMAGES_DIR)

        if osp.isdir(image_dir):
            images = {
                osp.splitext(osp.relpath(p, image_dir))[0].replace('\\', '/'):
                p
                for p in find_images(image_dir, recursive=True)
            }
        else:
            images = {}

        label_categories = self._categories[AnnotationType.label]

        labels_path = osp.join(root_dir, CelebaPath.LABELS_FILE)
        if not osp.isfile(labels_path):
            raise DatasetImportError("File '%s': was not found" % labels_path)

        with open(labels_path, encoding='utf-8') as f:
            for line in f:
                item_id, item_ann = self.split_annotation(line)
                label_ids = [int(id) for id in item_ann]
                anno = []
                for label in label_ids:
                    while len(label_categories) <= label:
                        label_categories.add('class-%d' %
                                             len(label_categories))
                    anno.append(Label(label))

                items[item_id] = DatasetItem(id=item_id,
                                             image=images.get(item_id),
                                             annotations=anno)

        landmark_path = osp.join(root_dir, CelebaPath.LANDMARKS_FILE)
        if osp.isfile(landmark_path):
            with open(landmark_path, encoding='utf-8') as f:
                landmarks_number = int(f.readline().strip())

                point_cat = PointsCategories()
                for i, point_name in enumerate(f.readline().strip().split()):
                    point_cat.add(i, [point_name])
                self._categories[AnnotationType.points] = point_cat

                counter = 0
                for counter, line in enumerate(f):
                    item_id, item_ann = self.split_annotation(line)
                    landmarks = [float(id) for id in item_ann]

                    if len(landmarks) != len(point_cat):
                        raise DatasetImportError("File '%s', line %s: "
                            "points do not match the header of this file" % \
                            (landmark_path, line))

                    if item_id not in items:
                        raise DatasetImportError("File '%s', line %s: "
                            "for this item are not label in %s " % \
                            (landmark_path, line, CelebaPath.LABELS_FILE))

                    anno = items[item_id].annotations
                    label = anno[0].label
                    anno.append(Points(landmarks, label=label))

                if landmarks_number - 1 != counter:
                    raise DatasetImportError(
                        "File '%s': the number of "
                        "landmarks does not match the specified number "
                        "at the beginning of the file " % landmark_path)

        bbox_path = osp.join(root_dir, CelebaPath.BBOXES_FILE)
        if osp.isfile(bbox_path):
            with open(bbox_path, encoding='utf-8') as f:
                bboxes_number = int(f.readline().strip())

                if f.readline().strip() != CelebaPath.BBOXES_HEADER:
                    raise DatasetImportError("File '%s': the header "
                        "does not match the expected format '%s'" % \
                        (bbox_path, CelebaPath.BBOXES_HEADER))

                counter = 0
                for counter, line in enumerate(f):
                    item_id, item_ann = self.split_annotation(line)
                    bbox = [float(id) for id in item_ann]

                    if item_id not in items:
                        raise DatasetImportError("File '%s', line %s: "
                            "for this item are not label in %s " % \
                            (bbox_path, line, CelebaPath.LABELS_FILE))

                    anno = items[item_id].annotations
                    label = anno[0].label
                    anno.append(
                        Bbox(bbox[0], bbox[1], bbox[2], bbox[3], label=label))

                if bboxes_number - 1 != counter:
                    raise DatasetImportError(
                        "File '%s': the number of bounding "
                        "boxes does not match the specified number "
                        "at the beginning of the file " % bbox_path)

        attr_path = osp.join(root_dir, CelebaPath.ATTRS_FILE)
        if osp.isfile(attr_path):
            with open(attr_path, encoding='utf-8') as f:
                attr_number = int(f.readline().strip())
                attr_names = f.readline().split()

                counter = 0
                for counter, line in enumerate(f):
                    item_id, item_ann = self.split_annotation(line)
                    if len(attr_names) != len(item_ann):
                        raise DatasetImportError(
                            "File '%s', line %s: "
                            "the number of attributes "
                            "in the line does not match the number at the "
                            "beginning of the file " % (attr_path, line))

                    attrs = {
                        name: 0 < int(ann)
                        for name, ann in zip(attr_names, item_ann)
                    }

                    if item_id not in items:
                        items[item_id] = DatasetItem(id=item_id,
                                                     image=images.get(item_id))

                    items[item_id].attributes = attrs

                if attr_number - 1 != counter:
                    raise DatasetImportError(
                        "File %s: the number of items "
                        "with attributes does not match the specified number "
                        "at the beginning of the file " % attr_path)

        subset_path = osp.join(root_dir, CelebaPath.SUBSETS_FILE)
        if osp.isfile(subset_path):
            with open(subset_path, encoding='utf-8') as f:
                for line in f:
                    item_id, item_ann = self.split_annotation(line)
                    subset_id = item_ann[0]
                    subset = CelebaPath.SUBSETS[subset_id]

                    if item_id not in items:
                        items[item_id] = DatasetItem(id=item_id,
                                                     image=images.get(item_id))

                    items[item_id].subset = subset

                    if 'default' in self._subsets:
                        self._subsets.remove('default')
                    if subset not in self._subsets:
                        self._subsets.append(subset)

        return items
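
Note on Example #19: the attribute file follows the CelebA layout (a count line, a header line with attribute names, then one row of +1/-1 flags per image), which is why `0 < int(ann)` converts each flag to a boolean. A tiny sketch with made-up values:

# Made-up CelebA-style attribute row: +1 means present, -1 means absent
attr_names = ['Smiling', 'Young']
item_ann = ['1', '-1']
attrs = {name: 0 < int(ann) for name, ann in zip(attr_names, item_ann)}
print(attrs)  # -> {'Smiling': True, 'Young': False}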
Example #20
    def _load_items(self, path):
        items = {}

        images_dir = osp.join(self._dataset_dir, self._subset, LfwPath.IMAGES_DIR)
        if osp.isdir(images_dir):
            images = { osp.splitext(osp.relpath(p, images_dir))[0]: p
                for p in find_images(images_dir, recursive=True) }
        else:
            images = {}

        with open(path, encoding='utf-8') as f:
            for line in f:
                pair = line.strip().split('\t')
                if len(pair) == 3:
                    if pair[0] == '-':
                        image1 = pair[1]
                        image2 = pair[2]
                    else:
                        image1 = self.get_image_name(pair[0], pair[1])
                        image2 = self.get_image_name(pair[0], pair[2])
                    if image1 not in items:
                        items[image1] = DatasetItem(id=image1, subset=self._subset,
                            image=images.get(image1),
                            attributes={'positive_pairs': [], 'negative_pairs': []})
                    if image2 not in items:
                        items[image2] = DatasetItem(id=image2, subset=self._subset,
                            image=images.get(image2),
                            attributes={'positive_pairs': [], 'negative_pairs': []})

                    # pairs form a directed graph
                    items[image1].attributes['positive_pairs'].append(image2)
                elif len(pair) == 4:
                    if pair[0] == '-':
                        image1 = pair[1]
                    else:
                        image1 = self.get_image_name(pair[0], pair[1])
                    if pair[2] == '-':
                        image2 = pair[3]
                    else:
                        image2 = self.get_image_name(pair[2], pair[3])
                    if image1 not in items:
                        items[image1] = DatasetItem(id=image1, subset=self._subset,
                            image=images.get(image1),
                            attributes={'positive_pairs': [], 'negative_pairs': []})
                    if image2 not in items:
                        items[image2] = DatasetItem(id=image2, subset=self._subset,
                            image=images.get(image2),
                            attributes={'positive_pairs': [], 'negative_pairs': []})

                    # pairs form a directed graph
                    items[image1].attributes['negative_pairs'].append(image2)

        landmarks_file = osp.join(self._dataset_dir, self._subset,
            LfwPath.LANDMARKS_FILE)
        if osp.isfile(landmarks_file):
            with open(landmarks_file, encoding='utf-8') as f:
                for line in f:
                    line = line.split('\t')

                    item_id = osp.splitext(line[0])[0]
                    if item_id not in items:
                        items[item_id] = DatasetItem(id=item_id, subset=self._subset,
                            image=osp.join(images_dir, line[0]),
                            attributes={'positive_pairs': [], 'negative_pairs': []})

                    annotations = items[item_id].annotations
                    annotations.append(Points([float(p) for p in line[1:]]))

        return items
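
Note on Example #20: the 3-field and 4-field branches correspond to the standard LFW pairs file, where `name idx1 idx2` describes a positive pair (two images of the same person) and `name1 idx1 name2 idx2` a negative one. A short sketch with made-up lines:

# Made-up lines in the LFW pairs.txt layout
lines = ['George_W_Bush\t10\t24',
         'George_W_Bush\t4\tAaron_Eckhart\t1']

for line in lines:
    pair = line.strip().split('\t')
    if len(pair) == 3:       # positive pair: same person, two image numbers
        print('positive', pair)
    elif len(pair) == 4:     # negative pair: two different people
        print('negative', pair)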
Example #21
    def _load_items(self, subset):
        def _get_label(path):
            label_name = path.split('/')[0]
            label = None
            if label_name != VggFace2Path.IMAGES_DIR_NO_LABEL:
                label = \
                    self._categories[AnnotationType.label].find(label_name)[0]
            return label

        items = {}

        image_dir = osp.join(self._dataset_dir, subset)
        if osp.isdir(image_dir):
            images = {
                osp.splitext(osp.relpath(p, image_dir))[0].replace('\\', '/'):
                p
                for p in find_images(image_dir, recursive=True)
            }
        else:
            images = {}

        landmarks_path = osp.join(
            self._dataset_dir, VggFace2Path.ANNOTATION_DIR,
            VggFace2Path.LANDMARKS_FILE + subset + '.csv')
        if osp.isfile(landmarks_path):
            with open(landmarks_path, encoding='utf-8') as content:
                landmarks_table = list(csv.DictReader(content))
            for row in landmarks_table:
                item_id = row['NAME_ID']
                label = None
                if '/' in item_id:
                    label = _get_label(item_id)

                if item_id not in items:
                    items[item_id] = DatasetItem(id=item_id,
                                                 subset=subset,
                                                 image=images.get(
                                                     row['NAME_ID']))

                annotations = items[item_id].annotations
                if [a for a in annotations if a.type == AnnotationType.points]:
                    raise Exception("Item %s: an image can have only one "
                                    "set of landmarks" % item_id)

                if len([p
                        for p in row if row[p] == '']) == 0 and len(row) == 11:
                    annotations.append(
                        Points([float(row[p]) for p in row if p != 'NAME_ID'],
                               label=label))
                elif label is not None:
                    annotations.append(Label(label=label))

        bboxes_path = osp.join(self._dataset_dir, VggFace2Path.ANNOTATION_DIR,
                               VggFace2Path.BBOXES_FILE + subset + '.csv')
        if osp.isfile(bboxes_path):
            with open(bboxes_path, encoding='utf-8') as content:
                bboxes_table = list(csv.DictReader(content))
            for row in bboxes_table:
                item_id = row['NAME_ID']
                label = None
                if '/' in item_id:
                    label = _get_label(item_id)

                if item_id not in items:
                    items[item_id] = DatasetItem(id=item_id,
                                                 subset=subset,
                                                 image=images.get(
                                                     row['NAME_ID']))

                annotations = items[item_id].annotations
                if [a for a in annotations if a.type == AnnotationType.bbox]:
                    raise Exception("Item %s: an image can have only one "
                                    "bbox" % item_id)

                if len([p
                        for p in row if row[p] == '']) == 0 and len(row) == 5:
                    annotations.append(
                        Bbox(float(row['X']),
                             float(row['Y']),
                             float(row['W']),
                             float(row['H']),
                             label=label))
        return items
Example #22
    def _load_items(self, path):
        labels_count = len(self._categories[AnnotationType.label].items)
        items = OrderedDict()

        if self._seq_info:
            for frame_id in range(self._seq_info['seqlength']):
                items[frame_id] = DatasetItem(
                    id=frame_id,
                    subset=self._subset,
                    image=Image(path=osp.join(
                        self._image_dir,
                        '%06d%s' % (frame_id, self._seq_info['imext'])),
                                size=(self._seq_info['imheight'],
                                      self._seq_info['imwidth'])))
        elif osp.isdir(self._image_dir):
            for p in find_images(self._image_dir):
                frame_id = int(
                    osp.splitext(osp.relpath(p, self._image_dir))[0])
                items[frame_id] = DatasetItem(id=frame_id,
                                              subset=self._subset,
                                              image=p)

        with open(path, newline='', encoding='utf-8') as csv_file:
            # NOTE: Different MOT files have different count of fields
            # (7, 9 or 10). This is handled by reader:
            # - all extra fields go to a separate field
            # - all unmet fields have None values
            for row in csv.DictReader(csv_file, fieldnames=MotPath.FIELDS):
                frame_id = int(row['frame_id'])
                item = items.get(frame_id)
                if item is None:
                    item = DatasetItem(id=frame_id, subset=self._subset)
                annotations = item.annotations

                x, y = float(row['x']), float(row['y'])
                w, h = float(row['w']), float(row['h'])
                label_id = row.get('class_id')
                if label_id and label_id != '-1':
                    label_id = int(label_id) - 1
                    assert label_id < labels_count, label_id
                else:
                    label_id = None

                attributes = {}

                # Annotations for detection task are not related to any track
                track_id = int(row['track_id'])
                if 0 < track_id:
                    attributes['track_id'] = track_id

                confidence = cast(row.get('confidence'), float, 1)
                visibility = cast(row.get('visibility'), float, 1)
                if self._is_gt:
                    attributes['visibility'] = visibility
                    attributes['occluded'] = \
                        visibility <= self._occlusion_threshold
                    attributes['ignored'] = confidence == 0
                else:
                    attributes['score'] = float(confidence)

                annotations.append(
                    Bbox(x, y, w, h, label=label_id, attributes=attributes))

                items[frame_id] = item
        return items
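
Note on Example #22: a MOT ground-truth row is `frame, track_id, x, y, w, h, confidence, class_id, visibility`; `csv.DictReader` with a fixed field list leaves any missing trailing fields as None. A minimal sketch with a made-up row (the real MotPath.FIELDS may differ):

import csv, io

# Hypothetical field list in MOT order; MotPath.FIELDS in the project may differ.
FIELDS = ['frame_id', 'track_id', 'x', 'y', 'w', 'h',
          'confidence', 'class_id', 'visibility']

row_text = '1,3,794.27,247.59,71.245,174.88,1,1,0.8\n'
for row in csv.DictReader(io.StringIO(row_text), fieldnames=FIELDS):
    print(int(row['frame_id']), int(row['track_id']),
          float(row['confidence']), float(row['visibility']))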
Example #23
    def _load_items(self, root_dir):
        image_dir = osp.join(root_dir, SynthiaPath.IMAGES_DIR)
        if osp.isdir(image_dir):
            images = {
                osp.splitext(osp.relpath(p, image_dir))[0].replace('\\', '/'):
                p
                for p in find_images(image_dir, recursive=True)
            }
        else:
            images = {}

        items = {}

        inst_dir = osp.join(root_dir, SynthiaPath.LABELS_SEGM_DIR)
        if osp.isdir(inst_dir):
            gt_images = find_images(inst_dir, recursive=True)
            for gt_img in gt_images:
                item_id = osp.splitext(osp.relpath(gt_img,
                                                   inst_dir))[0].replace(
                                                       '\\', '/')

                anno = []
                labels_mask = load_image(gt_img, dtype=np.uint16)
                dynamic_objects = np.unique(labels_mask[:, :, 1])
                labels_mask = labels_mask[:, :, 2]
                segm_ids = np.unique(labels_mask)
                for segm_id in segm_ids:
                    attr = {'dynamic_object': False}
                    if segm_id != 0 and segm_id in dynamic_objects:
                        attr['dynamic_object'] = True
                    anno.append(
                        Mask(image=self._lazy_extract_mask(
                            labels_mask, segm_id),
                             label=segm_id,
                             attributes=attr))

                items[item_id] = DatasetItem(id=item_id,
                                             image=images.get(item_id),
                                             annotations=anno)

        elif osp.isdir(osp.join(root_dir, SynthiaPath.SEMANTIC_SEGM_DIR)):
            gt_dir = osp.join(root_dir, SynthiaPath.SEMANTIC_SEGM_DIR)
            gt_images = find_images(gt_dir, recursive=True)
            for gt_img in gt_images:
                item_id = osp.splitext(osp.relpath(gt_img, gt_dir))[0].replace(
                    '\\', '/')

                anno = []
                inverse_cls_colormap = \
                    self._categories[AnnotationType.mask].inverse_colormap
                color_mask = lazy_mask(gt_img, inverse_cls_colormap)
                color_mask = color_mask()
                classes = np.unique(color_mask)
                for label_id in classes:
                    anno.append(
                        Mask(image=self._lazy_extract_mask(
                            color_mask, label_id),
                             label=label_id))

                items[item_id] = DatasetItem(id=item_id,
                                             image=images.get(item_id),
                                             annotations=anno)

        return items
Example #24
    def _load_items(self):
        items = {}

        image_dir = osp.join(self._path, KittiPath.IMAGES_DIR)
        image_path_by_id = {
            osp.splitext(osp.relpath(p, image_dir))[0]: p
            for p in find_images(image_dir, recursive=True)
        }

        segm_dir = osp.join(self._path, KittiPath.INSTANCES_DIR)
        if self._task == KittiTask.segmentation:
            for instances_path in find_images(segm_dir,
                                              exts=KittiPath.MASK_EXT,
                                              recursive=True):
                item_id = osp.splitext(osp.relpath(instances_path,
                                                   segm_dir))[0]
                anns = []

                instances_mask = load_image(instances_path, dtype=np.int32)
                segm_ids = np.unique(instances_mask)
                for segm_id in segm_ids:
                    semantic_id = segm_id >> 8
                    ann_id = int(segm_id % 256)
                    is_crowd = (ann_id == 0)
                    anns.append(
                        Mask(image=self._lazy_extract_mask(
                            instances_mask, segm_id),
                             label=semantic_id,
                             id=ann_id,
                             attributes={'is_crowd': is_crowd}))

                items[item_id] = DatasetItem(id=item_id,
                                             annotations=anns,
                                             image=image_path_by_id.pop(
                                                 item_id, None),
                                             subset=self._subset)

        det_dir = osp.join(self._path, KittiPath.LABELS_DIR)
        if self._task == KittiTask.detection:
            for labels_path in sorted(
                    glob.glob(osp.join(det_dir, '**', '*.txt'),
                              recursive=True)):
                item_id = osp.splitext(osp.relpath(labels_path, det_dir))[0]
                anns = []

                with open(labels_path, 'r', encoding='utf-8') as f:
                    lines = f.readlines()

                for line_idx, line in enumerate(lines):
                    line = line.split()
                    assert len(line) == 15 or len(line) == 16

                    x1, y1 = float(line[4]), float(line[5])
                    x2, y2 = float(line[6]), float(line[7])

                    attributes = {}
                    attributes['truncated'] = float(line[1]) != 0
                    attributes['occluded'] = int(line[2]) != 0

                    if len(line) == 16:
                        attributes['score'] = float(line[15])

                    label_id = self.categories()[AnnotationType.label].find(
                        line[0])[0]
                    if label_id is None:
                        label_id = self.categories()[AnnotationType.label].add(
                            line[0])

                    anns.append(
                        Bbox(
                            x=x1,
                            y=y1,
                            w=x2 - x1,
                            h=y2 - y1,
                            id=line_idx,
                            attributes=attributes,
                            label=label_id,
                        ))

                items[item_id] = DatasetItem(id=item_id,
                                             annotations=anns,
                                             image=image_path_by_id.pop(
                                                 item_id, None),
                                             subset=self._subset)

        for item_id, image_path in image_path_by_id.items():
            items[item_id] = DatasetItem(id=item_id,
                                         subset=self._subset,
                                         image=image_path)

        return items
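
Note on Example #24: the 15/16-field assertion matches the KITTI object label format (`type truncated occluded alpha bbox(4) dimensions(3) location(3) rotation_y [score]`); only the type, the truncation/occlusion flags and the 2D bbox are consumed above. Parsing a made-up label line:

# Made-up line in the KITTI object label format
line = 'Car 0.00 1 -1.58 587.01 173.33 614.12 200.12 1.65 1.67 3.64 -0.65 1.71 46.70 -1.59'
fields = line.split()
assert len(fields) in (15, 16)

x1, y1, x2, y2 = map(float, fields[4:8])
attributes = {
    'truncated': float(fields[1]) != 0,
    'occluded': int(fields[2]) != 0,
}
print(fields[0], x1, y1, x2 - x1, y2 - y1, attributes)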
Example #25
    def _load_segmentation_items(self):
        items = {}

        image_dir = osp.join(self._path, IcdarPath.IMAGES_DIR)
        if osp.isdir(image_dir):
            images = {
                osp.splitext(osp.relpath(p, image_dir))[0]: p
                for p in find_images(image_dir, recursive=True)
            }
        else:
            images = {}

        for path in iglob(osp.join(self._path, '**', '*.txt'), recursive=True):
            item_id = osp.splitext(osp.relpath(path, self._path))[0]
            item_id = item_id.replace('\\', '/')
            if item_id.endswith('_GT'):
                item_id = item_id[:-3]

            if item_id not in items:
                items[item_id] = DatasetItem(item_id,
                                             subset=self._subset,
                                             image=images.get(item_id))
            annotations = items[item_id].annotations

            colors = [(255, 255, 255)]
            chars = ['']
            centers = [0]
            groups = [0]
            group = 1
            number_in_group = 0
            with open(path, encoding='utf-8') as f:
                for line in f:
                    line = line.strip()
                    if line == '':
                        if number_in_group == 1:
                            groups[len(groups) - 1] = 0
                        else:
                            group += 1
                        number_in_group = 0
                        continue

                    objects = line.split()
                    if objects[0][0] == '#':
                        objects[0] = objects[0][1:]
                        objects[9] = '\" \"'
                        objects.pop()
                    if len(objects) != 10:
                        raise Exception(
                            "Line %s contains the wrong number "
                            "of arguments, e.g. '241 73 144 1 4 0 3 1 4 \"h\""
                            % line)

                    centers.append(objects[3] + ' ' + objects[4])
                    groups.append(group)
                    colors.append(tuple(int(o) for o in objects[:3]))
                    char = objects[9]
                    if char[0] == '\"' and char[-1] == '\"':
                        char = char[1:-1]
                    chars.append(char)
                    number_in_group += 1
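            # A trailing group with a single character is not treated as a group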
            if number_in_group == 1:
                groups[len(groups) - 1] = 0

            mask_categories = MaskCategories(
                {i: color for i, color in enumerate(colors)})
            inverse_cls_colormap = mask_categories.inverse_colormap

            gt_path = osp.join(self._path, item_id + '_GT' + IcdarPath.GT_EXT)
            if osp.isfile(gt_path):
                # load mask through cache
                mask = lazy_mask(gt_path, inverse_cls_colormap)
                mask = mask()

                classes = np.unique(mask)
                for label_id in classes:
                    if label_id == 0:
                        continue
                    i = int(label_id)
                    annotations.append(
                        Mask(group=groups[i],
                             image=self._lazy_extract_mask(mask, label_id),
                             attributes={
                                 'index': i - 1,
                                 'color': ' '.join(str(p) for p in colors[i]),
                                 'text': chars[i],
                                 'center': centers[i]
                             }))
        return items
Example #26
    def _load_localization_items(self):
        items = {}

        image_dir = osp.join(self._path, IcdarPath.IMAGES_DIR)
        if osp.isdir(image_dir):
            images = {
                osp.splitext(osp.relpath(p, image_dir))[0]: p
                for p in find_images(image_dir, recursive=True)
            }
        else:
            images = {}

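        # Annotation file names may carry a 'gt_' prefix, which is stripped from the item id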
        for path in iglob(osp.join(self._path, '**', '*.txt'), recursive=True):
            item_id = osp.splitext(osp.relpath(path, self._path))[0]
            if osp.basename(item_id).startswith('gt_'):
                item_id = osp.join(osp.dirname(item_id),
                                   osp.basename(item_id)[3:])
            item_id = item_id.replace('\\', '/')

            if item_id not in items:
                items[item_id] = DatasetItem(item_id,
                                             subset=self._subset,
                                             image=images.get(item_id))
            annotations = items[item_id].annotations

            with open(path, encoding='utf-8') as f:
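                # A line holds 8 polygon coordinates or 4 bbox corners (space- or
                # comma-separated), optionally followed by quoted transcription text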
                for line in f:
                    line = line.strip()
                    objects = line.split('\"')
                    if 1 < len(objects):
                        if len(objects) == 3:
                            text = objects[1]
                        else:
                            raise Exception("Line %s: unexpected number "
                                            "of quotes" % line)
                    else:
                        text = ''
                    objects = objects[0].split()
                    if len(objects) == 1:
                        objects = objects[0].split(',')

                    if 8 <= len(objects):
                        points = [float(p) for p in objects[:8]]

                        attributes = {}
                        if 0 < len(text):
                            attributes['text'] = text
                        elif len(objects) == 9:
                            text = objects[8]
                            attributes['text'] = text

                        annotations.append(
                            Polygon(points, attributes=attributes))
                    elif 4 <= len(objects):
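                        # 4 values are corner coordinates; convert them to x, y, w, h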
                        x = float(objects[0])
                        y = float(objects[1])
                        w = float(objects[2]) - x
                        h = float(objects[3]) - y

                        attributes = {}
                        if 0 < len(text):
                            attributes['text'] = text
                        elif len(objects) == 5:
                            text = objects[4]
                            attributes['text'] = text

                        annotations.append(
                            Bbox(x, y, w, h, attributes=attributes))
        return items
Example #27
    def _load_items(self, subset):
        labels = self._categories.setdefault(AnnotationType.label,
                                             LabelCategories())
        path = osp.join(self._path, subset)

        images = list(find_images(path, recursive=True))

        for image_path in sorted(images):
            item_id = osp.splitext(osp.relpath(image_path, path))[0]

            if Ade20k2020Path.MASK_PATTERN.fullmatch(osp.basename(item_id)):
                continue

            item_annotations = []
            item_info = self._load_item_info(image_path)
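            # Register labels from the item metadata that are not in the category list yet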
            for item in item_info:
                label_idx = labels.find(item['label_name'])[0]
                if label_idx is None:
                    labels.add(item['label_name'])

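            # Class/part masks: '<image>_seg.png' holds part level 0, deeper levels use '<image>_parts_N.png'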
            mask_path = osp.splitext(image_path)[0] + '_seg.png'
            max_part_level = max(p['part_level'] for p in item_info)
            for part_level in range(max_part_level + 1):
                if not osp.exists(mask_path):
                    log.warning("Can't find part level %s mask for %s"
                        % (part_level, image_path))
                    continue

                mask = lazy_image(mask_path, loader=self._load_class_mask)
                mask = CompiledMask(instance_mask=mask)

                classes = {(v['class_idx'], v['label_name'])
                           for v in item_info if v['part_level'] == part_level}

                for class_idx, label_name in classes:
                    label_id = labels.find(label_name)[0]
                    item_annotations.append(
                        Mask(label=label_id,
                             id=class_idx,
                             image=mask.lazy_extract(class_idx),
                             group=class_idx,
                             z_order=part_level))

                mask_path = osp.splitext(image_path)[0] \
                    + '_parts_%s.png' % (part_level + 1)

            for item in item_info:
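                # Each object in the item metadata references its own instance mask file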
                instance_path = osp.join(osp.dirname(image_path),
                                         item['instance_mask'])
                if not osp.isfile(instance_path):
                    log.warning("Can't find instance mask: %s" % instance_path)
                    continue

                mask = lazy_image(instance_path,
                                  loader=self._load_instance_mask)
                mask = CompiledMask(instance_mask=mask)

                label_id = labels.find(item['label_name'])[0]
                instance_id = item['id']
                attributes = {k: True for k in item['attributes']}
                polygon_points = item['polygon_points']

                item_annotations.append(
                    Mask(label=label_id,
                         image=mask.lazy_extract(1),
                         id=instance_id,
                         attributes=attributes,
                         z_order=item['part_level'],
                         group=instance_id))

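                # Add a polygon only when the point list has an even length and at least 3 points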
                if (len(polygon_points) % 2 == 0
                        and len(polygon_points) // 2 >= 3):
                    item_annotations.append(
                        Polygon(polygon_points,
                                label=label_id,
                                attributes=attributes,
                                id=instance_id,
                                z_order=item['part_level'],
                                group=instance_id))

            self._items.append(
                DatasetItem(item_id,
                            subset=subset,
                            image=image_path,
                            annotations=item_annotations))