Code example #1
    def _load_annotations(self, item_id):
        item_annotations = []

        class_mask = None
        segm_path = osp.join(self._dataset_dir, VocPath.SEGMENTATION_DIR,
                             item_id + VocPath.SEGM_EXT)
        if osp.isfile(segm_path):
            inverse_cls_colormap = \
                self._categories[AnnotationType.mask].inverse_colormap
            class_mask = lazy_mask(segm_path, inverse_cls_colormap)

        instances_mask = None
        inst_path = osp.join(self._dataset_dir, VocPath.INSTANCES_DIR,
                             item_id + VocPath.SEGM_EXT)
        if osp.isfile(inst_path):
            instances_mask = lazy_mask(inst_path, _inverse_inst_colormap)

        if instances_mask is not None:
            compiled_mask = CompiledMask(class_mask, instances_mask)

            if class_mask is not None:
                label_cat = self._categories[AnnotationType.label]
                instance_labels = compiled_mask.get_instance_labels(
                    class_count=len(label_cat.items))
            else:
                instance_labels = {
                    i: None
                    for i in range(compiled_mask.instance_count)
                }

            for instance_id, label_id in instance_labels.items():
                image = compiled_mask.lazy_extract(instance_id)

                attributes = {}
                if label_id is not None:
                    actions = {
                        a: False
                        for a in label_cat.items[label_id].attributes
                    }
                    attributes.update(actions)

                item_annotations.append(
                    Mask(image=image,
                         label=label_id,
                         attributes=attributes,
                         group=instance_id))
        elif class_mask is not None:
            log.warn("item '%s': has only class segmentation, "
                     "instance masks will not be available" % item_id)
            class_mask = class_mask()
            classes = np.unique(class_mask)
            for label_id in classes:
                image = self._lazy_extract_mask(class_mask, label_id)
                item_annotations.append(Mask(image=image, label=label_id))

        return item_annotations
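
Every example on this page calls a `_lazy_extract_mask` helper that is never shown. In the datumaro sources it is a one-line static method; a minimal sketch of what these snippets assume:

    @staticmethod
    def _lazy_extract_mask(mask, c):
        # Return a callable, so the binary mask for class/instance `c`
        # is only materialized when the Mask annotation is actually read.
        return lambda: mask == c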
Code example #2
File: extractor.py Project: openvinotoolkit/datumaro
    def _load_annotations(self, item_id):
        item_annotations = []

        class_mask = None
        segm_path = osp.join(self._dataset_dir, VocPath.SEGMENTATION_DIR,
                             item_id + VocPath.SEGM_EXT)
        if osp.isfile(segm_path):
            inverse_cls_colormap = \
                self._categories[AnnotationType.mask].inverse_colormap
            class_mask = lazy_mask(segm_path, inverse_cls_colormap)

        instances_mask = None
        inst_path = osp.join(self._dataset_dir, VocPath.INSTANCES_DIR,
                             item_id + VocPath.SEGM_EXT)
        if osp.isfile(inst_path):
            instances_mask = lazy_mask(inst_path, _inverse_inst_colormap)

        if instances_mask is not None:
            compiled_mask = CompiledMask(class_mask, instances_mask)

            label_cat = self._categories[AnnotationType.label]

            if class_mask is not None:
                instance_labels = compiled_mask.get_instance_labels()
            else:
                instance_labels = {
                    i: None
                    for i in range(compiled_mask.instance_count)
                }

            for instance_id, label_id in instance_labels.items():
                if label_id is not None and len(label_cat) <= label_id:
                    raise Exception(
                        "Item %s: a mask has unexpected class number %s" %
                        (item_id, label_id))

                image = compiled_mask.lazy_extract(instance_id)

                item_annotations.append(
                    Mask(image=image, label=label_id, group=instance_id))
        elif class_mask is not None:
            log.warning("Item %s: only class segmentations available" %
                        item_id)

            class_mask = class_mask()
            classes = np.unique(class_mask)
            for label_id in classes:
                image = self._lazy_extract_mask(class_mask, label_id)
                item_annotations.append(Mask(image=image, label=label_id))

        return item_annotations
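
Both VOC variants above depend on the same `CompiledMask` contract: it overlays a class mask on an instance mask and maps each instance id to a label id. A minimal usage sketch, assuming the interface exercised above:

    compiled_mask = CompiledMask(class_mask, instances_mask)  # lazy inputs
    # {instance_id: label_id}; label ids come from the class mask
    instance_labels = compiled_mask.get_instance_labels()
    for instance_id, label_id in instance_labels.items():
        # a callable producing the binary mask of one instance
        image = compiled_mask.lazy_extract(instance_id)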
Code example #3
    def _load_items(self, path):
        items = {}

        labels = self._categories[AnnotationType.label]._indices
        labels = {labels[label_name]: label_name for label_name in labels}

        with open(path, encoding='utf-8') as f:
            for line in f:
                image, gt = _parse_annotation_line(line)
                item_id = osp.splitext(osp.join(*image.split('/')[2:]))[0]
                image_path = osp.join(self._dataset_dir, image.lstrip('/'))

                item_annotations = []
                if gt is not None:
                    gt_path = osp.join(self._dataset_dir, gt)
                    mask = lazy_mask(
                        gt_path,
                        self._categories[AnnotationType.mask].inverse_colormap)
                    mask = mask()  # loading mask through cache

                    classes = np.unique(mask)
                    for label_id in classes:
                        if labels[label_id] in self._labels:
                            image = self._lazy_extract_mask(mask, label_id)
                            item_annotations.append(
                                Mask(image=image, label=label_id))

                items[item_id] = DatasetItem(id=item_id,
                                             subset=self._subset,
                                             image=image_path,
                                             annotations=item_annotations)

        return items
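
`_parse_annotation_line` is not shown here, but example #5 below inlines what appears to be the same logic. A hypothetical reconstruction based on that example (the helper name and exact behavior are assumptions):

    def _parse_annotation_line(line):
        line = line.strip()
        objects = line.split('"')
        if 1 < len(objects):
            if len(objects) != 5:
                raise Exception("Line %s: unexpected number "
                    "of quotes in filename" % line)
            # quoted form: "<image path>" "<gt path>"
            return objects[1], objects[3]
        objects = line.split()
        if 1 < len(objects):
            return objects[0], objects[1]
        return objects[0], None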
Code example #4
File: extractor.py Project: yohayonyon/cvat
    def _get_annotations(self, item, subset_name):
        annotations = []

        segm_ann = self._annotations[subset_name]
        cls_image_path = segm_ann.get(item)
        if cls_image_path and osp.isfile(cls_image_path):
            inverse_cls_colormap = \
                self._categories[AnnotationType.mask].inverse_colormap
            annotations.append(
                Mask(image=lazy_mask(cls_image_path, inverse_cls_colormap),
                     attributes={'class': True}))

        inst_ann = self._annotations[subset_name]
        inst_image_path = inst_ann.get(item)
        if inst_image_path and osp.isfile(inst_image_path):
            annotations.append(
                Mask(image=lazy_mask(inst_image_path, _inverse_inst_colormap),
                     attributes={'instances': True}))

        return annotations
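
Since neither mask here carries a label, the boolean attributes are the only way a downstream consumer can tell them apart. A hypothetical consumer loop:

    for ann in annotations:
        if ann.attributes.get('class'):
            pass  # semantic class segmentation mask
        elif ann.attributes.get('instances'):
            pass  # instance id mask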
Code example #5
File: camvid_format.py Project: Eric2370/datumaro-1
    def _load_items(self, path):
        items = {}

        labels = self._categories[AnnotationType.label]._indices
        labels = { labels[label_name]: label_name
            for label_name in labels }

        with open(path, encoding='utf-8') as f:
            for line in f:
                line = line.strip()
                objects = line.split('\"')
                if 1 < len(objects):
                    if len(objects) == 5:
                        objects[0] = objects[1]
                        objects[1] = objects[3]
                    else:
                        raise Exception("Line %s: unexpected number "
                            "of quotes in filename" % line)
                else:
                    objects = line.split()
                image = objects[0]
                item_id = osp.splitext(osp.join(*image.split('/')[2:]))[0]
                image_path = osp.join(self._dataset_dir, image.lstrip('/'))

                item_annotations = []
                if 1 < len(objects):
                    gt = objects[1]
                    gt_path = osp.join(self._dataset_dir, gt.lstrip('/'))
                    mask = lazy_mask(gt_path,
                        self._categories[AnnotationType.mask].inverse_colormap)
                    mask = mask() # loading mask through cache

                    classes = np.unique(mask)
                    for label_id in classes:
                        if labels[label_id] in self._labels:
                            image = self._lazy_extract_mask(mask, label_id)
                            item_annotations.append(
                                Mask(image=image, label=label_id))

                items[item_id] = DatasetItem(id=item_id, subset=self._subset,
                    image=image_path, annotations=item_annotations)

        return items
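
The `mask = mask()  # loading mask through cache` pattern above works because `lazy_mask` decodes nothing up front: it returns a cached callable that loads the image and remaps its colors on first call. A rough sketch of that behavior, assuming datumaro's `lazy_image` and `load_mask` utilities:

    def lazy_mask(path, inverse_colormap=None):
        # Defer I/O: the file is read and color-mapped only when the
        # returned callable is invoked, and the result is cached.
        return lazy_image(path, lambda path: load_mask(path, inverse_colormap))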
Code example #6
    def _load_items(self, path):
        items = {}
        with open(path, encoding='utf-8') as f:
            for line in f:
                objects = line.split()
                image = objects[0]
                item_id = ('/'.join(
                    image.split('/')[2:]))[:-len(CamvidPath.IMAGE_EXT)]
                image_path = osp.join(self._dataset_dir,
                                      image[1:] if image[0] == '/' else image)
                item_annotations = []
                if 1 < len(objects):
                    gt = objects[1]
                    gt_path = osp.join(self._dataset_dir,
                                       gt[1:] if gt[0] == '/' else gt)
                    inverse_cls_colormap = \
                        self._categories[AnnotationType.mask].inverse_colormap
                    mask = lazy_mask(gt_path, inverse_cls_colormap)
                    # loading mask through cache
                    mask = mask()
                    classes = np.unique(mask)
                    labels = self._categories[AnnotationType.label]._indices
                    labels = {
                        labels[label_name]: label_name
                        for label_name in labels
                    }
                    for label_id in classes:
                        if labels[label_id] in self._labels:
                            image = self._lazy_extract_mask(mask, label_id)
                            item_annotations.append(
                                Mask(image=image, label=label_id))
                items[item_id] = DatasetItem(id=item_id,
                                             subset=self._subset,
                                             image=image_path,
                                             annotations=item_annotations)
        return items
Code example #7
    def _load_segmentation_items(self):
        items = {}

        image_dir = osp.join(self._path, IcdarPath.IMAGES_DIR)
        if osp.isdir(image_dir):
            images = {
                osp.splitext(osp.relpath(p, image_dir))[0]: p
                for p in find_images(image_dir, recursive=True)
            }
        else:
            images = {}

        for path in iglob(osp.join(self._path, '**', '*.txt'), recursive=True):
            item_id = osp.splitext(osp.relpath(path, self._path))[0]
            item_id = item_id.replace('\\', '/')
            if item_id.endswith('_GT'):
                item_id = item_id[:-3]

            if item_id not in items:
                items[item_id] = DatasetItem(item_id,
                                             subset=self._subset,
                                             image=images.get(item_id))
            annotations = items[item_id].annotations

            colors = [(255, 255, 255)]
            chars = ['']
            centers = [0]
            groups = [0]
            group = 1
            number_in_group = 0
            with open(path, encoding='utf-8') as f:
                for line in f:
                    line = line.strip()
                    if line == '':
                        if number_in_group == 1:
                            groups[len(groups) - 1] = 0
                        else:
                            group += 1
                        number_in_group = 0
                        continue

                    objects = line.split()
                    if objects[0][0] == '#':
                        objects[0] = objects[0][1:]
                        objects[9] = '\" \"'
                        objects.pop()
                    if len(objects) != 10:
                        raise Exception(
                            "Line %s contains the wrong number "
                            "of arguments, e.g. '241 73 144 1 4 0 3 1 4 \"h\""
                            % line)

                    centers.append(objects[3] + ' ' + objects[4])
                    groups.append(group)
                    colors.append(tuple(int(o) for o in objects[:3]))
                    char = objects[9]
                    if char[0] == '\"' and char[-1] == '\"':
                        char = char[1:-1]
                    chars.append(char)
                    number_in_group += 1
            if number_in_group == 1:
                groups[len(groups) - 1] = 0

            mask_categories = MaskCategories(
                {i: colors[i]
                 for i in range(len(colors))})
            inverse_cls_colormap = mask_categories.inverse_colormap

            gt_path = osp.join(self._path, item_id + '_GT' + IcdarPath.GT_EXT)
            if osp.isfile(gt_path):
                # load mask through cache
                mask = lazy_mask(gt_path, inverse_cls_colormap)
                mask = mask()

                classes = np.unique(mask)
                for label_id in classes:
                    if label_id == 0:
                        continue
                    i = int(label_id)
                    annotations.append(
                        Mask(group=groups[i],
                             image=self._lazy_extract_mask(mask, label_id),
                             attributes={
                                 'index': i - 1,
                                 'color': ' '.join(str(p) for p in colors[i]),
                                 'text': chars[i],
                                 'center': centers[i]
                             }))
        return items
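
For reference, the ICDAR parser above expects ten whitespace-separated fields per line, as in the sample embedded in its error message. Tracing that sample through the same field extraction:

    line = '241 73 144 1 4 0 3 1 4 "h"'
    objects = line.split()
    color = tuple(int(o) for o in objects[:3])  # (241, 73, 144), the mask color
    center = objects[3] + ' ' + objects[4]      # '1 4'
    char = objects[9].strip('"')                # 'h'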
Code example #8
    def _load_items(self, root_dir):
        image_dir = osp.join(root_dir, SynthiaPath.IMAGES_DIR)
        if osp.isdir(image_dir):
            images = {
                osp.splitext(osp.relpath(p, image_dir))[0].replace('\\', '/'):
                p
                for p in find_images(image_dir, recursive=True)
            }
        else:
            images = {}

        items = {}

        inst_dir = osp.join(root_dir, SynthiaPath.LABELS_SEGM_DIR)
        if osp.isdir(inst_dir):
            gt_images = find_images(inst_dir, recursive=True)
            for gt_img in gt_images:
                item_id = osp.splitext(osp.relpath(gt_img,
                                                   inst_dir))[0].replace(
                                                       '\\', '/')

                anno = []
                labels_mask = load_image(gt_img, dtype=np.uint16)
                dynamic_objects = np.unique(labels_mask[:, :, 1])
                labels_mask = labels_mask[:, :, 2]
                segm_ids = np.unique(labels_mask)
                for segm_id in segm_ids:
                    attr = {'dynamic_object': False}
                    if segm_id != 0 and segm_id in dynamic_objects:
                        attr['dynamic_object'] = True
                    anno.append(
                        Mask(image=self._lazy_extract_mask(
                            labels_mask, segm_id),
                             label=segm_id,
                             attributes=attr))

                items[item_id] = DatasetItem(id=item_id,
                                             image=images.get(item_id),
                                             annotations=anno)

        elif osp.isdir(osp.join(root_dir, SynthiaPath.SEMANTIC_SEGM_DIR)):
            gt_dir = osp.join(root_dir, SynthiaPath.SEMANTIC_SEGM_DIR)
            gt_images = find_images(gt_dir, recursive=True)
            for gt_img in gt_images:
                item_id = osp.splitext(osp.relpath(gt_img, gt_dir))[0].replace(
                    '\\', '/')

                anno = []
                inverse_cls_colormap = \
                    self._categories[AnnotationType.mask].inverse_colormap
                color_mask = lazy_mask(gt_img, inverse_cls_colormap)
                color_mask = color_mask()
                classes = np.unique(color_mask)
                for label_id in classes:
                    anno.append(
                        Mask(image=self._lazy_extract_mask(
                            color_mask, label_id),
                             label=label_id))

                items[item_id] = DatasetItem(id=item_id,
                                             image=images.get(item_id),
                                             annotations=anno)

        return items
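
The instance branch above depends on how SYNTHIA packs its GT images: the loop reads segmentation ids from channel 2 and treats ids that also appear in channel 1 as dynamic objects. A minimal numpy sketch of that decoding (synthetic data, not a real GT file):

    import numpy as np

    labels_mask = np.zeros((2, 2, 3), dtype=np.uint16)
    labels_mask[..., 2] = [[0, 5], [5, 7]]  # channel 2: segmentation label ids
    labels_mask[..., 1] = [[0, 5], [5, 0]]  # channel 1: dynamic-object ids

    dynamic_objects = np.unique(labels_mask[:, :, 1])  # [0, 5]
    segm_ids = np.unique(labels_mask[:, :, 2])         # [0, 5, 7]
    for segm_id in segm_ids:
        # label 5 is dynamic here; labels 0 and 7 are not
        is_dynamic = segm_id != 0 and segm_id in dynamic_objects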
Code example #9
    def _load_segmentation_items(self):
        items = {}

        for path in glob(osp.join(self._path, '*.txt')):
            item_id = osp.splitext(osp.basename(path))[0]
            if item_id.endswith('_GT'):
                item_id = item_id[:-3]
            image_path = osp.join(self._path, IcdarPath.IMAGES_DIR,
                                  item_id + IcdarPath.IMAGE_EXT)
            if item_id not in items:
                items[item_id] = DatasetItem(item_id,
                                             subset=self._subset,
                                             image=image_path)
            annotations = items[item_id].annotations

            colors = [(255, 255, 255)]
            chars = ['']
            centers = [0]
            groups = [0]
            group = 1
            number_in_group = 0
            with open(path, encoding='utf-8') as f:
                for line in f:
                    line = line.strip()
                    if line == '':
                        if number_in_group == 1:
                            groups[len(groups) - 1] = 0
                        else:
                            group += 1
                        number_in_group = 0
                        continue

                    objects = line.split()
                    if objects[0][0] == '#':
                        objects[0] = objects[0][1:]
                        objects[9] = '\" \"'
                        objects.pop()
                    if len(objects) != 10:
                        continue

                    centers.append(objects[3] + ' ' + objects[4])
                    groups.append(group)
                    colors.append(tuple(int(o) for o in objects[:3]))
                    char = objects[9]
                    if char[0] == '\"' and char[-1] == '\"':
                        char = char[1:-1]
                    chars.append(char)
                    number_in_group += 1
            if number_in_group == 1:
                groups[len(groups) - 1] = 0

            mask_categories = MaskCategories(
                {i: colors[i]
                 for i in range(len(colors))})
            inverse_cls_colormap = mask_categories.inverse_colormap

            gt_path = osp.join(self._path, item_id + '_GT' + IcdarPath.GT_EXT)
            if osp.isfile(gt_path):
                # load mask through cache
                mask = lazy_mask(gt_path, inverse_cls_colormap)
                mask = mask()

                classes = np.unique(mask)
                for label_id in classes:
                    if label_id == 0:
                        continue
                    i = int(label_id)
                    annotations.append(
                        Mask(group=groups[i],
                             image=self._lazy_extract_mask(mask, label_id),
                             attributes={
                                 'index': i - 1,
                                 'color': ' '.join(str(p) for p in colors[i]),
                                 'text': chars[i],
                                 'center': centers[i]
                             }))
        return items
Code example #10
File: voc.py Project: benhoff/cvat
    def _get_annotations(self, item):
        item_annotations = []

        if self._task is VocTask.segmentation:
            segm_path = osp.join(self._path, VocPath.SEGMENTATION_DIR,
                                 item + VocPath.SEGM_EXT)
            if osp.isfile(segm_path):
                inverse_cls_colormap = \
                    self._categories[AnnotationType.mask].inverse_colormap
                item_annotations.append(
                    MaskObject(image=lazy_mask(segm_path,
                                               inverse_cls_colormap),
                               attributes={'class': True}))

            inst_path = osp.join(self._path, VocPath.INSTANCES_DIR,
                                 item + VocPath.SEGM_EXT)
            if osp.isfile(inst_path):
                item_annotations.append(
                    MaskObject(image=lazy_mask(inst_path,
                                               _inverse_inst_colormap),
                               attributes={'instances': True}))

        cls_annotations = self._annotations.get(VocTask.classification)
        if cls_annotations is not None and \
           self._task is VocTask.classification:
            item_labels = cls_annotations.get(item)
            if item_labels is not None:
                for label in item_labels:
                    label_id = self._get_label_id(VocLabel(label).name)
                    item_annotations.append(LabelObject(label_id))

        det_annotations = self._annotations.get(VocTask.detection)
        if det_annotations is not None:
            det_annotations = det_annotations.get(item)
        if det_annotations is not None:
            root_elem = ET.fromstring(det_annotations)

            for obj_id, object_elem in enumerate(root_elem.findall('object')):
                attributes = {}
                group = None

                obj_label_id = None
                label_elem = object_elem.find('name')
                if label_elem is not None:
                    obj_label_id = self._get_label_id(label_elem.text)

                obj_bbox = self._parse_bbox(object_elem)

                if obj_label_id is None or obj_bbox is None:
                    continue

                difficult_elem = object_elem.find('difficult')
                if difficult_elem is not None:
                    attributes['difficult'] = (difficult_elem.text == '1')

                truncated_elem = object_elem.find('truncated')
                if truncated_elem is not None:
                    attributes['truncated'] = (truncated_elem.text == '1')

                occluded_elem = object_elem.find('occluded')
                if occluded_elem is not None:
                    attributes['occluded'] = (occluded_elem.text == '1')

                pose_elem = object_elem.find('pose')
                if pose_elem is not None:
                    attributes['pose'] = pose_elem.text

                point_elem = object_elem.find('point')
                if point_elem is not None:
                    point_x = point_elem.find('x')
                    point_y = point_elem.find('y')
                    point = [float(point_x.text), float(point_y.text)]
                    attributes['point'] = point

                actions_elem = object_elem.find('actions')
                if actions_elem is not None and \
                   self._task is VocTask.action_classification:
                    for action in VocAction:
                        action_elem = actions_elem.find(action.name)
                        if action_elem is None or action_elem.text != '1':
                            continue

                        act_label_id = self._get_label_id(action.name)
                        assert group in [None, obj_id]
                        group = obj_id
                        item_annotations.append(
                            LabelObject(act_label_id, group=obj_id))

                if self._task is VocTask.person_layout:
                    for part_elem in object_elem.findall('part'):
                        part = part_elem.find('name').text
                        part_label_id = self._get_label_id(part)
                        bbox = self._parse_bbox(part_elem)
                        if bbox is None:
                            continue
                        group = obj_id
                        item_annotations.append(
                            BboxObject(*bbox,
                                       label=part_label_id,
                                       group=obj_id))

                if self._task in [
                        VocTask.action_classification, VocTask.person_layout
                ]:
                    if group is None:
                        continue

                item_annotations.append(
                    BboxObject(*obj_bbox,
                               label=obj_label_id,
                               attributes=attributes,
                               id=obj_id,
                               group=group))

        return item_annotations
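
Both VOC examples lean on a `_parse_bbox` helper that is not shown. In the datumaro/cvat sources it reads the `bndbox` element and converts corner coordinates into an [x, y, w, h] box; a sketch of what the calls above assume (the real helper may add error handling):

    @staticmethod
    def _parse_bbox(object_elem):
        bbox_elem = object_elem.find('bndbox')
        if bbox_elem is None:
            return None
        xmin = float(bbox_elem.find('xmin').text)
        xmax = float(bbox_elem.find('xmax').text)
        ymin = float(bbox_elem.find('ymin').text)
        ymax = float(bbox_elem.find('ymax').text)
        return [xmin, ymin, xmax - xmin, ymax - ymin]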
Code example #11
File: extractor.py Project: yohayonyon/cvat
    def _get_annotations(self, item_id):
        item_annotations = []

        if self._task is VocTask.segmentation:
            class_mask = None
            segm_path = osp.join(self._path, VocPath.SEGMENTATION_DIR,
                                 item_id + VocPath.SEGM_EXT)
            if osp.isfile(segm_path):
                inverse_cls_colormap = \
                    self._categories[AnnotationType.mask].inverse_colormap
                class_mask = lazy_mask(segm_path, inverse_cls_colormap)

            instances_mask = None
            inst_path = osp.join(self._path, VocPath.INSTANCES_DIR,
                                 item_id + VocPath.SEGM_EXT)
            if osp.isfile(inst_path):
                instances_mask = lazy_mask(inst_path, _inverse_inst_colormap)

            if instances_mask is not None:
                compiled_mask = CompiledMask(class_mask, instances_mask)

                if class_mask is not None:
                    label_cat = self._categories[AnnotationType.label]
                    instance_labels = compiled_mask.get_instance_labels(
                        class_count=len(label_cat.items))
                else:
                    instance_labels = {
                        i: None
                        for i in range(compiled_mask.instance_count)
                    }

                for instance_id, label_id in instance_labels.items():
                    image = compiled_mask.lazy_extract(instance_id)

                    attributes = dict()
                    if label_id is not None:
                        actions = {
                            a: False
                            for a in label_cat.items[label_id].attributes
                        }
                        attributes.update(actions)

                    item_annotations.append(
                        Mask(image=image,
                             label=label_id,
                             attributes=attributes,
                             group=instance_id))
            elif class_mask is not None:
                log.warn("item '%s': has only class segmentation, "
                         "instance masks will not be available" % item_id)
                classes = class_mask.image.unique()
                for label_id in classes:
                    image = self._lazy_extract_mask(class_mask, label_id)
                    item_annotations.append(Mask(image=image, label=label_id))

        cls_annotations = self._annotations.get(VocTask.classification)
        if cls_annotations is not None and \
                self._task is VocTask.classification:
            item_labels = cls_annotations.get(item_id)
            if item_labels is not None:
                for label_id in item_labels:
                    item_annotations.append(Label(label_id))

        det_annotations = self._annotations.get(VocTask.detection)
        if det_annotations is not None:
            det_annotations = det_annotations.get(item_id)
        if det_annotations is not None:
            root_elem = ET.fromstring(det_annotations)

            for obj_id, object_elem in enumerate(root_elem.findall('object')):
                obj_id += 1
                attributes = {}
                group = obj_id

                obj_label_id = None
                label_elem = object_elem.find('name')
                if label_elem is not None:
                    obj_label_id = self._get_label_id(label_elem.text)

                obj_bbox = self._parse_bbox(object_elem)

                if obj_label_id is None or obj_bbox is None:
                    continue

                difficult_elem = object_elem.find('difficult')
                attributes['difficult'] = difficult_elem is not None and \
                    difficult_elem.text == '1'

                truncated_elem = object_elem.find('truncated')
                attributes['truncated'] = truncated_elem is not None and \
                    truncated_elem.text == '1'

                occluded_elem = object_elem.find('occluded')
                attributes['occluded'] = occluded_elem is not None and \
                    occluded_elem.text == '1'

                pose_elem = object_elem.find('pose')
                if pose_elem is not None:
                    attributes['pose'] = pose_elem.text

                point_elem = object_elem.find('point')
                if point_elem is not None:
                    point_x = point_elem.find('x')
                    point_y = point_elem.find('y')
                    point = [float(point_x.text), float(point_y.text)]
                    attributes['point'] = point

                actions_elem = object_elem.find('actions')
                label_cat = self._categories[AnnotationType.label]
                actions = {a: False
                           for a in label_cat.items[obj_label_id].attributes}
                if actions_elem is not None:
                    for action_elem in actions_elem:
                        actions[action_elem.tag] = (action_elem.text == '1')
                for action, present in actions.items():
                    attributes[action] = present

                has_parts = False
                for part_elem in object_elem.findall('part'):
                    part = part_elem.find('name').text
                    part_label_id = self._get_label_id(part)
                    part_bbox = self._parse_bbox(part_elem)

                    if self._task is not VocTask.person_layout:
                        break
                    if part_bbox is None:
                        continue
                    has_parts = True
                    item_annotations.append(
                        Bbox(*part_bbox, label=part_label_id, group=group))

                if self._task is VocTask.person_layout and not has_parts:
                    continue
                if self._task is VocTask.action_classification and not actions:
                    continue

                item_annotations.append(
                    Bbox(*obj_bbox,
                         label=obj_label_id,
                         attributes=attributes,
                         id=obj_id,
                         group=group))

        return item_annotations
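
To make the detection branch concrete, here is a minimal VOC-style XML fragment (values purely illustrative) of the kind `ET.fromstring` receives above, containing the elements the loop inspects:

    det_annotations = '''
    <annotation>
      <object>
        <name>person</name>
        <bndbox><xmin>1</xmin><ymin>2</ymin><xmax>10</xmax><ymax>20</ymax></bndbox>
        <difficult>1</difficult>
        <pose>Unspecified</pose>
        <actions><jumping>1</jumping></actions>
      </object>
    </annotation>'''
    root_elem = ET.fromstring(det_annotations)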
Code example #12
File: datumaro.py Project: benhoff/cvat
    def _load_annotations(self, item):
        parsed = item['annotations']
        loaded = []

        for ann in parsed:
            ann_id = ann.get('id')
            ann_type = AnnotationType[ann['type']]
            attributes = ann.get('attributes')
            group = ann.get('group')

            if ann_type == AnnotationType.label:
                label_id = ann.get('label_id')
                loaded.append(LabelObject(label=label_id,
                    id=ann_id, attributes=attributes, group=group))

            elif ann_type == AnnotationType.mask:
                label_id = ann.get('label_id')
                mask_id = str(ann.get('mask_id'))

                mask_path = osp.join(self._path, DatumaroPath.ANNOTATIONS_DIR,
                    DatumaroPath.MASKS_DIR, mask_id + DatumaroPath.MASK_EXT)
                mask = None

                if osp.isfile(mask_path):
                    mask_cat = self._categories.get(AnnotationType.mask)
                    if mask_cat is not None:
                        mask = lazy_mask(mask_path, mask_cat.inverse_colormap)
                    else:
                        mask = lazy_image(mask_path)

                loaded.append(MaskObject(label=label_id, image=mask,
                    id=ann_id, attributes=attributes, group=group))

            elif ann_type == AnnotationType.polyline:
                label_id = ann.get('label_id')
                points = ann.get('points')
                loaded.append(PolyLineObject(points, label=label_id,
                    id=ann_id, attributes=attributes, group=group))

            elif ann_type == AnnotationType.polygon:
                label_id = ann.get('label_id')
                points = ann.get('points')
                loaded.append(PolygonObject(points, label=label_id,
                    id=ann_id, attributes=attributes, group=group))

            elif ann_type == AnnotationType.bbox:
                label_id = ann.get('label_id')
                x, y, w, h = ann.get('bbox')
                loaded.append(BboxObject(x, y, w, h, label=label_id,
                    id=ann_id, attributes=attributes, group=group))

            elif ann_type == AnnotationType.points:
                label_id = ann.get('label_id')
                points = ann.get('points')
                loaded.append(PointsObject(points, label=label_id,
                    id=ann_id, attributes=attributes, group=group))

            elif ann_type == AnnotationType.caption:
                caption = ann.get('caption')
                loaded.append(CaptionObject(caption,
                    id=ann_id, attributes=attributes, group=group))

            else:
                raise NotImplementedError()

        return loaded
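
A minimal item dict this loader accepts, with field names taken from the code above and values purely illustrative (`extractor` is a hypothetical instance of the surrounding class):

    item = {'annotations': [
        {'id': 1, 'type': 'bbox', 'label_id': 0,
         'bbox': [10, 20, 30, 40], 'attributes': {}, 'group': 1},
        {'id': 2, 'type': 'caption', 'caption': 'a person walking'},
    ]}
    loaded = extractor._load_annotations(item)  # [BboxObject, CaptionObject]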