Example #1
    def convert(self, devkit_dir, has_background=True):
        """
        Args:
            devkit_dir: path to VOC2007 devkit dir (e.g. .../VOCdevkit/VOC2007)
            has_background: whether to add a background label to the label map
        """
        if isinstance(has_background, str):
            has_background = string_to_bool(has_background)

        class_to_ind = prepare_detection_labels(has_background)
        devkit_dir = get_path(devkit_dir, is_directory=True)

        annotation_directory = get_path(devkit_dir / 'Annotations',
                                        is_directory=True)
        images_directory = get_path(devkit_dir / 'JPEGImages',
                                    is_directory=True)

        detections = []
        image_set_file = devkit_dir / 'ImageSets' / 'Main' / 'test.txt'
        for image in tqdm(read_txt(image_set_file, sep=None)):
            file_path = annotation_directory / '{}.xml'.format(image)
            tree = ET.parse(str(file_path))

            identifier = tree.find('.//filename').text
            image_path = images_directory / identifier

            if not image_path.is_file():
                raise FileNotFoundError("{}: {}".format(
                    os.strerror(errno.ENOENT), image_path))

            labels, x_mins, y_mins, x_maxs, y_maxs = [], [], [], [], []
            difficult_indices = []
            for entry in tree.getroot():
                if not entry.tag.startswith('object'):
                    continue

                bbox = entry.find('bndbox')
                difficult = int(entry.find('difficult').text)

                # remember which boxes are flagged 'difficult' so evaluation
                # can ignore them later
                if difficult == 1:
                    difficult_indices.append(len(labels))

                labels.append(class_to_ind[entry.find('name').text])
                # VOC coordinates are 1-based; shift them to 0-based pixels
                x_mins.append(float(bbox.find('xmin').text) - 1)
                y_mins.append(float(bbox.find('ymin').text) - 1)
                x_maxs.append(float(bbox.find('xmax').text) - 1)
                y_maxs.append(float(bbox.find('ymax').text) - 1)

            image_annotation = DetectionAnnotation(identifier, labels, x_mins,
                                                   y_mins, x_maxs, y_maxs)
            image_annotation.metadata['difficult_boxes'] = difficult_indices

            detections.append(image_annotation)

        meta = {'label_map': reverse_label_map(class_to_ind)}
        if has_background:
            meta['background_label'] = 0

        return detections, meta
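
A minimal usage sketch for this converter. The class that owns this convert() method is not shown above, so the name VOCDetectionConverter below is an assumption:

# Hypothetical usage; VOCDetectionConverter is an assumed class name.
converter = VOCDetectionConverter()
detections, meta = converter.convert('VOCdevkit/VOC2007', has_background=True)

print(len(detections))    # one DetectionAnnotation per image in test.txt
print(meta['label_map'])  # id -> class name; id 0 is background when enabled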
Example #2
    def convert(self,
                check_content=False,
                progress_callback=None,
                progress_interval=100,
                **kwargs):
        """
        This method is executed automatically when convert.py is started.
        All arguments are taken automatically from the command-line arguments
        or from the config file in the configure method.

        Returns:
            annotations: list of annotation representation objects.
            meta: dictionary with additional dataset-level metadata (if provided)
        """

        dataset_directory = get_path(self.data_dir, is_directory=True)

        # read and convert annotation
        image_list_file = dataset_directory / 'labels/val_list_small.txt'
        images_dir = dataset_directory / 'images'

        annotations = []
        with image_list_file.open() as f:
            for line in f:
                # each line: <image name> followed by space-separated integer labels
                items = line.split()
                image_name = items[0]
                label = [int(i) for i in items[1:]]
                annotations.append(
                    MultiLabelRecognitionAnnotation(image_name, label))
        return ConverterReturn(annotations, self.generate_meta(CLASS_NAMES),
                               None)
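
The parsing loop above implies a simple line-oriented format for labels/val_list_small.txt: an image name followed by space-separated integer labels, one image per line. A hypothetical excerpt:

img_0001.jpg 0 1 0 0 1
img_0002.jpg 1 0 0 1 0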
Example #3
    def convert(self, devkit_dir):
        """
        Args:
            devkit_dir: path to VOC2012 devkit dir (e.g. VOCdevkit/VOC2012)
        """
        devkit_dir = get_path(devkit_dir, is_directory=True)

        image_set_file = devkit_dir / 'ImageSets' / 'Segmentation' / 'test.txt'
        mask_dir = Path('SegmentationClass')
        image_dir = Path('JPEGImages')

        annotations = []
        for image in read_txt(image_set_file):
            # image and mask paths are stored relative to the dataset root
            annotation = SegmentationAnnotation(
                str(image_dir / '{}.jpg'.format(image)),
                str(mask_dir / '{}.png'.format(image)),
                mask_loader=GTMaskLoader.SCIPY)

            annotations.append(annotation)

        meta = {
            'label_map': dict(enumerate(_VOC_CLASSES_SEGMENTATION)),
            'background_label': 0,
            'segmentation_colors': _SEGMENTATION_COLORS
        }

        return annotations, meta
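
Another usage sketch; the owning class name is again an assumption. Note that dict(enumerate(_VOC_CLASSES_SEGMENTATION)) builds the index-to-name label map, with index 0 reserved for the background class:

# Hypothetical usage; VOCSegmentationConverter is an assumed class name.
converter = VOCSegmentationConverter()
annotations, meta = converter.convert('VOCdevkit/VOC2012')

first = annotations[0]
print(first.identifier)                             # 'JPEGImages/<name>.jpg'
print(meta['label_map'][meta['background_label']])  # the background class name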
Example #4
def mock_filesystem(hierarchy: List[str]):
    with TemporaryDirectory() as prefix:
        for entry in hierarchy:
            path = Path(prefix) / entry
            if entry.endswith("/"):
                # a trailing slash marks a directory entry
                path.mkdir(parents=True, exist_ok=True)
            else:
                # create parent directories, then an empty file
                path.parent.mkdir(parents=True, exist_ok=True)
                path.touch()

        yield get_path(prefix, is_directory=True)
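
Because this function yields, it is presumably wrapped in contextlib.contextmanager at its (unshown) definition site. Under that assumption, a test could use it like this:

# Assumes mock_filesystem is decorated with @contextlib.contextmanager.
with mock_filesystem(['models/', 'models/net.xml', 'labels.txt']) as root:
    assert (root / 'models').is_dir()
    assert (root / 'models' / 'net.xml').is_file()
    assert (root / 'labels.txt').is_file()
# The whole temporary tree is removed when the with-block exits.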
Example #5
    def convert(self, data_dir):
        data_dir = get_path(data_dir, is_directory=True).resolve()

        gallery = data_dir / 'bounding_box_test'
        query = data_dir / 'query'

        check_dirs((gallery, query), data_dir)
        # parse person IDs out of the image file names in each split
        gallery_images, gallery_pids = read_directory(
            gallery, query=False, image_pattern=MARKET_IMAGE_PATTERN)
        query_images, query_pids = read_directory(
            query, query=True, image_pattern=MARKET_IMAGE_PATTERN)
        annotation = gallery_images + query_images

        # distinct person identities across gallery and query splits
        meta = {'num_identities': len(gallery_pids | query_pids)}

        return annotation, meta
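
The bounding_box_test/query layout and MARKET_IMAGE_PATTERN suggest the Market-1501 re-identification dataset, where person IDs are encoded in the image file names. A usage sketch under the same class-name assumption:

# Hypothetical usage; MarketConverter is an assumed class name.
converter = MarketConverter()
annotation, meta = converter.convert('datasets/Market-1501')

# gallery and query records are concatenated into a single list;
# num_identities counts distinct person IDs across both splits.
print(len(annotation), meta['num_identities'])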
Example #6
    def convert(self, wider_annotation: str, label_start=1):
        """
        Args:
            wider_annotation: path to the WIDER validation annotation file
            label_start: label index assigned to the face class
        """
        wider_annotation = get_path(wider_annotation)

        annotation_lines = read_txt(wider_annotation)
        # collect the indices of lines that contain an image path
        image_line_indices = []
        for line_index, line in enumerate(annotation_lines):
            if '.jpg' in line:
                image_line_indices.append(line_index)

        annotations = []
        for line_index in image_line_indices:
            identifier = annotation_lines[line_index]
            bbox_count = annotation_lines[line_index + 1]
            bbox_lines = annotation_lines[line_index + 2:line_index + 2 +
                                          int(bbox_count)]

            x_mins, y_mins, x_maxs, y_maxs = [], [], [], []
            for bbox in bbox_lines:
                # boxes are stored as x, y, w, h; convert to corner coordinates
                x_min, y_min, x_max, y_max = convert_bboxes_xywh_to_x1y1x2y2(
                    *map(float, bbox.split(' ')[0:4]))
                x_mins.append(x_min)
                y_mins.append(y_min)
                x_maxs.append(x_max)
                y_maxs.append(y_max)

            annotations.append(
                DetectionAnnotation(identifier,
                                    [int(label_start)] * len(x_mins), x_mins,
                                    y_mins, x_maxs, y_maxs))

        return annotations, {
            'label_map': {
                0: '__background__',
                int(label_start): 'face'
            },
            'background_label': 0
        }
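
The indexing logic above matches the standard WIDER FACE annotation layout: an image path line, a line with the number of boxes, then one line per box starting with x, y, w, h (only those first four fields are read here). A hypothetical excerpt:

0--Parade/0_Parade_marchingband_1_465.jpg
2
345 211 64 74 0 0 0 0 0 0
425 180 58 70 0 0 0 0 0 0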
Example #7
    def convert(self, dataset_directory: str):
        """
        This method is executed automatically when convert.py is started.
        All arguments are automatically forwarded from the command line.

        Args:
            dataset_directory: path to sample dataset

        Returns:
            annotations: list of annotation representation objects.
            meta: dictionary with additional dataset-level metadata

        """
        dataset_directory = get_path(dataset_directory, is_directory=True)

        # read and convert annotation
        labels = self._read_labels(dataset_directory / 'labels.txt')
        annotations = self._convert_annotations(dataset_directory / 'test', labels)

        # convert the label list to an index -> name label map
        label_map = dict(enumerate(labels))
        metadata = {'label_map': label_map}

        return annotations, metadata
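
A final usage sketch under the same assumptions; the class name is hypothetical, and labels.txt is presumed to hold one class name per line, as the _read_labels helper suggests:

# Hypothetical usage; SampleConverter is an assumed class name.
converter = SampleConverter()
annotations, metadata = converter.convert('datasets/sample')

print(metadata['label_map'])  # {0: '<first class>', 1: '<second class>', ...}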