def rename_identifiers(annotation_list, images_file):
    check_exists(images_file)
    with Path(images_file).open() as images_f:
        images_list = images_f.read().split('\n')
    # Pair annotations with image names positionally and overwrite identifiers.
    for annotation, image in zip(annotation_list, images_list):
        annotation.identifier = image
    return annotation_list
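A minimal usage sketch, assuming `check_exists` and `Path` are in scope as above; the `FakeAnnotation` stub is invented for illustration:

# Hypothetical driver for rename_identifiers; FakeAnnotation is a stand-in
# for the real annotation class (anything with an `identifier` attribute works).
import tempfile

class FakeAnnotation:
    def __init__(self, identifier):
        self.identifier = identifier

with tempfile.NamedTemporaryFile('w', suffix='.txt', delete=False) as tmp:
    tmp.write('img_001.jpg\nimg_002.jpg')

annotations = rename_identifiers([FakeAnnotation('0'), FakeAnnotation('1')], tmp.name)
print([a.identifier for a in annotations])  # ['img_001.jpg', 'img_002.jpg']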
Example #2
    def convert(self, devkit_dir):
        """

        Args:
            devkit_dir: path to VOC2012 devkit dir (e.g. VOCdevkit/VOC2012)

        """

        devkit_dir = Path(devkit_dir)
        check_exists(devkit_dir.as_posix())

        image_set_file = devkit_dir / 'ImageSets' / 'Segmentation' / 'val.txt'
        mask_dir = devkit_dir / 'SegmentationClass'
        image_dir = devkit_dir / 'JPEGImages'

        with image_set_file.open() as f:
            image_list = f.read().strip().split()
            annotation = [
                SegmentationAnnotation(
                    (image_dir / "{}.jpg".format(image)).as_posix(),
                    (mask_dir / "{}.png".format(image)).as_posix(),
                    mask_loader=GTMaskLoader.SCIPY) for image in image_list
            ]

        meta = {
            'label_map': dict(enumerate(_VOC_CLASSES)),
            'background_label': 0,
            'segmentation_colors': _SEGMENTATION_COLORS
        }

        return annotation, meta
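A hedged driver for the segmentation converter above; the wrapper class name `VOCSegmentationConverter` and the dataset path are assumptions for illustration:

# Hypothetical usage; requires a real VOC2012 layout on disk.
converter = VOCSegmentationConverter()  # assumed wrapper class for convert()
annotations, meta = converter.convert('datasets/VOCdevkit/VOC2012')
print(len(annotations), 'validation images')
print(meta['label_map'][meta['background_label']])  # background class name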
Example #3
    def convert(self, annotation_file: str, label_start=1):
        annotation_file = pathlib.Path(annotation_file).absolute()
        check_exists(annotation_file.as_posix())
        annotations = []

        with open(annotation_file.as_posix(), 'r') as wider_annotation:
            image_annotations = wider_annotation.read()
            image_annotations = image_annotations.split('\n')
            # Record the line index of every image-path line ('.jpg' lines).
            image_ids = []
            for image_id, line in enumerate(image_annotations):
                if '.jpg' in line:
                    image_ids.append(image_id)

            for image_id in image_ids:
                identifier = image_annotations[image_id]
                # The line after the image path holds the face count, followed by
                # that many bounding-box lines of the form 'x y w h <attributes>'.
                bbox_count = image_annotations[image_id + 1]
                bbox_lines = image_annotations[image_id + 2:image_id + 2 + int(bbox_count)]
                x_mins, y_mins, x_maxs, y_maxs = [], [], [], []
                for bbox in bbox_lines:
                    # Only the first four fields (x, y, width, height) are used.
                    x_min, y_min, width, height = bbox.split(' ')[0:4]
                    x_mins.append(int(x_min))
                    y_mins.append(int(y_min))
                    x_maxs.append(int(x_min) + int(width))
                    y_maxs.append(int(y_min) + int(height))
                annotations.append(DetectionAnnotation(identifier, [int(label_start)]*len(x_mins),
                                                       x_mins, y_mins, x_maxs, y_maxs))
        meta = {
            'label_map': {0: '__background__', int(label_start): 'face'},
            'background_label': 0
        }
        return annotations, meta
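The parser above relies on the WIDER FACE text layout: an image-path line, a face-count line, then one box line per face. A small invented fragment in that shape, useful as a smoke test (the converter class name in the comment is an assumption):

# Invented two-face fragment in the layout the loop above expects.
import tempfile

wider_text = (
    '0--Parade/0_Parade_marchingband_1_849.jpg\n'
    '2\n'
    '449 330 122 149 0 0 0 0 0 0\n'
    '361 98 263 339 0 0 0 0 0 0\n'
)
with tempfile.NamedTemporaryFile('w', suffix='.txt', delete=False) as tmp:
    tmp.write(wider_text)
# annotations, meta = WiderConverter().convert(tmp.name)  # class name assumed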
Example #4
    def convert(self, landmarks_csv, bbox_csv=None):
        annotations = []
        check_exists(landmarks_csv)
        with open(landmarks_csv, "r") as file:
            reader = csv.DictReader(file)
            for row in reader:
                identifier = row['NAME_ID'] + '.jpg'
                x_values = np.array([
                    float(row["P1X"]),
                    float(row["P2X"]),
                    float(row["P3X"]),
                    float(row["P4X"]),
                    float(row["P5X"])
                ])
                y_values = np.array([
                    float(row["P1Y"]),
                    float(row["P2Y"]),
                    float(row["P3Y"]),
                    float(row["P4Y"]),
                    float(row["P5Y"])
                ])
                annotations.append(
                    PointRegressionAnnotation(identifier, x_values, y_values))
        if bbox_csv is not None:
            check_exists(bbox_csv)
            with open(bbox_csv) as file:
                reader = csv.DictReader(file)
                # Bounding-box rows are paired with landmark annotations by
                # position, so both CSVs must list images in the same order.
                for index, row in enumerate(reader):
                    x_min = int(row["X"])
                    y_min = int(row["Y"])
                    x_max = x_min + int(row["W"])
                    y_max = y_min + int(row["H"])
                    annotations[index].metadata['rect'] = [
                        x_min, y_min, x_max, y_max
                    ]
        return annotations, {
            'label_map': {
                0: 'Left Eye',
                1: 'Right Eye',
                2: 'Nose',
                3: 'Left Mouth Corner',
                4: 'Right Mouth Corner'
            }
        }
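The two CSVs read above can be mocked with the exact headers the code indexes (`NAME_ID`, `P1X`…`P5Y` for landmarks; `X`, `Y`, `W`, `H` for boxes); the rows below are invented and the converter class name is an assumption:

# Invented single-row CSVs matching the headers used by the converter.
import tempfile

landmarks_rows = (
    'NAME_ID,P1X,P1Y,P2X,P2Y,P3X,P3Y,P4X,P4Y,P5X,P5Y\n'
    'n000001/0001_01,75.0,110.0,125.0,108.0,100.0,140.0,80.0,170.0,120.0,168.0\n'
)
bbox_rows = 'NAME_ID,X,Y,W,H\nn000001/0001_01,60,80,90,120\n'

with tempfile.NamedTemporaryFile('w', suffix='.csv', delete=False) as lf:
    lf.write(landmarks_rows)
with tempfile.NamedTemporaryFile('w', suffix='.csv', delete=False) as bf:
    bf.write(bbox_rows)
# annotations, meta = LandmarksConverter().convert(lf.name, bf.name)  # class name assumed
# annotations[0].metadata['rect'] would then be [60, 80, 150, 200]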
Example #5
    def convert(self, pairs_file, train_file=None, landmarks_file=None):
        pairs_file = Path(pairs_file).absolute()
        check_exists(pairs_file.as_posix())
        landmarks_map = None
        if landmarks_file:
            landmarks_file = Path(landmarks_file).absolute()
            check_exists(landmarks_file.as_posix())
            with landmarks_file.open('rt') as landmarks:
                landmarks_list = landmarks.read().split('\n')
                landmarks_map = {}
                # Each line is tab-separated: image key, then integer coordinates.
                for landmark_line in landmarks_list:
                    if not landmark_line:
                        continue
                    landmark_line = landmark_line.split('\t')
                    landmarks_map[landmark_line[0]] = [
                        int(point) for point in landmark_line[1:]
                    ]
        test_annotations = self.prepare_annotation(pairs_file, True,
                                                   landmarks_map)
        if train_file:
            train_file = Path(train_file).absolute()
            train_annotations = self.prepare_annotation(
                train_file, True, landmarks_map)
            test_annotations += train_annotations

        return test_annotations, {}
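The landmarks file parsed above is tab-separated: an image key followed by integer point coordinates. A minimal invented line and the mapping it produces:

# Invented tab-separated landmark record in the expected shape.
landmark_line = 'Aaron_Eckhart/Aaron_Eckhart_0001.jpg\t107\t113\t147\t113\t126\t140'
key, *points = landmark_line.split('\t')
landmarks_map = {key: [int(point) for point in points]}
print(landmarks_map[key])  # [107, 113, 147, 113, 126, 140]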
Example #6
    def convert(self, devkit_dir):
        """
        Args:
            devkit_dir: path to VOC2007 devkit dir (e.g. .../VOCdevkit/VOC2007)
        """
        devkit_dir = Path(devkit_dir)
        check_exists(devkit_dir.as_posix())

        annotation_directory = devkit_dir / 'Annotations'
        images_directory = devkit_dir / 'JPEGImages'
        self.image_root = images_directory.as_posix()

        check_exists(annotation_directory.as_posix())
        check_exists(images_directory.as_posix())

        detections = []

        image_set_file = devkit_dir / 'ImageSets' / 'Main' / 'test.txt'

        with image_set_file.open() as f:
            image_list = f.read().strip().split()

        for image in tqdm(image_list):
            file_path = annotation_directory / '{}.xml'.format(image)
            tree = ET.parse(file_path.as_posix())

            identifier = tree.find('.//filename').text
            image_path = images_directory / identifier

            if not image_path.is_file():
                raise FileNotFoundError("{}: {}".format(
                    os.strerror(errno.ENOENT), image_path))

            labels, x_mins, y_mins, x_maxs, y_maxs = [], [], [], [], []
            difficult_indices = []
            for entry in tree.getroot():
                if not entry.tag.startswith('object'):
                    continue

                bbox = entry.find('bndbox')
                difficult = int(entry.find('difficult').text)

                # Objects flagged 'difficult' are recorded by index; they are
                # conventionally excluded from PASCAL VOC mAP scoring.
                if difficult == 1:
                    difficult_indices.append(len(labels))

                labels.append(_CLASS_TO_IND[entry.find('name').text])
                # VOC coordinates are 1-based; shift to 0-based pixel indices.
                x_mins.append(float(bbox.find('xmin').text) - 1)
                y_mins.append(float(bbox.find('ymin').text) - 1)
                x_maxs.append(float(bbox.find('xmax').text) - 1)
                y_maxs.append(float(bbox.find('ymax').text) - 1)

            image_annotation = DetectionAnnotation(identifier, labels, x_mins,
                                                   y_mins, x_maxs, y_maxs)
            image_annotation.metadata['difficult_boxes'] = difficult_indices

            detections.append(image_annotation)

        meta = {
            'label_map': dict(enumerate(_VOC_CLASSES)),
            'background_label': 0
        }

        return detections, meta
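A hedged driver for the VOC2007 detection converter above; the wrapper class name and dataset path are assumptions:

# Hypothetical usage; requires a real VOC2007 layout on disk.
converter = VOCDetectionConverter()  # assumed wrapper class for convert()
detections, meta = converter.convert('datasets/VOCdevkit/VOC2007')
difficult = sum(len(d.metadata['difficult_boxes']) for d in detections)
print('{} images, {} difficult boxes'.format(len(detections), difficult))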
Example #7
    def convert(self, file_path, image_names=None, label_start=1, background_label=None):
        """
        Args:
            file_path: path to file with data
        """
        check_exists(file_path)
        label_start = int(label_start)

        tree = ET.parse(file_path)
        # First pass: collect the full label vocabulary across all frames.
        labels_set = set()
        for frames in tree.getroot():
            for frame in frames:
                for annotation in frame:
                    label = annotation.find('type')
                    if label is None:
                        raise ValueError('"{}" contains detection without label'.format(file_path))

                    labels_set.add(label.text)

        labels_set = sorted(labels_set)
        # Map each label to a contiguous index starting at label_start.
        class_to_ind = dict(zip(labels_set, range(label_start, len(labels_set) + label_start)))
        label_map = {}
        for class_label, ind in class_to_ind.items():
            label_map[ind] = class_label

        annotations = []
        # Second pass: emit one DetectionAnnotation per frame.
        for frames in tree.getroot():
            for frame in frames:
                identifier = frame.tag + '.png'
                labels, x_mins, y_mins, x_maxs, y_maxs = [], [], [], [], []
                difficult_indices = []
                for annotation in frame:
                    label = annotation.find('type')
                    if label is None:
                        raise ValueError('"{}" contains detection without "{}"'.format(file_path, 'type'))

                    box = annotation.find('roi')
                    if box is None:
                        raise ValueError('"{}" contains detection without "{}"'.format(file_path, 'roi'))
                    box = list(map(float, box.text.split()))

                    is_ignored = annotation.find('is_ignored')
                    if is_ignored is not None and int(is_ignored.text) == 1:
                        difficult_indices.append(len(labels))

                    labels.append(class_to_ind[label.text])
                    # roi stores x, y, width, height; convert to corner coordinates.
                    x_mins.append(box[0])
                    y_mins.append(box[1])
                    x_maxs.append(box[0] + box[2])
                    y_maxs.append(box[1] + box[3])

                detection_annotation = DetectionAnnotation(identifier, labels, x_mins, y_mins, x_maxs, y_maxs)
                detection_annotation.metadata['difficult_boxes'] = difficult_indices
                annotations.append(detection_annotation)

        if image_names is not None:
            self.rename_identifiers(annotations, image_names)
        meta = {}
        self.add_background(label_map, meta, background_label)
        meta['label_map'] = label_map

        return annotations, meta
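A sketch of the XML nesting this parser walks: a root of frame containers, each frame holding annotations with `type`, `roi` (x y w h), and an optional `is_ignored` child. The contents below are invented:

import xml.etree.ElementTree as ET

# Invented XML in the shape the two passes above iterate over.
xml_text = '''<annotations>
  <frames>
    <frame_000000>
      <annotation>
        <type>pedestrian</type>
        <roi>10 20 30 40</roi>
        <is_ignored>0</is_ignored>
      </annotation>
    </frame_000000>
  </frames>
</annotations>'''

root = ET.fromstring(xml_text)
for frames in root:
    for frame in frames:
        print(frame.tag + '.png')  # identifier the converter derives
        for annotation in frame:
            x, y, w, h = map(float, annotation.find('roi').text.split())
            print(annotation.find('type').text, (x, y, x + w, y + h))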