Example #1
    def get_target(self, index: int) -> Dict[str, Any]:
        """
        Returns the instance segmentation target
        """
        target = self.parse_json(index)

        annotations = []
        for annotation in target["annotations"]:
            if "polygon" not in annotation and "complex_polygon" not in annotation:
                print(
                    f"Warning: missing polygon in annotation {self.annotations_path[index]}"
                )
            # Extract the sequences of coordinates from the polygon annotation
            annotation_type: str = "polygon" if "polygon" in annotation else "complex_polygon"
            sequences = convert_polygons_to_sequences(
                annotation[annotation_type]["path"],
                height=target["height"],
                width=target["width"],
            )
            # Compute the bbox of the polygon
            x_coords = [s[0::2] for s in sequences]
            y_coords = [s[1::2] for s in sequences]
            min_x = np.min([np.min(x_coord) for x_coord in x_coords])
            min_y = np.min([np.min(y_coord) for y_coord in y_coords])
            max_x = np.max([np.max(x_coord) for x_coord in x_coords])
            max_y = np.max([np.max(y_coord) for y_coord in y_coords])
            w = max_x - min_x + 1
            h = max_y - min_y + 1
            # Compute the area of the polygon
            # TODO fix with additive/subtractive paths in complex polygons
            poly_area = np.sum([
                polygon_area(x_coord, y_coord)
                for x_coord, y_coord in zip(x_coords, y_coords)
            ])

            # Create and append the new entry for this annotation
            annotations.append({
                "category_id":
                self.classes.index(annotation["name"]),
                "segmentation":
                sequences,
                "bbox": [min_x, min_y, w, h],
                "area":
                poly_area,
            })
        target["annotations"] = annotations

        return target
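
The bounding box above is derived directly from the flattened sequences, while the area relies on a polygon_area helper. A minimal sketch of such a helper, assuming it implements the standard shoelace formula over separate x and y coordinate arrays (the library's actual implementation may differ):

import numpy as np

def polygon_area(x, y) -> float:
    # Shoelace formula: area of a simple polygon given its vertex coordinates
    x = np.asarray(x, dtype=float)
    y = np.asarray(y, dtype=float)
    return 0.5 * np.abs(np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1)))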
Example #2
    def __getitem__(self, index: int):
        """See superclass for documentation

        Notes
        -----
        The return value is a dict with the following fields:
            image_id : int
                Index of the image inside the dataset
            image_path: str
                The path to the image on the file system
            mask : tensor(H, W)
                Segmentation mask where each pixel encodes a class label
        """
        img = load_pil_image(self.images_path[index])
        target = self.parse_json(index)

        annotations = []
        for obj in target["annotations"]:
            sequences = convert_polygons_to_sequences(
                obj["polygon"]["path"],
                height=target["height"],
                width=target["width"],
            )
            # Discard polygons with fewer than three points (each point contributes an x and a y value)
            sequences[:] = [s for s in sequences if len(s) >= 6]
            if not sequences:
                continue
            annotations.append({
                "category_id": self.classes.index(obj["name"]),
                "segmentation": sequences
            })
        target["annotations"] = annotations

        img, target = self.convert_polygons(img, target)
        if self.transform is not None:
            img, target = self.transform(img, target)

        return img, target
Example #3
    def get_target(self, index: int) -> Dict[str, Any]:
        """
        Returns the semantic segmentation target
        """
        target = self.parse_json(index)

        annotations = []
        for obj in target["annotations"]:
            sequences = convert_polygons_to_sequences(
                obj["polygon"]["path"],
                height=target["height"],
                width=target["width"],
            )
            # Discard polygons with fewer than three points (each point contributes an x and a y value)
            sequences[:] = [s for s in sequences if len(s) >= 6]
            if not sequences:
                continue
            annotations.append({
                "category_id": self.classes.index(obj["name"]),
                "segmentation": sequences
            })
        target["annotations"] = annotations

        return target
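
Both loaders above rely on convert_polygons_to_sequences to turn Darwin polygon paths (lists of {"x": ..., "y": ...} points) into flat [x1, y1, x2, y2, ...] lists. A minimal sketch of that flattening, assuming optional clamping to the image bounds and the rounding flag used in Example #5; the real utility may differ in the details:

from typing import Dict, List, Optional, Union

def convert_polygons_to_sequences(
    polygons: Union[List[Dict[str, float]], List[List[Dict[str, float]]]],
    height: Optional[int] = None,
    width: Optional[int] = None,
    rounding: bool = True,
) -> List[List[float]]:
    # Accept either a single path or a list of paths
    if polygons and isinstance(polygons[0], dict):
        polygons = [polygons]
    sequences = []
    for path in polygons:
        sequence = []
        for point in path:
            x, y = point["x"], point["y"]
            # Optionally clamp coordinates to the image bounds
            if width is not None:
                x = max(0, min(x, width - 1))
            if height is not None:
                y = max(0, min(y, height - 1))
            if rounding:
                x, y = round(x), round(y)
            sequence.extend([x, y])
        sequences.append(sequence)
    return sequences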
Example #4
def export(annotation_files: Generator[dt.AnnotationFile, None, None],
           output_dir: Path,
           mode: str = "grey"):
    masks_dir = output_dir / "masks"
    masks_dir.mkdir(exist_ok=True, parents=True)
    annotation_files = list(annotation_files)

    categories = extract_categories(annotation_files)
    N = len(categories)
    if mode == "index":
        if N > 254:
            raise ValueError("maximum number of classes supported: 254")
        palette = {c: i for i, c in enumerate(categories)}
    elif mode == "grey":
        if N > 254:
            raise ValueError("maximum number of classes supported: 254")
        palette = {c: int(i * 255 / max(N - 1, 1)) for i, c in enumerate(categories)}
    elif mode == "rgb":
        if N > 360:
            raise ValueError("maximum number of classes supported: 360")
        palette = {c: i for i, c in enumerate(categories)}
        # Generate HSV colors for all classes except for the background
        HSV_colors = [(x / N, 0.8, 1.0) for x in range(N - 1)]
        RGB_colors = [
            [int(e * 255) for e in colorsys.hsv_to_rgb(*hsv)] for hsv in HSV_colors
        ]
        # Add the background class with an [0, 0, 0] RGB value
        RGB_colors.insert(0, [0, 0, 0])
        palette_rgb = {c: rgb for c, rgb in zip(categories, RGB_colors)}
        # Flatten to the [r, g, b, r, g, b, ...] layout expected by PIL's putpalette
        RGB_colors = [c for e in RGB_colors for c in e]

    for annotation_file in get_progress_bar(annotation_files, "Processing annotations"):
        image_id = os.path.splitext(annotation_file.filename)[0]
        outfile = masks_dir / f"{image_id}.png"
        outfile.parent.mkdir(parents=True, exist_ok=True)
        height = annotation_file.image_height
        width = annotation_file.image_width
        mask = np.zeros((height, width), dtype=np.uint8)
        annotations = [
            a for a in annotation_file.annotations
            if ispolygon(a.annotation_class)
        ]
        for a in annotations:
            cat = a.annotation_class.name
            if a.annotation_class.annotation_type == "polygon":
                polygon = a.data["path"]
            elif a.annotation_class.annotation_type == "complex_polygon":
                polygon = a.data["paths"]
            sequence = convert_polygons_to_sequences(polygon,
                                                     height=height,
                                                     width=width)
            draw_polygon(mask, sequence, palette[cat])
        if mode == "rgb":
            mask = Image.fromarray(mask, "P")
            mask.putpalette(RGB_colors)
        else:
            mask = Image.fromarray(mask)
        mask.save(outfile)

    with open(output_dir / "class_mapping.csv", "w") as f:
        f.write(f"class_name,class_color\n")
        for c in categories:
            if mode == "rgb":
                f.write(
                    f"{c},{palette_rgb[c][0]} {palette_rgb[c][1]} {palette_rgb[c][2]}\n"
                )
            else:
                f.write(f"{c},{palette[c]}\n")
Example #5
def build_annotation(annotation_file, annotation_id, annotation: dt.Annotation,
                     categories):
    annotation_type = annotation.annotation_class.annotation_type
    if annotation_type == "polygon":
        sequences = convert_polygons_to_sequences(annotation.data["path"],
                                                  rounding=False)
        x_coords = [s[0::2] for s in sequences]
        y_coords = [s[1::2] for s in sequences]
        min_x = np.min([np.min(x_coord) for x_coord in x_coords])
        min_y = np.min([np.min(y_coord) for y_coord in y_coords])
        max_x = np.max([np.max(x_coord) for x_coord in x_coords])
        max_y = np.max([np.max(y_coord) for y_coord in y_coords])
        w = max_x - min_x + 1
        h = max_y - min_y + 1
        # Compute the area of the polygon
        poly_area = np.sum([
            polygon_area(x_coord, y_coord)
            for x_coord, y_coord in zip(x_coords, y_coords)
        ])

        return {
            "id": annotation_id,
            "image_id": annotation_file.seq,
            "category_id": categories[annotation.annotation_class.name],
            "segmentation": sequences,
            "area": poly_area,
            "bbox": [min_x, min_y, w, h],
            "iscrowd": 0,
            "extra": build_extra(annotation),
        }
    elif annotation_type == "complex_polygon":
        mask = np.zeros(
            (annotation_file.image_height, annotation_file.image_width))
        sequences = convert_polygons_to_sequences(annotation.data["paths"])
        draw_polygon(mask, sequences, 1)
        counts = rle_encode(mask)

        x_coords = [s[0::2] for s in sequences]
        y_coords = [s[1::2] for s in sequences]
        min_x = np.min([np.min(x_coord) for x_coord in x_coords])
        min_y = np.min([np.min(y_coord) for y_coord in y_coords])
        max_x = np.max([np.max(x_coord) for x_coord in x_coords])
        max_y = np.max([np.max(y_coord) for y_coord in y_coords])
        w = max_x - min_x + 1
        h = max_y - min_y + 1

        return {
            "id": annotation_id,
            "image_id": annotation_file.seq,
            "category_id": categories[annotation.annotation_class.name],
            "segmentation": {
                "counts": counts,
                "size":
                [annotation_file.image_height, annotation_file.image_width]
            },
            "area": 0,
            "bbox": [min_x, min_y, w, h],
            "iscrowd": 1,
            "extra": build_extra(annotation),
        }
    elif annotation_type == "tag":
        pass
    elif annotation_type == "bounding_box":
        x = annotation.data["x"]
        y = annotation.data["y"]
        w = annotation.data["w"]
        h = annotation.data["h"]
        return build_annotation(
            annotation_file,
            annotation_id,
            dt.make_polygon(
                annotation.annotation_class.name,
                [
                    {"x": x, "y": y},
                    {"x": x + w, "y": y},
                    {"x": x + w, "y": y + h},
                    {"x": x, "y": y + h},
                ],
            ),
            categories,
        )
    else:
        print(f"skipping unsupported annotation_type '{annotation_type}'")
Example #6
def export(annotation_files: Iterable[dt.AnnotationFile], output_dir: Path,
           mode: str) -> None:
    masks_dir: Path = output_dir / "masks"
    masks_dir.mkdir(exist_ok=True, parents=True)
    annotation_files = list(annotation_files)

    categories: List[str] = extract_categories(annotation_files)
    num_categories = len(categories)

    palette = get_palette(mode=mode, categories=categories)
    if mode == "rgb":
        # Generate HSV colors for all classes except for the background
        HSV_colors = [(x / num_categories, 0.8, 1.0) for x in range(num_categories - 1)]
        RGB_color_list = [
            [int(e * 255) for e in colorsys.hsv_to_rgb(*hsv)] for hsv in HSV_colors
        ]
        # Add the background class with an [0, 0, 0] RGB value
        RGB_color_list.insert(0, [0, 0, 0])
        palette_rgb = {c: rgb for c, rgb in zip(categories, RGB_color_list)}
        # Flatten to the [r, g, b, r, g, b, ...] layout expected by PIL's putpalette
        RGB_colors = [c for e in RGB_color_list for c in e]

    for annotation_file in annotation_files:
        image_id = os.path.splitext(annotation_file.filename)[0]
        outfile = masks_dir / f"{image_id}.png"
        outfile.parent.mkdir(parents=True, exist_ok=True)

        height = annotation_file.image_height
        width = annotation_file.image_width
        if height is None or width is None:
            raise ValueError(
                f"Annotation file {annotation_file.filename} references an image with no height or width"
            )

        mask: np.ndarray = np.zeros((height, width), dtype=np.uint8)
        annotations = [
            a for a in annotation_file.annotations
            if ispolygon(a.annotation_class)
        ]
        for a in annotations:
            if isinstance(a, dt.VideoAnnotation):
                print(
                    f"Skipping video annotation from file {annotation_file.filename}"
                )
                continue

            cat = a.annotation_class.name
            if a.annotation_class.annotation_type == "polygon":
                polygon = a.data["path"]
            elif a.annotation_class.annotation_type == "complex_polygon":
                polygon = a.data["paths"]
            sequence = convert_polygons_to_sequences(polygon,
                                                     height=height,
                                                     width=width)
            draw_polygon(mask, sequence, palette[cat])

        if mode == "rgb":
            mask = Image.fromarray(mask, "P")
            mask.putpalette(RGB_colors)
        else:
            mask = Image.fromarray(mask)
        mask.save(outfile)

    with open(output_dir / "class_mapping.csv", "w") as f:
        f.write(f"class_name,class_color\n")
        for c in categories:
            if mode == "rgb":
                f.write(
                    f"{c},{palette_rgb[c][0]} {palette_rgb[c][1]} {palette_rgb[c][2]}\n"
                )
            else:
                f.write(f"{c},{palette[c]}\n")
Example #7
    def __getitem__(self, index: int):
        """
        Notes
        -----
        The return value is a dict with the following fields:
            image_id : int
                Index of the image inside the dataset
            image_path: str
                The path to the image on the file system
            labels : tensor(n)
                The class label of each one of the instances
            masks : tensor(n, H, W)
                Segmentation mask of each one of the instances
            boxes : tensor(n, 4)
                Coordinates of the bounding box enclosing the instances as [x, y, x, y]
            area : float
                Area in pixels of each one of the instances
        """
        img = load_pil_image(self.images_path[index])
        target = self.parse_json(index)

        annotations = []
        for annotation in target["annotations"]:
            if "polygon" not in annotation and "complex_polygon" not in annotation:
                print(
                    f"Warning: missing polygon in annotation {self.annotations_path[index]}"
                )
            # Extract the sequences of coordinates from the polygon annotation
            annotation_type = "polygon" if "polygon" in annotation else "complex_polygon"
            sequences = convert_polygons_to_sequences(
                annotation[annotation_type]["path"],
                height=target["height"],
                width=target["width"],
            )
            # Compute the bbox of the polygon
            x_coords = [s[0::2] for s in sequences]
            y_coords = [s[1::2] for s in sequences]
            min_x = np.min([np.min(x_coord) for x_coord in x_coords])
            min_y = np.min([np.min(y_coord) for y_coord in y_coords])
            max_x = np.max([np.max(x_coord) for x_coord in x_coords])
            max_y = np.max([np.max(y_coord) for y_coord in y_coords])
            w = max_x - min_x + 1
            h = max_y - min_y + 1
            # Compute the area of the polygon
            # TODO fix with additive/subtractive paths in complex polygons
            poly_area = np.sum([
                polygon_area(x_coord, y_coord)
                for x_coord, y_coord in zip(x_coords, y_coords)
            ])

            # Create and append the new entry for this annotation
            annotations.append({
                "category_id":
                self.classes.index(annotation["name"]),
                "segmentation":
                sequences,
                "bbox": [min_x, min_y, w, h],
                "area":
                poly_area,
            })
        target["annotations"] = annotations

        img, target = self.convert_polygons(img, target)
        if self.transform is not None:
            img, target = self.transform(img, target)

        return img, target