Example #1
    def _decode_classification_data(self, data: Dict[str, str]) -> Dict[str, Any]:
        label_idx = int(data["label"]) - 1
        return dict(
            label=Label(label_idx, category=self.info.categories[label_idx]),
            species="cat" if data["species"] == "1" else "dog",
        )
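
The hook above (like Example #16 further down) converts the dataset's 1-based class id into the 0-based index that Label stores and pairs it with the matching entry of the category list. A minimal, dependency-free sketch of that pattern, using a hypothetical DecodedLabel tuple as a stand-in for the prototype Label feature and an illustrative category list:

from typing import NamedTuple, Sequence


class DecodedLabel(NamedTuple):
    """Hypothetical stand-in for torchvision's prototype Label feature."""

    index: int      # 0-based class index, as Label stores it
    category: str   # human-readable class name


def decode_label(raw_label: str, categories: Sequence[str]) -> DecodedLabel:
    # Annotation files are often 1-based, so shift to a 0-based index first.
    label_idx = int(raw_label) - 1
    return DecodedLabel(label_idx, categories[label_idx])


print(decode_label("1", ["Abyssinian", "Bengal", "Birman"]))  # DecodedLabel(index=0, category='Abyssinian')
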
Example #2
    def _prepare_sample(self, data: Tuple[Any, Any]) -> Dict[str, Any]:
        image, target = data  # They're both numpy arrays at this point

        return {
            "image": features.Image(image.transpose(2, 0, 1)),
            "label": Label(target.item(), categories=self._categories),
        }
Example #3
    def _prepare_sample(self, data: Tuple[np.ndarray, np.ndarray]) -> Dict[str, Any]:
        image_array, label_array = data

        return dict(
            image=Image(image_array.transpose((2, 0, 1))),
            label=Label(int(label_array) % 10, categories=self._categories),
        )
Example #4
    def _prepare_sample(self, data: Dict[str, Any]) -> Dict[str, Any]:
        label_id = data.get("emotion")

        return dict(
            image=Image(torch.tensor([int(idx) for idx in data["pixels"].split()], dtype=torch.uint8).reshape(48, 48)),
            label=Label(int(label_id), categories=self.categories) if label_id is not None else None,
        )
Example #5
    def _decode_instances_anns(self, anns: List[Dict[str, Any]], image_meta: Dict[str, Any]) -> Dict[str, Any]:
        image_size = (image_meta["height"], image_meta["width"])
        labels = [ann["category_id"] for ann in anns]
        return dict(
            # TODO: create a segmentation feature
            segmentations=_Feature(
                torch.stack(
                    [
                        self._segmentation_to_mask(ann["segmentation"], is_crowd=ann["iscrowd"], image_size=image_size)
                        for ann in anns
                    ]
                )
            ),
            areas=_Feature([ann["area"] for ann in anns]),
            crowds=_Feature([ann["iscrowd"] for ann in anns], dtype=torch.bool),
            bounding_boxes=BoundingBox(
                [ann["bbox"] for ann in anns],
                format="xywh",
                image_size=image_size,
            ),
            labels=Label(labels, categories=self._categories),
            super_categories=[
                self._category_to_super_category[self._categories[label]] for label in labels
            ],
            ann_ids=[ann["id"] for ann in anns],
        )
Example #6
    def _prepare_val_data(
        self, data: Tuple[Tuple[int, str], Tuple[str, BinaryIO]]
    ) -> Tuple[Tuple[Label, str], Tuple[str, BinaryIO]]:
        label_data, image_data = data
        _, wnid = label_data
        label = Label.from_category(self._wnid_to_category[wnid], categories=self._categories)
        return (label, wnid), image_data
Example #7
    def _prepare_sample(self, data: Tuple[torch.Tensor, torch.Tensor], *, config: DatasetConfig) -> Dict[str, Any]:
        image, label = data
        return dict(
            image=Image(image),
            label=Label(label, dtype=torch.int64, categories=self.categories),
        )
Example #8
    def _prepare_sample(self, data: Tuple[Any, Any]) -> Dict[str, Any]:
        image, target = data  # They're both numpy arrays at this point

        return {
            "image": features.Image(image),
            "label": Label(target.item()),
        }
Example #9
    def _collate_val_data(
        self, data: Tuple[Tuple[int, int], Tuple[str, io.IOBase]]
    ) -> Tuple[Tuple[Label, str, str], Tuple[str, io.IOBase]]:
        label_data, image_data = data
        _, label = label_data
        category = self.categories[label]
        wnid = self.category_to_wnid[category]
        return (Label(label), category, wnid), image_data
Example #10
    def _prepare_sample(self, data: Tuple[str, Any]) -> Dict[str, Any]:
        path, buffer = data
        category = pathlib.Path(path).parent.name
        return dict(
            label=Label.from_category(category, categories=self._categories),
            path=path,
            image=EncodedImage.from_file(buffer),
        )
Example #11
    def _prepare_sample(self, line: str) -> Dict[str, Any]:
        label, *values = line.strip().split(" ")
        values = [float(value.split(":")[1]) for value in values]
        pixels = torch.tensor(values).add_(1).div_(2)
        return dict(
            image=Image(pixels.reshape(16, 16)),
            label=Label(int(label) - 1, categories=self._categories),
        )
Example #12
    def _prepare_sample(self, data: Tuple[str, Tuple[str, BinaryIO]]) -> Dict[str, Any]:
        id, (path, buffer) = data
        return dict(
            label=Label.from_category(id.split("/", 1)[0], categories=self._categories),
            path=path,
            image=EncodedImage.from_file(buffer),
        )
Example #13
    def _prepare_train_data(
        self, data: Tuple[str, BinaryIO]
    ) -> Tuple[Tuple[Label, str], Tuple[str, BinaryIO]]:
        path = pathlib.Path(data[0])
        wnid = cast(Match[str], self._TRAIN_IMAGE_NAME_PATTERN.match(path.name))["wnid"]
        label = Label.from_category(self.info.extra.wnid_to_category[wnid], categories=self.categories)
        return (label, wnid), data
Example #14
    def _collate_train_data(
        self, data: Tuple[str, io.IOBase]
    ) -> Tuple[Tuple[Label, str, str], Tuple[str, io.IOBase]]:
        path = pathlib.Path(data[0])
        wnid = self._TRAIN_IMAGE_NAME_PATTERN.match(path.name).group("wnid")  # type: ignore[union-attr]
        category = self.wnid_to_category[wnid]
        label_data = (Label(self.categories.index(category)), category, wnid)
        return label_data, data
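
Note that Examples #13 and #14 build the same ImageNet train label in two ways: #13 via Label.from_category, #14 by looking the index up explicitly with self.categories.index(category). A small sketch of that presumed equivalence, with a hypothetical LabelSketch tuple standing in for the real prototype class and a made-up category list:

from typing import NamedTuple, Sequence, Tuple


class LabelSketch(NamedTuple):
    """Hypothetical stand-in for the prototype Label feature (index plus category list)."""

    index: int
    categories: Tuple[str, ...]

    @classmethod
    def from_category(cls, category: str, *, categories: Sequence[str]) -> "LabelSketch":
        # Assumption: from_category resolves the name to its position in the
        # ordered category list, which is what Example #14 spells out manually.
        return cls(categories.index(category), tuple(categories))


categories = ["kit fox", "English setter", "Siberian husky"]  # made-up category list
label = LabelSketch.from_category("English setter", categories=categories)
assert label.index == categories.index("English setter") == 1
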
Example #15
    def _prepare_sample(self, data: Tuple[Tuple[str, BinaryIO], Optional[Dict[str, Any]]]) -> Dict[str, Any]:
        image_data, scenes_data = data
        path, buffer = image_data

        return dict(
            path=path,
            image=EncodedImage.from_file(buffer),
            label=Label(len(scenes_data["objects"])) if scenes_data else None,
        )
Example #16
    def _prepare_sample(self, data: Tuple[str, BinaryIO]) -> Dict[str, Any]:
        path, buffer = data

        return dict(
            path=path,
            image=EncodedImage.from_file(buffer),
            label=Label(
                int(pathlib.Path(path).parent.name.split(".", 1)[0]) - 1,
                categories=self._categories,
            ),
        )
Example #17
    def _prepare_sample(self, data: Tuple[Tuple[str, BinaryIO], Tuple[int, int, int, int, int, str]]) -> Dict[str, Any]:
        image, target = data
        path, buffer = image
        image = EncodedImage.from_file(buffer)

        return dict(
            path=path,
            image=image,
            label=Label(target[4] - 1, categories=self.categories),
            bounding_box=BoundingBox(target[:4], format="xyxy", image_size=image.image_size),
        )
Example #18
    def _collate_and_decode_sample(
        self,
        data: Tuple[str, io.IOBase],
        *,
        decoder: Optional[Callable[[io.IOBase], torch.Tensor]],
    ) -> Dict[str, Any]:
        path, buffer = data

        dir_name = pathlib.Path(path).parent.name
        label_str, category = dir_name.split(".")
        label = Label(int(label_str), category=category)

        return dict(label=label, image=decoder(buffer) if decoder else buffer)
Example #19
def _prepare_sample(
    data: Tuple[str, BinaryIO],
    *,
    root: pathlib.Path,
    categories: List[str],
) -> Dict[str, Any]:
    path, buffer = data
    category = pathlib.Path(path).relative_to(root).parts[0]
    return dict(
        path=path,
        data=EncodedData.from_file(buffer),
        label=Label.from_category(category, categories=categories),
    )
Example #20
    def _collate_and_decode_sample(
        self,
        data: Tuple[Tuple[str, io.IOBase], Optional[Dict[str, Any]]],
        *,
        decoder: Optional[Callable[[io.IOBase], torch.Tensor]],
    ) -> Dict[str, Any]:
        image_data, scenes_data = data
        path, buffer = image_data

        return dict(
            path=path,
            image=decoder(buffer) if decoder else buffer,
            label=Label(len(scenes_data["objects"])) if scenes_data else None,
        )
Example #21
    def _prepare_sample(self, data: Tuple[Tuple[str, Any], Dict[str, Any]]) -> Dict[str, Any]:
        (path, buffer), csv_info = data
        label = int(csv_info["ClassId"])

        bounding_box = BoundingBox(
            [int(csv_info[k]) for k in ("Roi.X1", "Roi.Y1", "Roi.X2", "Roi.Y2")],
            format="xyxy",
            image_size=(int(csv_info["Height"]), int(csv_info["Width"])),
        )

        return {
            "path": path,
            "image": EncodedImage.from_file(buffer),
            "label": Label(label, categories=self._categories),
            "bounding_box": bounding_box,
        }
Example #22
    def _prepare_sample(
        self, data: Tuple[Tuple[Dict[str, str], Tuple[str, BinaryIO]], Tuple[str, BinaryIO]]
    ) -> Dict[str, Any]:
        ann_data, image_data = data
        classification_data, segmentation_data = ann_data
        segmentation_path, segmentation_buffer = segmentation_data
        image_path, image_buffer = image_data

        return dict(
            label=Label(int(classification_data["label"]) - 1, categories=self._categories),
            species="cat" if classification_data["species"] == "1" else "dog",
            segmentation_path=segmentation_path,
            segmentation=EncodedImage.from_file(segmentation_buffer),
            image_path=image_path,
            image=EncodedImage.from_file(image_buffer),
        )
Example #23
    def _prepare_detection_ann(self, buffer: BinaryIO) -> Dict[str, Any]:
        anns = self._parse_detection_ann(buffer)
        instances = anns["object"]
        return dict(
            bounding_boxes=BoundingBox(
                [
                    [int(instance["bndbox"][part]) for part in ("xmin", "ymin", "xmax", "ymax")]
                    for instance in instances
                ],
                format="xyxy",
                image_size=cast(Tuple[int, int], tuple(int(anns["size"][dim]) for dim in ("height", "width"))),
            ),
            labels=Label(
                [self.categories.index(instance["name"]) for instance in instances], categories=self.categories
            ),
        )
Example #24
File: dtd.py Project: nairbv/vision
    def _prepare_sample(
        self, data: Tuple[Tuple[str, List[str]], Tuple[str, BinaryIO]]
    ) -> Dict[str, Any]:
        (_, joint_categories_data), image_data = data
        _, *joint_categories = joint_categories_data
        path, buffer = image_data

        category = pathlib.Path(path).parent.name

        return dict(
            joint_categories={category for category in joint_categories if category},
            label=Label.from_category(category, categories=self.categories),
            path=path,
            image=EncodedImage.from_file(buffer),
        )
Example #25
    def _prepare_sample(
        self,
        data: Tuple[Tuple[str, Tuple[str, BinaryIO]], Any],
        *,
        prepare_ann_fn: Callable[[Any, Tuple[int, int]], Dict[str, Any]],
    ) -> Dict[str, Any]:
        data, anns_data = data
        _, image_data = data
        path, buffer = image_data

        image = EncodedImage.from_file(buffer)

        return dict(
            prepare_ann_fn(anns_data, image.image_size),
            image=image,
            label=Label(int(pathlib.Path(path).parent.name.rsplit(".", 1)[0]),
                        categories=self._categories),
        )
Example #26
    def _collate_and_decode_sample(
        self,
        data: Tuple[Tuple[str, List[str]], Tuple[str, io.IOBase]],
        *,
        decoder: Optional[Callable[[io.IOBase], torch.Tensor]],
    ) -> Dict[str, Any]:
        (_, joint_categories_data), image_data = data
        _, *joint_categories = joint_categories_data
        path, buffer = image_data

        category = pathlib.Path(path).parent.name

        return dict(
            joint_categories={category for category in joint_categories if category},
            label=Label(self.info.categories.index(category), category=category),
            path=path,
            image=decoder(buffer) if decoder else buffer,
        )
Example #27
    def _collate_and_decode(
        self,
        data: Tuple[np.ndarray, int],
        *,
        decoder: Optional[Callable[[io.IOBase], torch.Tensor]],
    ) -> Dict[str, Any]:
        image_array, category_idx = data

        image: Union[Image, io.BytesIO]
        if decoder is raw:
            image = Image(image_array)
        else:
            image_buffer = image_buffer_from_array(image_array.transpose((1, 2, 0)))
            image = decoder(image_buffer) if decoder else image_buffer  # type: ignore[assignment]

        label = Label(category_idx, category=self.categories[category_idx])

        return dict(image=image, label=label)
Example #28
    def _collate_and_decode_sample(
        self,
        data: Tuple[Tuple[str, Tuple[str, io.IOBase]], Any],
        *,
        year: str,
        decoder: Optional[Callable[[io.IOBase], torch.Tensor]],
    ) -> Dict[str, Any]:
        data, anns_data = data
        _, image_data = data
        path, buffer = image_data

        dir_name = pathlib.Path(path).parent.name
        label_str, category = dir_name.split(".")

        return dict(
            (self._2011_load_ann if year == "2011" else self._2010_load_ann)(
                anns_data, decoder=decoder),
            image=decoder(buffer) if decoder else buffer,
            label=Label(int(label_str), category=category),
        )
Example #29
    def _collate_and_decode_sample(
        self,
        data: Tuple[np.ndarray, np.ndarray],
        *,
        decoder: Optional[Callable[[io.IOBase], torch.Tensor]],
    ) -> Dict[str, Any]:
        image_array, label_array = data

        if decoder is raw:
            image = Image(image_array.transpose((2, 0, 1)))
        else:
            image_buffer = image_buffer_from_array(image_array)
            image = decoder(image_buffer) if decoder else image_buffer  # type: ignore[assignment]

        return dict(
            image=image,
            label=Label(int(label_array) % 10),
        )
Example #30
    def _prepare_sample(
        self,
        data: Tuple[
            Tuple[str, Tuple[Tuple[str, List[str]], Tuple[str, BinaryIO]]],
            Tuple[
                Tuple[str, Dict[str, str]],
                Tuple[str, Dict[str, str]],
                Tuple[str, Dict[str, str]],
                Tuple[str, Dict[str, str]],
            ],
        ],
    ) -> Dict[str, Any]:
        split_and_image_data, ann_data = data
        _, (_, image_data) = split_and_image_data
        path, buffer = image_data

        image = EncodedImage.from_file(buffer)
        (_, identity), (_, attributes), (_, bounding_box), (_, landmarks) = ann_data

        return dict(
            path=path,
            image=image,
            identity=Label(int(identity["identity"])),
            attributes={attr: value == "1" for attr, value in attributes.items()},
            bounding_box=BoundingBox(
                [int(bounding_box[key]) for key in ("x_1", "y_1", "width", "height")],
                format="xywh",
                image_size=image.image_size,
            ),
            landmarks={
                landmark: _Feature((int(landmarks[f"{landmark}_x"]), int(landmarks[f"{landmark}_y"])))
                for landmark in {key[:-2] for key in landmarks.keys()}
            },
        )
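
Finally, a sketch of how a _prepare_sample-style hook is driven: in the prototype datasets it is mapped over a datapipe of raw samples, which the built-in map below merely stands in for. The directory-name-as-category logic mirrors Example #10; the category list, paths, and byte contents are made up for illustration, and plain Python values replace the Label and EncodedImage features.

import io
import pathlib
from typing import Any, BinaryIO, Dict, List, Tuple

CATEGORIES: List[str] = ["airplane", "car"]  # hypothetical category list


def prepare_sample(data: Tuple[str, BinaryIO]) -> Dict[str, Any]:
    # Same pattern as Example #10: the parent directory name encodes the category.
    path, buffer = data
    category = pathlib.Path(path).parent.name
    return dict(
        path=path,
        label=CATEGORIES.index(category),  # stand-in for Label.from_category(...)
        data=buffer.read(),                # stand-in for EncodedImage.from_file(buffer)
    )


raw_samples = [
    ("root/airplane/001.jpg", io.BytesIO(b"fake jpeg bytes")),
    ("root/car/001.jpg", io.BytesIO(b"fake jpeg bytes")),
]

# The prototype datasets apply this per-sample mapping via the datapipe;
# built-in map is used here only to show the transformation itself.
for sample in map(prepare_sample, raw_samples):
    print(sample["path"], sample["label"])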