Example #1

    def __init__(
        self,
        root: Union[str, Path],
        *,
        download: bool = True,
        superclass: Union[CelebAttr, str] = CelebAttr.Smiling,
        subclass: Union[CelebAttr, str] = CelebAttr.Male,
        transform: Optional[ImageTform] = None,
        split: Optional[Union[CelebASplit, str]] = None,
    ) -> None:

        self.superclass = (
            str_to_enum(str_=superclass, enum=CelebAttr) if isinstance(superclass, str) else superclass
        )
        self.subclass = (
            str_to_enum(str_=subclass, enum=CelebAttr) if isinstance(subclass, str) else subclass
        )
        self.split = (
            str_to_enum(str_=split, enum=CelebASplit) if isinstance(split, str) else split
        )

        self.root = Path(root)
        self._base_dir = self.root / self.__class__.__name__
        image_dir = self._base_dir / self._IMAGE_DIR

        if download:
            download_from_gdrive(file_info=self._FILE_LIST,
                                 root=self._base_dir,
                                 logger=self.logger)
        elif not self._check_unzipped():
            raise FileNotFoundError(
                f"Data not found at location {self._base_dir.resolve()}. Have you downloaded it?"
            )

        if self.split is None:
            skiprows = None
        else:
            # splits: information about which samples belong to train, val or test
            splits = (pd.read_csv(
                self._base_dir / "list_eval_partition.txt",
                delim_whitespace=True,
                names=["split"],
            ).to_numpy().squeeze())
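            # +2 maps the 0-indexed rows of the split array onto the rows of
            # list_attr_celeba.txt, whose first two lines are headers (row count and column names).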
            skiprows = (splits != self.split.value).nonzero()[0] + 2
        attrs = pd.read_csv(
            self._base_dir / "list_attr_celeba.txt",
            delim_whitespace=True,
            header=1,
            usecols=[self.superclass.name, self.subclass.name],
            skiprows=skiprows,
        )

        x = np.array(attrs.index)
        s_unmapped = torch.as_tensor(attrs[self.subclass.name].to_numpy())
        y_unmapped = torch.as_tensor(attrs[self.superclass.name].to_numpy())
        # map from {-1, 1} to {0, 1}
        s_binary = torch.div(s_unmapped + 1, 2, rounding_mode='floor')
        y_binary = torch.div(y_unmapped + 1, 2, rounding_mode='floor')

        super().__init__(x=x,
                         y=y_binary,
                         s=s_binary,
                         transform=transform,
                         image_dir=image_dir)
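
A minimal usage sketch for the constructor above, assuming this is the conduit-style CelebA class and that CelebASplit defines a train member; the root path and attribute choices are illustrative:

# Hypothetical usage: string and enum arguments are interchangeable,
# since strings are normalized via str_to_enum.
dataset = CelebA(
    root="data",
    download=False,
    superclass="Smiling",     # resolved to CelebAttr.Smiling
    subclass=CelebAttr.Male,  # enum members pass through unchanged
    split="train",            # resolved to CelebASplit.train
)
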
Example #2

    def __init__(
        self,
        root: Union[str, Path],
        *,
        split: Union[SSRPSplit, str] = SSRPSplit.pretrain,
        download: bool = True,
        transform: Optional[ImageTform] = None,
    ) -> None:
        self.root = Path(root)
        self._base_dir = self.root / self.__class__.__name__
        self._metadata_path = self._base_dir / "metadata.csv"
        self.download = download
        self.split = str_to_enum(str_=split, enum=SSRPSplit)

        if self.download:
            download_from_gdrive(file_info=self._FILE_INFO, root=self._base_dir, logger=self.logger)
        if not self._check_unzipped():
            raise FileNotFoundError(
                f"Data not found at location {self._base_dir.resolve()}. Have you downloaded it?"
            )
        if not self._metadata_path.exists():
            self._extract_metadata()

        self.metadata = pd.read_csv(self._metadata_path)
        self.metadata = cast(
            pd.DataFrame, self.metadata[self.metadata.split.values == self.split.value]
        )

        x = self.metadata["filepath"].to_numpy()
        y = torch.as_tensor(self.metadata["class_le"].to_numpy(), dtype=torch.long)
        s = torch.as_tensor(self.metadata["season_le"].to_numpy(), dtype=torch.long)

        super().__init__(x=x, y=y, s=s, transform=transform, image_dir=self._base_dir)
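
Every constructor in this listing funnels its string arguments through str_to_enum. A minimal sketch of what such a helper can look like (str_to_enum_sketch is a hypothetical name; this is an illustration, not ranzen's actual implementation):

from enum import Enum
from typing import Type, TypeVar

E = TypeVar("E", bound=Enum)

def str_to_enum_sketch(str_: str, *, enum: Type[E]) -> E:
    """Look up an enum member by name, failing loudly on unknown strings."""
    try:
        return enum[str_]
    except KeyError:
        valid = ", ".join(member.name for member in enum)
        raise ValueError(
            f"{str_!r} is not a valid {enum.__name__}; expected one of: {valid}"
        ) from None

class Split(Enum):
    train = 0
    test = 1

assert str_to_enum_sketch("train", enum=Split) is Split.train
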
Example #3
    def __init__(
        self,
        root: Union[str, Path],
        *,
        download: bool = True,
        transform: Optional[ImageTform] = None,
        superclass: Optional[Union[NicoSuperclass, str]] = NicoSuperclass.animals,
    ) -> None:

        self.superclass = (str_to_enum(str_=superclass, enum=NicoSuperclass)
                           if isinstance(superclass, str) else superclass)
        self.root = Path(root)
        self.download = download
        self._base_dir = self.root / self.__class__.__name__
        self._metadata_path = self._base_dir / "metadata.csv"

        if self.download:
            download_from_gdrive(file_info=self._FILE_INFO,
                                 root=self.root,
                                 logger=self.logger)
        elif not self._check_unzipped():
            raise FileNotFoundError(
                f"Data not found at location {self._base_dir.resolve()}. "
                "Have you downloaded it?")
        if not self._metadata_path.exists():
            self._extract_metadata()

        self.metadata = pd.read_csv(self._metadata_path)
        self.class_tree = (
            self.metadata[["concept", "context"]]
            .drop_duplicates()
            .groupby("concept")
            .agg(set)
            .to_dict()["context"]
        )
        self.concept_label_decoder = (
            self.metadata[["concept", "concept_le"]].set_index("concept_le").to_dict()["concept"]
        )
        self.context_label_decoder = (
            self.metadata[["context", "context_le"]].set_index("context_le").to_dict()["context"]
        )

        if self.superclass is not None:
            self.metadata = self.metadata[self.metadata["superclass"] == str(self.superclass)]
        # Divide up the dataframe into its constituent arrays because indexing with pandas is
        # substantially slower than indexing with numpy/torch
        x = self.metadata["filepath"].to_numpy()
        y = torch.as_tensor(self.metadata["concept_le"].to_numpy(),
                            dtype=torch.long)
        s = torch.as_tensor(self.metadata["context_le"].to_numpy(),
                            dtype=torch.long)

        super().__init__(x=x,
                         y=y,
                         s=s,
                         transform=transform,
                         image_dir=self._base_dir)
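
The decoder dictionaries built above invert the integer label encodings. A hypothetical round trip (the class name NICO and the root path are assumptions):

# Hypothetical usage.
dataset = NICO(root="data", download=False)
y0, s0 = int(dataset.y[0]), int(dataset.s[0])
print(dataset.concept_label_decoder[y0])  # concept name, e.g. "dog"
print(dataset.context_label_decoder[s0])  # context name, e.g. "on grass"
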
Example #4
def _reduce(losses: Tensor, reduction_type: ReductionType | str) -> Tensor:
    if isinstance(reduction_type, str):
        reduction_type = str_to_enum(str_=reduction_type, enum=ReductionType)
    if reduction_type is ReductionType.mean:
        return losses.mean()
    elif reduction_type is ReductionType.batch_mean:
        return losses.sum() / losses.size(0)
    elif reduction_type is ReductionType.sum:
        return losses.sum()
    elif reduction_type is ReductionType.none:
        return losses
    raise TypeError(
        f"Received invalid type '{type(reduction_type)}' for argument 'reduction_type'."
    )
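
A quick sanity check of the four reduction modes, using the function above:

import torch

losses = torch.tensor([[1.0, 2.0], [3.0, 4.0]])
assert _reduce(losses, ReductionType.mean) == 2.5        # mean over all elements
assert _reduce(losses, ReductionType.batch_mean) == 5.0  # sum / batch size
assert _reduce(losses, "sum") == 10.0                    # strings are accepted too
assert torch.equal(_reduce(losses, ReductionType.none), losses)
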
Example #5
    def __init__(
        self,
        root: Union[str, Path],
        *,
        download: bool = True,
        transform: Optional[ImageTform] = None,
        split: Optional[Union[WaterbirdsSplit, str]] = None,
    ) -> None:

        self.split = (
            str_to_enum(str_=split, enum=WaterbirdsSplit) if isinstance(split, str) else split
        )
        self.root = Path(root)
        self._base_dir = self.root / self.__class__.__name__
        self.download = download
        if self.download:
            download_from_url(
                file_info=self._FILE_INFO,
                root=self.root,
                logger=self.logger,
                remove_finished=True,
            )
        elif not self._check_unzipped():
            raise FileNotFoundError(
                f"Data not found at location {self._base_dir.resolve()}. Have you downloaded it?"
            )

        # Read in metadata
        # Note: metadata is one-indexed.
        self.metadata = pd.read_csv(self._base_dir / 'metadata.csv')
        # Use an official split of the data, if specified, else just use all
        # of the data
        if self.split is not None:
            split_indices = self.metadata["split"] == self.split.value
            self.metadata = cast(pd.DataFrame, self.metadata[split_indices])

        # Extract filenames
        x = self.metadata['img_filename'].to_numpy()
        # Extract class (land- vs. water-bird) labels
        y = torch.as_tensor(self.metadata["y"].to_numpy(), dtype=torch.long)
        # Extract place (land vs. water) labels
        s = torch.as_tensor(self.metadata["place"].to_numpy(), dtype=torch.long)

        super().__init__(x=x, y=y, s=s, transform=transform, image_dir=self._base_dir)
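
A short usage sketch, assuming WaterbirdsSplit defines a train member; the root path is illustrative:

# Hypothetical usage.
train_data = Waterbirds(root="data", download=False, split="train")
# y encodes land- vs. water-bird; s encodes land vs. water background.
print(train_data.y[:5], train_data.s[:5])
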
Example #6
def cross_entropy_loss(
    input: Tensor,
    *,
    target: Tensor,
    instance_weight: Tensor | None = None,
    reduction: ReductionType | str = ReductionType.mean,
    ignore_index: int = -100,
    class_weight: Tensor | None = None,
    label_smoothing: float = 0.0,
) -> Tensor:
    r"""This criterion computes the cross entropy loss between input and target.

    See :class:`~ranzen.torch.losses.CrossEntropyLoss` for details.

    :param input: Predicted unnormalized scores (often referred to as logits).
    :param target: Ground truth class indices or class probabilities.

    :param instance_weight: A manual rescaling weight given to each sample. If given, has to be a
        Tensor of size `N`.

    :param class_weight: A manual rescaling weight given to each class. If given, has to be a
        Tensor of size `C`.

    :param ignore_index: Specifies a target value that is ignored and does not contribute to the
        input gradient. Note that :attr:`ignore_index` is only applicable when the target contains
        class indices.

    :param reduction: Specifies the reduction to apply to the output.

    :param label_smoothing: A float in [0.0, 1.0]. Specifies the amount of smoothing when computing
        the loss, where 0.0 means no smoothing. The targets become a mixture of the original ground
        truth and a uniform distribution as described in `Rethinking the Inception Architecture for
        Computer Vision <https://arxiv.org/abs/1512.00567>`__. Default: :math:`0.0`.

    :returns: The (reduced) cross-entropy between ``input`` and ``target``.

    :example:

    >>> # Example of target with class indices
    >>> input = torch.randn(3, 5, requires_grad=True)
    >>> target = torch.randint(5, (3,), dtype=torch.int64)
    >>> loss = cross_entropy_loss(input, target=target)
    >>> loss.backward()
    >>>
    >>> # Example of target with class probabilities
    >>> input = torch.randn(3, 5, requires_grad=True)
    >>> target = torch.randn(3, 5).softmax(dim=1)
    >>> loss = cross_entropy_loss(input, target=target)
    >>> loss.backward()
    """
    if isinstance(reduction, str):
        reduction = str_to_enum(str_=reduction, enum=ReductionType)
    if input.ndim == 1 or input.size(1) == 1:  # Binary classification
        target = target.view_as(input)
        if not target.is_floating_point():
            target = target.float()
        loss_fn = F.binary_cross_entropy_with_logits
    else:  # Multiclass classification
        target = target.view(input.size(0), -1).squeeze(-1)
        if (target.ndim == 1) and target.is_floating_point():
            target = target.long()
        loss_fn = partial(
            F.cross_entropy, ignore_index=ignore_index, label_smoothing=label_smoothing
        )
    losses = loss_fn(
        input=input,
        target=target,
        weight=class_weight,
        reduction="none",
    )
    if instance_weight is not None:
        losses *= instance_weight.view_as(losses)
    return _reduce(losses=losses, reduction_type=reduction)
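
A self-contained sketch exercising both dispatch branches above (binary logits of shape (N,) and multiclass logits of shape (N, C)), plus per-sample weighting:

import torch

# Multiclass: (N, C) logits are routed through F.cross_entropy.
logits = torch.randn(4, 3, requires_grad=True)
target = torch.randint(3, (4,))
cross_entropy_loss(logits, target=target).backward()

# Binary: (N,) logits are routed through F.binary_cross_entropy_with_logits.
binary_logits = torch.randn(4, requires_grad=True)
binary_target = torch.randint(2, (4,))
cross_entropy_loss(binary_logits, target=binary_target, reduction="sum").backward()

# Per-sample weights rescale the unreduced losses before reduction.
weights = torch.tensor([1.0, 0.5, 0.5, 1.0])
weighted = cross_entropy_loss(logits, target=target, instance_weight=weights)
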
Example #7
    @reduction.setter
    def reduction(self, value: ReductionType | str) -> None:  # type: ignore
        if isinstance(value, str):
            value = str_to_enum(str_=value, enum=ReductionType)
        self._reduction = value
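
Hypothetical usage, assuming this setter lives on the CrossEntropyLoss class of Example #8 alongside a matching @property getter:

criterion = CrossEntropyLoss()
criterion.reduction = "batch_mean"  # normalized to an enum member by the setter
assert criterion.reduction is ReductionType.batch_mean
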
Example #8
    def __init__(
        self,
        *,
        class_weight: Optional[Tensor] = None,
        ignore_index: int = -100,
        reduction: Union[ReductionType, str] = ReductionType.mean,
        label_smoothing: float = 0.0,
    ) -> None:
        r"""This criterion computes the cross entropy loss between input and target.

        It is useful when training a classification problem with `C` classes.
        If provided, the optional argument :attr:`weight` should be a 1D `Tensor`
        assigning weight to each of the classes.
        This is particularly useful when you have an unbalanced training set.

        The `input` is expected to contain raw, unnormalized scores for each class.
        `input` has to be a Tensor of size :math:`(C)` for unbatched input,
        :math:`(minibatch, C)` or :math:`(minibatch, C, d_1, d_2, ..., d_K)` with :math:`K \geq 1` for the
        `K`-dimensional case. The last being useful for higher dimension inputs, such
        as computing cross entropy loss per-pixel for 2D images.

        The `target` that this criterion expects should contain either:

        - Class indices in the range :math:`[0, C)` where :math:`C` is the number of classes; if
          `ignore_index` is specified, this loss also accepts this class index (this index
          may not necessarily be in the class range). The unreduced (i.e. with :attr:`reduction`
          set to ``'none'``) loss for this case can be described as:

          .. math::
              \ell(x, y) = L = \{l_1,\dots,l_N\}^\top, \quad
              l_n = - w_{y_n} \log \frac{\exp(x_{n,y_n})}{\sum_{c=1}^C \exp(x_{n,c})}
              \cdot \mathbb{1}\{y_n \not= \text{ignore\_index}\}

          where :math:`x` is the input, :math:`y` is the target, :math:`w` is the weight,
          :math:`C` is the number of classes, and :math:`N` spans the minibatch dimension as well as
          :math:`d_1, ..., d_k` for the `K`-dimensional case. If
          :attr:`reduction` is not ``'none'`` (default ``'mean'``), then

          .. math::
              \ell(x, y) = \begin{cases}
                  \sum_{n=1}^N \frac{1}{\sum_{n=1}^N w_{y_n} \cdot \mathbb{1}\{y_n \not= \text{ignore\_index}\}} l_n, &
                   \text{if reduction} = \text{`mean';}\\
                    \sum_{n=1}^N l_n,  &
                    \text{if reduction} = \text{`sum'.}
                \end{cases}

          Note that this case is equivalent to the combination of :class:`~torch.nn.LogSoftmax` and
          :class:`~torch.nn.NLLLoss`.

        - Probabilities for each class; useful when labels beyond a single class per minibatch item
          are required, such as for blended labels, label smoothing, etc. The unreduced (i.e. with
          :attr:`reduction` set to ``'none'``) loss for this case can be described as:

          .. math::
              \ell(x, y) = L = \{l_1,\dots,l_N\}^\top, \quad
              l_n = - \sum_{c=1}^C w_c \log \frac{\exp(x_{n,c})}{\sum_{i=1}^C \exp(x_{n,i})} y_{n,c}

          where :math:`x` is the input, :math:`y` is the target, :math:`w` is the weight,
          :math:`C` is the number of classes, and :math:`N` spans the minibatch dimension as well as
          :math:`d_1, ..., d_k` for the `K`-dimensional case. If
          :attr:`reduction` is not ``'none'`` (default ``'mean'``), then

          .. math::
              \ell(x, y) = \begin{cases}
                  \frac{\sum_{n=1}^N l_n}{N}, &
                   \text{if reduction} = \text{`mean';}\\
                    \sum_{n=1}^N l_n,  &
                    \text{if reduction} = \text{`sum'.}
                \end{cases}

        .. note::
            The performance of this criterion is generally better when `target` contains class
            indices, as this allows for optimized computation. Consider providing `target` as
            class probabilities only when a single class label per minibatch item is too restrictive.

    
        :param class_weight: A manual rescaling weight given to each class. If given, has to be a 
            Tensor of size `C`.

        :param ignore_index: Specifies a target value that is ignored and does not contribute to the 
            input gradient. Note that :attr:`ignore_index` is only applicable when the target contains
            class indices.

        :param reduction: Specifies the reduction to apply to the output.

        :param label_smoothing: A float in [0.0, 1.0]. Specifies the amount of smoothing when computing
            the loss, where 0.0 means no smoothing. The targets become a mixture of the original ground
            truth and a uniform distribution as described in `Rethinking the Inception Architecture for
            Computer Vision <https://arxiv.org/abs/1512.00567>`__. Default: :math:`0.0`.

        :example:

        >>> # Example of target with class indices
        >>> loss = CrossEntropyLoss()
        >>> input = torch.randn(3, 5, requires_grad=True)
        >>> target = torch.empty(3, dtype=torch.long).random_(5)
        >>> output = loss(input, target=target)
        >>> output.backward()
        >>>
        >>> # Example of target with class probabilities
        >>> input = torch.randn(3, 5, requires_grad=True)
        >>> target = torch.randn(3, 5).softmax(dim=1)
        >>> output = loss(input, target=target)
        >>> output.backward()
        """
        super().__init__()
        if isinstance(reduction, str):
            reduction = str_to_enum(str_=reduction, enum=ReductionType)
        self.register_buffer("weight", class_weight)
        self.ignore_index = ignore_index
        self.label_smoothing = label_smoothing
        self._reduction = reduction
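
The snippet ends at the constructor. A plausible forward, assuming the class delegates to the cross_entropy_loss function of Example #6 (a sketch, not necessarily the library's exact code):

    def forward(self, input: Tensor, *, target: Tensor, instance_weight: Optional[Tensor] = None) -> Tensor:
        return cross_entropy_loss(
            input,
            target=target,
            instance_weight=instance_weight,
            reduction=self._reduction,
            ignore_index=self.ignore_index,
            class_weight=self.weight,
            label_smoothing=self.label_smoothing,
        )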