Example 1
    def __init__(
        self,
        input_key: str = "targets",
        output_key: str = "logits",
        prefix: str = "accuracy",
        topk_args: List[int] = None,
        num_classes: int = None,
        accuracy_args: List[int] = None,
        **kwargs,
    ):
        """
        Args:
            input_key: input key to use for accuracy calculation;
                specifies our `y_true`
            output_key: output key to use for accuracy calculation;
                specifies our `y_pred`
            prefix: key for the metric's name
            topk_args: specifies which accuracy@K to log:
                [1] - accuracy
                [1, 3] - accuracy at 1 and 3
                [1, 3, 5] - accuracy at 1, 3 and 5
            num_classes: number of classes; used to infer the default
                ``topk_args`` when neither ``topk_args`` nor
                ``accuracy_args`` is given
            accuracy_args: legacy alias for ``topk_args``
        """
        topk_args = (topk_args or accuracy_args
                     or get_default_topk_args(num_classes))

        super().__init__(
            prefix=prefix,
            metric_fn=wrap_topk_metric2dict(accuracy, topk_args=topk_args),
            input_key=input_key,
            output_key=output_key,
            **kwargs,
        )
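The fallback chain above (``topk_args``, then ``accuracy_args``, then defaults derived from ``num_classes``) relies on ``get_default_topk_args``. Below is a minimal sketch of what such a helper can look like, consistent with the resolution order shown here; the library's actual implementation may differ.

from typing import List, Optional

def get_default_topk_args(num_classes: Optional[int]) -> List[int]:
    # Always report accuracy@1; add @3 and @5 only when there are
    # enough classes for those ranks to be meaningful.
    result = [1]
    if num_classes is None:
        return result
    if num_classes > 3:
        result.append(3)
    if num_classes > 5:
        result.append(5)
    return result

# e.g. get_default_topk_args(10) -> [1, 3, 5]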
Example 2
    def __init__(
        self,
        embeddings_key: str = "logits",
        labels_key: str = "targets",
        is_query_key: str = "is_query",
        prefix: str = "cmc",
        topk_args: List[int] = None,
        num_classes: int = None,
    ):
        """
        This callback counts cumulative matching characteristics (CMC).
        If the current object comes from the query set, your dataset
        should output `True` under `is_query_key`, and `False` if it
        comes from the gallery. See `QueryGalleryDataset` in
        `catalyst.contrib.datasets.metric_learning` for more information.
        On batch end, the callback accumulates all embeddings.

        Args:
            embeddings_key: embeddings key in output dict
            labels_key: labels key in output dict
            is_query_key: key to a boolean flag that is `True` if the
                current object comes from the query set
            prefix: key for the metric's name
            topk_args: specifies which cmc@K to log.
                [1] - cmc@1
                [1, 3] - cmc@1 and cmc@3
                [1, 3, 5] - cmc@1, cmc@3 and cmc@5
            num_classes: number of classes; used to infer the default
                ``topk_args`` when ``topk_args`` is None

        """
        super().__init__(order=CallbackOrder.Metric)
        self.list_args = topk_args or get_default_topk_args(num_classes)
        self._metric_fn = cmc_score
        self._prefix = prefix
        self.embeddings_key = embeddings_key
        self.labels_key = labels_key
        self.is_query_key = is_query_key
        self._gallery_embeddings: torch.Tensor = None
        self._query_embeddings: torch.Tensor = None
        self._gallery_labels: torch.Tensor = None
        self._query_labels: torch.Tensor = None
        self._gallery_idx = None
        self._query_idx = None
        self._query_size = None
        self._gallery_size = None
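A hedged sketch of the query/gallery split the docstring describes: on each batch, the ``is_query`` mask separates embeddings and labels into the two pools that ``cmc_score`` later compares. All names, shapes, and values below are illustrative.

import torch

# Illustrative batch: 8 samples, 128-d embeddings, 4 classes.
embeddings = torch.randn(8, 128)
labels = torch.randint(0, 4, (8,))
is_query = torch.tensor([1, 0, 1, 0, 0, 1, 0, 1], dtype=torch.bool)

# Split into the two pools the callback accumulates across batches.
query_embeddings = embeddings[is_query]
query_labels = labels[is_query]
gallery_embeddings = embeddings[~is_query]
gallery_labels = labels[~is_query]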
Example 3
    def __init__(
        self,
        input_key: str = "targets",
        output_key: str = "logits",
        prefix: str = "accuracy",
        topk_args: List[int] = None,
        num_classes: int = None,
        accuracy_args: List[int] = None,
        **kwargs,
    ):
        """
        Args:
            input_key: input key to use for accuracy calculation;
                specifies our `y_true`
            output_key: output key to use for accuracy calculation;
                specifies our `y_pred`
            prefix: key for the metric's name
            topk_args: specifies which accuracy@K to log:
                [1] - accuracy
                [1, 3] - accuracy at 1 and 3
                [1, 3, 5] - accuracy at 1, 3 and 5
            num_classes: number of classes; used to infer the default
                ``topk_args`` when neither ``topk_args`` nor
                ``accuracy_args`` is given
            accuracy_args: legacy alias for ``topk_args``
            **kwargs: key-value params to pass to the metric

        .. note::
            For ``**kwargs`` info, please follow
            ``catalyst.callbacks.metric.BatchMetricCallback`` and
            ``catalyst.metrics.accuracy.accuracy`` docs
        """
        topk_args = (topk_args or accuracy_args
                     or get_default_topk_args(num_classes))

        super().__init__(
            prefix=prefix,
            metric_fn=wrap_topk_metric2dict(accuracy, topk_args=topk_args),
            input_key=input_key,
            output_key=output_key,
            **kwargs,
        )
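``wrap_topk_metric2dict`` is what turns a plain top-k metric into the named values the callback logs under ``prefix``. The following is a simplified sketch of the idea only, not catalyst's actual implementation; the key format is an assumption.

from functools import partial
from typing import Callable, Dict, Sequence

def wrap_topk_metric2dict(
    metric_fn: Callable, topk_args: Sequence[int]
) -> Callable[..., Dict[str, float]]:
    # Bind the requested ranks to the metric, then name each result
    # after its rank (assumed format), e.g. "01", "03", "05".
    topk_metric = partial(metric_fn, topk=topk_args)

    def wrapper(*args, **kwargs) -> Dict[str, float]:
        values = topk_metric(*args, **kwargs)
        return {f"{k:02d}": float(v) for k, v in zip(topk_args, values)}

    return wrapper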
Example 4
    def __init__(
        self,
        input_key: str = "targets",
        output_key: str = "logits",
        prefix: str = "accuracy",
        multiplier: float = 1.0,
        topk_args: List[int] = None,
        num_classes: int = None,
        accuracy_args: List[int] = None,
        **kwargs,
    ):
        """
        Args:
            input_key: input key to use for accuracy calculation;
                specifies our `y_true`
            output_key: output key to use for accuracy calculation;
                specifies our `y_pred`
            prefix: key for the metric's name
            topk_args: specifies which accuracy@K to log:
                [1] - accuracy
                [1, 3] - accuracy at 1 and 3
                [1, 3, 5] - accuracy at 1, 3 and 5
            num_classes: number of classes; used to infer the default
                ``topk_args`` when neither ``topk_args`` nor
                ``accuracy_args`` is given
            accuracy_args: legacy alias for ``topk_args``
            multiplier: scale factor applied to the metric value
            activation: a torch.nn activation applied to the outputs
                (passed via ``**kwargs``); must be one of ``"none"``,
                ``"Sigmoid"``, or ``"Softmax"``
        """
        topk_args = (topk_args or accuracy_args
                     or get_default_topk_args(num_classes))

        super().__init__(
            prefix=prefix,
            metric_fn=wrap_topk_metric2dict(accuracy, topk_args=topk_args),
            input_key=input_key,
            output_key=output_key,
            multiplier=multiplier,
            **kwargs,
        )
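An illustrative construction, assuming the surrounding class is catalyst's AccuracyCallback; the import path and argument values below are examples only and may vary across catalyst versions.

# Hypothetical usage sketch; import path assumed.
from catalyst.dl import AccuracyCallback

callback = AccuracyCallback(
    input_key="targets",
    output_key="logits",
    prefix="accuracy",
    multiplier=100.0,  # e.g. report accuracy as a percentage
    topk_args=[1, 3],  # log accuracy@1 and accuracy@3
)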