Example #1
 def __init__(
     self,
     input_key: str = "targets",
     output_key: str = "logits",
     prefix: str = "auc",
     activation: str = "Sigmoid",
     class_args: List[str] = None,
     **kwargs,
 ):
     """
     Args:
         input_key: input key to use for auc calculation;
             specifies our ``y_true``.
         output_key: output key to use for auc calculation;
             specifies our ``y_pred``.
         prefix: key for the metric's name
         multiplier: scale factor for the metric.
         activation: A torch.nn activation applied to the outputs.
             Must be one of ``'none'``, ``'Sigmoid'``, or ``'Softmax'``
         class_args: class names to display in the logs.
             If None, defaults to indices for each class, starting from 0
     """
     metric_fn = wrap_metric_fn_with_activation(metric_fn=auc,
                                                activation=activation)
     metric_fn = wrap_class_metric2dict(metric_fn, class_args=class_args)
     super().__init__(
         prefix=prefix,
         metric_fn=metric_fn,
         input_key=input_key,
         output_key=output_key,
         **kwargs,
     )
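Both wrapper calls above come from Catalyst's metric utilities. As a rough sketch of the idea behind ``wrap_metric_fn_with_activation`` (an illustration only, not Catalyst's actual implementation), the wrapper applies the named torch.nn activation to the model outputs before delegating to the metric:

import torch
from functools import partial

def wrap_with_activation_sketch(metric_fn, activation: str = "none"):
    # map the activation name to a callable; "none" leaves the logits untouched
    activation_fn = {
        "none": lambda x: x,
        "Sigmoid": torch.sigmoid,
        "Softmax": partial(torch.softmax, dim=1),
    }[activation]

    def wrapped(outputs, targets, *args, **kwargs):
        # activate the outputs, then delegate to the wrapped metric
        return metric_fn(activation_fn(outputs), targets, *args, **kwargs)

    return wrapped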
Example #2
    def __init__(
        self,
        input_key: str = "targets",
        output_key: str = "logits",
        prefix: str = "multilabel_accuracy",
        activation: str = "Sigmoid",
        threshold: float = None,
        **kwargs,
    ):
        """
        Args:
            input_key: input key to use for accuracy calculation;
                specifies our `y_true`
            output_key: output key to use for accuracy calculation;
                specifies our `y_pred`
            prefix: key for the metric's name
            activation: A torch.nn activation applied to the outputs.
                Must be one of ``"none"``, ``"Sigmoid"``, or ``"Softmax"``
            threshold: threshold for model output
            **kwargs: key-value params to pass to the metric

        .. note::
            For `**kwargs` info, please follow
            `catalyst.metrics.accuracy.multilabel_accuracy` docs
        """
        super().__init__(
            prefix=prefix,
            metric_fn=wrap_metric_fn_with_activation(
                metric_fn=multilabel_accuracy, activation=activation),
            input_key=input_key,
            output_key=output_key,
            threshold=threshold,
            **kwargs,
        )
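``threshold`` binarizes the sigmoid-activated outputs before they are compared with the multi-hot targets. A minimal illustration of that idea (a stand-in, not the actual ``catalyst.metrics.accuracy.multilabel_accuracy``):

import torch

def multilabel_accuracy_sketch(outputs, targets, threshold=0.5):
    # binarize probabilities and compare element-wise with the multi-hot targets
    predictions = (outputs > threshold).float()
    return (predictions == targets).float().mean()

outputs = torch.sigmoid(torch.randn(4, 3))      # probabilities after Sigmoid
targets = torch.randint(0, 2, (4, 3)).float()   # multi-hot ground truth
print(multilabel_accuracy_sketch(outputs, targets, threshold=0.5))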
Example #3
 def __init__(
     self,
     input_key: str = "targets",
     output_key: str = "logits",
     prefix: str = "dice",
     activation: str = "Sigmoid",
     **kwargs,
 ):
     """
     Args:
         input_key: input key to use for dice calculation;
             specifies our ``y_true``
         output_key: output key to use for dice calculation;
             specifies our ``y_pred``
         prefix: key to store in logs
         activation: A torch.nn activation applied to the model outputs.
             Must be one of ``'none'``, ``'Sigmoid'``, ``'Softmax'``
         eps: epsilon to avoid zero division
         threshold: threshold for outputs binarization
     """
     super().__init__(
         prefix=prefix,
         metric_fn=wrap_metric_fn_with_activation(metric_fn=dice,
                                                  activation=activation),
         input_key=input_key,
         output_key=output_key,
         **kwargs,
     )
Example #4
 def __init__(
     self,
     input_key: str = "targets",
     output_key: str = "logits",
     prefix: str = "multilabel_accuracy",
     activation: str = "Sigmoid",
     threshold: float = None,
 ):
     """
     Args:
         input_key: input key to use for accuracy calculation;
             specifies our `y_true`
         output_key: output key to use for accuracy calculation;
             specifies our `y_pred`
         prefix: key for the metric's name
         threshold: threshold for model output
         activation: A torch.nn activation applied to the outputs.
             Must be one of ``"none"``, ``"Sigmoid"``, or ``"Softmax"``
     """
     super().__init__(
         prefix=prefix,
         metric_fn=wrap_metric_fn_with_activation(
             metric_fn=multilabel_accuracy, activation=activation),
         input_key=input_key,
         output_key=output_key,
         threshold=threshold,
     )
Example #5
    def __init__(
        self,
        eps: float = 1e-7,
        threshold: float = None,
        activation: str = "Sigmoid",
    ):
        """@TODO: Docs. Contribution is welcome."""
        super().__init__()

        metric_fn = wrap_metric_fn_with_activation(metric_fn=dice,
                                                   activation=activation)
        self.loss_fn = partial(metric_fn, eps=eps, threshold=threshold)
Example #6
 def __init__(
     self,
     eps: float = 1e-7,
     threshold: float = None,
     activation: str = "Sigmoid",
 ):
     """
     Args:
         eps: epsilon to avoid zero division
         threshold: threshold for outputs binarization
         activation: A torch.nn activation applied to the outputs.
             Must be one of ``'none'``, ``'Sigmoid'``, ``'Softmax'``
     """
     super().__init__()
     metric_fn = wrap_metric_fn_with_activation(metric_fn=iou,
                                                activation=activation)
     self.loss_fn = partial(metric_fn, eps=eps, threshold=threshold)
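The snippet stores the wrapped metric in ``self.loss_fn`` but does not show ``forward``. Typically the metric is something to maximize, so the loss returns ``1 - metric``; a self-contained sketch under that assumption, with a stand-in soft IoU instead of ``catalyst.metrics.iou.iou``:

import torch
import torch.nn as nn
from functools import partial

def iou_sketch(outputs, targets, eps=1e-7, threshold=None):
    # stand-in soft IoU; binarize the outputs only if a threshold is given
    if threshold is not None:
        outputs = (outputs > threshold).float()
    intersection = torch.sum(targets * outputs)
    union = torch.sum(targets) + torch.sum(outputs) - intersection
    return (intersection + eps) / (union + eps)

class IoULossSketch(nn.Module):
    def __init__(self, eps: float = 1e-7, threshold: float = None):
        super().__init__()
        self.loss_fn = partial(iou_sketch, eps=eps, threshold=threshold)

    def forward(self, outputs, targets):
        # Sigmoid applied here for brevity; higher IoU is better, so minimize 1 - IoU
        return 1 - self.loss_fn(torch.sigmoid(outputs), targets)

print(IoULossSketch()(torch.randn(2, 1, 8, 8), torch.randint(0, 2, (2, 1, 8, 8)).float()))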
Example #7
    def __init__(
        self,
        input_key: str = "targets",
        output_key: str = "logits",
        prefix: str = "iou",
        activation: str = "Sigmoid",
        per_class: bool = False,
        class_args: List[str] = None,
        **kwargs,
    ):
        """
        Args:
            input_key: input key to use for iou calculation;
                specifies our ``y_true``
            output_key: output key to use for iou calculation;
                specifies our ``y_pred``
            prefix: key to store in logs
            eps: epsilon to avoid zero division
            threshold: threshold for outputs binarization
            activation: A torch.nn activation applied to the outputs.
                Must be one of ``'none'``, ``'Sigmoid'``, ``'Softmax'``
            per_class: boolean flag to log per class metrics,
                or use mean/macro statistics otherwise
            class_args: class names to display in the logs.
                If None, defaults to indices for each class, starting from 0
            **kwargs: key-value params to pass to the metric

        .. note::
            For ``**kwargs`` info, please follow
            ``catalyst.callbacks.metric.BatchMetricCallback`` and
            ``catalyst.metrics.iou.iou`` docs
        """
        metric_fn = wrap_metric_fn_with_activation(metric_fn=iou,
                                                   activation=activation)
        metric_fn = wrap_class_metric2dict(metric_fn,
                                           per_class=per_class,
                                           class_args=class_args)
        super().__init__(
            prefix=prefix,
            metric_fn=metric_fn,
            input_key=input_key,
            output_key=output_key,
            **kwargs,
        )
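``wrap_class_metric2dict`` is what turns per-class metric values into named log entries, controlled by ``per_class`` and ``class_args``. An illustration of the idea (not Catalyst's implementation; the key naming below is an assumption):

from typing import Callable, List, Optional

import torch

def wrap_class_metric2dict_sketch(
    metric_fn: Callable,
    per_class: bool = False,
    class_args: Optional[List[str]] = None,
) -> Callable:
    def wrapped(outputs, targets, *args, **kwargs):
        # assumes metric_fn returns a 1D tensor with one value per class
        values = metric_fn(outputs, targets, *args, **kwargs)
        result = {"mean": float(values.mean())}
        if per_class:
            names = class_args or [str(i) for i in range(len(values))]
            result.update({name: float(v) for name, v in zip(names, values)})
        return result

    return wrapped

def toy_metric(outputs, targets):
    # toy per-class "metric" (mean activation per class), just to exercise the wrapper
    return outputs.mean(dim=0)

wrapped = wrap_class_metric2dict_sketch(toy_metric, per_class=True, class_args=["cat", "dog"])
print(wrapped(torch.rand(4, 2), torch.rand(4, 2)))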
Example #8
    def __init__(
        self,
        input_key: str = "targets",
        output_key: str = "logits",
        prefix: str = "average_precision",
        activation: str = "Sigmoid",
        per_class: bool = False,
        class_args: List[str] = None,
        **kwargs,
    ):
        """
        Args:
            input_key: input key to use for
                mean average precision calculation;
                specifies our ``y_true``.
            output_key: output key to use for
                mean average precision calculation;
                specifies our ``y_pred``.
            prefix: key for the metric's name
            activation: A torch.nn activation applied to the outputs.
                Must be one of ``'none'``, ``'Sigmoid'``, or ``'Softmax'``
            per_class: boolean flag to log per class metrics,
                or use mean/macro statistics otherwise
            class_args: class names to display in the logs.
                If None, defaults to indices for each class, starting from 0
            **kwargs: key-value params to pass to the metric

        .. note::
            For ``**kwargs`` info, please follow
            ``catalyst.callbacks.metric.LoaderMetricCallback`` and
            ``catalyst.metrics.precision.average_precision`` docs
        """
        metric_fn = wrap_metric_fn_with_activation(metric_fn=average_precision,
                                                   activation=activation)
        metric_fn = wrap_class_metric2dict(metric_fn,
                                           per_class=per_class,
                                           class_args=class_args)
        super().__init__(
            prefix=prefix,
            metric_fn=metric_fn,
            input_key=input_key,
            output_key=output_key,
            **kwargs,
        )
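Unlike the batch-wise callbacks above, average precision depends on the full ranking of predictions, which is why the note points to ``LoaderMetricCallback``: predictions are accumulated over the whole loader and the metric is computed once per epoch. A rough sketch of that accumulation using scikit-learn (an illustration, not ``catalyst.metrics.precision.average_precision``):

import numpy as np
from sklearn.metrics import average_precision_score

# two toy "batches" of sigmoid scores and multi-hot targets for 3 classes
batches = [
    (np.array([[0.9, 0.2, 0.7], [0.1, 0.8, 0.4]]), np.array([[1, 0, 1], [0, 1, 0]])),
    (np.array([[0.6, 0.3, 0.2], [0.2, 0.7, 0.9]]), np.array([[1, 0, 0], [0, 1, 1]])),
]
scores = np.concatenate([s for s, _ in batches])   # accumulated over the loader
labels = np.concatenate([t for _, t in batches])
per_class_ap = [
    average_precision_score(labels[:, c], scores[:, c]) for c in range(labels.shape[1])
]
print("per-class AP:", per_class_ap, "mean AP:", float(np.mean(per_class_ap)))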
Example #9
    def __init__(
        self,
        input_key: str = "targets",
        output_key: str = "logits",
        prefix: str = "dice",
        activation: str = "Sigmoid",
        class_args: List[str] = None,
        **kwargs,
    ):
        """
        Args:
            input_key: input key to use for dice calculation;
                specifies our ``y_true``
            output_key: output key to use for dice calculation;
                specifies our ``y_pred``
            prefix: key to store in logs
            activation: A torch.nn activation applied to the outputs.
                Must be one of ``'none'``, ``'Sigmoid'``, or ``'Softmax'``
            class_args: class names to display in the logs.
                If None, defaults to indices for each class, starting from 0
            **kwargs: key-value params to pass to the metric

        .. note::
            For `**kwargs` info, please follow
            `catalyst.metrics.dice.dice` docs
        """
        metric_fn = wrap_metric_fn_with_activation(metric_fn=dice,
                                                   activation=activation)
        metric_fn = wrap_class_metric2dict(metric_fn, class_args=class_args)
        super().__init__(
            prefix=prefix,
            metric_fn=metric_fn,
            input_key=input_key,
            output_key=output_key,
            **kwargs,
        )
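Taken together, the callback examples follow one pattern: wrap the raw metric with an activation, optionally expose per-class values, and hand everything to the base metric callback. A hypothetical wiring, assuming these snippets belong to Catalyst's DiceCallback / IouCallback / AUCCallback (the class names, import path, and runner API are assumptions and depend on the Catalyst version):

from catalyst import dl

callbacks = [
    dl.DiceCallback(input_key="targets", output_key="logits", prefix="dice"),
    dl.IouCallback(per_class=True, class_args=["background", "foreground"]),
    dl.AUCCallback(prefix="auc", activation="Sigmoid"),
]
# these would then be passed to runner.train(..., callbacks=callbacks)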