def __init__(
    self,
    input_key: str = "targets",
    output_key: str = "logits",
    prefix: str = "average_precision",
    multiplier: float = 1.0,
    class_args: List[str] = None,
    **kwargs,
):
    """
    Args:
        input_key: input key to use for calculation mean average precision;
            specifies our `y_true`.
        output_key: output key to use for calculation mean average precision;
            specifies our `y_pred`.
        prefix: metric's name.
        multiplier: scale factor for the metric.
        class_args: class names to display in the logs. If None,
            defaults to indices for each class, starting from 0
    """
    # Wrap the raw metric so per-class results come back as a dict keyed
    # by class name (or index when no names are given).
    metric_fn = wrap_class_metric2dict(average_precision, class_args=class_args)
    super().__init__(
        prefix=prefix,
        metric_fn=metric_fn,
        input_key=input_key,
        output_key=output_key,
        multiplier=multiplier,
        **kwargs,
    )
def __init__(
    self,
    input_key: str = "targets",
    output_key: str = "logits",
    prefix: str = "auc",
    activation: str = "Sigmoid",
    class_args: List[str] = None,
    multiplier: float = 1.0,
    **kwargs,
):
    """
    Args:
        input_key: input key to use for auc calculation
            specifies our ``y_true``.
        output_key: output key to use for auc calculation;
            specifies our ``y_pred``.
        prefix: key for the metric's name
        activation: An torch.nn activation applied to the outputs.
            Must be one of ``'none'``, ``'Sigmoid'``, or ``'Softmax'``
        class_args: class names to display in the logs. If None,
            defaults to indices for each class, starting from 0
        multiplier: scale factor for the metric.
    """
    # FIX: the docstring previously documented ``multiplier`` but the
    # signature did not accept it and it was never forwarded to the base
    # callback.  It is now an explicit parameter (appended after
    # ``class_args`` so existing positional callers are unaffected) and
    # forwarded to ``super().__init__``, matching the sibling
    # average-precision callback.
    metric_fn = wrap_metric_fn_with_activation(metric_fn=auc, activation=activation)
    metric_fn = wrap_class_metric2dict(metric_fn, class_args=class_args)
    super().__init__(
        prefix=prefix,
        metric_fn=metric_fn,
        input_key=input_key,
        output_key=output_key,
        multiplier=multiplier,
        **kwargs,
    )
def __init__(
    self,
    input_key: str = "targets",
    output_key: str = "logits",
    prefix: str = "iou",
    activation: str = "Sigmoid",
    per_class: bool = False,
    class_args: List[str] = None,
    **kwargs,
):
    """
    Args:
        input_key: input key to use for iou calculation
            specifies our ``y_true``
        output_key: output key to use for iou calculation;
            specifies our ``y_pred``
        prefix: key to store in logs
        eps: epsilon to avoid zero division (forwarded via ``**kwargs``)
        threshold: threshold for outputs binarization
            (forwarded via ``**kwargs``)
        activation: An torch.nn activation applied to the outputs.
            Must be one of ``'none'``, ``'Sigmoid'``, ``'Softmax'``
        per_class: boolean flag to log per class metrics,
            or use mean/macro statistics otherwise
        class_args: class names to display in the logs. If None,
            defaults to indices for each class, starting from 0
        **kwargs: key-value params to pass to the metric

    .. note::
        For ``**kwargs`` info, please follow
        ``catalyst.callbacks.metric.BatchMetricCallback`` and
        ``catalyst.metrics.iou.iou`` docs
    """
    # First apply the requested activation to model outputs, then adapt
    # the metric to report a per-class dict (or macro average).
    activated_metric = wrap_metric_fn_with_activation(
        metric_fn=iou, activation=activation
    )
    metric_fn = wrap_class_metric2dict(
        activated_metric, per_class=per_class, class_args=class_args
    )
    super().__init__(
        metric_fn=metric_fn,
        prefix=prefix,
        input_key=input_key,
        output_key=output_key,
        **kwargs,
    )
def __init__(
    self,
    input_key: str = "targets",
    output_key: str = "logits",
    prefix: str = "average_precision",
    activation: str = "Sigmoid",
    per_class: bool = False,
    class_args: List[str] = None,
    **kwargs,
):
    """
    Args:
        input_key: input key to use for calculation mean average precision;
            specifies our `y_true`.
        output_key: output key to use for calculation mean average precision;
            specifies our `y_pred`.
        prefix: key for the metric's name
        activation: An torch.nn activation applied to the outputs.
            Must be one of ``'none'``, ``'Sigmoid'``, or ``'Softmax'``
        per_class: boolean flag to log per class metrics,
            or use mean/macro statistics otherwise
        class_args: class names to display in the logs. If None,
            defaults to indices for each class, starting from 0
        **kwargs: key-value params to pass to the metric

    .. note::
        For ``**kwargs`` info, please follow
        ``catalyst.callbacks.metric.LoaderMetricCallback`` and
        ``catalyst.metrics.precision.average_precision`` docs
    """
    # Pipeline: activation on the outputs -> per-class dict adapter.
    activated_metric = wrap_metric_fn_with_activation(
        metric_fn=average_precision, activation=activation
    )
    metric_fn = wrap_class_metric2dict(
        activated_metric, per_class=per_class, class_args=class_args
    )
    super().__init__(
        metric_fn=metric_fn,
        prefix=prefix,
        input_key=input_key,
        output_key=output_key,
        **kwargs,
    )
def __init__(
    self,
    input_key: str = "targets",
    output_key: str = "logits",
    prefix: str = "dice",
    activation: str = "Sigmoid",
    class_args: List[str] = None,
    **kwargs,
):
    """
    Args:
        input_key: input key to use for dice calculation
            specifies our ``y_true``
        output_key: output key to use for dice calculation;
            specifies our ``y_pred``
        prefix: key to store in logs
        activation: An torch.nn activation applied to the outputs.
            Must be one of ``'none'``, ``'Sigmoid'``, or ``'Softmax'``
        class_args: class names to display in the logs. If None,
            defaults to indices for each class, starting from 0
        **kwargs: key-value params to pass to the metric

    .. note::
        For ``**kwargs`` info, please follow
        ``catalyst.metrics.dice.dice`` docs
    """
    # Apply the chosen activation to the outputs, then wrap the metric so
    # per-class values are reported as a dict keyed by class name/index.
    metric_fn = wrap_metric_fn_with_activation(metric_fn=dice, activation=activation)
    metric_fn = wrap_class_metric2dict(metric_fn, class_args=class_args)
    super().__init__(
        prefix=prefix,
        metric_fn=metric_fn,
        input_key=input_key,
        output_key=output_key,
        **kwargs,
    )