Example #1
def __init__(
    self,
    metric_fn: Callable,
    metric_key: str,
    compute_on_call: bool = True,
    prefix: str = None,
    suffix: str = None,
):
    """Init"""
    super().__init__(compute_on_call=compute_on_call, prefix=prefix, suffix=suffix)
    self.metric_fn = metric_fn
    self.metric_name = f"{self.prefix}{metric_key}{self.suffix}"
    self.additive_metric = AdditiveValueMetric()
Example #2
def __init__(
    self,
    input_key: str,
    target_key: str,
    metric_key: str,
    criterion_key: str = None,
):
    """Init."""
    super().__init__(order=CallbackOrder.metric, node=CallbackNode.all)
    self.input_key = input_key
    self.target_key = target_key
    self.metric_key = metric_key
    self.criterion_key = criterion_key
    self.additive_metric = AdditiveValueMetric()
    self.criterion = None
Example #3
def test_additive_std(
    values_list: Iterable[float],
    num_samples_list: Iterable[int],
    true_values_list: Iterable[float],
):
    """
    Test additive metric std computation

    Args:
        values_list: list of values to update the metric with
        num_samples_list: list of corresponding sample counts (``num_samples``)
        true_values_list: list of expected intermediate metric values
    """
    metric = AdditiveValueMetric()
    for value, num_samples, true_value in zip(values_list, num_samples_list,
                                              true_values_list):
        metric.update(value=value, num_samples=num_samples)
        _, std = metric.compute()
        assert np.isclose(std, true_value)
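The test above checks the running std after each update. As a rough reference (an assumption about the statistic being tracked, not code taken from the library), a sample-count-weighted mean and std over the accumulated (value, num_samples) updates can be computed as follows; whether the library uses the biased or unbiased estimator is not shown here:

import numpy as np

def weighted_mean_std(values, num_samples):
    # each update contributes `value` with weight `num_samples`
    values = np.asarray(values, dtype=float)
    weights = np.asarray(num_samples, dtype=float)
    mean = np.average(values, weights=weights)
    # unbiased ("n - 1") weighted variance; the exact estimator is an assumption
    var = np.sum(weights * (values - mean) ** 2) / (weights.sum() - 1)
    return float(mean), float(np.sqrt(var))

# e.g. two updates: 0.5 over 10 samples, then 1.0 over 10 samples
print(weighted_mean_std([0.5, 1.0], [10, 10]))  # (0.75, ~0.256)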
Example #4
def test_additive_mode(
    values_list: Union[Iterable[float], Iterable[torch.Tensor]],
    num_samples_list: Iterable[int],
    true_values_list: Iterable[float],
    mode: Iterable[str],
):
    """
    Test additive metric mean computation for different modes

    Args:
        values_list: list of values to update the metric with
        num_samples_list: list of corresponding sample counts (``num_samples``)
        true_values_list: list of expected intermediate metric values
        mode: `AdditiveValueMetric` mode
    """
    metric = AdditiveValueMetric(mode=mode)
    for value, num_samples, true_value in zip(values_list, num_samples_list,
                                              true_values_list):
        metric.update(value=value, num_samples=num_samples)
        mean, _ = metric.compute()
        assert np.isclose(mean, true_value)
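Independent of the ``mode`` switch (assumed here to change only how the values are handled, e.g. tensors vs. floats), the quantity the test asserts is the sample-count-weighted running mean, sketched below:

def running_mean(values, num_samples):
    # sample-count-weighted running mean over successive updates
    total, count = 0.0, 0
    for value, k in zip(values, num_samples):
        total += float(value) * k
        count += k
    return total / count

assert abs(running_mean([1.0, 2.0], [2, 2]) - 1.5) < 1e-6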
Example #5
def __init__(
    self,
    topk_args: List[int] = None,
    compute_on_call: bool = True,
    prefix: str = None,
    suffix: str = None,
):
    """Init NDCGMetric"""
    super().__init__(compute_on_call=compute_on_call, prefix=prefix, suffix=suffix)
    self.metric_name_mean = f"{self.prefix}ndcg{self.suffix}"
    self.metric_name_std = f"{self.prefix}ndcg{self.suffix}/std"
    self.topk_args: List[int] = topk_args or [1]
    self.additive_metrics: List[AdditiveValueMetric] = [
        AdditiveValueMetric() for _ in range(len(self.topk_args))
    ]
Example #6
def __init__(
    self,
    topk_args: List[int] = None,
    num_classes: int = None,
    compute_on_call: bool = True,
    prefix: str = None,
    suffix: str = None,
):
    """Init AccuracyMetric"""
    super().__init__(compute_on_call=compute_on_call, prefix=prefix, suffix=suffix)
    self.metric_name_mean = f"{self.prefix}accuracy{self.suffix}"
    self.metric_name_std = f"{self.prefix}accuracy{self.suffix}/std"
    self.topk_args: List[int] = topk_args or get_default_topk_args(num_classes)
    self.additive_metrics: List[AdditiveValueMetric] = [
        AdditiveValueMetric() for _ in range(len(self.topk_args))
    ]
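Both constructors above keep one AdditiveValueMetric per entry in ``topk_args``. As an illustration of the top-k idea only (not the actual AccuracyMetric.update implementation), a per-batch top-k accuracy can be computed like this:

import torch

def topk_accuracy(logits: torch.Tensor, targets: torch.Tensor, k: int) -> float:
    # logits: [batch_size, num_classes], targets: [batch_size]
    topk = logits.topk(k, dim=1).indices                 # [batch_size, k]
    correct = (topk == targets.unsqueeze(1)).any(dim=1)  # [batch_size]
    return correct.float().mean().item()

logits = torch.tensor([[0.1, 0.9], [0.8, 0.2]])
targets = torch.tensor([1, 1])
print(topk_accuracy(logits, targets, k=1))  # 0.5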
Example #7
class CriterionCallback(ICriterionCallback):
    """Criterion callback, abstraction over criterion step.

    Args:
        input_key: key in ``runner.batch`` to use as the criterion input
            (typically the model output)
        target_key: key in ``runner.batch`` to use as the criterion target
        metric_key: prefix for metrics and output key for loss
            in ``runner.batch_metrics`` dictionary
        criterion_key: A key to take a criterion in case
            there are several of them and they are in a dictionary format.
    """
    def __init__(
        self,
        input_key: str,
        target_key: str,
        metric_key: str,
        criterion_key: str = None,
    ):
        """Init."""
        super().__init__(order=CallbackOrder.metric, node=CallbackNode.all)
        self.input_key = input_key
        self.target_key = target_key
        self.metric_key = metric_key
        self.criterion_key = criterion_key
        self.additive_metric = AdditiveValueMetric()
        self.criterion = None

    def on_stage_start(self, runner: "IRunner"):
        """Checks that the current stage has correct criterion.

        Args:
            runner: current runner
        """
        self.criterion = get_attr(runner,
                                  key="criterion",
                                  inner_key=self.criterion_key)
        assert self.criterion is not None

    def on_loader_start(self, runner: "IRunner") -> None:
        """Event handler."""
        self.additive_metric.reset()

    def on_batch_end(self, runner: "IRunner"):
        """Event handler."""
        inputs, targets = runner.batch[self.input_key], runner.batch[
            self.target_key]

        # NOTE: similar to amp guides in docs
        # https://pytorch.org/docs/stable/notes/amp_examples.html
        # with runner.engine.autocast():
        loss = self.criterion(inputs, targets)

        runner.batch_metrics.update({self.metric_key: loss})
        self.additive_metric.update(loss.detach().cpu(), len(targets))

    def on_loader_end(self, runner: "IRunner") -> None:
        """Event handler."""
        mean, std = self.additive_metric.compute()
        metrics = {self.metric_key: mean, f"{self.metric_key}/std": std}
        metrics = {
            k: runner.engine.sync_tensor(torch.tensor(v, device=runner.device),
                                         "mean")
            for k, v in metrics.items()
        }
        runner.loader_metrics.update(metrics)
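A minimal construction sketch for the callback above; "logits" and "targets" are placeholder batch keys chosen for illustration, not keys mandated by the callback:

callback = CriterionCallback(
    input_key="logits",    # assumed key with model outputs in runner.batch
    target_key="targets",  # assumed key with ground-truth values in runner.batch
    metric_key="loss",     # loss is logged under this key in runner.batch_metrics
    criterion_key=None,    # single criterion, no dictionary lookup needed
)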
Example #8
class FunctionalBatchMetric(ICallbackBatchMetric):
    """Class for custom metrics in a functional way.

    Args:
        metric_fn: metric function that takes outputs and targets and returns a score as ``torch.Tensor``
        metric_key: metric name
        compute_on_call: Computes and returns metric value during metric call.
            Used for per-batch logging. default: True
        prefix: metric prefix
        suffix: metric suffix

    .. note::

        Loader metrics are calculated as the average over all batch metrics.

    Examples:

    .. code-block:: python

        import torch
        from catalyst import metrics
        import sklearn.metrics

        outputs = torch.tensor([1, 0, 2, 1])
        targets = torch.tensor([3, 0, 2, 2])

        metric = metrics.FunctionalBatchMetric(
            metric_fn=sklearn.metrics.accuracy_score,
            metric_key="sk_accuracy",
        )
        metric.reset()

        metric.update(batch_size=len(outputs), y_pred=outputs, y_true=targets)
        metric.compute()
        # (0.5, 0.0)  # mean, std

        metric.compute_key_value()
        # {'sk_accuracy': 0.5, 'sk_accuracy/mean': 0.5, 'sk_accuracy/std': 0.0}

    """

    def __init__(
        self,
        metric_fn: Callable,
        metric_key: str,
        compute_on_call: bool = True,
        prefix: str = None,
        suffix: str = None,
    ):
        """Init"""
        super().__init__(compute_on_call=compute_on_call, prefix=prefix, suffix=suffix)
        self.metric_fn = metric_fn
        self.metric_name = f"{self.prefix}{metric_key}{self.suffix}"
        self.additive_metric = AdditiveValueMetric()

    def reset(self):
        """Reset all statistics"""
        self.additive_metric.reset()

    def update(self, batch_size: int, *args, **kwargs) -> torch.Tensor:
        """
        Calculate metric and update average metric

        Args:
            batch_size: current batch size for metric statistics aggregation
            *args: args for metric_fn
            **kwargs: kwargs for metric_fn

        Returns:
            custom metric
        """
        value = self.metric_fn(*args, **kwargs)
        self.additive_metric.update(float(value), batch_size)
        return value

    def update_key_value(self, batch_size: int, *args, **kwargs) -> Dict[str, torch.Tensor]:
        """
        Calculate metric and update average metric

        Args:
            batch_size: current batch size for metric statistics aggregation
            *args: args for metric_fn
            **kwargs: kwargs for metric_fn

        Returns:
            Dict with a single element: the custom metric
        """
        value = self.update(batch_size, *args, **kwargs)
        return {f"{self.metric_name}": value}

    def compute(self) -> torch.Tensor:
        """
        Get metric average over all examples

        Returns:
            custom metric
        """
        return self.additive_metric.compute()

    def compute_key_value(self) -> Dict[str, torch.Tensor]:
        """
        Get metric average over all examples

        Returns:
            Dict with a single element: the custom metric
        """
        mean, std = self.compute()
        return {
            self.metric_name: mean,
            f"{self.metric_name}/mean": mean,
            f"{self.metric_name}/std": std,
        }
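Following the pattern from the docstring example above, the wrapper can take any callable; sklearn.metrics.f1_score with a macro average is used here purely as an illustration:

import functools
import sklearn.metrics
import torch
from catalyst import metrics

f1_metric = metrics.FunctionalBatchMetric(
    metric_fn=functools.partial(sklearn.metrics.f1_score, average="macro"),
    metric_key="sk_f1_macro",
)
f1_metric.reset()
outputs = torch.tensor([1, 0, 2, 1])
targets = torch.tensor([3, 0, 2, 2])
f1_metric.update(batch_size=len(outputs), y_pred=outputs, y_true=targets)
print(f1_metric.compute_key_value())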