Example #1

import torch
from torch import Tensor

# Assumed imports: the paths below follow Avalanche's evaluation package
# layout and may need adjusting for your version of the library.
from avalanche.evaluation import Metric
from avalanche.evaluation.metrics.mean import Mean

class Loss(Metric[float]):
    """
    The standalone Loss metric. This is a general metric
    used to compute more specific ones.

    Instances of this metric keep the running average loss
    over multiple minibatches, provided incrementally as
    <loss, number of patterns> pairs through the `update` method.

    Each time `result` is called, this metric emits the average loss
    per pattern across all minibatches seen since the last `reset`.

    The reset method will bring the metric to its initial state. By default
    this metric in its initial state will return a loss value of 0.
    """
    def __init__(self):
        """
        Creates an instance of the loss metric.

        By default this metric in its initial state will return a loss
        value of 0. The metric can be updated by using the `update` method
        while the running loss can be retrieved using the `result` method.
        """
        self._mean_loss = Mean()

    @torch.no_grad()
    def update(self, loss: Tensor, patterns: int) -> None:
        """
        Update the running loss given the loss Tensor and the minibatch size.

        :param loss: The loss Tensor. Different reduction types don't affect
            the result.
        :param patterns: The number of patterns in the minibatch.
        :return: None.
        """
        self._mean_loss.update(torch.mean(loss), weight=patterns)

    def result(self) -> float:
        """
        Retrieves the running average loss per pattern.

        Calling this method will not change the internal state of the metric.

        :return: The running loss, as a float.
        """
        return self._mean_loss.result()

    def reset(self) -> None:
        """
        Resets the metric.

        :return: None.
        """
        self._mean_loss.reset()
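

# A minimal usage sketch for the standalone metric above; the criterion,
# logits and targets below are made up for illustration.
criterion = torch.nn.CrossEntropyLoss()
loss_metric = Loss()

logits = torch.randn(8, 10)           # hypothetical minibatch of 8 patterns
targets = torch.randint(0, 10, (8,))

loss = criterion(logits, targets)
loss_metric.update(loss, patterns=len(targets))

print(loss_metric.result())  # running average loss per pattern
loss_metric.reset()          # back to the initial state: result() returns 0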
Example #2

from typing import TYPE_CHECKING

# Assumed imports: the paths below follow Avalanche's evaluation package
# layout and may need adjusting for your version of the library.
from avalanche.evaluation import PluginMetric
from avalanche.evaluation.metric_results import MetricResult, MetricValue
from avalanche.evaluation.metric_utils import get_metric_name
from avalanche.evaluation.metrics.mean import Mean
from avalanche.evaluation.metrics.timing import ElapsedTime

if TYPE_CHECKING:
    from avalanche.training.strategies import BaseStrategy

class RunningEpochTime(PluginMetric[float]):
    """
    The running epoch time metric.
    This plugin metric only works at training time.

    For each iteration, this metric logs the running average of the
    per-iteration time measured since the start of the epoch.
    """
    def __init__(self):
        """
        Creates an instance of the running epoch time metric.
        """
        super().__init__()

        self._time_mean = Mean()
        self._epoch_time = ElapsedTime()

    def before_training_epoch(self, strategy) -> MetricResult:
        # Restart the running average and take the epoch's first timestamp.
        self.reset()
        self._epoch_time.update()

    def before_training_iteration(self, strategy: 'BaseStrategy') \
            -> None:
        # Timestamp taken right before the iteration starts.
        self._epoch_time.update()

    def after_training_iteration(self, strategy: 'BaseStrategy') \
            -> MetricResult:
        super().after_training_iteration(strategy)
        # Closing timestamp: fold this iteration's elapsed time into the
        # running average, then restart the timer for the next iteration.
        self._epoch_time.update()
        self._time_mean.update(self._epoch_time.result())
        self._epoch_time.reset()
        return self._package_result(strategy)

    def reset(self) -> None:
        self._epoch_time.reset()
        self._time_mean.reset()

    def result(self) -> float:
        return self._time_mean.result()

    def _package_result(self, strategy: 'BaseStrategy') -> MetricResult:
        average_epoch_time = self.result()

        metric_name = get_metric_name(self, strategy)
        plot_x_position = self.get_global_counter()

        return [
            MetricValue(self, metric_name, average_epoch_time, plot_x_position)
        ]

    def __str__(self):
        return "RunningTime_Epoch"
Example #3

from typing import TYPE_CHECKING

# Assumed imports: TimePluginMetric is taken to be the time-metric base
# class defined alongside ElapsedTime in Avalanche's evaluation package.
from avalanche.evaluation.metric_results import MetricResult
from avalanche.evaluation.metrics.mean import Mean
from avalanche.evaluation.metrics.timing import TimePluginMetric

if TYPE_CHECKING:
    from avalanche.training.strategies import BaseStrategy

class RunningEpochTime(TimePluginMetric):
    """
    The running epoch time metric.
    This plugin metric only works at training time.

    For each iteration, this metric logs the running average of the
    per-iteration time measured since the start of the epoch.
    """

    def __init__(self):
        """
        Creates an instance of the running epoch time metric.
        """
        self._time_mean = Mean()

        super(RunningEpochTime, self).__init__(
            reset_at='epoch', emit_at='iteration', mode='train')

    def before_training_epoch(self, strategy) -> MetricResult:
        super().before_training_epoch(strategy)
        self._time_mean.reset()
        # `_time` is the timer inherited from TimePluginMetric.
        self._time.update()

    def after_training_iteration(self, strategy: 'BaseStrategy') \
            -> MetricResult:
        super().after_training_iteration(strategy)
        # Fold this iteration's elapsed time into the running average,
        # then restart the timer for the next iteration.
        self._time_mean.update(self._time.result())
        self._time.reset()
        return self._package_result(strategy)

    def result(self) -> float:
        return self._time_mean.result()

    def __str__(self):
        return "RunningTime_Epoch"
Example #4

import torch
from torch import Tensor

# Assumed imports: the paths below follow Avalanche's evaluation package
# layout and may need adjusting for your version of the library.
from avalanche.evaluation import Metric
from avalanche.evaluation.metrics.mean import Mean

class Accuracy(Metric[float]):
    """
    The Accuracy metric. This is a standalone metric
    used to compute more specific ones.

    Instances of this metric keep the running average accuracy
    over multiple <prediction, target> pairs of Tensors,
    provided incrementally.
    The "prediction" and "target" tensors may contain plain labels or
    one-hot/logit vectors.

    Each time `result` is called, this metric emits the average accuracy
    across all predictions made since the last `reset`.

    The reset method will bring the metric to its initial state. By default
    this metric in its initial state will return an accuracy value of 0.
    """

    def __init__(self):
        """
        Creates an instance of the standalone Accuracy metric.

        By default this metric in its initial state will return an accuracy
        value of 0. The metric can be updated by using the `update` method
        while the running accuracy can be retrieved using the `result` method.
        """

        self._mean_accuracy = Mean()
        """
        The mean utility that will be used to store the running accuracy.
        """

    @torch.no_grad()
    def update(self, predicted_y: Tensor, true_y: Tensor) -> None:
        """
        Update the running accuracy given the true and predicted labels.

        :param predicted_y: The model prediction. Both labels and logit vectors
            are supported.
        :param true_y: The ground truth. Both labels and one-hot vectors
            are supported.
        :return: None.
        """
        if len(true_y) != len(predicted_y):
            raise ValueError('Size mismatch for true_y and predicted_y tensors')

        true_y = torch.as_tensor(true_y)
        predicted_y = torch.as_tensor(predicted_y)

        # Check if logits or labels
        if len(predicted_y.shape) > 1:
            # Logits -> transform to labels
            predicted_y = torch.max(predicted_y, 1)[1]

        if len(true_y.shape) > 1:
            # One-hot vector -> transform to labels
            true_y = torch.max(true_y, 1)[1]

        correct_predictions = float(torch.sum(torch.eq(predicted_y, true_y)))
        total_patterns = len(true_y)

        self._mean_accuracy.update(correct_predictions / total_patterns,
                                   total_patterns)

    def result(self) -> float:
        """
        Retrieves the running accuracy.

        Calling this method will not change the internal state of the metric.

        :return: The running accuracy, as a float value between 0 and 1.
        """
        return self._mean_accuracy.result()

    def reset(self) -> None:
        """
        Resets the metric.

        :return: None.
        """
        self._mean_accuracy.reset()
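

# A minimal usage sketch, with made-up logits and labels:
metric = Accuracy()

logits = torch.tensor([[0.1, 0.9],
                       [0.8, 0.2],
                       [0.3, 0.7]])   # hypothetical model output
labels = torch.tensor([1, 0, 0])

metric.update(logits, labels)
print(metric.result())  # 2 correct out of 3 -> 0.666...
metric.reset()          # back to the initial state: result() returns 0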