import numpy as np
import torch

# import path may vary across catalyst versions
from catalyst.metrics.functional import mean_average_precision


def test_mean_avg_precision():
    """
    Tests for catalyst.mean_avg_precision metric.
    """
    # check 1
    # Example from the Stanford Introduction to Information Retrieval primer
    y_pred1 = np.arange(9, -1, -1)
    y_true1 = [1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0]
    y_pred2 = np.arange(9, -1, -1)
    y_true2 = [0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0]

    y_pred_torch = torch.Tensor([y_pred1, y_pred2])
    y_true_torch = torch.Tensor([y_true1, y_true2])

    top_k = [10]
    map_at10 = mean_average_precision(y_pred_torch, y_true_torch, top_k)[0]
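    # Hand check: AP1 = (1 + 2/3 + 3/6 + 4/9 + 5/10) / 5 ≈ 0.6222 and
    # AP2 = (1/2 + 2/5 + 3/7) / 3 ≈ 0.4429, so MAP@10 ≈ 0.5325.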

    assert np.allclose(map_at10, 0.5325, atol=1e-3)

    # check 2
    # map_at1: (1.0 + 0.0) / 2 = 0.5
    # map_at3: ((1 + 0.67)/2 + 0.5) / 2 = 0.6675
    # map_at5: ((1 + 0.67)/2 + (0.5 + 0.4)/2) / 2 = 0.6425
    # map_at10: ((1 + 0.67 + 0.5 + 0.44 + 0.5)/5 + (0.5 + 0.4 + 0.43)/3 ) / 2  = 0.53

    y_pred1 = [9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
    y_pred2 = [9, 8, 7, 6, 5, 4, 3, 2, 1, 0]

    y_true1 = [1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0]
    y_true2 = [0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0]

    y_pred_torch = torch.Tensor([y_pred1, y_pred2])
    y_true_torch = torch.Tensor([y_true1, y_true2])

    top_k = [1, 3, 5, 10]

    map_k = mean_average_precision(y_pred_torch, y_true_torch, top_k)

    map_at1 = map_k[0]
    map_at3 = map_k[1]
    map_at5 = map_k[2]
    map_at10 = map_k[3]

    assert np.allclose(map_at1, 0.5, atol=1e-3)
    assert np.allclose(map_at3, 0.6675, atol=1e-3)
    assert np.allclose(map_at5, 0.6425, atol=1e-3)
    assert np.allclose(map_at10, 0.5325, atol=1e-3)
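
For reference, here is a small standalone sketch (pure numpy; the helper name is illustrative, not part of catalyst) that recomputes MAP@10 for the two queries above by averaging precision at each relevant rank:

import numpy as np

def average_precision_at_k(y_true, k):
    """AP@k for a result list already sorted by descending score."""
    y_true = np.asarray(y_true)[:k]
    hits = np.cumsum(y_true)               # relevant docs seen so far
    ranks = np.arange(1, len(y_true) + 1)  # 1-based rank positions
    precisions = hits / ranks              # precision at every rank
    n_relevant = y_true.sum()
    # average precision only over the ranks that hold a relevant document
    return (precisions * y_true).sum() / n_relevant if n_relevant else 0.0

ap1 = average_precision_at_k([1, 0, 1, 0, 0, 1, 0, 0, 1, 1], k=10)  # ~0.6222
ap2 = average_precision_at_k([0, 1, 0, 0, 1, 0, 1, 0, 0, 0], k=10)  # ~0.4429
print((ap1 + ap2) / 2)  # ~0.5325, matching the assert above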
Example #2
    def update(self, logits: torch.Tensor,
               targets: torch.Tensor) -> List[float]:
        """
        Update the metric with MAP@k for new data and return the intermediate metric values.

        Args:
            logits (torch.Tensor): tensor of logits
            targets (torch.Tensor): tensor of targets

        Returns:
            list of map@k values
        """
        values = mean_average_precision(logits, targets, topk=self.topk_args)
        values = [v.item() for v in values]
        for value, metric in zip(values, self.additive_metrics):
            metric.update(value, len(targets))
        return values
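
A minimal usage sketch for the update method above, assuming it belongs to a metric class such as catalyst's MAPMetric constructed with topk_args (the class name and import path are assumptions and may differ across catalyst versions):

import torch
from catalyst.metrics import MAPMetric  # assumed import path

# accumulate map@1 and map@3 over batches
metric = MAPMetric(topk_args=[1, 3])
logits = torch.Tensor([[9.0, 8.0, 7.0], [7.0, 8.0, 9.0]])
targets = torch.Tensor([[1.0, 0.0, 0.0], [0.0, 0.0, 1.0]])
batch_values = metric.update(logits, targets)  # per-batch map@1, map@3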