def test_meter_get_set_classy_state_test(self):
        # In this test we update meter0 with model_output0 & target0
        # and we update meter1 with model_output1 & target1 then
        # transfer the state from meter1 to meter0 and validate they
        # give same expected value.
        #
        # Expected value is the expected value of meter1 For this test
        # to work, top-1 / top-2 values of meter0 / meter1 should be
        # different
        meters = [
            PrecisionAtKMeter(topk=[1, 2]),
            PrecisionAtKMeter(topk=[1, 2])
        ]
        model_outputs = [
            torch.tensor([[0.05, 0.4, 0.05], [0.2, 0.65, 0.15],
                          [0.33, 0.33, 0.34]]),
            torch.tensor([[0.05, 0.4, 0.05], [0.15, 0.65, 0.2],
                          [0.4, 0.2, 0.4]]),
        ]
        targets = [
            torch.tensor([[0, 1, 0], [1, 0, 0], [1, 1, 0]]),
            torch.tensor([[0, 1, 0], [0, 1, 0], [0, 1, 0]]),
        ]

        # Second update's expected value
        expected_value = {"top_1": 2 / 3.0, "top_2": 2 / 6.0}

        self.meter_get_set_classy_state_test(meters, model_outputs, targets,
                                             expected_value)
    def test_double_meter_update_and_reset(self):
        """Two consecutive updates on one meter, then reset."""
        meter = PrecisionAtKMeter(topk=[1, 2])

        # Two batches of size 3 over 3 classes; each row is a probability
        # distribution over the classes.
        model_outputs = [
            torch.tensor(
                [[0.3, 0.4, 0.3], [0.2, 0.65, 0.15], [0.33, 0.33, 0.34]]
            ),
            torch.tensor(
                [[0.05, 0.4, 0.05], [0.15, 0.65, 0.2], [0.4, 0.2, 0.4]]
            ),
        ]

        # One-hot targets (1 marks a positive class).
        # batch-1: sample-1: 1, sample-2: 0, sample-3: 0,1,2
        # batch-2: sample-1: 1, sample-2: 1, sample-3: 1
        targets = [
            torch.tensor([[0, 1, 0], [1, 0, 0], [1, 1, 1]]),
            torch.tensor([[0, 1, 0], [0, 1, 0], [0, 1, 0]]),
        ]

        # Batch 1: top-1 = 2/3, top-2 = 4/6.
        # Batch 2: top-1 = 2/3, top-2 = 2/6.
        # Combined totals over both batches:
        expected_value = {"top_1": 4 / 6.0, "top_2": 6 / 12.0}

        self.meter_update_and_reset_test(
            meter, model_outputs, targets, expected_value
        )
    def test_non_onehot_target(self):
        """
        This test verifies that the meter works as expected on a single
        update + reset + same single update.
        """
        meter = PrecisionAtKMeter(
            topk=[1, 2], target_is_one_hot=False, num_classes=3
        )

        # Two batches of size 3 over 3 classes; each row is a probability
        # distribution over the classes.
        model_outputs = [
            torch.tensor(
                [[0.05, 0.4, 0.05], [0.15, 0.65, 0.2], [0.4, 0.2, 0.4]]
            ),
            torch.tensor(
                [[0.2, 0.4, 0.4], [0.2, 0.65, 0.15], [0.1, 0.8, 0.1]]
            ),
        ]

        # Class-index targets; the equivalent one-hot rows are noted inline.
        targets = [
            torch.tensor([[1], [1], [1]]),  # [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
            torch.tensor([[0], [1], [2]]),  # [[1, 0, 0], [0, 1, 0], [0, 0, 1]]
        ]

        # Ties are broken randomly, so the inputs avoid ambiguous ties.
        # Batch 1: top-1 = 2/3, top-2 = 2/6.
        # Batch 2: top-1 = 1/3, top-2 = 1/6.
        expected_value = {"top_1": 3 / 6.0, "top_2": 3 / 12.0}

        self.meter_update_and_reset_test(
            meter, model_outputs, targets, expected_value
        )
    def test_meter_invalid_model_output(self):
        """A 3-dimensional score tensor (expected: 2-d) must be rejected."""
        meter = PrecisionAtKMeter(topk=[1, 2])
        model_output = torch.tensor(
            [[[0.33, 0.33, 0.34], [1, 2, 3]], [[-1, -3, -4], [-10, -90, -100]]]
        )
        target = torch.tensor([[0, 1, 0], [1, 0, 0], [1, 1, 1]])

        self.meter_invalid_meter_input_test(meter, model_output, target)
    def test_meter_invalid_topk(self):
        """A topk of 5 with only 3 classes available must be rejected."""
        meter = PrecisionAtKMeter(topk=[1, 5])
        model_output = torch.tensor(
            [
                [0.2, 0.4, 0.4],  # top-1: 1/2, top-2: 1/2
                [0.2, 0.65, 0.15],  # top-1: 1, top-2: 1/0
                [0.33, 0.33, 0.34],  # top-1: 2, top-2: 2/0/1
            ]
        )
        target = torch.tensor([[0, 1, 0], [1, 0, 0], [1, 1, 1]])

        self.meter_invalid_meter_input_test(meter, model_output, target)
    def test_meter_distributed(self):
        """Two meters on two processes must aggregate to shared values."""
        # Meter 0 runs on one process, meter 1 on the other; the entries
        # in model_outputs / targets alternate between them.
        meters = [
            PrecisionAtKMeter(topk=[1, 2]),
            PrecisionAtKMeter(topk=[1, 2]),
        ]

        # Batches of size 3 over 3 classes; each row is a probability
        # distribution over the classes.
        model_outputs = [
            torch.tensor(
                [[0.3, 0.4, 0.3], [0.2, 0.65, 0.15], [0.33, 0.33, 0.34]]
            ),  # Meter 0
            torch.tensor(
                [[0.05, 0.4, 0.05], [0.15, 0.65, 0.2], [0.4, 0.2, 0.4]]
            ),  # Meter 1
            torch.tensor(
                [[0.3, 0.4, 0.3], [0.2, 0.65, 0.15], [0.33, 0.33, 0.34]]
            ),  # Meter 0
            torch.tensor(
                [[0.05, 0.4, 0.05], [0.15, 0.65, 0.2], [0.4, 0.2, 0.4]]
            ),  # Meter 1
        ]

        # One-hot targets (1 marks a positive class for the sample).
        targets = [
            torch.tensor([[0, 1, 0], [1, 0, 0], [1, 1, 1]]),  # Meter 0
            torch.tensor([[0, 1, 0], [0, 1, 0], [0, 1, 0]]),  # Meter 1
            torch.tensor([[0, 1, 0], [1, 0, 0], [1, 1, 1]]),  # Meter 0
            torch.tensor([[0, 1, 0], [0, 1, 0], [0, 1, 0]]),  # Meter 1
        ]

        # The first pair of updates yields 4 correct top-1 and 6 correct
        # top-2 predictions; the second pair repeats this, doubling totals.
        expected_values = [
            # After one update to each meter
            {"top_1": 4 / 6.0, "top_2": 6 / 12.0},
            # After two updates to each meter
            {"top_1": 8 / 12.0, "top_2": 12 / 24.0},
        ]

        self.meter_distributed_test(
            meters, model_outputs, targets, expected_values
        )
    def test_meter_invalid_target(self):
        """A target whose shape does not match the scores must be rejected."""
        meter = PrecisionAtKMeter(topk=[1, 2])
        model_output = torch.tensor(
            [
                [0.2, 0.4, 0.4],  # top-1: 1/2, top-2: 1/2
                [0.2, 0.65, 0.15],  # top-1: 1, top-2: 1/0
                [0.33, 0.33, 0.34],  # top-1: 2, top-2: 2/0/1
            ]
        )
        # 1-d class indices rather than the 2-d matrix the scores imply.
        target = torch.tensor([0, 1, 2])

        self.meter_invalid_meter_input_test(meter, model_output, target)
    # Example #8 (snippet separator from the original source aggregation)
    def __init__(self, num_meters: int, topk_values: List[int], meter_names: List[str]):
        """Create ``num_meters`` PrecisionAtKMeter instances sharing ``topk_values``.

        Args:
            num_meters: number of underlying meters to create; must be > 0.
            topk_values: non-empty list of k values, each >= 1.
            meter_names: names associated with the meters for reporting.
        """
        super().__init__()

        assert is_pos_int(num_meters), "num_meters must be positive"
        assert isinstance(topk_values, list), "topk_values must be a list"
        assert len(topk_values) > 0, "topk_values list should have at least one element"
        # Bug fix: the original asserted on the list comprehension itself,
        # which is truthy for any non-empty list, so invalid k values were
        # never caught. ``all`` checks each element.
        assert all(
            is_pos_int(x) for x in topk_values
        ), "each value in topk_values must be >= 1"
        self._num_meters = num_meters
        self._topk_values = topk_values
        self._meters = [
            PrecisionAtKMeter(self._topk_values) for _ in range(self._num_meters)
        ]
        self._meter_names = meter_names
        self.reset()
    # Example #9 (snippet separator from the original source aggregation)
    def test_meter_fp16(self):
        """
        This test verifies that the meter works if the input tensor is fp16.
        """
        meter = PrecisionAtKMeter(topk=[1, 2])

        # Batch of size 3 over 3 classes; each row is a probability
        # distribution over the classes, cast to half precision.
        model_output = torch.tensor(
            [
                [0.2, 0.4, 0.4],  # top-1: 1/2, top-2: 1/2
                [0.2, 0.65, 0.15],  # top-1: 1, top-2: 1/0
                [0.33, 0.33, 0.34],  # top-1: 2, top-2: 2/0?1
            ]
        ).half()

        # One-hot targets (1 marks a positive class), also fp16.
        # sample-1: 1, sample-2: 0, sample-3: 0,1,2
        target = torch.tensor([[0, 1, 0], [1, 0, 0], [1, 1, 1]]).half()

        # Ties are broken randomly, so the inputs avoid ambiguous ties.
        expected_value = {"top_1": 2 / 3.0, "top_2": 4 / 6.0}

        self.meter_update_and_reset_test(
            meter, model_output, target, expected_value
        )