Example #1
 def test_lrp_simple_attributions(self) -> None:
     model, inputs = _get_simple_model()
     model.eval()
     model.linear.rule = EpsilonRule()  # type: ignore
     model.linear2.rule = EpsilonRule()  # type: ignore
     lrp = LRP(model)
     relevance = lrp.attribute(inputs)
     assertTensorAlmostEqual(self, relevance,
                             torch.tensor([18.0, 36.0, 54.0]))
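These snippets come from Captum's LRP unit tests and are shown without their surrounding imports and helpers. The sketch below is a minimal, self-contained reconstruction of what they assume; the weights are inferred from the expected attributions (all first-layer weights 2.0 and all second-layer weights 3.0, so the input [1.0, 2.0, 3.0] produces 12.0 at each hidden unit and 108.0 at the output, which the epsilon rule redistributes over the inputs as [18.0, 36.0, 54.0]). The exact helpers in the Captum repository may differ in detail, and assertTensorAlmostEqual here is a hypothetical stand-in for the test suite's comparison utility.

import torch
import torch.nn as nn
from torch import Tensor
from captum.attr import LRP, LayerLRP
from captum.attr._utils.lrp_rules import EpsilonRule, IdentityRule


class SimpleLRPModel(nn.Module):
    # Two bias-free linear layers with constant weights and a ReLU in
    # between; dropout is inactive after model.eval(), which is why every
    # test calls eval() before attributing.
    def __init__(self, inplace: bool = False) -> None:
        super().__init__()
        self.linear = nn.Linear(3, 3, bias=False)
        self.linear.weight.data.fill_(2.0)
        self.relu = nn.ReLU(inplace=inplace)
        self.linear2 = nn.Linear(3, 1, bias=False)
        self.linear2.weight.data.fill_(3.0)
        self.dropout = nn.Dropout(p=0.01)

    def forward(self, x: Tensor) -> Tensor:
        return self.dropout(self.linear2(self.relu(self.linear(x))))


def _get_simple_model(inplace: bool = False):
    model = SimpleLRPModel(inplace)
    inputs = torch.tensor([[1.0, 2.0, 3.0]])
    return model, inputs


def assertTensorAlmostEqual(test, actual, expected, delta=1e-4):
    # Stand-in for Captum's test helper of the same name: compares values
    # while being lenient about the exact tensor shape.
    expected = torch.as_tensor(expected, dtype=actual.dtype)
    test.assertTrue(torch.allclose(actual.flatten(), expected.flatten(), atol=delta))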
Example #2
 def test_lrp_simple_attributions_all_layers(self):
     model, inputs = _get_simple_model(inplace=False)
     model.eval()
     model.linear.rule = EpsilonRule()
     model.linear2.rule = EpsilonRule()
     layers = [model.linear, model.linear2]
     lrp = LayerLRP(model, layers)
     relevance = lrp.attribute(inputs, attribute_to_layer_input=True)
     self.assertEqual(len(relevance), 2)
     assertTensorAlmostEqual(self, relevance[0][0],
                             torch.tensor([[[18.0, 36.0, 54.0]]]))
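LayerLRP accepts a single layer or a list of layers; given a list, attribute returns one result per layer in the order passed. With attribute_to_layer_input=True each per-layer result can itself be a tuple of tensors (one per layer input), hence the relevance[0][0] indexing: it is the relevance arriving at the input of model.linear, which for this model coincides with the input attribution [18.0, 36.0, 54.0].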
Example #3
 def test_lrp_simple_inplaceReLU(self) -> None:
     model_default, inputs = _get_simple_model()
     model_inplace, _ = _get_simple_model(inplace=True)
     for model in [model_default, model_inplace]:
         model.eval()
         model.linear.rule = EpsilonRule()  # type: ignore
         model.linear2.rule = EpsilonRule()  # type: ignore
     lrp_default = LRP(model_default)
     lrp_inplace = LRP(model_inplace)
     relevance_default = lrp_default.attribute(inputs)
     relevance_inplace = lrp_inplace.attribute(inputs)
     assertTensorAlmostEqual(self, relevance_default, relevance_inplace)
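With inplace=True the ReLU overwrites its input buffer during the forward pass; this test checks that Captum's LRP hooks still record the activations they need, so the in-place and out-of-place models yield identical attributions.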
Example #4
 def test_lrp_simple_attributions(self):
     model, inputs = _get_simple_model(inplace=False)
     model.eval()
     model.linear.rule = EpsilonRule()
     model.linear2.rule = EpsilonRule()
     lrp_upper = LayerLRP(model, model.linear2)
     relevance_upper, delta = lrp_upper.attribute(
         inputs,
         attribute_to_layer_input=True,
         return_convergence_delta=True)
     lrp_lower = LayerLRP(model, model.linear)
     relevance_lower = lrp_lower.attribute(inputs)
     assertTensorAlmostEqual(self, relevance_lower[0], relevance_upper[0])
     self.assertEqual(delta.item(), 0)
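delta is the convergence diagnostic: LRP is conservative, so the total relevance should match the output score being explained, and delta reports the deviation. For this small model with the epsilon rule the deviation vanishes to float precision, which is why the test can assert an exact zero rather than a tolerance.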
Example #5
 def test_lrp_simple_attributions_batch(self) -> None:
     model, inputs = _get_simple_model()
     model.eval()
     model.linear.rule = EpsilonRule()  # type: ignore
     model.linear2.rule = EpsilonRule()  # type: ignore
     lrp = LRP(model)
     inputs = torch.cat((inputs, 3 * inputs))
     relevance, delta = lrp.attribute(inputs,
                                      target=0,
                                      return_convergence_delta=True)
     self.assertEqual(relevance.shape, inputs.shape)  # type: ignore
     self.assertEqual(delta.shape[0], inputs.shape[0])  # type: ignore
     assertTensorAlmostEqual(
         self, relevance,
         torch.Tensor([[18.0, 36.0, 54.0], [54.0, 108.0, 162.0]]))
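The concatenated batch makes the second example exactly 3 * the first. Every layer here is bias-free and the ReLU stays in its positive linear region, so both the output and the epsilon-rule attributions scale linearly with the input: the expected second row is 3 * [18.0, 36.0, 54.0]. Note that delta comes back with one entry per example in the batch.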
Example #6
 def test_lrp_simple_inplaceReLU(self):
     model_default, inputs = _get_simple_model()
     model_inplace, _ = _get_simple_model(inplace=True)
     for model in [model_default, model_inplace]:
         model.eval()
         model.linear.rule = EpsilonRule()
         model.linear2.rule = EpsilonRule()
     lrp_default = LayerLRP(model_default, model_default.linear2)
     lrp_inplace = LayerLRP(model_inplace, model_inplace.linear2)
     relevance_default = lrp_default.attribute(
         inputs, attribute_to_layer_input=True)
     relevance_inplace = lrp_inplace.attribute(
         inputs, attribute_to_layer_input=True)
     assertTensorAlmostEqual(self, relevance_default[0],
                             relevance_inplace[0])
Example #7
 def test_lrp_skip_connection(self) -> None:
     # A custom addition module needs to be used so that relevance is
     # propagated correctly.
     class Addition_Module(nn.Module):
         def __init__(self) -> None:
             super().__init__()

         def forward(self, x1: Tensor, x2: Tensor) -> Tensor:
             return x1 + x2

     class SkipConnection(nn.Module):
         def __init__(self) -> None:
             super().__init__()
             self.linear = nn.Linear(2, 2, bias=False)
             self.linear.weight.data.fill_(5)
             self.add = Addition_Module()

         def forward(self, input: Tensor) -> Tensor:
             x = self.add(self.linear(input), input)
             return x

     model = SkipConnection()
     input = torch.Tensor([[2, 3]])
     model.add.rule = EpsilonRule()  # type: ignore
     lrp = LRP(model)
     relevance = lrp.attribute(input, target=1)
     assertTensorAlmostEqual(self, relevance, torch.Tensor([[10, 18]]))
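Worked through by hand: linear maps [2, 3] to [25, 25] (every weight is 5 and the input sums to 5), and the skip connection adds the raw input, giving [27, 28], so target=1 explains the score 28. The epsilon rule at the addition splits the 28 proportionally between its summands: 25 goes to the linear branch and 3 to the skip. The linear branch then redistributes its 25 over the inputs as [10, 15] (proportional to 2*5 and 3*5), and the skip's 3 lands on the second input, for a total of [10, 15 + 3] = [[10, 18]]. Wrapping the addition in a module is necessary because LRP attaches its propagation rules to modules; a bare x1 + x2 inside forward could not be assigned a rule.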
Example #8
 def test_lrp_simple_attributions_all_layers_delta(self):
     model, inputs = _get_simple_model(inplace=False)
     model.eval()
     model.linear.rule = EpsilonRule()
     model.linear2.rule = EpsilonRule()
     layers = [model.linear, model.linear2]
     lrp = LayerLRP(model, layers)
     inputs = torch.cat((inputs, 2 * inputs))
     relevance, delta = lrp.attribute(inputs,
                                      attribute_to_layer_input=True,
                                      return_convergence_delta=True)
     self.assertEqual(len(relevance), len(delta))
     assertTensorAlmostEqual(
         self,
         relevance[0],
         torch.tensor([[18.0, 36.0, 54.0], [36.0, 72.0, 108.0]]),
     )
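With a list of layers and return_convergence_delta=True, the deltas also come back per layer, which is what the length check asserts; relevance[0] is the batched relevance at the input of model.linear, its second row scaling with the doubled input.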
Example #9
 def test_lrp_Identity(self) -> None:
     model, inputs = _get_simple_model()
     with torch.no_grad():
         model.linear.weight.data[0][0] = -2  # type: ignore
     model.eval()
     model.linear.rule = IdentityRule()  # type: ignore
     model.linear2.rule = EpsilonRule()  # type: ignore
     lrp = LRP(model)
     relevance = lrp.attribute(inputs)
     assertTensorAlmostEqual(self, relevance,
                             torch.tensor([24.0, 36.0, 36.0]))
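IdentityRule passes the relevance arriving at a layer's output straight through to its input, skipping the usual weighting. Setting weight[0][0] to -2 changes the hidden activations to [8, 12, 12] and the output to 3 * (8 + 12 + 12) = 96; the epsilon rule at linear2 splits that into [24.0, 36.0, 36.0], and the identity rule on linear hands exactly those values to the inputs unchanged.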