Example #1
0
 def test_lrp_simple_attributions(self):
     """Relevance at linear2's input must equal relevance at linear's output,
     and the convergence delta must be exactly zero for Epsilon rules."""
     model, inputs = _get_simple_model(inplace=False)
     model.eval()
     model.linear.rule = EpsilonRule()
     model.linear2.rule = EpsilonRule()
     # Attribute with respect to the *input* of the upper layer.
     upper_lrp = LayerLRP(model, model.linear2)
     upper_relevance, delta = upper_lrp.attribute(
         inputs, attribute_to_layer_input=True, return_convergence_delta=True
     )
     # Attribute with respect to the *output* of the lower layer.
     lower_lrp = LayerLRP(model, model.linear)
     lower_relevance = lower_lrp.attribute(inputs)
     assertTensorAlmostEqual(self, lower_relevance[0], upper_relevance[0])
     self.assertEqual(delta.item(), 0)
Example #2
0
 def test_lrp_simple_inplaceReLU(self):
     """In-place ReLU must not change layer attributions versus out-of-place."""
     default_model, inputs = _get_simple_model()
     inplace_model, _ = _get_simple_model(inplace=True)
     # Configure both variants identically before attribution.
     for m in (default_model, inplace_model):
         m.eval()
         m.linear.rule = EpsilonRule()
         m.linear2.rule = EpsilonRule()
     default_attr = LayerLRP(default_model, default_model.linear2).attribute(
         inputs, attribute_to_layer_input=True
     )
     inplace_attr = LayerLRP(inplace_model, inplace_model.linear2).attribute(
         inputs, attribute_to_layer_input=True
     )
     assertTensorAlmostEqual(self, default_attr[0], inplace_attr[0])
Example #3
0
 def test_lrp_simple_repeat_attributions(self):
     """Running attribution must leave the model's forward pass unchanged."""
     model, inputs = _get_simple_model()
     model.eval()
     model.linear.rule = GammaRule()
     model.linear2.rule = Alpha1_Beta0_Rule()
     # Snapshot the output, attribute once, then verify the output again.
     before = model(inputs)
     _ = LayerLRP(model, model.linear).attribute(inputs)
     after = model(inputs)
     assertTensorAlmostEqual(self, before, after)
Example #4
0
 def test_lrp_simple_attributions_all_layers(self):
     """Passing a list of layers yields one relevance entry per layer."""
     model, inputs = _get_simple_model(inplace=False)
     model.eval()
     model.linear.rule = EpsilonRule()
     model.linear2.rule = EpsilonRule()
     # Request attributions for both linear layers in a single call.
     lrp = LayerLRP(model, [model.linear, model.linear2])
     relevance = lrp.attribute(inputs, attribute_to_layer_input=True)
     self.assertEqual(len(relevance), 2)
     expected = torch.tensor([[[18.0, 36.0, 54.0]]])
     assertTensorAlmostEqual(self, relevance[0][0], expected)
Example #5
0
 def test_lrp_simple_attributions_AlphaBeta(self):
     """Alpha1-Beta0 rule must produce the reference relevance when a
     negative weight is present in the first linear layer."""
     model, inputs = _get_simple_model()
     # Inject a negative weight so the alpha/beta split is exercised.
     with torch.no_grad():
         model.linear.weight.data[0][0] = -2
     model.eval()
     model.linear.rule = Alpha1_Beta0_Rule()
     model.linear2.rule = Alpha1_Beta0_Rule()
     relevance = LayerLRP(model, model.linear).attribute(inputs)
     expected = torch.tensor([24.0, 36.0, 36.0])
     assertTensorAlmostEqual(self, relevance[0], expected)
Example #6
0
 def test_lrp_basic_attributions(self):
     """conv1 relevance for the predicted class must match the reference
     tensor, with a zero convergence delta.

     Fix: the max *value* returned by ``torch.max`` was bound to an unused
     local ``score``; it is now discarded with ``_``.
     """
     model, inputs = _get_basic_config()
     logits = model(inputs)
     # Only the argmax index is needed to select the target class.
     _, classIndex = torch.max(logits, 1)
     lrp = LayerLRP(model, model.conv1)
     relevance, delta = lrp.attribute(
         inputs, classIndex.item(), return_convergence_delta=True
     )
     assertTensorAlmostEqual(
         self, relevance[0],
         torch.Tensor([[[0, 4], [31, 40]], [[0, 0], [-6, -15]]]))
     assertTensorAlmostEqual(self, delta, torch.Tensor([0]))
Example #7
0
 def test_lrp_simple_attributions_all_layers_delta(self):
     """Multi-layer attribution with a batched input returns matching
     numbers of relevances and deltas, plus the reference values."""
     model, inputs = _get_simple_model(inplace=False)
     model.eval()
     model.linear.rule = EpsilonRule()
     model.linear2.rule = EpsilonRule()
     # Double the batch: second row is 2x the first, so relevance scales too.
     inputs = torch.cat((inputs, 2 * inputs))
     lrp = LayerLRP(model, [model.linear, model.linear2])
     relevance, delta = lrp.attribute(
         inputs,
         attribute_to_layer_input=True,
         return_convergence_delta=True,
     )
     self.assertEqual(len(relevance), len(delta))
     expected = torch.tensor([[18.0, 36.0, 54.0], [36.0, 72.0, 108.0]])
     assertTensorAlmostEqual(self, relevance[0], expected)
Example #8
0
    def test_lrp_simple_tanh(self):
        """Tanh nonlinearity is skipped during relevance propagation;
        the result must match the reference computed without it."""

        class TanhModel(nn.Module):
            # Minimal linear -> tanh -> linear stack with constant weights.
            def __init__(self) -> None:
                super().__init__()
                self.linear = nn.Linear(3, 3, bias=False)
                self.linear.weight.data.fill_(0.1)
                self.tanh = torch.nn.Tanh()
                self.linear2 = nn.Linear(3, 1, bias=False)
                self.linear2.weight.data.fill_(0.1)

            def forward(self, x):
                return self.linear2(self.tanh(self.linear(x)))

        model = TanhModel()
        _, inputs = _get_simple_model()
        relevance = LayerLRP(model, model.linear).attribute(inputs)
        # Result if tanh is skipped for propagation.
        expected = torch.Tensor([[0.0537, 0.0537, 0.0537]])
        assertTensorAlmostEqual(self, relevance[0], expected)
Example #9
0
 def test_lrp_creator_activation(self):
     """An unsupported activation (Sigmoid) must raise TypeError."""
     model, inputs = _get_basic_config()
     model.add_module("sigmoid", nn.Sigmoid())
     lrp = LayerLRP(model, model.conv1)
     with self.assertRaises(TypeError):
         lrp.attribute(inputs)