def test_lrp_simple_attributions_GammaRule(self) -> None:
    model, inputs = _get_simple_model()
    with torch.no_grad():
        model.linear.weight.data[0][0] = -2
    model.eval()
    model.linear.rule = GammaRule(gamma=1)
    model.linear2.rule = GammaRule()
    lrp = LayerLRP(model, model.linear)
    relevance = lrp.attribute(inputs)
    assertTensorAlmostEqual(self, relevance[0], torch.tensor([24.0, 36.0, 36.0]))
def test_lrp_simple_attributions_GammaRule(self) -> None:
    model, inputs = _get_simple_model()
    with torch.no_grad():
        model.linear.weight.data[0][0] = -2  # type: ignore
    model.eval()
    model.linear.rule = GammaRule(gamma=1)  # type: ignore
    model.linear2.rule = GammaRule()  # type: ignore
    lrp = LRP(model)
    relevance = lrp.attribute(inputs)
    assertTensorAlmostEqual(
        self, relevance.data, torch.tensor([[28 / 3, 104 / 3, 52]])  # type: ignore
    )
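# Worked check of the expected relevances above, assuming the fixture builds a
# bias-free 3 -> 3 -> 1 model with all-2.0 / all-3.0 weights and input
# [[1.0, 2.0, 3.0]] (an assumption inferred from the asserted values): with
# weight[0][0] = -2 the hidden activations are [8, 12, 12] and the output is 96.
# The GammaRule on linear2 (gamma=0.25) scales every weight equally, so the
# hidden relevances are 96 * [8, 12, 12] / 32 = [24, 36, 36] (the values the
# LayerLRP test above asserts). The GammaRule on linear (gamma=1) turns its
# weight rows into [-2, 4, 4] and [4, 4, 4], giving input relevances
# [-8/3 + 6 + 6, 32/3 + 12 + 12, 16 + 18 + 18] = [28/3, 104/3, 52].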
def test_lrp_simple_repeat_attributions(self) -> None:
    model, inputs = _get_simple_model()
    model.eval()
    model.linear.rule = GammaRule()  # type: ignore
    model.linear2.rule = Alpha1_Beta0_Rule()  # type: ignore
    output = model(inputs)
    lrp = LRP(model)
    _ = lrp.attribute(inputs)
    output_after = model(inputs)
    assertTensorAlmostEqual(self, output, output_after)
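# A minimal sketch of the _get_simple_model fixture these tests rely on. The
# actual helper lives elsewhere in the test suite; the layer shapes, constant
# weights, and input below are assumptions chosen to be consistent with the
# relevances asserted above (hidden activations [12, 12, 12] and output 108
# for the unmodified model).
import torch
import torch.nn as nn


class _SimpleLRPModelSketch(nn.Module):
    def __init__(self) -> None:
        super().__init__()
        # 3 -> 3 linear layer, no bias, all weights set to 2.0.
        self.linear = nn.Linear(3, 3, bias=False)
        self.linear.weight.data.fill_(2.0)
        self.relu = nn.ReLU()
        # 3 -> 1 linear layer, no bias, all weights set to 3.0.
        self.linear2 = nn.Linear(3, 1, bias=False)
        self.linear2.weight.data.fill_(3.0)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.linear2(self.relu(self.linear(x)))


def _get_simple_model_sketch():
    model = _SimpleLRPModelSketch()
    inputs = torch.tensor([[1.0, 2.0, 3.0]])
    return model, inputs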