def test_lrp_simple_attributions_AlphaBeta(self) -> None:
    model, inputs = _get_simple_model()
    with torch.no_grad():
        # Flip one weight negative so the alpha1-beta0 rule, which keeps only
        # positive contributions, is actually exercised.
        model.linear.weight.data[0][0] = -2  # type: ignore
    model.eval()
    model.linear.rule = Alpha1_Beta0_Rule()  # type: ignore
    model.linear2.rule = Alpha1_Beta0_Rule()  # type: ignore
    lrp = LRP(model)
    relevance = lrp.attribute(inputs)
    assertTensorAlmostEqual(self, relevance, torch.tensor([[12, 33.6, 50.4]]))
def test_lrp_simple_attributions_AlphaBeta_layer(self) -> None:
    # Same setup as above, but attributions are taken at model.linear via LayerLRP.
    model, inputs = _get_simple_model()
    with torch.no_grad():
        model.linear.weight.data[0][0] = -2
    model.eval()
    model.linear.rule = Alpha1_Beta0_Rule()
    model.linear2.rule = Alpha1_Beta0_Rule()
    lrp = LayerLRP(model, model.linear)
    relevance = lrp.attribute(inputs)
    assertTensorAlmostEqual(self, relevance[0], torch.tensor([24.0, 36.0, 36.0]))
def test_lrp_simple_repeat_attributions(self) -> None:
    # The model's forward outputs must be unchanged after an attribution pass.
    model, inputs = _get_simple_model()
    model.eval()
    model.linear.rule = GammaRule()  # type: ignore
    model.linear2.rule = Alpha1_Beta0_Rule()  # type: ignore
    output = model(inputs)
    lrp = LRP(model)
    _ = lrp.attribute(inputs)
    output_after = model(inputs)
    assertTensorAlmostEqual(self, output, output_after)