Example #1
    def test_relu_deeplift_exact_match_wo_mutliplying_by_inputs(self) -> None:
        x1 = torch.tensor([1.0])
        x2 = torch.tensor([2.0])
        inputs = (x1, x2)

        model = ReLUDeepLiftModel()
        dl = DeepLift(model, multiply_by_inputs=False)
        attributions = dl.attribute(inputs)
        self.assertEqual(attributions[0][0], 2.0)
        self.assertEqual(attributions[1][0], 0.5)
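
ReLUDeepLiftModel is a test helper that is not shown in these snippets. Below is a minimal sketch of a model that is consistent with the asserted values, assuming a forward pass of 2 * relu(x1) + 2 * relu(x2 - 1.5); the real helper may differ.

import torch
import torch.nn as nn


class ReLUDeepLiftModelSketch(nn.Module):
    # Hypothetical stand-in for the ReLUDeepLiftModel helper referenced above.
    def __init__(self) -> None:
        super().__init__()
        self.relu1 = nn.ReLU()
        self.relu2 = nn.ReLU()

    def forward(self, x1: torch.Tensor, x2: torch.Tensor) -> torch.Tensor:
        # Chosen so that, against the default zero baselines, DeepLift's rescale
        # rule yields a multiplier of 2.0 for x1 and 0.5 for x2 at the tested inputs.
        return 2 * self.relu1(x1) + 2 * self.relu2(x2 - 1.5)

With multiply_by_inputs=False, attribute returns the multipliers themselves rather than multiplier * (input - baseline): for x1 = 1.0 the ReLU stays in its linear region over [0, 1], so the multiplier is 2.0; for x2 = 2.0 the rescale rule gives 2 * (relu(0.5) - relu(-1.5)) / (0.5 - (-1.5)) = 0.5, matching the two assertions.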
Example #2
    def test_relu_deeplift_with_hypothetical_contrib_func(self) -> None:
        model = Conv1dDeepLiftModel()
        rand_seq_data = torch.abs(torch.randn(2, 4, 1000))
        rand_seq_ref = torch.abs(torch.randn(2, 4, 1000))
        dls = DeepLift(model)
        attr = dls.attribute(
            rand_seq_data,
            rand_seq_ref,
            custom_attribution_func=_hypothetical_contrib_func,
            target=(1, 0),
        )
        self.assertEqual(attr.shape, rand_seq_data.shape)
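
_hypothetical_contrib_func and Conv1dDeepLiftModel are defined elsewhere in the test suite. Captum's custom_attribution_func hook accepts up to three positional arguments, multipliers, inputs and baselines (each a tuple of tensors), and must return a tuple of attributions whose shapes match the inputs, which is why the test can assert attr.shape == rand_seq_data.shape. The sketch below only illustrates that signature; the real function computes hypothetical contribution scores and is not reproduced here.

from typing import Tuple

from torch import Tensor


def contrib_func_sketch(
    multipliers: Tuple[Tensor, ...],
    inputs: Tuple[Tensor, ...],
    baselines: Tuple[Tensor, ...],
) -> Tuple[Tensor, ...]:
    # Placeholder scoring: the default DeepLift contribution, multiplier * (input - baseline).
    # The returned tensors keep the input shapes, as the attribute() contract requires.
    return tuple(m * (x - b) for m, x, b in zip(multipliers, inputs, baselines))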
Example #3
    def test_lin_maxpool_lin_classification(self) -> None:
        inputs = torch.ones(2, 4)
        baselines = torch.tensor([[1, 2, 3, 9], [4, 8, 6, 7]]).float()

        model = LinearMaxPoolLinearModel()
        dl = DeepLift(model)
        attrs, delta = dl.attribute(
            inputs, baselines, target=0, return_convergence_delta=True
        )
        expected = [[0.0, 0.0, 0.0, -8.0], [0.0, -7.0, 0.0, 0.0]]
        expected_delta = [0.0, 0.0]
        assertArraysAlmostEqual(attrs.detach().numpy(), expected)
        assertArraysAlmostEqual(delta.detach().numpy(), expected_delta)
Example #4
    def test_lin_maxpool_lin_classification(self) -> None:
        inputs = torch.ones(2, 4)
        baselines = torch.tensor([[1, 2, 3, 9], [4, 8, 6, 7]]).float()

        model = LinearMaxPoolLinearModel()
        dl = DeepLift(model)
        attrs, delta = dl.attribute(inputs,
                                    baselines,
                                    target=0,
                                    return_convergence_delta=True)
        expected = torch.Tensor([[0.0, 0.0, 0.0, -8.0], [0.0, -7.0, 0.0, 0.0]])
        expected_delta = torch.Tensor([0.0, 0.0])
        assertTensorAlmostEqual(self, attrs, expected, 0.0001)
        assertTensorAlmostEqual(self, delta, expected_delta, 0.0001)
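
Examples #3 and #4 are the same test written against two assertion helpers (array-based and tensor-based). LinearMaxPoolLinearModel is not shown here; one model that is consistent with the expected values is an identity linear layer, a max-pool over the four features, and a unit-weight linear output. The sketch below is written under that assumption and is not necessarily the real helper.

import torch
import torch.nn as nn


class LinearMaxPoolLinearSketch(nn.Module):
    # Hypothetical stand-in for the LinearMaxPoolLinearModel helper referenced above.
    def __init__(self) -> None:
        super().__init__()
        self.lin1 = nn.Linear(4, 4, bias=False)
        self.lin1.weight = nn.Parameter(torch.eye(4))      # identity: features pass through
        self.pool = nn.MaxPool1d(kernel_size=4)            # max over the four features
        self.lin2 = nn.Linear(1, 1, bias=False)
        self.lin2.weight = nn.Parameter(torch.ones(1, 1))  # unit weight, single logit

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # (batch, 4) -> (batch, 1, 4) -> max-pool -> (batch, 1) -> (batch, 1)
        return self.lin2(self.pool(self.lin1(x).unsqueeze(1)).squeeze(2))

Under this reading the outputs are max(1, 1, 1, 1) = 1 for both input rows and 9 and 8 for the two baseline rows, so the attributions in each row sum to 1 - 9 = -8 and 1 - 8 = -7; the zero convergence deltas confirm that completeness holds exactly.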
Example #5
    def test_relu_deeplift_exact_match(self) -> None:
        x1 = torch.tensor([1.0], requires_grad=True)
        x2 = torch.tensor([2.0], requires_grad=True)

        b1 = torch.tensor([0.0], requires_grad=True)
        b2 = torch.tensor([0.0], requires_grad=True)

        inputs = (x1, x2)
        baselines = (b1, b2)
        model = ReLUDeepLiftModel()
        dl = DeepLift(model)
        attributions, delta = dl.attribute(
            inputs, baselines, return_convergence_delta=True
        )
        self.assertEqual(attributions[0][0], 2.0)
        self.assertEqual(attributions[1][0], 1.0)
        self.assertEqual(delta[0], 0.0)
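
return_convergence_delta=True reports, per example, how far the attributions are from satisfying completeness: the delta is the difference between the summed attributions and f(inputs) - f(baselines). Here 2.0 + 1.0 matches the output change exactly, so the delta is 0.0. A minimal sketch of the same check done by hand, assuming a single-output model that takes the inputs as separate tensors (as in the tuple-input tests above) and no target index:

def manual_delta(model, attributions, inputs, baselines):
    # Completeness check: sum(attributions) should equal f(inputs) - f(baselines);
    # whatever remains is the convergence delta reported by attribute().
    total_attr = sum(a.sum() for a in attributions)
    out_diff = model(*inputs) - model(*baselines)
    return total_attr - out_diff.sum()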
Example #6
    def test_sigmoid_classification(self):
        num_in = 20
        input = torch.arange(0.0, num_in * 1.0, requires_grad=True).unsqueeze(0)
        baseline = 0 * input
        target = torch.tensor(0)
        # TODO add test cases for multiple different layers
        model = SigmoidDeepLiftModel(num_in, 5, 1)
        dl = DeepLift(model)
        model.zero_grad()
        attributions, delta = dl.attribute(
            input, baseline, target=target, return_convergence_delta=True
        )
        self._assert_attributions(model, attributions, input, baseline, delta, target)

        # compare with integrated gradients
        ig = IntegratedGradients(model)
        attributions_ig = ig.attribute(input, baseline, target=target)
        assertAttributionComparision(self, (attributions,), (attributions_ig,))
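
Both DeepLift and IntegratedGradients are designed to satisfy completeness against the same baseline, so their attributions for this model are expected to be comparable; assertAttributionComparision is a test helper that is not shown in these snippets. The sketch below is only an illustration of that kind of comparison, not the helper's actual logic.

import torch


def rough_attribution_comparison(attrs_a, attrs_b, rtol=0.05):
    # Illustration only: check that two attribution tuples have similar per-example sums,
    # which is what completeness suggests when both methods use the same baseline.
    for a, b in zip(attrs_a, attrs_b):
        assert torch.allclose(a.sum(dim=1), b.sum(dim=1), rtol=rtol, atol=1e-3)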
Example #7
    def test_reusable_modules(self) -> None:
        model = BasicModelWithReusableModules()
        input = torch.rand(1, 3)
        dl = DeepLift(model)
        with self.assertRaises(RuntimeError):
            dl.attribute(input, target=0)
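
BasicModelWithReusableModules is a helper whose forward pass calls the same child module more than once. DeepLift attaches forward hooks per module instance to record layer inputs and outputs, so a reused instance makes those records ambiguous and a RuntimeError is raised, which is what this test expects. A hypothetical model of that shape (not the actual helper):

import torch.nn as nn


class ReusedModuleSketch(nn.Module):
    # Hypothetical model in the spirit of BasicModelWithReusableModules: the same
    # ReLU instance appears twice in forward, which DeepLift cannot hook unambiguously.
    def __init__(self) -> None:
        super().__init__()
        self.lin1 = nn.Linear(3, 3)
        self.relu = nn.ReLU()
        self.lin2 = nn.Linear(3, 2)

    def forward(self, x):
        return self.relu(self.lin2(self.relu(self.lin1(x))))  # self.relu is reused

The usual fix is to give each activation its own module instance rather than sharing one across calls.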