Example #1
    def _saliency_classification_assert(self,
                                        nt_type: str = "vanilla") -> None:
        num_in = 5
        input = torch.tensor([[0.0, 1.0, 2.0, 3.0, 4.0]], requires_grad=True)
        target = torch.tensor(5)
        # 10-class classification model
        model = SoftmaxModel(num_in, 20, 10)
        saliency = Saliency(model)

        if nt_type == "vanilla":
            attributions = saliency.attribute(input, target)

            # compute the gradient manually; Saliency should match its
            # absolute value
            output = model(input)[:, target]
            output.backward()
            expected = torch.abs(cast(Tensor, input.grad))
            self.assertEqual(
                expected.detach().numpy().tolist(),
                attributions.detach().numpy().tolist(),
            )
        else:
            nt = NoiseTunnel(saliency)
            attributions = nt.attribute(input,
                                        nt_type=nt_type,
                                        nt_samples=10,
                                        stdevs=0.0002,
                                        target=target)
        self.assertEqual(input.shape, attributions.shape)
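
All of these snippets come from Captum's test suite and share a `SoftmaxModel` helper that is not shown here. Below is a minimal sketch of what such a helper could look like: a three-layer MLP ending in a softmax, whose constructor takes (num_in, num_hidden, num_out) and which exposes `relu1` as a named layer. The layer names and structure are assumptions inferred from the calls in these examples, not Captum's actual helper.

import torch
import torch.nn as nn

class SoftmaxModel(nn.Module):
    # Hypothetical stand-in for the unshown test helper: a 3-layer MLP
    # producing a softmax distribution over `num_out` classes.
    def __init__(self, num_in: int, num_hidden: int, num_out: int) -> None:
        super().__init__()
        self.lin1 = nn.Linear(num_in, num_hidden)
        self.relu1 = nn.ReLU()
        self.lin2 = nn.Linear(num_hidden, num_hidden)
        self.relu2 = nn.ReLU()
        self.lin3 = nn.Linear(num_hidden, num_out)
        self.softmax = nn.Softmax(dim=1)

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        x = self.relu1(self.lin1(input))
        x = self.relu2(self.lin2(x))
        return self.softmax(self.lin3(x))
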
Example #2
    def _input_x_gradient_classification_assert(self,
                                                nt_type: str = "vanilla"
                                                ) -> None:
        num_in = 5
        input = torch.tensor([[0.0, 1.0, 2.0, 3.0, 4.0]], requires_grad=True)
        target = torch.tensor(5)

        # 10-class classification model
        model = SoftmaxModel(num_in, 20, 10)
        input_x_grad = InputXGradient(model.forward)
        if nt_type == "vanilla":
            attributions = input_x_grad.attribute(input, target)
            output = model(input)[:, target]
            output.backward()
            expercted = input.grad * input
            self.assertEqual(
                expercted.detach().numpy().tolist(),
                attributions.detach().numpy().tolist(),
            )
        else:
            nt = NoiseTunnel(input_x_grad)
            attributions = nt.attribute(input,
                                        nt_type=nt_type,
                                        nt_samples=10,
                                        stdevs=1.0,
                                        target=target)

        self.assertEqual(attributions.shape, input.shape)
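
The identity exercised in the vanilla branch is attribution = input * dF_target/d input. A standalone sketch of the same check outside the test class (reusing the hypothetical `SoftmaxModel` above; requires captum):

import torch
from captum.attr import InputXGradient

model = SoftmaxModel(5, 20, 10)
x = torch.tensor([[0.0, 1.0, 2.0, 3.0, 4.0]], requires_grad=True)
attr = InputXGradient(model).attribute(x, target=5)

model(x)[:, 5].sum().backward()           # populate x.grad manually
assert torch.allclose(attr, x.grad * x)   # element-wise input * gradient
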
Example #3
    def test_classification(self) -> None:
        def custom_baseline_fn(inputs: Tensor) -> Tensor:
            num_in = inputs.shape[1]  # type: ignore
            return torch.arange(0.0, num_in * 5.0).reshape(5, num_in)

        num_in = 40
        n_samples = 100

        # 10-class classification model
        model = SoftmaxModel(num_in, 20, 10)
        model.eval()

        inputs = torch.arange(0.0, num_in * 2.0).reshape(2, num_in)
        baselines = custom_baseline_fn

        self._assert_attributions(model, model.relu1, inputs, baselines, 1, n_samples)
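
`_assert_attributions` is defined elsewhere in the test class; judging from its arguments (model, the `relu1` layer, inputs, callable baselines, a target, and a sample count), it presumably drives a layer-level attribution method. A hedged sketch of a comparable direct call, assuming `LayerGradientShap` is the method under test:

from captum.attr import LayerGradientShap

layer_gs = LayerGradientShap(model, model.relu1)
layer_attrs = layer_gs.attribute(
    inputs,
    baselines=custom_baseline_fn,  # callable baselines are re-evaluated per call
    target=1,
    n_samples=n_samples,
)
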
Example #4
    def _assert_softmax_classification(
            self,
            type: str = "vanilla",
            approximation_method: str = "gausslegendre") -> None:
        num_in = 40
        input = torch.arange(0.0, num_in * 1.0,
                             requires_grad=True).unsqueeze(0)
        target = torch.tensor(5)
        # 10-class classification model
        model = SoftmaxModel(num_in, 20, 10)
        self._validate_completness(model, input, target, type,
                                   approximation_method)
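
`_validate_completness` (spelled this way in the source) is defined elsewhere; it evidently checks the completeness axiom of integrated gradients: attributions should sum to F(input) - F(baseline), with the residual reported as a convergence delta. A sketch of what such a helper might do, ignoring the noise-tunnel variant for brevity (the helper name and tolerance are assumptions):

from captum.attr import IntegratedGradients

def validate_completeness(model, input, target,
                          method="gausslegendre", baseline=None):
    ig = IntegratedGradients(model)
    attributions, delta = ig.attribute(
        input,
        baselines=baseline,
        target=target,
        method=method,
        return_convergence_delta=True,
    )
    # Completeness: sum(attributions) ~= F(input) - F(baseline),
    # so the reported delta should be near zero.
    assert delta.abs().max().item() < 0.005
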
Example #5
    def _assert_softmax_classification_batch(
            self,
            type: str = "vanilla",
            approximation_method: str = "gausslegendre") -> None:
        num_in = 40
        input = torch.arange(0.0, num_in * 3.0,
                             requires_grad=True).reshape(3, num_in)
        target = torch.tensor([5, 5, 2])
        baseline = torch.zeros(1, num_in)
        # 10-class classification model
        model = SoftmaxModel(num_in, 20, 10)
        self._validate_completness(model, input, target, type,
                                   approximation_method, baseline)
Example #6
    def test_classification(self) -> None:
        def custom_baseline_fn(inputs):
            num_in = inputs.shape[1]
            return torch.arange(0.0, num_in * 4.0).reshape(4, num_in)

        num_in = 40
        n_samples = 10

        # 10-class classification model
        model = SoftmaxModel(num_in, 20, 10)
        model.eval()

        inputs = torch.arange(0.0, num_in * 2.0).reshape(2, num_in)
        baselines = custom_baseline_fn
        expected = torch.zeros(2, 20)

        self._assert_attributions(model,
                                  model.relu1,
                                  inputs,
                                  baselines,
                                  1,
                                  expected,
                                  n_samples=n_samples)
Example #7
    def test_classification_baselines_as_function(self) -> None:
        num_in = 40
        inputs = torch.arange(0.0, num_in * 2.0).reshape(2, num_in)

        def generate_baselines() -> Tensor:
            return torch.arange(0.0, num_in * 4.0).reshape(4, num_in)

        def generate_baselines_with_inputs(inputs: Tensor) -> Tensor:
            inp_shape = cast(Tuple[int, ...], inputs.shape)
            return torch.arange(0.0,
                                inp_shape[1] * 2.0).reshape(2, inp_shape[1])

        def generate_baselines_returns_array() -> ndarray:
            return np.arange(0.0, num_in * 4.0).reshape(4, num_in)

        # 10-class classification model
        model = SoftmaxModel(num_in, 20, 10)
        model.eval()
        model.zero_grad()

        gradient_shap = GradientShap(model)
        n_samples = 10
        attributions, delta = gradient_shap.attribute(
            inputs,
            baselines=generate_baselines,
            target=torch.tensor(1),
            n_samples=n_samples,
            stdevs=0.009,
            return_convergence_delta=True,
        )
        _assert_attribution_delta(self, (inputs, ), (attributions, ),
                                  n_samples, delta)

        attributions, delta = gradient_shap.attribute(
            inputs,
            baselines=generate_baselines_with_inputs,
            target=torch.tensor(1),
            n_samples=n_samples,
            stdevs=0.00001,
            return_convergence_delta=True,
        )
        _assert_attribution_delta(self, (inputs, ), (attributions, ),
                                  n_samples, delta)

        with self.assertRaises(AssertionError):
            attributions, delta = gradient_shap.attribute(
                inputs,
                baselines=generate_baselines_returns_array,
                target=torch.tensor(1),
                n_samples=n_samples,
                stdevs=0.00001,
                return_convergence_delta=True,
            )
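
The last call is expected to fail: `GradientShap` asserts that baselines are tensors, so a generator that returns a raw numpy `ndarray` trips an `AssertionError`. A hedged workaround sketch is to convert inside the generator (`generate_baselines_as_tensor` is a hypothetical name):

def generate_baselines_as_tensor() -> Tensor:
    # wrap the numpy baselines in a tensor before handing them to captum
    return torch.from_numpy(generate_baselines_returns_array()).float()
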
Example #8
    def test_classification(self) -> None:
        num_in = 40
        inputs = torch.arange(0.0, num_in * 2.0).reshape(2, num_in)
        baselines = torch.arange(0.0, num_in * 4.0).reshape(4, num_in)
        target = torch.tensor(1)
        # 10-class classification model
        model = SoftmaxModel(num_in, 20, 10)
        model.eval()
        model.zero_grad()

        gradient_shap = GradientShap(model)
        n_samples = 10
        attributions, delta = gradient_shap.attribute(
            inputs,
            baselines=baselines,
            target=target,
            n_samples=n_samples,
            stdevs=0.009,
            return_convergence_delta=True,
        )
        _assert_attribution_delta(self, (inputs, ), (attributions, ),
                                  n_samples, delta)

        # calling `compute_convergence_delta` with a baseline batch (4) that
        # does not match the input batch (2) raises an AssertionError
        with self.assertRaises(AssertionError):
            gradient_shap.compute_convergence_delta(attributions,
                                                    inputs,
                                                    baselines,
                                                    target=target)
        # now expand the target to match the batch and draw random baselines
        # from the `baselines` tensor
        rand_indices = np.random.choice(baselines.shape[0],
                                        inputs.shape[0]).tolist()
        chosen_baselines = baselines[rand_indices]

        target_extended = torch.tensor([1, 1])
        external_delta = gradient_shap.compute_convergence_delta(
            attributions, chosen_baselines, inputs, target=target_extended)
        _assert_delta(self, external_delta)

        # Compare with integrated gradients
        ig = IntegratedGradients(model)
        baselines = torch.arange(0.0, num_in * 2.0).reshape(2, num_in)
        attributions_ig = ig.attribute(inputs,
                                       baselines=baselines,
                                       target=target)
        self._assert_shap_ig_comparision((attributions, ), (attributions_ig, ))
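
`_assert_delta`, `_assert_attribution_delta`, and `_assert_shap_ig_comparision` (spelled this way in the source) are helpers defined elsewhere in the suite. The delta checks presumably bound the magnitude of the convergence deltas; a minimal sketch, with an assumed tolerance and a hypothetical name:

from torch import Tensor

def assert_delta(test, delta: Tensor, tolerance: float = 0.0006) -> None:
    # Convergence deltas should be near zero when the SHAP expectation
    # is well approximated; fail if any sample exceeds the tolerance.
    test.assertTrue((delta.abs() < tolerance).all().item())
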