Example #1
    def _saliency_base_assert(
        self,
        model: Module,
        inputs: TensorOrTupleOfTensorsGeneric,
        expected: TensorOrTupleOfTensorsGeneric,
        additional_forward_args: Any = None,
        nt_type: str = "vanilla",
    ) -> None:
        saliency = Saliency(model)

        self.assertFalse(saliency.multiplies_by_inputs)

        if nt_type == "vanilla":
            attributions = saliency.attribute(
                inputs, additional_forward_args=additional_forward_args)
        else:
            nt = NoiseTunnel(saliency)
            attributions = nt.attribute(
                inputs,
                nt_type=nt_type,
                n_samples=10,
                stdevs=0.0000002,
                additional_forward_args=additional_forward_args,
            )

        for input, attribution, expected_attr in zip(inputs, attributions,
                                                     expected):
            if nt_type == "vanilla":
                self._assert_attribution(attribution, expected_attr)
            self.assertEqual(input.shape, attribution.shape)
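
A helper like this is typically driven by small test methods. A hypothetical usage sketch follows; BasicModel and the expected gradient values are illustrative placeholders, not fixtures taken from these examples.

    def test_saliency_vanilla(self) -> None:
        model = BasicModel()  # hypothetical test fixture
        inputs = (torch.tensor([[1.0, 2.0, 3.0]], requires_grad=True),)
        expected = (torch.tensor([[1.0, 1.0, 1.0]]),)  # placeholder values
        self._saliency_base_assert(model, inputs, expected)

    def test_saliency_smoothgrad(self) -> None:
        model = BasicModel()  # hypothetical test fixture
        inputs = (torch.tensor([[1.0, 2.0, 3.0]], requires_grad=True),)
        expected = (torch.tensor([[1.0, 1.0, 1.0]]),)  # unused on this path
        self._saliency_base_assert(model, inputs, expected, nt_type="smoothgrad")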
Example #2
 def _gradient_matching_test_assert(self, model: Module,
                                    output_layer: Module,
                                    test_input: Tensor) -> None:
     out = _forward_layer_eval(model, test_input, output_layer)
     # Select first element of tuple
     out = out[0]
     gradient_attrib = NeuronGradient(model, output_layer)
     self.assertFalse(gradient_attrib.multiplies_by_inputs)
     for i in range(cast(Tuple[int, ...], out.shape)[1]):
         neuron: Tuple[int, ...] = (i, )
         while len(neuron) < len(out.shape) - 1:
             neuron = neuron + (0, )
         input_attrib = Saliency(lambda x: _forward_layer_eval(
             model, x, output_layer, grad_enabled=True)[0][
                 (slice(None), *neuron)])
         sal_vals = input_attrib.attribute(test_input, abs=False)
         grad_vals = gradient_attrib.attribute(test_input, neuron)
         # Verify matching sizes
         self.assertEqual(grad_vals.shape, sal_vals.shape)
         self.assertEqual(grad_vals.shape, test_input.shape)
         assertArraysAlmostEqual(
             sal_vals.reshape(-1).tolist(),
             grad_vals.reshape(-1).tolist(),
             delta=0.001,
         )
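
A sketch of how this assertion might be invoked, assuming a hypothetical BasicModel_MultiLayer fixture that exposes an intermediate relu submodule:

 def test_matching_intermediate_gradient(self) -> None:
     net = BasicModel_MultiLayer()  # hypothetical fixture
     test_input = torch.randn(4, 3)
     self._gradient_matching_test_assert(net, net.relu, test_input)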
Example #3
    def _saliency_classification_assert(self,
                                        nt_type: str = "vanilla") -> None:
        num_in = 5
        input = torch.tensor([[0.0, 1.0, 2.0, 3.0, 4.0]], requires_grad=True)
        target = torch.tensor(5)
        # 10-class classification model
        model = SoftmaxModel(num_in, 20, 10)
        saliency = Saliency(model)

        if nt_type == "vanilla":
            attributions = saliency.attribute(input, target)

            # Expected: absolute gradient of the target logit w.r.t. the input
            output = model(input)[:, target]
            output.backward()
            expected = torch.abs(cast(Tensor, input.grad))
            self.assertEqual(
                expected.detach().numpy().tolist(),
                attributions.detach().numpy().tolist(),
            )
        else:
            nt = NoiseTunnel(saliency)
            attributions = nt.attribute(input,
                                        nt_type=nt_type,
                                        n_samples=10,
                                        stdevs=0.0002,
                                        target=target)
        self.assertEqual(input.shape, attributions.shape)
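
The vanilla branch checks the defining property of Saliency: attributions equal the absolute gradient of the target output with respect to the input. A standalone sketch of the same identity, using a plain Sequential model as an illustrative stand-in for SoftmaxModel:

import torch
import torch.nn as nn
from captum.attr import Saliency

# Stand-in 10-class classifier (not the SoftmaxModel fixture above)
model = nn.Sequential(nn.Linear(5, 20), nn.ReLU(), nn.Linear(20, 10))
inp = torch.tensor([[0.0, 1.0, 2.0, 3.0, 4.0]], requires_grad=True)

attr = Saliency(model).attribute(inp, target=5)

# Recompute the gradient manually and compare
model(inp)[:, 5].backward()
assert torch.allclose(attr, inp.grad.abs())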
Example #4
 def _saliency_base_assert(
     self, model, inputs, expected, additional_forward_args=None, nt_type="vanilla"
 ):
     saliency = Saliency(model)
     if nt_type == "vanilla":
         attributions = saliency.attribute(
             inputs, additional_forward_args=additional_forward_args
         )
     else:
         nt = NoiseTunnel(saliency)
         attributions = nt.attribute(
             inputs,
             nt_type=nt_type,
             n_samples=10,
             stdevs=0.0000002,
             additional_forward_args=additional_forward_args,
         )
     if isinstance(attributions, tuple):
         for input, attribution, expected_attr in zip(
             inputs, attributions, expected
         ):
             if nt_type == "vanilla":
                 self._assert_attribution(attribution, expected_attr)
             self.assertEqual(input.shape, attribution.shape)
     else:
         if nt_type == "vanilla":
             self._assert_attribution(attributions, expected)
         self.assertEqual(inputs.shape, attributions.shape)
Example #5
    def _saliency_base_assert(
        self,
        model: Module,
        inputs: TensorOrTupleOfTensorsGeneric,
        expected: TensorOrTupleOfTensorsGeneric,
        additional_forward_args: Any = None,
        nt_type: str = "vanilla",
        n_samples_batch_size: Optional[int] = None,
    ) -> Union[Tensor, Tuple[Tensor, ...]]:
        saliency = Saliency(model)

        self.assertFalse(saliency.multiplies_by_inputs)

        if nt_type == "vanilla":
            attributions = saliency.attribute(
                inputs, additional_forward_args=additional_forward_args)
        else:
            nt = NoiseTunnel(saliency)
            attributions = nt.attribute(
                inputs,
                nt_type=nt_type,
                nt_samples=10,
                nt_samples_batch_size=n_samples_batch_size,
                stdevs=0.0000002,
                additional_forward_args=additional_forward_args,
            )

        for input, attribution, expected_attr in zip(inputs, attributions,
                                                     expected):
            if nt_type == "vanilla":
                self._assert_attribution(attribution, expected_attr)
            self.assertEqual(input.shape, attribution.shape)

        return attributions
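
Relative to Example #1, this variant forwards the batching knob to NoiseTunnel (nt_samples_batch_size controls how many noisy samples run per forward pass) and returns the attributions so callers can make further assertions. A hedged call sketch; model, inputs, and expected are placeholders:

    attributions = self._saliency_base_assert(
        model,
        inputs,
        expected,
        nt_type="smoothgrad",
        n_samples_batch_size=2,  # run the 10 noise samples in batches of 2
    )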
Example #6
 def _gradient_matching_test_assert(self, model, output_layer, test_input):
     out = _forward_layer_eval(model, test_input, output_layer)
     gradient_attrib = NeuronGradient(model, output_layer)
     for i in range(out.shape[1]):
         neuron = (i, )
         while len(neuron) < len(out.shape) - 1:
             neuron = neuron + (0, )
         input_attrib = Saliency(lambda x: _forward_layer_eval(
             model, x, output_layer)[(slice(None), *neuron)])
         sal_vals = input_attrib.attribute(test_input, abs=False)
         grad_vals = gradient_attrib.attribute(test_input, neuron)
         # Verify matching sizes
         self.assertEqual(grad_vals.shape, sal_vals.shape)
         self.assertEqual(grad_vals.shape, test_input.shape)
         assertArraysAlmostEqual(
             sal_vals.reshape(-1).tolist(),
             grad_vals.reshape(-1).tolist(),
             delta=0.001,
         )
Example #7
        nearest_neighbors[i] = find_nearest_neighbor(neighbor, test_flat[img_idx])

    nearest_neighbors = nearest_neighbors.astype(int)

    ## Plot only nearest neighbors
    pairs, labels = create_pairs(top_10_idx, nearest_neighbors)
    create_folder("imgs/nn")
    for i in range(len(pairs)):
        show_pair(pairs[i], labels[i], perf.preds[top_10_idx[i]].item(), f"imgs/nn/{i}")

    ####
    # Explaining and plotting
    ####

    algos = {
        "saliency": Saliency(net),
        "gradcam": GuidedGradCam(net, net.layer3),
        "integrated_gradients": IntegratedGradients(net),
    }
    methods = ["heat_map", "masked_image", "alpha_scaling", "blended_heat_map"]
    signs = [
        "positive",
        "all",
        "absolute_value",
    ]  # all does not work with masked_image and alpha_scaling

    net.eval()
    for alg_name, alg in algos.items():
        create_folder("imgs")
        create_folder(f"imgs/{alg_name}")
        for sign in signs: