def _conductance_test_assert(
        self,
        model: Module,
        target_layer: Module,
        test_input: Union[Tensor, Tuple[Tensor, ...]],
        expected_conductance: Union[List[List[float]], Tuple[List[List[float]], ...]],
        baselines: BaselineType = None,
        additional_args: Any = None,
    ) -> None:
        """Assert that LayerConductance attributions for ``target_layer``
        match ``expected_conductance`` across several internal batch sizes.

        For each ``internal_batch_size`` in ``(None, 1, 20)`` this runs
        ``LayerConductance.attribute`` with 500 Gauss-Legendre steps and
        checks that:
          * every per-example convergence delta is below 0.01, and
          * the attributions equal ``expected_conductance`` within 0.1.

        Args:
            model: model under test.
            target_layer: layer whose conductance is attributed.
            test_input: a single input tensor or a tuple of input tensors.
            expected_conductance: expected per-example attribution values
                (a tuple of list-of-lists when ``test_input`` is a tuple).
            baselines: optional baselines forwarded to ``attribute``.
            additional_args: forwarded as ``additional_forward_args``.
        """
        cond = LayerConductance(model, target_layer)
        # Batching must not change results: exercise unbatched (None),
        # minimal (1), and larger-than-input (20) internal batch sizes.
        for internal_batch_size in (None, 1, 20):
            attributions, delta = cond.attribute(
                test_input,
                baselines=baselines,
                target=0,
                n_steps=500,
                method="gausslegendre",
                additional_forward_args=additional_args,
                internal_batch_size=internal_batch_size,
                return_convergence_delta=True,
            )
            # Completeness check: each example's delta must be small.
            delta_condition = all(abs(delta.numpy().flatten()) < 0.01)
            self.assertTrue(
                delta_condition,
                # NOTE(review): message wording is garbled ("does {delta}
                # not match"); kept byte-identical since it is runtime text.
                "Sum of attributions does {}"
                " not match the difference of endpoints.".format(delta),
            )

            assertTensorTuplesAlmostEqual(
                self, attributions, expected_conductance, delta=0.1,
            )
# Beispiel #2 (Example #2) — snippet separator from the source dump; score: 0
 def _conductance_input_sum_test_assert(self,
                                        model,
                                        target_layer,
                                        test_input,
                                        test_baseline=None):
     """Assert neuron conductances sum to the layer conductance.

     For every neuron ``(i, j, k)`` of ``target_layer``, the sum over
     input features of ``NeuronConductance`` must equal the matching
     entry of ``LayerConductance`` (within 0.005) for each example.

     Args:
         model: model under test.
         target_layer: layer whose conductance is decomposed per neuron.
         test_input: input tensor(s); batch dimension is dim 0.
         test_baseline: optional baselines forwarded to both attributors.
     """
     layer_cond = LayerConductance(model, target_layer)
     attributions = layer_cond.attribute(
         test_input,
         baselines=test_baseline,
         target=0,
         n_steps=500,
         method="gausslegendre",
     )
     neuron_cond = NeuronConductance(model, target_layer)
     # Iterate every neuron index of the layer output (assumed 4-D:
     # batch x d1 x d2 x d3 — TODO confirm against callers).
     for i in range(attributions.shape[1]):
         for j in range(attributions.shape[2]):
             for k in range(attributions.shape[3]):
                 neuron_vals = neuron_cond.attribute(
                     test_input,
                     (i, j, k),
                     baselines=test_baseline,
                     target=0,
                     n_steps=500,
                 )
                 for n in range(attributions.shape[0]):
                     # Compare Python floats, not 0-d tensors, so that
                     # assertAlmostEqual's arithmetic is well defined;
                     # this matches the typed variant of this helper.
                     self.assertAlmostEqual(
                         torch.sum(neuron_vals[n]).item(),
                         attributions[n, i, j, k].item(),
                         delta=0.005,
                     )
 def _assert_compare_with_layer_conductance(
         self,
         model: Module,
         input: Tensor,
         attribute_to_layer_input: bool = False):
     """Verify LayerConductance agrees with LayerIntegratedGradients.

     With a large number of integration steps (1500) the two methods
     converge: attributions must match within 0.01 and convergence
     deltas within 0.05.
     """
     # Both attributors target model.linear2 and share the same settings.
     shared_kwargs = {
         "target": 0,
         "n_steps": 1500,
         "return_convergence_delta": True,
         "attribute_to_layer_input": attribute_to_layer_input,
     }
     conductance = LayerConductance(model, cast(Module, model.linear2))
     cond_attr, cond_delta = conductance.attribute(input, **shared_kwargs)
     integrated = LayerIntegratedGradients(model, cast(Module, model.linear2))
     ig_attr, ig_delta = integrated.attribute(input, **shared_kwargs)
     assertArraysAlmostEqual(cond_attr, ig_attr, 0.01)
     assertArraysAlmostEqual(cond_delta, ig_delta, 0.05)
# Beispiel #4 (Example #4) — snippet separator from the source dump; score: 0
    def _conductance_test_assert(
        self,
        model,
        target_layer,
        test_input,
        expected_conductance,
        baselines=None,
        additional_args=None,
    ):
        """Assert LayerConductance attributions match ``expected_conductance``.

        Runs ``LayerConductance.attribute`` with 500 Gauss-Legendre steps
        for each ``internal_batch_size`` in ``(None, 1, 20)`` and checks
        that every per-example convergence delta is below 0.01 and that
        each example's attributions match the expected values within 0.1.

        Args:
            model: model under test.
            target_layer: layer whose conductance is attributed.
            test_input: input tensor(s) forwarded to ``attribute``.
            expected_conductance: expected per-example attribution values.
            baselines: optional baselines forwarded to ``attribute``.
            additional_args: forwarded as ``additional_forward_args``.
        """
        cond = LayerConductance(model, target_layer)
        # Batching must not change results: exercise unbatched (None),
        # minimal (1), and larger-than-input (20) internal batch sizes.
        for internal_batch_size in (None, 1, 20):
            attributions, delta = cond.attribute(
                test_input,
                baselines=baselines,
                target=0,
                n_steps=500,
                method="gausslegendre",
                additional_forward_args=additional_args,
                internal_batch_size=internal_batch_size,
                return_convergence_delta=True,
            )
            delta_condition = all(abs(delta.numpy().flatten()) < 0.01)
            self.assertTrue(
                delta_condition,
                # Fixed previously garbled message ("does {delta} not match").
                "Convergence delta {} exceeds tolerance: sum of attributions"
                " does not match the difference of endpoints.".format(delta),
            )

            # Compare each example's attributions against expectations.
            for i in range(len(expected_conductance)):
                assertArraysAlmostEqual(
                    attributions[i:i + 1].squeeze(0).tolist(),
                    expected_conductance[i],
                    delta=0.1,
                )
# Beispiel #5 (Example #5) — snippet separator from the source dump; score: 0
 def _conductance_input_sum_test_assert(
     self,
     model: Module,
     target_layer: Module,
     test_input: TensorOrTupleOfTensors,
     test_baseline: Optional[Union[Tensor, int, float,
                                   Tuple[Union[Tensor, int, float],
                                         ...]]] = None,
 ):
     """Assert that per-neuron conductances sum to the layer conductance.

     Every entry of the LayerConductance attribution must equal the sum
     of the corresponding NeuronConductance attribution over the input
     features, within an absolute tolerance of 0.005.
     """
     lc = LayerConductance(model, target_layer)
     layer_attr = cast(
         Tensor,
         lc.attribute(
             test_input,
             baselines=test_baseline,
             target=0,
             n_steps=500,
             method="gausslegendre",
         ),
     )
     nc = NeuronConductance(model, target_layer)
     # Walk every neuron index of the layer output, then every example.
     for chan in range(layer_attr.shape[1]):
         for row in range(layer_attr.shape[2]):
             for col in range(layer_attr.shape[3]):
                 per_input = nc.attribute(
                     test_input,
                     (chan, row, col),
                     baselines=test_baseline,
                     target=0,
                     n_steps=500,
                 )
                 for example in range(layer_attr.shape[0]):
                     summed = torch.sum(per_input[example]).item()
                     expected = layer_attr[example, chan, row, col].item()
                     self.assertAlmostEqual(summed, expected, delta=0.005)
# Beispiel #6 (Example #6) — snippet separator from the source dump; score: 0
    def test_matching_layer_tuple_selector_fn(self) -> None:
        """Neuron conductance via a tuple-selector fn matches layer conductance.

        For a layer that outputs a tuple, select each scalar unit with a
        lambda and check the summed neuron attribution equals the matching
        layer attribution entry within 0.005.
        """
        model = BasicModel_MultiLayer(multi_input_module=True)
        example = torch.tensor([[0.0, 6.0, 0.0]])

        layer_cond = LayerConductance(model, model.multi_relu)
        layer_attr = layer_cond.attribute(
            example, target=0, n_steps=500, method="gausslegendre"
        )
        neuron_cond = NeuronConductance(model, model.multi_relu)
        for tensor_idx, attr_tensor in enumerate(layer_attr):
            for unit in range(attr_tensor.shape[1]):
                # Bind indices as defaults so the selector is self-contained.
                selector = lambda out, t=tensor_idx, u=unit: out[t][:, u]
                unit_attr = neuron_cond.attribute(
                    example,
                    selector,
                    target=0,
                    n_steps=500,
                    method="gausslegendre",
                )
                self.assertAlmostEqual(
                    unit_attr.sum().item(),
                    attr_tensor[0][unit].item(),
                    delta=0.005,
                )
    def _conductance_reference_test_assert(
        self,
        model: Module,
        target_layer: Module,
        test_input: Tensor,
        test_baseline: Optional[Tensor] = None,
    ) -> None:
        """Compare LayerConductance against a reference implementation.

        Checks that:
          * convergence deltas are below 0.005,
          * the attribution shape equals the layer's forward-output shape,
          * the attributions match ``ConductanceReference`` within 0.07, and
          * for multi-example inputs, attributing each example on its own
            matches the corresponding slice of the batched attribution.
        """
        # Captured layer output; set in-place by the forward hook below.
        layer_output = None

        def forward_hook(module, inp, out):
            nonlocal layer_output
            layer_output = out

        # Register the hook only for this one forward pass, then remove it
        # so later attribute() calls are unaffected.
        hook = target_layer.register_forward_hook(forward_hook)
        final_output = model(test_input)
        layer_output = cast(Tensor, layer_output)
        hook.remove()
        # Attribute w.r.t. the output index with the largest batch-summed
        # activation.
        target_index = torch.argmax(torch.sum(final_output, 0))
        cond = LayerConductance(model, target_layer)
        cond_ref = ConductanceReference(model, target_layer)
        attributions, delta = cast(
            Tuple[Tensor, Tensor],
            cond.attribute(
                test_input,
                baselines=test_baseline,
                target=target_index,
                n_steps=300,
                method="gausslegendre",
                return_convergence_delta=True,
            ),
        )
        # Completeness check: each example's delta must be small.
        delta_condition = all(abs(delta.numpy().flatten()) < 0.005)
        self.assertTrue(
            delta_condition,
            # NOTE(review): message wording is garbled ("does {delta} not
            # match"); kept byte-identical since it is runtime text.
            "Sum of attribution values does {} "
            " not match the difference of endpoints.".format(delta),
        )

        attributions_reference = cond_ref.attribute(
            test_input,
            baselines=test_baseline,
            target=target_index,
            n_steps=300,
            method="gausslegendre",
        )

        # Check that layer output size matches conductance size.
        self.assertEqual(layer_output.shape, attributions.shape)
        # Check that reference implementation output matches standard implementation.
        assertArraysAlmostEqual(
            attributions.reshape(-1).tolist(),
            attributions_reference.reshape(-1).tolist(),
            delta=0.07,
        )

        # Test if batching is working correctly for inputs with multiple examples
        if test_input.shape[0] > 1:
            for i in range(test_input.shape[0]):
                single_attributions = cast(
                    Tensor,
                    cond.attribute(
                        test_input[i:i + 1],
                        # Slice the baseline alongside the input when given.
                        baselines=test_baseline[i:i + 1]
                        if test_baseline is not None else None,
                        target=target_index,
                        n_steps=300,
                        method="gausslegendre",
                    ),
                )
                # Verify that attributions when passing example independently
                # matches corresponding attribution of batched input.
                assertArraysAlmostEqual(
                    attributions[i:i + 1].reshape(-1).tolist(),
                    single_attributions.reshape(-1).tolist(),
                    delta=0.01,
                )