def test_multiple_tensors_compare_with_exp_wo_mult_by_inputs(self) -> None:
     """Verify that LayerIntegratedGradients without ``multiply_by_inputs``
     reproduces the default attributions once rescaled by the layer's
     (input activation - baseline activation), tensor by tensor."""
     model = BasicModel_MultiLayer(multi_input_module=True)
     inputs = torch.tensor([[0.0, 100.0, 0.0]])
     baselines = torch.tensor([[0.0, 0.0, 0.0]])
     layer = model.multi_relu

     ig = LayerIntegratedGradients(model, layer)
     ig_no_mult = LayerIntegratedGradients(
         model, layer, multiply_by_inputs=False
     )
     act = LayerActivation(model, layer)

     expected = ig.attribute(inputs, target=0)
     raw = ig_no_mult.attribute(inputs, target=0)

     # Per layer-output tensor: activation at the input minus at the baseline.
     delta_act = tuple(
         a - b
         for a, b in zip(act.attribute(inputs), act.attribute(baselines))
     )
     rescaled = tuple(r * d for r, d in zip(raw, delta_act))
     assertTensorTuplesAlmostEqual(self, rescaled, expected)
# Example #2
    def _layer_activation_test_assert(
        self,
        model: Module,
        target_layer: Module,
        test_input: Union[Tensor, Tuple[Tensor, ...]],
        expected_activation: Union[List[float], Tuple[List[float], ...]],
        additional_input: Any = None,
    ) -> None:
        """Assert LayerGradientXActivation attributions match
        *expected_activation*, then check that the gradients-only variant
        (``multiply_by_inputs=False``) times the raw activations reproduces
        those attributions."""
        grad_x_act = LayerGradientXActivation(model, target_layer)
        self.assertTrue(grad_x_act.multiplies_by_inputs)
        attributions = grad_x_act.attribute(
            test_input, target=0, additional_forward_args=additional_input
        )
        assertTensorTuplesAlmostEqual(
            self, attributions, expected_activation, delta=0.01
        )

        # Gradients alone, without multiplication by the layer activations.
        grad_only = LayerGradientXActivation(
            model, target_layer, multiply_by_inputs=False
        )
        self.assertFalse(grad_only.multiplies_by_inputs)
        grads = grad_only.attribute(
            test_input, target=0, additional_forward_args=additional_input
        )
        acts = LayerActivation(model, target_layer).attribute(
            test_input, additional_forward_args=additional_input
        )
        assertTensorTuplesAlmostEqual(
            self,
            attributions,
            tuple(a * g for a, g in zip(acts, grads)),
            delta=0.01,
        )
# Example #3
 def _layer_activation_test_assert(
     self,
     model: Module,
     target_layer: Module,
     test_input: Union[Tensor, Tuple[Tensor, ...]],
     expected_activation: Union[List[float], Tuple[List[float], ...]],
     additional_input: Any = None,
     attribute_to_layer_input: bool = False,
 ):
     """Assert that LayerActivation attributions for *target_layer* equal
     *expected_activation* within a 0.01 tolerance."""
     activations = LayerActivation(model, target_layer).attribute(
         test_input,
         additional_forward_args=additional_input,
         attribute_to_layer_input=attribute_to_layer_input,
     )
     assertTensorTuplesAlmostEqual(
         self, activations, expected_activation, delta=0.01
     )
 def _layer_activation_test_assert(
     self,
     model,
     target_layer,
     test_input,
     expected_activation,
     additional_input=None,
     attribute_to_layer_input=False,
 ):
     """Compare the batch-squeezed LayerActivation output against
     *expected_activation* as a plain Python list."""
     activations = LayerActivation(model, target_layer).attribute(
         test_input,
         additional_forward_args=additional_input,
         attribute_to_layer_input=attribute_to_layer_input,
     )
     assertArraysAlmostEqual(
         activations.squeeze(0).tolist(), expected_activation, delta=0.01
     )
 def _multiple_layer_activation_test_assert(
     self,
     model: Module,
     target_layers: List[Module],
     test_input: Union[Tensor, Tuple[Tensor, ...]],
     expected_activation: Union[List, Tuple[List[float], ...]],
     additional_input: Any = None,
     attribute_to_layer_input: bool = False,
 ):
     """Assert per-layer activations for several target layers at once."""
     layer_act = LayerActivation(model, target_layers)
     self.assertTrue(layer_act.multiplies_by_inputs)
     attributions = layer_act.attribute(
         test_input,
         additional_forward_args=additional_input,
         attribute_to_layer_input=attribute_to_layer_input,
     )
     # One tolerance comparison per requested layer.
     for idx, _layer in enumerate(target_layers):
         assertTensorTuplesAlmostEqual(
             self, attributions[idx], expected_activation[idx], delta=0.01
         )
 def test_sequential_in_place(self) -> None:
     """Activation of the conv layer is captured correctly even though the
     following ReLU modifies its input in place."""
     net = nn.Sequential(nn.Conv2d(3, 4, 3), nn.ReLU(inplace=True))
     conv = net[0]
     layer_act = LayerActivation(net, conv)
     sample = torch.randn(1, 3, 5, 5)
     assertTensorAlmostEqual(self, layer_act.attribute(sample), conv(sample))
 def test_sequential_module(self) -> None:
     """Attributing to the final seq submodule yields the model's output."""
     net = Conv1dSeqModel()
     layer_act = LayerActivation(net, net.seq)
     sample = torch.randn(2, 4, 1000)
     expected = net(sample)
     assertTensorAlmostEqual(self, layer_act.attribute(sample), expected)