def _layer_activation_test_assert(
    self,
    model: Module,
    target_layer: Module,
    test_input: Union[Tensor, Tuple[Tensor, ...]],
    expected_activation: Union[List[float], Tuple[List[float], ...]],
    additional_input: Any = None,
) -> None:
    """Check LayerGradientXActivation output and its decomposition.

    First verifies that the default attribution matches
    ``expected_activation``; then verifies it equals the elementwise
    product of the raw layer gradients (``multiply_by_inputs=False``)
    and the plain layer activations.
    """
    grad_x_act = LayerGradientXActivation(model, target_layer)
    # Default construction multiplies gradients by activations.
    self.assertTrue(grad_x_act.multiplies_by_inputs)
    attrs = grad_x_act.attribute(
        test_input, target=0, additional_forward_args=additional_input
    )
    assertTensorTuplesAlmostEqual(self, attrs, expected_activation, delta=0.01)

    # Recompute the same quantity as (pure gradient) * (pure activation).
    pure_grads = LayerGradientXActivation(
        model, target_layer, multiply_by_inputs=False
    )
    pure_acts = LayerActivation(model, target_layer)
    self.assertFalse(pure_grads.multiplies_by_inputs)
    grads = pure_grads.attribute(
        test_input, target=0, additional_forward_args=additional_input
    )
    acts = pure_acts.attribute(
        test_input, additional_forward_args=additional_input
    )
    expected_product = tuple(a * g for a, g in zip(acts, grads))
    assertTensorTuplesAlmostEqual(self, attrs, expected_product, delta=0.01)
def test_gradient_activation_embedding(self) -> None:
    """Attribution w.r.t. an embedding layer has shape (seq_len, embed_dim)."""
    first_indices = torch.tensor([2, 5, 0, 1])
    second_indices = torch.tensor([3, 0, 0, 2])
    model = BasicEmbeddingModel()
    attribution = LayerGradientXActivation(model, model.embedding1).attribute(
        inputs=(first_indices, second_indices)
    )
    # Four token positions, each with a 100-dimensional embedding.
    self.assertEqual(list(attribution.shape), [4, 100])
def test_gradient_activation_embedding_no_grad(self) -> None:
    """Attribution still works with frozen params under torch.no_grad()."""
    first_indices = torch.tensor([2, 5, 0, 1])
    second_indices = torch.tensor([3, 0, 0, 2])
    model = BasicEmbeddingModel()
    # Freeze every parameter; attribution must still compute gradients
    # internally even though the surrounding context disables grad mode.
    for param in model.parameters():
        param.requires_grad = False
    with torch.no_grad():
        attribution = LayerGradientXActivation(
            model, model.embedding1
        ).attribute(inputs=(first_indices, second_indices))
        self.assertEqual(list(attribution.shape), [4, 100])
def _layer_activation_test_assert(
    self,
    model,
    target_layer,
    test_input,
    expected_activation,
    additional_input=None,
):
    """Compare a single-example gradient-x-activation attribution
    (flattened to a list) against ``expected_activation``."""
    attribution = LayerGradientXActivation(model, target_layer).attribute(
        test_input, target=0, additional_forward_args=additional_input
    )
    assertArraysAlmostEqual(
        attribution.squeeze(0).tolist(), expected_activation, delta=0.01
    )
def _layer_activation_test_assert(
    self,
    model,
    target_layer,
    test_input,
    expected_activation,
    additional_input=None,
):
    """Assert gradient-x-activation attribution matches the expected tensors."""
    grad_x_act = LayerGradientXActivation(model, target_layer)
    result = grad_x_act.attribute(
        test_input, target=0, additional_forward_args=additional_input
    )
    assertTensorTuplesAlmostEqual(self, result, expected_activation, delta=0.01)
def _layer_activation_test_assert(
    self,
    model: Module,
    target_layer: Module,
    test_input: Union[Tensor, Tuple[Tensor, ...]],
    expected_activation: Union[List[float], Tuple[List[float], ...]],
    additional_input: Any = None,
) -> None:
    """Assert gradient-x-activation attribution matches the expected tensors."""
    grad_x_act = LayerGradientXActivation(model, target_layer)
    result = grad_x_act.attribute(
        test_input, target=0, additional_forward_args=additional_input
    )
    assertTensorTuplesAlmostEqual(self, result, expected_activation, delta=0.01)