def _gradient_matching_test_assert(self, model: Module, output_layer: Module, test_input: Tensor) -> None:
    """For every neuron in ``output_layer``, verify that NeuronGradient
    attributions match Saliency (abs=False) computed on a forward function
    sliced down to that single neuron's activation.
    """
    layer_out = _forward_layer_eval(model, test_input, output_layer)[0]
    neuron_attrib = NeuronGradient(model, output_layer)
    # NeuronGradient is a pure-gradient method: it must not multiply by inputs.
    self.assertFalse(neuron_attrib.multiplies_by_inputs)
    out_shape = cast(Tuple[int, ...], layer_out.shape)
    for idx in range(out_shape[1]):
        # Pad the neuron selector with zeros for any trailing layer dims
        # (dim 0 is the batch dim and is selected with slice(None) below).
        selector: Tuple[int, ...] = (idx,) + (0,) * max(len(out_shape) - 2, 0)

        def neuron_forward(x, _sel=selector):
            # Bind the selector as a default arg so each closure is fixed.
            activ = _forward_layer_eval(model, x, output_layer, grad_enabled=True)[0]
            return activ[(slice(None), *_sel)]

        sal_vals = Saliency(neuron_forward).attribute(test_input, abs=False)
        grad_vals = neuron_attrib.attribute(test_input, selector)
        # Both attributions must match the input shape and each other.
        self.assertEqual(grad_vals.shape, sal_vals.shape)
        self.assertEqual(grad_vals.shape, test_input.shape)
        assertArraysAlmostEqual(
            sal_vals.reshape(-1).tolist(),
            grad_vals.reshape(-1).tolist(),
            delta=0.001,
        )
def _saliency_base_assert(
    self,
    model: Module,
    inputs: TensorOrTupleOfTensorsGeneric,
    expected: TensorOrTupleOfTensorsGeneric,
    additional_forward_args: Any = None,
    nt_type: str = "vanilla",
) -> None:
    """Run Saliency (optionally wrapped in NoiseTunnel) on ``inputs`` and
    compare against ``expected`` attributions.

    Args:
        model: forward function / module under test.
        inputs: tuple of input tensors (iterated below, so a tuple is assumed).
        expected: expected attribution per input (checked for vanilla only,
            since smoothed variants are stochastic).
        additional_forward_args: passed through to ``attribute``.
        nt_type: "vanilla" for plain Saliency, otherwise a NoiseTunnel type.
    """
    saliency = Saliency(model)
    # Consistency fix: the sibling asserts in this file check the
    # `multiplies_by_inputs` attribute; Saliency is a pure-gradient
    # method, so it must be False.
    self.assertFalse(saliency.multiplies_by_inputs)
    if nt_type == "vanilla":
        attributions = saliency.attribute(
            inputs, additional_forward_args=additional_forward_args)
    else:
        nt = NoiseTunnel(saliency)
        # Tiny stdevs keep the smoothed result close to the vanilla one.
        attributions = nt.attribute(
            inputs,
            nt_type=nt_type,
            n_samples=10,
            stdevs=0.0000002,
            additional_forward_args=additional_forward_args,
        )
    for input, attribution, expected_attr in zip(inputs, attributions, expected):
        if nt_type == "vanilla":
            self._assert_attribution(attribution, expected_attr)
        # Attribution shape must always mirror the input shape.
        self.assertEqual(input.shape, attribution.shape)
def _saliency_classification_assert(self, nt_type: str = "vanilla") -> None:
    """On a small softmax classifier, vanilla Saliency must equal
    |d output[:, target] / d input|; NoiseTunnel variants are only
    checked for matching shapes.
    """
    num_in = 5
    input = torch.tensor([[0.0, 1.0, 2.0, 3.0, 4.0]], requires_grad=True)
    target = torch.tensor(5)
    # 10-class classification model
    model = SoftmaxModel(num_in, 20, 10)
    saliency = Saliency(model)

    if nt_type != "vanilla":
        # Smoothed attributions are stochastic; only the shape is asserted.
        nt = NoiseTunnel(saliency)
        attributions = nt.attribute(
            input, nt_type=nt_type, n_samples=10, stdevs=0.0002, target=target
        )
        self.assertEqual(input.shape, attributions.shape)
        return

    attributions = saliency.attribute(input, target)
    # Reference gradient computed directly via autograd.
    model(input)[:, target].backward()
    expected = torch.abs(cast(Tensor, input.grad))
    self.assertEqual(
        expected.detach().numpy().tolist(),
        attributions.detach().numpy().tolist(),
    )
def _saliency_base_assert(
    self, model, inputs, expected, additional_forward_args=None, nt_type="vanilla"
):
    """Run Saliency (optionally via NoiseTunnel) and compare against
    ``expected``; handles both tuple and single-tensor inputs.
    Exact values are only asserted for the deterministic vanilla case.
    """
    saliency = Saliency(model)
    if nt_type != "vanilla":
        # Tiny stdevs keep the smoothed result near the vanilla one.
        attributions = NoiseTunnel(saliency).attribute(
            inputs,
            nt_type=nt_type,
            n_samples=10,
            stdevs=0.0000002,
            additional_forward_args=additional_forward_args,
        )
    else:
        attributions = saliency.attribute(
            inputs, additional_forward_args=additional_forward_args
        )

    is_vanilla = nt_type == "vanilla"
    if isinstance(attributions, tuple):
        for inp, attr, exp in zip(inputs, attributions, expected):
            if is_vanilla:
                self._assert_attribution(attr, exp)
            # Attribution shape must mirror the corresponding input.
            self.assertEqual(inp.shape, attr.shape)
    else:
        if is_vanilla:
            self._assert_attribution(attributions, expected)
        self.assertEqual(inputs.shape, attributions.shape)
def _saliency_base_assert(
    self,
    model: Module,
    inputs: TensorOrTupleOfTensorsGeneric,
    expected: TensorOrTupleOfTensorsGeneric,
    additional_forward_args: Any = None,
    nt_type: str = "vanilla",
    n_samples_batch_size=None,
) -> Union[Tensor, Tuple[Tensor, ...]]:
    """Run Saliency (optionally via NoiseTunnel) on ``inputs``, compare
    against ``expected`` for the deterministic vanilla case, check shapes,
    and return the computed attributions for further inspection.
    """
    saliency = Saliency(model)
    # Saliency is a pure-gradient method: it must not multiply by inputs.
    self.assertFalse(saliency.multiplies_by_inputs)

    if nt_type != "vanilla":
        tunnel = NoiseTunnel(saliency)
        # Tiny stdevs keep the smoothed result near the vanilla one.
        attributions = tunnel.attribute(
            inputs,
            nt_type=nt_type,
            nt_samples=10,
            nt_samples_batch_size=n_samples_batch_size,
            stdevs=0.0000002,
            additional_forward_args=additional_forward_args,
        )
    else:
        attributions = saliency.attribute(
            inputs, additional_forward_args=additional_forward_args)

    for inp, attr, exp in zip(inputs, attributions, expected):
        if nt_type == "vanilla":
            self._assert_attribution(attr, exp)
        # Attribution shape must mirror the corresponding input.
        self.assertEqual(inp.shape, attr.shape)
    return attributions
def _gradient_matching_test_assert(self, model, output_layer, test_input):
    """For each neuron in ``output_layer``, verify NeuronGradient matches
    Saliency (abs=False) of a forward function sliced to that neuron.
    """
    layer_out = _forward_layer_eval(model, test_input, output_layer)
    neuron_attrib = NeuronGradient(model, output_layer)
    for idx in range(layer_out.shape[1]):
        # Pad the neuron selector with zeros for any trailing layer dims
        # (dim 0 is the batch dim and is selected with slice(None) below).
        selector = (idx,) + (0,) * max(len(layer_out.shape) - 2, 0)

        def neuron_forward(x, _sel=selector):
            # Bind the selector as a default arg so each closure is fixed.
            return _forward_layer_eval(model, x, output_layer)[(slice(None), *_sel)]

        sal_vals = Saliency(neuron_forward).attribute(test_input, abs=False)
        grad_vals = neuron_attrib.attribute(test_input, selector)
        # Both attributions must match the input shape and each other.
        self.assertEqual(grad_vals.shape, sal_vals.shape)
        self.assertEqual(grad_vals.shape, test_input.shape)
        assertArraysAlmostEqual(
            sal_vals.reshape(-1).tolist(),
            grad_vals.reshape(-1).tolist(),
            delta=0.001,
        )