def _assert_attributions(
    self,
    model: Module,
    layer: Module,
    inputs: Tensor,
    baselines: Union[Tensor, Callable[..., Tensor]],
    neuron_ind: Union[int, Tuple[Union[int, slice], ...]],
    n_samples: int = 5,
) -> None:
    """Assert that NeuronGradientShap attributions approximately match the
    mean of per-baseline NeuronIntegratedGradients attributions.

    Args:
        model: Model under test.
        layer: Layer whose neuron is attributed.
        inputs: Input tensor passed to both attribution methods.
        baselines: Baseline tensor, or a callable producing one from inputs.
        neuron_ind: Index (or tuple of indices/slices) selecting the neuron.
        n_samples: Number of GradientShap samples to draw.
    """
    neuron_gs = NeuronGradientShap(model, layer)
    neuron_ig = NeuronIntegratedGradients(model, layer)
    gs_attrs = neuron_gs.attribute(
        inputs, neuron_ind, baselines=baselines, n_samples=n_samples, stdevs=0.09
    )
    # Materialize baselines if they were given as a factory callable.
    if callable(baselines):
        baselines = baselines(inputs)
    # IG is run once per baseline; GradientShap should approximate their mean.
    per_baseline_ig = [
        neuron_ig.attribute(inputs, neuron_ind, baselines=single.unsqueeze(0))
        for single in torch.unbind(baselines)
    ]
    mean_ig = torch.stack(per_baseline_ig, dim=0).mean(dim=0)
    self.assertTrue(neuron_gs.multiplies_by_inputs)
    assertTensorAlmostEqual(self, gs_attrs, mean_ig, 0.5)
def _ig_input_test_assert(
    self,
    model: Module,
    target_layer: Module,
    test_input: TensorOrTupleOfTensorsGeneric,
    test_neuron: Union[int, Tuple[Union[int, slice], ...]],
    expected_input_ig: Union[List[float], Tuple[List[List[float]], ...]],
    additional_input: Any = None,
    multiply_by_inputs: bool = True,
) -> None:
    """Assert NeuronIntegratedGradients attributions match expected values
    across several internal batch sizes.

    Args:
        model: Model under test.
        target_layer: Layer whose neuron is attributed.
        test_input: Input tensor(s) passed to attribution.
        test_neuron: Index (or tuple of indices/slices) selecting the neuron.
        expected_input_ig: Expected attribution values to compare against.
        additional_input: Extra forward args forwarded to the model, if any.
        multiply_by_inputs: Whether attributions are scaled by inputs.
    """
    # Results must be independent of how the integration steps are batched.
    for internal_batch_size in [None, 5, 20]:
        grad = NeuronIntegratedGradients(
            model, target_layer, multiply_by_inputs=multiply_by_inputs
        )
        # assertEqual (not the deprecated assertEquals alias, removed in
        # Python 3.12) verifies the flag round-trips through the constructor.
        self.assertEqual(grad.multiplies_by_inputs, multiply_by_inputs)
        attributions = grad.attribute(
            test_input,
            test_neuron,
            n_steps=200,
            method="gausslegendre",
            additional_forward_args=additional_input,
            internal_batch_size=internal_batch_size,
        )
        assertTensorTuplesAlmostEqual(self, attributions, expected_input_ig, delta=0.1)
def _ig_input_test_assert(
    self,
    model,
    target_layer,
    test_input,
    test_neuron,
    expected_input_ig,
    additional_input=None,
):
    """Assert NeuronIntegratedGradients attributions match expected values
    across several internal batch sizes.

    Args:
        model: Model under test.
        target_layer: Layer whose neuron is attributed.
        test_input: Input tensor (or tuple of tensors) passed to attribution.
        test_neuron: Index selecting the neuron within the target layer.
        expected_input_ig: Expected values; a tuple of per-input nested lists
            when test_input is a tuple, otherwise a flat list.
        additional_input: Extra forward args forwarded to the model, if any.
    """
    # Results must be independent of how the integration steps are batched.
    for internal_batch_size in [None, 1, 20]:
        grad = NeuronIntegratedGradients(model, target_layer)
        attributions = grad.attribute(
            test_input,
            test_neuron,
            n_steps=500,
            method="gausslegendre",
            additional_forward_args=additional_input,
            internal_batch_size=internal_batch_size,
        )
        if isinstance(expected_input_ig, tuple):
            # Explicit length checks so a size mismatch fails loudly instead
            # of being silently truncated by zip.
            self.assertEqual(len(attributions), len(expected_input_ig))
            for attr, expected in zip(attributions, expected_input_ig):
                self.assertEqual(attr.shape[0], len(expected))
                for attr_row, expected_row in zip(attr, expected):
                    assertArraysAlmostEqual(
                        attr_row.squeeze(0).tolist(),
                        expected_row,
                        delta=0.1,
                    )
        else:
            assertArraysAlmostEqual(
                attributions.squeeze(0).tolist(), expected_input_ig, delta=0.1
            )
def _assert_attributions(
    self, model, layer, inputs, baselines, neuron_ind, n_samples=5
):
    """Assert that NeuronGradientShap attributions approximately match the
    mean of per-baseline NeuronIntegratedGradients attributions.

    Args:
        model: Model under test.
        layer: Layer whose neuron is attributed.
        inputs: Input tensor passed to both attribution methods.
        baselines: Baseline tensor, or a callable producing one from inputs.
        neuron_ind: Index selecting the neuron within the layer.
        n_samples: Number of GradientShap samples to draw.
    """
    ngs = NeuronGradientShap(model, layer)
    nig = NeuronIntegratedGradients(model, layer)
    attrs_gs = ngs.attribute(
        inputs, neuron_ind, baselines=baselines, n_samples=n_samples, stdevs=0.09
    )
    # Materialize baselines if they were given as a factory callable.
    if callable(baselines):
        baselines = baselines(inputs)
    # IG is run once per baseline; GradientShap should approximate their mean.
    attrs_ig = []
    for baseline in baselines:
        attrs_ig.append(
            nig.attribute(inputs, neuron_ind, baselines=baseline.unsqueeze(0))
        )
    # Use torch's canonical `dim` keyword (not the NumPy-style `axis` alias)
    # for consistency with the typed variant of this helper.
    attrs_ig = torch.stack(attrs_ig, dim=0).mean(dim=0)
    assertTensorAlmostEqual(self, attrs_gs, attrs_ig, 0.5)
def _ig_matching_test_assert(
    self,
    model: Module,
    output_layer: Module,
    test_input: Tensor,
    baseline: Union[None, Tensor] = None,
) -> None:
    """Assert that, for every output neuron, NeuronIntegratedGradients on the
    output layer matches IntegratedGradients with that neuron as the target.

    Args:
        model: Model under test.
        output_layer: The model's output layer.
        test_input: Input tensor passed to both attribution methods.
        baseline: Optional baseline forwarded to both methods.
    """
    output = model(test_input)
    full_ig = IntegratedGradients(model)
    neuron_ig = NeuronIntegratedGradients(model, output_layer)
    num_outputs = output.shape[1]
    for neuron_idx in range(num_outputs):
        target_attrs = full_ig.attribute(
            test_input, target=neuron_idx, baselines=baseline
        )
        neuron_attrs = neuron_ig.attribute(
            test_input, (neuron_idx,), baselines=baseline
        )
        # Flattened attributions from the two methods must agree elementwise.
        assertArraysAlmostEqual(
            target_attrs.reshape(-1).tolist(),
            neuron_attrs.reshape(-1).tolist(),
            delta=0.001,
        )
        self.assertEqual(neuron_attrs.shape, test_input.shape)