def _assert_delta(test: BaseTest, delta: Tensor) -> None: delta_condition = all(abs(delta.numpy().flatten()) < 0.0006) test.assertTrue( delta_condition, "Sum of SHAP values {} does" " not match the difference of endpoints.".format(delta), )
def _scale_input(input: Tensor, baseline: Union[Tensor, int, float], rand_coefficient: Tensor) -> Tensor: # batch size bsz = input.shape[0] inp_shape_wo_bsz = input.shape[1:] inp_shape = (bsz, ) + tuple([1] * len(inp_shape_wo_bsz)) # expand and reshape the indices rand_coefficient = rand_coefficient.view(inp_shape) input_baseline_scaled = ( rand_coefficient * input + (1.0 - rand_coefficient) * baseline).requires_grad_() return input_baseline_scaled
def _assert_attribution_delta(
    test: "BaseTest",
    inputs: Union[Tensor, Tuple[Tensor, ...]],
    attributions: Union[Tensor, Tuple[Tensor, ...]],
    n_samples: int,
    delta: Tensor,
    is_layer: bool = False,
) -> None:
    """Check that attribution shapes match the inputs and delta is near zero.

    Fix: wrap bare tensors in 1-tuples before zipping. The original zipped two
    Tensors directly, which iterates over dim-0 slices and compares per-row
    shapes — a batch-size mismatch between inputs and attributions would
    silently truncate and go undetected.
    """
    if not is_layer:
        inputs_tuple = inputs if isinstance(inputs, tuple) else (inputs,)
        attributions_tuple = (
            attributions if isinstance(attributions, tuple) else (attributions,)
        )
        for input, attribution in zip(inputs_tuple, attributions_tuple):
            test.assertEqual(attribution.shape, input.shape)
    bsz = inputs[0].shape[0] if isinstance(inputs, tuple) else inputs.shape[0]
    # delta is reported per (example, sample) pair flattened into one vector;
    # average over the n_samples draws to get one delta per example.
    test.assertEqual([bsz * n_samples], list(delta.shape))
    delta = torch.mean(delta.reshape(bsz, -1), dim=1)
    _assert_delta(test, delta)