def _compute_attribution_batch_helper_evaluate(
    self,
    model: Module,
    inputs: TensorOrTupleOfTensorsGeneric,
    baselines: Union[None, Tensor, Tuple[Tensor, ...]] = None,
    target: Union[None, int] = None,
    additional_forward_args: Any = None,
    approximation_method: str = "gausslegendre",
) -> None:
    ig = IntegratedGradients(model)
    if not isinstance(inputs, tuple):
        inputs = (inputs,)  # type: ignore
    inputs: Tuple[Tensor, ...]
    if baselines is not None and not isinstance(baselines, tuple):
        baselines = (baselines,)
    if baselines is None:
        baselines = _tensorize_baseline(inputs, _zeros(inputs))
    for internal_batch_size in [None, 10, 20]:
        attributions, delta = ig.attribute(
            inputs,
            baselines,
            additional_forward_args=additional_forward_args,
            method=approximation_method,
            n_steps=100,
            target=target,
            internal_batch_size=internal_batch_size,
            return_convergence_delta=True,
        )
        total_delta = 0.0
        for i in range(inputs[0].shape[0]):
            attributions_indiv, delta_indiv = ig.attribute(
                tuple(input[i:i + 1] for input in inputs),
                tuple(baseline[i:i + 1] for baseline in baselines),
                additional_forward_args=additional_forward_args,
                method=approximation_method,
                n_steps=100,
                target=target,
                internal_batch_size=internal_batch_size,
                return_convergence_delta=True,
            )
            total_delta += abs(delta_indiv).sum().item()
            for j in range(len(attributions)):
                assertTensorAlmostEqual(
                    self,
                    attributions[j][i:i + 1].squeeze(0),
                    attributions_indiv[j].squeeze(0),
                    delta=0.05,
                    mode="max",
                )
        self.assertAlmostEqual(
            abs(delta).sum().item(), total_delta, delta=0.005
        )
def _compute_attribution_batch_helper_evaluate(
    self, model, inputs, baselines=None, target=None, additional_forward_args=None
):
    ig = IntegratedGradients(model)
    if not isinstance(inputs, tuple):
        inputs = (inputs,)
    if baselines is not None and not isinstance(baselines, tuple):
        baselines = (baselines,)
    if baselines is None:
        baselines = _tensorize_baseline(inputs, _zeros(inputs))
    for method in [
        "riemann_right",
        "riemann_left",
        "riemann_middle",
        "riemann_trapezoid",
        "gausslegendre",
    ]:
        for internal_batch_size in [None, 1, 20]:
            attributions, delta = ig.attribute(
                inputs,
                baselines,
                additional_forward_args=additional_forward_args,
                method=method,
                n_steps=100,
                target=target,
                internal_batch_size=internal_batch_size,
                return_convergence_delta=True,
            )
            total_delta = 0
            for i in range(inputs[0].shape[0]):
                attributions_indiv, delta_indiv = ig.attribute(
                    tuple(input[i:i + 1] for input in inputs),
                    tuple(baseline[i:i + 1] for baseline in baselines),
                    additional_forward_args=additional_forward_args,
                    method=method,
                    n_steps=100,
                    target=target,
                    return_convergence_delta=True,
                )
                total_delta += abs(delta_indiv).sum().item()
                for j in range(len(attributions)):
                    assertArraysAlmostEqual(
                        attributions[j][i:i + 1].squeeze(0).tolist(),
                        attributions_indiv[j].squeeze(0).tolist(),
                    )
            self.assertAlmostEqual(
                abs(delta).sum().item(), total_delta, delta=0.005
            )
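# The two helpers above exercise a property of IntegratedGradients that is easy
# to check in isolation: attributing a full batch (with any internal_batch_size)
# should match attributing the examples one at a time. Below is a minimal,
# hedged sketch of that check; the toy model, shapes, and tolerance are
# assumptions made up for illustration and are not part of the test suite.
def _example_batch_vs_single_example_attribution():
    import torch
    import torch.nn as nn
    from captum.attr import IntegratedGradients

    torch.manual_seed(0)
    model = nn.Sequential(nn.Linear(3, 4), nn.ReLU(), nn.Linear(4, 2))
    inputs = torch.randn(6, 3)

    ig = IntegratedGradients(model)
    # Attribute the whole batch, forcing chunked forward/backward passes.
    batch_attr = ig.attribute(inputs, target=0, internal_batch_size=10)
    # Attribute each example separately and stack the results.
    single_attr = torch.cat(
        [ig.attribute(inputs[i:i + 1], target=0) for i in range(inputs.shape[0])]
    )
    assert torch.allclose(batch_attr, single_attr, atol=1e-4)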
def attribute( self, inputs: Union[Tensor, Tuple[Tensor, ...]], baselines: BaselineType = None, target: TargetType = None, additional_forward_args: Any = None, return_convergence_delta: bool = False, attribute_to_layer_input: bool = False, custom_attribution_func: Union[None, Callable[..., Tuple[Tensor, ...]]] = None, ) -> Union[Tensor, Tuple[Tensor, ...], Tuple[Union[Tensor, Tuple[ Tensor, ...]], Tensor]]: r""" Args: inputs (tensor or tuple of tensors): Input for which layer attributions are computed. If forward_func takes a single tensor as input, a single input tensor should be provided. If forward_func takes multiple tensors as input, a tuple of the input tensors should be provided. It is assumed that for all given input tensors, dimension 0 corresponds to the number of examples (aka batch size), and if multiple input tensors are provided, the examples must be aligned appropriately. baselines (scalar, tensor, tuple of scalars or tensors, optional): Baselines define reference samples that are compared with the inputs. In order to assign attribution scores DeepLift computes the differences between the inputs/outputs and corresponding references. Baselines can be provided as: - a single tensor, if inputs is a single tensor, with exactly the same dimensions as inputs or the first dimension is one and the remaining dimensions match with inputs. - a single scalar, if inputs is a single tensor, which will be broadcasted for each input value in input tensor. - a tuple of tensors or scalars, the baseline corresponding to each tensor in the inputs' tuple can be: - either a tensor with matching dimensions to corresponding tensor in the inputs' tuple or the first dimension is one and the remaining dimensions match with the corresponding input tensor. - or a scalar, corresponding to a tensor in the inputs' tuple. This scalar value is broadcasted for corresponding input tensor. In the cases when `baselines` is not provided, we internally use zero scalar corresponding to each input tensor. Default: None target (int, tuple, tensor or list, optional): Output indices for which gradients are computed (for classification cases, this is usually the target class). If the network returns a scalar value per example, no target index is necessary. For general 2D outputs, targets can be either: - a single integer or a tensor containing a single integer, which is applied to all input examples - a list of integers or a 1D tensor, with length matching the number of examples in inputs (dim 0). Each integer is applied as the target for the corresponding example. For outputs with > 2 dimensions, targets can be either: - A single tuple, which contains #output_dims - 1 elements. This target index is applied to all examples. - A list of tuples with length equal to the number of examples in inputs (dim 0), and each tuple containing #output_dims - 1 elements. Each tuple is applied as the target for the corresponding example. Default: None additional_forward_args (any, optional): If the forward function requires additional arguments other than the inputs for which attributions should not be computed, this argument can be provided. It must be either a single additional argument of a Tensor or arbitrary (non-tuple) type or a tuple containing multiple additional arguments including tensors or any arbitrary python types. These arguments are provided to forward_func in order, following the arguments in inputs. Note that attributions are not computed with respect to these arguments. 
Default: None return_convergence_delta (bool, optional): Indicates whether to return convergence delta or not. If `return_convergence_delta` is set to True convergence delta will be returned in a tuple following attributions. Default: False attribute_to_layer_input (bool, optional): Indicates whether to compute the attribution with respect to the layer input or output. If `attribute_to_layer_input` is set to True then the attributions will be computed with respect to layer input, otherwise it will be computed with respect to layer output. Note that currently it is assumed that either the input or the output of internal layer, depending on whether we attribute to the input or output, is a single tensor. Support for multiple tensors will be added later. Default: False custom_attribution_func (callable, optional): A custom function for computing final attribution scores. This function can take at least one and at most three arguments with the following signature: - custom_attribution_func(multipliers) - custom_attribution_func(multipliers, inputs) - custom_attribution_func(multipliers, inputs, baselines) In case this function is not provided, we use the default logic defined as: multipliers * (inputs - baselines) It is assumed that all input arguments, `multipliers`, `inputs` and `baselines` are provided in tuples of same length. `custom_attribution_func` returns a tuple of attribution tensors that have the same length as the `inputs`. Default: None Returns: **attributions** or 2-element tuple of **attributions**, **delta**: - **attributions** (*tensor* or tuple of *tensors*): Attribution score computed based on DeepLift's rescale rule with respect to layer's inputs or outputs. Attributions will always be the same size as the provided layer's inputs or outputs, depending on whether we attribute to the inputs or outputs of the layer. If the layer input / output is a single tensor, then just a tensor is returned; if the layer input / output has multiple tensors, then a corresponding tuple of tensors is returned. - **delta** (*tensor*, returned if return_convergence_delta=True): This is computed using the property that the total sum of forward_func(inputs) - forward_func(baselines) must equal the total sum of the attributions computed based on DeepLift's rescale rule. Delta is calculated per example, meaning that the number of elements in returned delta tensor is equal to the number of of examples in input. Note that the logic described for deltas is guaranteed when the default logic for attribution computations is used, meaning that the `custom_attribution_func=None`, otherwise it is not guaranteed and depends on the specifics of the `custom_attribution_func`. Examples:: >>> # ImageClassifier takes a single input tensor of images Nx3x32x32, >>> # and returns an Nx10 tensor of class probabilities. >>> net = ImageClassifier() >>> # creates an instance of LayerDeepLift to interpret target >>> # class 1 with respect to conv4 layer. >>> dl = LayerDeepLift(net, net.conv4) >>> input = torch.randn(1, 3, 32, 32, requires_grad=True) >>> # Computes deeplift attribution scores for conv4 layer and class 3. 
>>> attribution = dl.attribute(input, target=1) """ inputs = _format_input(inputs) baselines = _format_baseline(baselines, inputs) gradient_mask = apply_gradient_requirements(inputs) _validate_input(inputs, baselines) baselines = _tensorize_baseline(inputs, baselines) main_model_hooks = [] try: main_model_hooks = self._hook_main_model() self.model.apply(lambda mod: self._register_hooks( mod, attribute_to_layer_input=attribute_to_layer_input)) additional_forward_args = _format_additional_forward_args( additional_forward_args) expanded_target = _expand_target( target, 2, expansion_type=ExpansionTypes.repeat) wrapped_forward_func = self._construct_forward_func( self.model, (inputs, baselines), expanded_target, additional_forward_args, ) def chunk_output_fn( out: TensorOrTupleOfTensorsGeneric) -> Sequence: if isinstance(out, Tensor): return out.chunk(2) return tuple(out_sub.chunk(2) for out_sub in out) gradients, attrs = compute_layer_gradients_and_eval( wrapped_forward_func, self.layer, inputs, attribute_to_layer_input=attribute_to_layer_input, output_fn=lambda out: chunk_output_fn(out), ) attr_inputs = tuple(map(lambda attr: attr[0], attrs)) attr_baselines = tuple(map(lambda attr: attr[1], attrs)) gradients = tuple(map(lambda grad: grad[0], gradients)) if custom_attribution_func is None: if self.multiplies_by_inputs: attributions = tuple( (input - baseline) * gradient for input, baseline, gradient in zip( attr_inputs, attr_baselines, gradients)) else: attributions = gradients else: attributions = _call_custom_attribution_func( custom_attribution_func, gradients, attr_inputs, attr_baselines) finally: # remove hooks from all activations self._remove_hooks(main_model_hooks) undo_gradient_requirements(inputs, gradient_mask) return _compute_conv_delta_and_format_attrs( self, return_convergence_delta, attributions, baselines, inputs, additional_forward_args, target, cast(Union[Literal[True], Literal[False]], len(attributions) > 1), )
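# Hedged usage sketch for LayerDeepLift.attribute. The docstring above describes
# `custom_attribution_func` and `return_convergence_delta` but only shows the
# basic call; the tiny CNN below, its layer names, and the shapes are
# assumptions made up for illustration.
def _example_layer_deeplift_usage():
    import torch
    import torch.nn as nn
    from captum.attr import LayerDeepLift

    class TinyCNN(nn.Module):
        def __init__(self):
            super().__init__()
            self.conv1 = nn.Conv2d(3, 4, kernel_size=3, padding=1)
            self.relu = nn.ReLU()
            self.pool = nn.AdaptiveAvgPool2d(1)
            self.fc = nn.Linear(4, 10)

        def forward(self, x):
            return self.fc(self.pool(self.relu(self.conv1(x))).flatten(1))

    net = TinyCNN()
    inp = torch.randn(2, 3, 8, 8)

    ldl = LayerDeepLift(net, net.conv1)
    # Default rescale-rule attributions plus a per-example convergence delta.
    attr, delta = ldl.attribute(
        inp, baselines=0, target=1, return_convergence_delta=True
    )

    # A custom attribution function receives (multipliers, inputs, baselines)
    # as tuples of equal length and must return a tuple of the same length.
    def halved(multipliers, inputs, baselines):
        return tuple(
            0.5 * m * (i - b) for m, i, b in zip(multipliers, inputs, baselines)
        )

    attr_custom = ldl.attribute(
        inp, baselines=0, target=1, custom_attribution_func=halved
    )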
def attribute( self, inputs: Union[Tensor, Tuple[Tensor, ...]], baselines: Optional[ Union[Tensor, int, float, Tuple[Union[Tensor, int, float], ...]] ] = None, target: Optional[ Union[int, Tuple[int, ...], Tensor, List[Tuple[int, ...]]] ] = None, additional_forward_args: Any = None, n_steps: int = 50, method: str = "gausslegendre", internal_batch_size: Optional[int] = None, return_convergence_delta: bool = False, attribute_to_layer_input: bool = False, ) -> Union[ Tensor, Tuple[Tensor, ...], Tuple[Union[Tensor, Tuple[Tensor, ...]], Tensor] ]: r""" This method attributes the output of the model with given target index (in case it is provided, otherwise it assumes that output is a scalar) to layer inputs or outputs of the model, depending on whether `attribute_to_layer_input` is set to True or False, using the approach described above. In addition to that it also returns, if `return_convergence_delta` is set to True, integral approximation delta based on the completeness property of integrated gradients. Args: inputs (tensor or tuple of tensors): Input for which layer integrated gradients are computed. If forward_func takes a single tensor as input, a single input tensor should be provided. If forward_func takes multiple tensors as input, a tuple of the input tensors should be provided. It is assumed that for all given input tensors, dimension 0 corresponds to the number of examples, and if multiple input tensors are provided, the examples must be aligned appropriately. baselines (scalar, tensor, tuple of scalars or tensors, optional): Baselines define the starting point from which integral is computed and can be provided as: - a single tensor, if inputs is a single tensor, with exactly the same dimensions as inputs or the first dimension is one and the remaining dimensions match with inputs. - a single scalar, if inputs is a single tensor, which will be broadcasted for each input value in input tensor. - a tuple of tensors or scalars, the baseline corresponding to each tensor in the inputs' tuple can be: - either a tensor with matching dimensions to corresponding tensor in the inputs' tuple or the first dimension is one and the remaining dimensions match with the corresponding input tensor. - or a scalar, corresponding to a tensor in the inputs' tuple. This scalar value is broadcasted for corresponding input tensor. In the cases when `baselines` is not provided, we internally use zero scalar corresponding to each input tensor. Default: None target (int, tuple, tensor or list, optional): Output indices for which gradients are computed (for classification cases, this is usually the target class). If the network returns a scalar value per example, no target index is necessary. For general 2D outputs, targets can be either: - a single integer or a tensor containing a single integer, which is applied to all input examples - a list of integers or a 1D tensor, with length matching the number of examples in inputs (dim 0). Each integer is applied as the target for the corresponding example. For outputs with > 2 dimensions, targets can be either: - A single tuple, which contains #output_dims - 1 elements. This target index is applied to all examples. - A list of tuples with length equal to the number of examples in inputs (dim 0), and each tuple containing #output_dims - 1 elements. Each tuple is applied as the target for the corresponding example. 
Default: None additional_forward_args (any, optional): If the forward function requires additional arguments other than the inputs for which attributions should not be computed, this argument can be provided. It must be either a single additional argument of a Tensor or arbitrary (non-tuple) type or a tuple containing multiple additional arguments including tensors or any arbitrary python types. These arguments are provided to forward_func in order following the arguments in inputs. For a tensor, the first dimension of the tensor must correspond to the number of examples. It will be repeated for each of `n_steps` along the integrated path. For all other types, the given argument is used for all forward evaluations. Note that attributions are not computed with respect to these arguments. Default: None n_steps (int, optional): The number of steps used by the approximation method. Default: 50. method (string, optional): Method for approximating the integral, one of `riemann_right`, `riemann_left`, `riemann_middle`, `riemann_trapezoid` or `gausslegendre`. Default: `gausslegendre` if no method is provided. internal_batch_size (int, optional): Divides total #steps * #examples data points into chunks of size internal_batch_size, which are computed (forward / backward passes) sequentially. For DataParallel models, each batch is split among the available devices, so evaluations on each available device contain internal_batch_size / num_devices examples. If internal_batch_size is None, then all evaluations are processed in one batch. Default: None return_convergence_delta (bool, optional): Indicates whether to return convergence delta or not. If `return_convergence_delta` is set to True convergence delta will be returned in a tuple following attributions. Default: False attribute_to_layer_input (bool, optional): Indicates whether to compute the attribution with respect to the layer input or output. If `attribute_to_layer_input` is set to True then the attributions will be computed with respect to layer input, otherwise it will be computed with respect to layer output. Note that currently it is assumed that either the input or the output of internal layer, depending on whether we attribute to the input or output, is a single tensor. Support for multiple tensors will be added later. Default: False Returns: **attributions** or 2-element tuple of **attributions**, **delta**: - **attributions** (*tensor* or tuple of *tensors*): Integrated gradients with respect to `layer`'s inputs or outputs. Attributions will always be the same size and dimensionality as the input or output of the given layer, depending on whether we attribute to the inputs or outputs of the layer which is decided by the input flag `attribute_to_layer_input`. - **delta** (*tensor*, returned if return_convergence_delta=True): The difference between the total approximated and true integrated gradients. This is computed using the property that the total sum of forward_func(inputs) - forward_func(baselines) must equal the total sum of the integrated gradient. Delta is calculated per example, meaning that the number of elements in returned delta tensor is equal to the number of of examples in inputs. Examples:: >>> # ImageClassifier takes a single input tensor of images Nx3x32x32, >>> # and returns an Nx10 tensor of class probabilities. >>> # It contains an attribute conv1, which is an instance of nn.conv2d, >>> # and the output of this layer has dimensions Nx12x32x32. 
>>> net = ImageClassifier() >>> lig = LayerIntegratedGradients(net, net.conv1) >>> input = torch.randn(2, 3, 32, 32, requires_grad=True) >>> # Computes layer integrated gradients for class 3. >>> # attribution size matches layer output, Nx12x32x32 >>> attribution = lig.attribute(input, target=3) """ inps, baselines = _format_input_baseline(inputs, baselines) _validate_input(inps, baselines, n_steps, method) baselines = _tensorize_baseline(inps, baselines) additional_forward_args = _format_additional_forward_args( additional_forward_args ) if self.device_ids is None: self.device_ids = getattr(self.forward_func, "device_ids", None) inputs_layer, is_layer_tuple = _forward_layer_eval( self.forward_func, inps, self.layer, device_ids=self.device_ids, additional_forward_args=additional_forward_args, attribute_to_layer_input=attribute_to_layer_input, ) baselines_layer, _ = _forward_layer_eval( self.forward_func, baselines, self.layer, device_ids=self.device_ids, additional_forward_args=additional_forward_args, attribute_to_layer_input=attribute_to_layer_input, ) # inputs -> these inputs are scaled def gradient_func( forward_fn: Callable, inputs: Union[Tensor, Tuple[Tensor, ...]], target_ind: Optional[ Union[int, Tuple[int, ...], Tensor, List[Tuple[int, ...]]] ] = None, additional_forward_args: Any = None, ) -> Tuple[Tensor, ...]: if self.device_ids is None: scattered_inputs = (inputs,) else: # scatter method does not have a precise enough return type in its # stub, so suppress the type warning. scattered_inputs = scatter( # type:ignore inputs, target_gpus=self.device_ids ) scattered_inputs_dict = { scattered_input[0].device: scattered_input for scattered_input in scattered_inputs } with torch.autograd.set_grad_enabled(True): def layer_forward_hook(module, hook_inputs, hook_outputs=None): device = _extract_device(module, hook_inputs, hook_outputs) if is_layer_tuple: return scattered_inputs_dict[device] return scattered_inputs_dict[device][0] if attribute_to_layer_input: hook = self.layer.register_forward_pre_hook(layer_forward_hook) else: hook = self.layer.register_forward_hook(layer_forward_hook) output = _run_forward( self.forward_func, additional_forward_args, target_ind, ) hook.remove() assert output[0].numel() == 1, ( "Target not provided when necessary, cannot" " take gradient with respect to multiple outputs." ) # torch.unbind(forward_out) is a list of scalar tensor tuples and # contains batch_size * #steps elements grads = torch.autograd.grad(torch.unbind(output), inputs) return grads self.ig.gradient_func = gradient_func all_inputs = ( (inps + additional_forward_args) if additional_forward_args is not None else inps ) attributions = self.ig.attribute( inputs_layer, baselines=baselines_layer, target=target, additional_forward_args=all_inputs, n_steps=n_steps, method=method, internal_batch_size=internal_batch_size, return_convergence_delta=False, ) if return_convergence_delta: start_point, end_point = baselines, inps # computes approximation error based on the completeness axiom delta = self.compute_convergence_delta( attributions, start_point, end_point, additional_forward_args=additional_forward_args, target=target, ) return _format_attributions(is_layer_tuple, attributions), delta return _format_attributions(is_layer_tuple, attributions)
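# Hedged usage sketch for LayerIntegratedGradients.attribute, illustrating the
# `internal_batch_size` and `return_convergence_delta` arguments documented
# above. The toy network and shapes are assumptions made up for illustration.
def _example_layer_integrated_gradients_usage():
    import torch
    import torch.nn as nn
    from captum.attr import LayerIntegratedGradients

    class TinyCNN(nn.Module):
        def __init__(self):
            super().__init__()
            self.conv1 = nn.Conv2d(3, 12, kernel_size=3, padding=1)
            self.pool = nn.AdaptiveAvgPool2d(1)
            self.fc = nn.Linear(12, 10)

        def forward(self, x):
            return self.fc(self.pool(torch.relu(self.conv1(x))).flatten(1))

    net = TinyCNN()
    inp = torch.randn(2, 3, 32, 32)

    lig = LayerIntegratedGradients(net, net.conv1)
    # Attributions take the shape of conv1's output (2 x 12 x 32 x 32); the
    # n_steps scaled inputs are evaluated in chunks of internal_batch_size.
    attr, delta = lig.attribute(
        inp,
        baselines=0,
        target=3,
        n_steps=64,
        internal_batch_size=16,
        return_convergence_delta=True,
    )
    # delta holds one completeness-gap value per example.
    assert list(delta.shape) == [2]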
def attribute( # type: ignore self, inputs: TensorOrTupleOfTensorsGeneric, baselines: BaselineType = None, target: TargetType = None, additional_forward_args: Any = None, return_convergence_delta: bool = False, custom_attribution_func: Union[None, Callable[..., Tuple[Tensor, ...]]] = None, ) -> Union[TensorOrTupleOfTensorsGeneric, Tuple[ TensorOrTupleOfTensorsGeneric, Tensor]]: r""" Args: inputs (tensor or tuple of tensors): Input for which attributions are computed. If forward_func takes a single tensor as input, a single input tensor should be provided. If forward_func takes multiple tensors as input, a tuple of the input tensors should be provided. It is assumed that for all given input tensors, dimension 0 corresponds to the number of examples (aka batch size), and if multiple input tensors are provided, the examples must be aligned appropriately. baselines (scalar, tensor, tuple of scalars or tensors, optional): Baselines define reference samples that are compared with the inputs. In order to assign attribution scores DeepLift computes the differences between the inputs/outputs and corresponding references. Baselines can be provided as: - a single tensor, if inputs is a single tensor, with exactly the same dimensions as inputs or the first dimension is one and the remaining dimensions match with inputs. - a single scalar, if inputs is a single tensor, which will be broadcasted for each input value in input tensor. - a tuple of tensors or scalars, the baseline corresponding to each tensor in the inputs' tuple can be: - either a tensor with matching dimensions to corresponding tensor in the inputs' tuple or the first dimension is one and the remaining dimensions match with the corresponding input tensor. - or a scalar, corresponding to a tensor in the inputs' tuple. This scalar value is broadcasted for corresponding input tensor. In the cases when `baselines` is not provided, we internally use zero scalar corresponding to each input tensor. Default: None target (int, tuple, tensor or list, optional): Output indices for which gradients are computed (for classification cases, this is usually the target class). If the network returns a scalar value per example, no target index is necessary. For general 2D outputs, targets can be either: - a single integer or a tensor containing a single integer, which is applied to all input examples - a list of integers or a 1D tensor, with length matching the number of examples in inputs (dim 0). Each integer is applied as the target for the corresponding example. For outputs with > 2 dimensions, targets can be either: - A single tuple, which contains #output_dims - 1 elements. This target index is applied to all examples. - A list of tuples with length equal to the number of examples in inputs (dim 0), and each tuple containing #output_dims - 1 elements. Each tuple is applied as the target for the corresponding example. Default: None additional_forward_args (any, optional): If the forward function requires additional arguments other than the inputs for which attributions should not be computed, this argument can be provided. It must be either a single additional argument of a Tensor or arbitrary (non-tuple) type or a tuple containing multiple additional arguments including tensors or any arbitrary python types. These arguments are provided to forward_func in order, following the arguments in inputs. Note that attributions are not computed with respect to these arguments. 
Default: None return_convergence_delta (bool, optional): Indicates whether to return convergence delta or not. If `return_convergence_delta` is set to True convergence delta will be returned in a tuple following attributions. Default: False custom_attribution_func (callable, optional): A custom function for computing final attribution scores. This function can take at least one and at most three arguments with the following signature: - custom_attribution_func(multipliers) - custom_attribution_func(multipliers, inputs) - custom_attribution_func(multipliers, inputs, baselines) In case this function is not provided, we use the default logic defined as: multipliers * (inputs - baselines) It is assumed that all input arguments, `multipliers`, `inputs` and `baselines` are provided in tuples of same length. `custom_attribution_func` returns a tuple of attribution tensors that have the same length as the `inputs`. Default: None Returns: **attributions** or 2-element tuple of **attributions**, **delta**: - **attributions** (*tensor* or tuple of *tensors*): Attribution score computed based on DeepLift rescale rule with respect to each input feature. Attributions will always be the same size as the provided inputs, with each value providing the attribution of the corresponding input index. If a single tensor is provided as inputs, a single tensor is returned. If a tuple is provided for inputs, a tuple of corresponding sized tensors is returned. - **delta** (*tensor*, returned if return_convergence_delta=True): This is computed using the property that the total sum of forward_func(inputs) - forward_func(baselines) must equal the total sum of the attributions computed based on DeepLift's rescale rule. Delta is calculated per example, meaning that the number of elements in returned delta tensor is equal to the number of of examples in input. Note that the logic described for deltas is guaranteed when the default logic for attribution computations is used, meaning that the `custom_attribution_func=None`, otherwise it is not guaranteed and depends on the specifics of the `custom_attribution_func`. Examples:: >>> # ImageClassifier takes a single input tensor of images Nx3x32x32, >>> # and returns an Nx10 tensor of class probabilities. >>> net = ImageClassifier() >>> dl = DeepLift(net) >>> input = torch.randn(2, 3, 32, 32, requires_grad=True) >>> # Computes deeplift attribution scores for class 3. >>> attribution = dl.attribute(input, target=3) """ # Keeps track whether original input is a tuple or not before # converting it into a tuple. is_inputs_tuple = _is_tuple(inputs) inputs = _format_tensor_into_tuples(inputs) baselines = _format_baseline(baselines, inputs) gradient_mask = apply_gradient_requirements(inputs) _validate_input(inputs, baselines) # set hooks for baselines warnings.warn( """Setting forward, backward hooks and attributes on non-linear activations. 
The hooks and attributes will be removed after the attribution is finished""") baselines = _tensorize_baseline(inputs, baselines) main_model_hooks = [] try: main_model_hooks = self._hook_main_model() self.model.apply(self._register_hooks) additional_forward_args = _format_additional_forward_args( additional_forward_args) expanded_target = _expand_target( target, 2, expansion_type=ExpansionTypes.repeat) wrapped_forward_func = self._construct_forward_func( self.model, (inputs, baselines), expanded_target, additional_forward_args, ) gradients = self.gradient_func(wrapped_forward_func, inputs) if custom_attribution_func is None: if self.multiplies_by_inputs: attributions = tuple((input - baseline) * gradient for input, baseline, gradient in zip( inputs, baselines, gradients)) else: attributions = gradients else: attributions = _call_custom_attribution_func( custom_attribution_func, gradients, inputs, baselines) finally: # Even if any error is raised, remove all hooks before raising self._remove_hooks(main_model_hooks) undo_gradient_requirements(inputs, gradient_mask) return _compute_conv_delta_and_format_attrs( self, return_convergence_delta, attributions, baselines, inputs, additional_forward_args, target, is_inputs_tuple, )
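# Hedged usage sketch for DeepLift.attribute with a custom_attribution_func.
# Per the docstring above, the callable may take one, two, or three arguments;
# passing only the multipliers returns them directly instead of the default
# multipliers * (inputs - baselines). The toy model is an assumption made up
# for illustration.
def _example_deeplift_custom_attribution():
    import torch
    import torch.nn as nn
    from captum.attr import DeepLift

    net = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 3))
    inp = torch.randn(2, 4)

    dl = DeepLift(net)
    # Default rescale-rule attributions and per-example convergence delta.
    attr, delta = dl.attribute(
        inp, baselines=0.0, target=1, return_convergence_delta=True
    )
    # One-argument custom function: return the raw multipliers as-is.
    raw_multipliers = dl.attribute(
        inp, baselines=0.0, target=1, custom_attribution_func=lambda mult: mult
    )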
def _compute_attribution_and_evaluate(
    self,
    model: Module,
    inputs: TensorOrTupleOfTensorsGeneric,
    baselines: BaselineType = None,
    target: Union[None, int] = None,
    additional_forward_args: Any = None,
    type: str = "vanilla",
    approximation_method: str = "gausslegendre",
    multiply_by_inputs=True,
) -> Tuple[Tensor, ...]:
    r"""
    attrib_type: 'vanilla', 'smoothgrad', 'smoothgrad_sq', 'vargrad'
    """
    ig = IntegratedGradients(model, multiply_by_inputs=multiply_by_inputs)
    self.assertEqual(ig.multiplies_by_inputs, multiply_by_inputs)
    if not isinstance(inputs, tuple):
        inputs = (inputs,)  # type: ignore
    inputs: Tuple[Tensor, ...]
    if baselines is not None and not isinstance(baselines, tuple):
        baselines = (baselines,)
    if baselines is None:
        baselines = _tensorize_baseline(inputs, _zeros(inputs))

    if type == "vanilla":
        attributions, delta = ig.attribute(
            inputs,
            baselines,
            additional_forward_args=additional_forward_args,
            method=approximation_method,
            n_steps=500,
            target=target,
            return_convergence_delta=True,
        )
        model.zero_grad()
        attributions_without_delta, delta = ig.attribute(
            inputs,
            baselines,
            additional_forward_args=additional_forward_args,
            method=approximation_method,
            n_steps=500,
            target=target,
            return_convergence_delta=True,
        )
        model.zero_grad()
        self.assertEqual([inputs[0].shape[0]], list(delta.shape))
        delta_external = ig.compute_convergence_delta(
            attributions,
            baselines,
            inputs,
            target=target,
            additional_forward_args=additional_forward_args,
        )
        assertArraysAlmostEqual(delta, delta_external, 0.0)
    else:
        nt = NoiseTunnel(ig)
        n_samples = 5
        attributions, delta = nt.attribute(
            inputs,
            nt_type=type,
            nt_samples=n_samples,
            stdevs=0.00000002,
            baselines=baselines,
            target=target,
            additional_forward_args=additional_forward_args,
            method=approximation_method,
            n_steps=500,
            return_convergence_delta=True,
        )
        with self.assertWarns(DeprecationWarning):
            attributions_without_delta = nt.attribute(
                inputs,
                nt_type=type,
                n_samples=n_samples,
                stdevs=0.00000002,
                baselines=baselines,
                target=target,
                additional_forward_args=additional_forward_args,
                method=approximation_method,
                n_steps=500,
            )
        self.assertEqual(nt.multiplies_by_inputs, multiply_by_inputs)
        self.assertEqual([inputs[0].shape[0] * n_samples], list(delta.shape))

    for input, attribution in zip(inputs, attributions):
        self.assertEqual(attribution.shape, input.shape)
    if multiply_by_inputs:
        self.assertTrue(all(abs(delta.numpy().flatten()) < 0.07))

    # compare attributions retrieved with and without
    # `return_convergence_delta` flag
    for attribution, attribution_without_delta in zip(
        attributions, attributions_without_delta
    ):
        assertTensorAlmostEqual(
            self, attribution, attribution_without_delta, delta=0.05
        )

    return cast(Tuple[Tensor, ...], attributions)
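# Hedged sketch of the NoiseTunnel path exercised above: SmoothGrad-style
# averaging over noisy copies of the input, wrapped around IntegratedGradients.
# The toy model and stdev are assumptions made up for illustration; note that
# the samples keyword is `nt_samples` in newer captum versions (the helper
# above checks that the older `n_samples` spelling raises a DeprecationWarning).
def _example_noise_tunnel_smoothgrad():
    import torch
    import torch.nn as nn
    from captum.attr import IntegratedGradients, NoiseTunnel

    model = nn.Sequential(nn.Linear(3, 4), nn.ReLU(), nn.Linear(4, 2))
    inputs = torch.randn(4, 3)

    nt = NoiseTunnel(IntegratedGradients(model))
    attr = nt.attribute(
        inputs,
        nt_type="smoothgrad",
        nt_samples=5,
        stdevs=0.1,
        target=0,
        n_steps=50,
    )
    # Attributions keep the input shape; the noise samples are averaged out.
    assert attr.shape == inputs.shape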
def compute_convergence_delta( self, attributions: Union[Tensor, Tuple[Tensor, ...]], start_point: Union[None, int, float, Tensor, Tuple[Union[int, float, Tensor], ...]], end_point: Union[Tensor, Tuple[Tensor, ...]], target: TargetType = None, additional_forward_args: Any = None, ) -> Tensor: r""" Here we provide a specific implementation for `compute_convergence_delta` which is based on a common property among gradient-based attribution algorithms. In the literature sometimes it is also called completeness axiom. Completeness axiom states that the sum of the attribution must be equal to the differences of NN Models's function at its end and start points. In other words: sum(attributions) - (F(end_point) - F(start_point)) is close to zero. Returned delta of this method is defined as above stated difference. This implementation assumes that both the `start_point` and `end_point` have the same shape and dimensionality. It also assumes that the target must have the same number of examples as the `start_point` and the `end_point` in case it is provided in form of a list or a non-singleton tensor. Args: attributions (tensor or tuple of tensors): Precomputed attribution scores. The user can compute those using any attribution algorithm. It is assumed the the shape and the dimensionality of attributions must match the shape and the dimensionality of `start_point` and `end_point`. It also assumes that the attribution tensor's dimension 0 corresponds to the number of examples, and if multiple input tensors are provided, the examples must be aligned appropriately. start_point (tensor or tuple of tensors, optional): `start_point` is passed as an input to model's forward function. It is the starting point of attributions' approximation. It is assumed that both `start_point` and `end_point` have the same shape and dimensionality. end_point (tensor or tuple of tensors): `end_point` is passed as an input to model's forward function. It is the end point of attributions' approximation. It is assumed that both `start_point` and `end_point` have the same shape and dimensionality. target (int, tuple, tensor or list, optional): Output indices for which gradients are computed (for classification cases, this is usually the target class). If the network returns a scalar value per example, no target index is necessary. For general 2D outputs, targets can be either: - a single integer or a tensor containing a single integer, which is applied to all input examples - a list of integers or a 1D tensor, with length matching the number of examples in inputs (dim 0). Each integer is applied as the target for the corresponding example. For outputs with > 2 dimensions, targets can be either: - A single tuple, which contains #output_dims - 1 elements. This target index is applied to all examples. - A list of tuples with length equal to the number of examples in inputs (dim 0), and each tuple containing #output_dims - 1 elements. Each tuple is applied as the target for the corresponding example. Default: None additional_forward_args (any, optional): If the forward function requires additional arguments other than the inputs for which attributions should not be computed, this argument can be provided. It must be either a single additional argument of a Tensor or arbitrary (non-tuple) type or a tuple containing multiple additional arguments including tensors or any arbitrary python types. These arguments are provided to forward_func in order following the arguments in inputs. 
For a tensor, the first dimension of the tensor must correspond to the number of examples. `additional_forward_args` is used both for `start_point` and `end_point` when computing the forward pass. Default: None Returns: *tensor* of **deltas**: - **deltas** (*tensor*): This implementation returns convergence delta per sample. Deriving sub-classes may do any type of aggregation of those values, if necessary. """ end_point, start_point = _format_input_baseline(end_point, start_point) additional_forward_args = _format_additional_forward_args( additional_forward_args) # tensorizing start_point in case it is a scalar or one example baseline # If the batch size is large we could potentially also tensorize only one # sample and expand the output to the rest of the elements in the batch start_point = _tensorize_baseline(end_point, start_point) attributions = _format_tensor_into_tuples(attributions) # verify that the attributions and end_point match on 1st dimension for attribution, end_point_tnsr in zip(attributions, end_point): assert end_point_tnsr.shape[0] == attribution.shape[0], ( "Attributions tensor and the end_point must match on the first" " dimension but found attribution: {} and end_point: {}". format(attribution.shape[0], end_point_tnsr.shape[0])) num_samples = end_point[0].shape[0] _validate_input(end_point, start_point) _validate_target(num_samples, target) with torch.no_grad(): start_out_sum = _sum_rows( _run_forward(self.forward_func, start_point, target, additional_forward_args)) end_out_sum = _sum_rows( _run_forward(self.forward_func, end_point, target, additional_forward_args)) row_sums = [_sum_rows(attribution) for attribution in attributions] attr_sum = torch.stack( [cast(Tensor, sum(row_sum)) for row_sum in zip(*row_sums)]) _delta = attr_sum - (end_out_sum - start_out_sum) return _delta
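# Hedged sketch of compute_convergence_delta in use: the per-example delta
# returned here should match the delta produced by attribute() when
# return_convergence_delta=True, which mirrors the delta_external check in the
# test helper earlier in this section. The toy model and shapes are assumptions
# made up for illustration.
def _example_completeness_check():
    import torch
    import torch.nn as nn
    from captum.attr import IntegratedGradients

    model = nn.Sequential(nn.Linear(3, 4), nn.ReLU(), nn.Linear(4, 2))
    inputs = torch.randn(4, 3)
    baselines = torch.zeros_like(inputs)

    ig = IntegratedGradients(model)
    attr, delta = ig.attribute(
        inputs, baselines, target=0, return_convergence_delta=True
    )
    # start_point = baselines, end_point = inputs; one delta per example.
    delta_external = ig.compute_convergence_delta(
        attr, baselines, inputs, target=0
    )
    assert torch.allclose(delta, delta_external)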
def attribute( self, inputs: TensorOrTupleOfTensorsGeneric, baselines: BaselineType = None, target: TargetType = None, additional_forward_args: Any = None, feature_mask: Union[None, TensorOrTupleOfTensorsGeneric] = None, n_samples: int = 25, perturbations_per_eval: int = 1, show_progress: bool = False, ) -> TensorOrTupleOfTensorsGeneric: r""" NOTE: The feature_mask argument differs from other perturbation based methods, since feature indices can overlap across tensors. See the description of the feature_mask argument below for more details. Args: inputs (tensor or tuple of tensors): Input for which Shapley value sampling attributions are computed. If forward_func takes a single tensor as input, a single input tensor should be provided. If forward_func takes multiple tensors as input, a tuple of the input tensors should be provided. It is assumed that for all given input tensors, dimension 0 corresponds to the number of examples (aka batch size), and if multiple input tensors are provided, the examples must be aligned appropriately. baselines (scalar, tensor, tuple of scalars or tensors, optional): Baselines define reference value which replaces each feature when ablated. Baselines can be provided as: - a single tensor, if inputs is a single tensor, with exactly the same dimensions as inputs or the first dimension is one and the remaining dimensions match with inputs. - a single scalar, if inputs is a single tensor, which will be broadcasted for each input value in input tensor. - a tuple of tensors or scalars, the baseline corresponding to each tensor in the inputs' tuple can be: - either a tensor with matching dimensions to corresponding tensor in the inputs' tuple or the first dimension is one and the remaining dimensions match with the corresponding input tensor. - or a scalar, corresponding to a tensor in the inputs' tuple. This scalar value is broadcasted for corresponding input tensor. In the cases when `baselines` is not provided, we internally use zero scalar corresponding to each input tensor. Default: None target (int, tuple, tensor or list, optional): Output indices for which difference is computed (for classification cases, this is usually the target class). If the network returns a scalar value per example, no target index is necessary. For general 2D outputs, targets can be either: - a single integer or a tensor containing a single integer, which is applied to all input examples - a list of integers or a 1D tensor, with length matching the number of examples in inputs (dim 0). Each integer is applied as the target for the corresponding example. For outputs with > 2 dimensions, targets can be either: - A single tuple, which contains #output_dims - 1 elements. This target index is applied to all examples. - A list of tuples with length equal to the number of examples in inputs (dim 0), and each tuple containing #output_dims - 1 elements. Each tuple is applied as the target for the corresponding example. Default: None additional_forward_args (any, optional): If the forward function requires additional arguments other than the inputs for which attributions should not be computed, this argument can be provided. It must be either a single additional argument of a Tensor or arbitrary (non-tuple) type or a tuple containing multiple additional arguments including tensors or any arbitrary python types. These arguments are provided to forward_func in order following the arguments in inputs. For a tensor, the first dimension of the tensor must correspond to the number of examples. 
For all other types, the given argument is used for all forward evaluations. Note that attributions are not computed with respect to these arguments. Default: None feature_mask (tensor or tuple of tensors, optional): feature_mask defines a mask for the input, grouping features which should be added together. feature_mask should contain the same number of tensors as inputs. Each tensor should be the same size as the corresponding input or broadcastable to match the input tensor. Values across all tensors should be integers in the range 0 to num_features - 1, and indices corresponding to the same feature should have the same value. Note that features are grouped across tensors (unlike feature ablation and occlusion), so if the same index is used in different tensors, those features are still grouped and added simultaneously. If the forward function returns a single scalar per batch, we enforce that the first dimension of each mask must be 1, since attributions are returned batch-wise rather than per example, so the attributions must correspond to the same features (indices) in each input example. If None, then a feature mask is constructed which assigns each scalar within a tensor as a separate feature Default: None n_samples (int, optional): The number of feature permutations tested. Default: `25` if `n_samples` is not provided. perturbations_per_eval (int, optional): Allows multiple ablations to be processed simultaneously in one call to forward_fn. Each forward pass will contain a maximum of perturbations_per_eval * #examples samples. For DataParallel models, each batch is split among the available devices, so evaluations on each available device contain at most (perturbations_per_eval * #examples) / num_devices samples. If the forward function returns a single scalar per batch, perturbations_per_eval must be set to 1. Default: 1 show_progress (bool, optional): Displays the progress of computation. It will try to use tqdm if available for advanced features (e.g. time estimation). Otherwise, it will fallback to a simple output of progress. Default: False Returns: *tensor* or tuple of *tensors* of **attributions**: - **attributions** (*tensor* or tuple of *tensors*): The attributions with respect to each input feature. If the forward function returns a scalar value per example, attributions will be the same size as the provided inputs, with each value providing the attribution of the corresponding input index. If the forward function returns a scalar per batch, then attribution tensor(s) will have first dimension 1 and the remaining dimensions will match the input. If a single tensor is provided as inputs, a single tensor is returned. If a tuple is provided for inputs, a tuple of corresponding sized tensors is returned. Examples:: >>> # SimpleClassifier takes a single input tensor of size Nx4x4, >>> # and returns an Nx3 tensor of class probabilities. >>> net = SimpleClassifier() >>> # Generating random input with size 2 x 4 x 4 >>> input = torch.randn(2, 4, 4) >>> # Defining ShapleyValueSampling interpreter >>> svs = ShapleyValueSampling(net) >>> # Computes attribution, taking random orderings >>> # of the 16 features and computing the output change when adding >>> # each feature. We average over 200 trials (random permutations). >>> attr = svs.attribute(input, target=1, n_samples=200) >>> # Alternatively, we may want to add features in groups, e.g. >>> # grouping each 2x2 square of the inputs and adding them together. 
>>> # This can be done by creating a feature mask as follows, which >>> # defines the feature groups, e.g.: >>> # +---+---+---+---+ >>> # | 0 | 0 | 1 | 1 | >>> # +---+---+---+---+ >>> # | 0 | 0 | 1 | 1 | >>> # +---+---+---+---+ >>> # | 2 | 2 | 3 | 3 | >>> # +---+---+---+---+ >>> # | 2 | 2 | 3 | 3 | >>> # +---+---+---+---+ >>> # With this mask, all inputs with the same value are added >>> # together, and the attribution for each input in the same >>> # group (0, 1, 2, and 3) per example are the same. >>> # The attributions can be calculated as follows: >>> # feature mask has dimensions 1 x 4 x 4 >>> feature_mask = torch.tensor([[[0,0,1,1],[0,0,1,1], >>> [2,2,3,3],[2,2,3,3]]]) >>> attr = svs.attribute(input, target=1, feature_mask=feature_mask) """ # Keeps track whether original input is a tuple or not before # converting it into a tuple. is_inputs_tuple = _is_tuple(inputs) inputs, baselines = _format_input_baseline(inputs, baselines) additional_forward_args = _format_additional_forward_args( additional_forward_args ) feature_mask = ( _format_tensor_into_tuples(feature_mask) if feature_mask is not None else None ) assert ( isinstance(perturbations_per_eval, int) and perturbations_per_eval >= 1 ), "Ablations per evaluation must be at least 1." with torch.no_grad(): baselines = _tensorize_baseline(inputs, baselines) num_examples = inputs[0].shape[0] if feature_mask is None: feature_mask, total_features = _construct_default_feature_mask(inputs) else: total_features = int( max(torch.max(single_mask).item() for single_mask in feature_mask) + 1 ) if show_progress: attr_progress = progress( desc=f"{self.get_name()} attribution", total=self._get_n_evaluations( total_features, n_samples, perturbations_per_eval ) + 1, # add 1 for the initial eval ) attr_progress.update(0) initial_eval = _run_forward( self.forward_func, baselines, target, additional_forward_args ) if show_progress: attr_progress.update() agg_output_mode = _find_output_mode_and_verify( initial_eval, num_examples, perturbations_per_eval, feature_mask ) # Initialize attribution totals and counts total_attrib = [ torch.zeros_like( input[0:1] if agg_output_mode else input, dtype=torch.float ) for input in inputs ] iter_count = 0 # Iterate for number of samples, generate a permutation of the features # and evalute the incremental increase for each feature. for feature_permutation in self.permutation_generator( total_features, n_samples ): iter_count += 1 prev_results = initial_eval for ( current_inputs, current_add_args, current_target, current_masks, ) in self._perturbation_generator( inputs, additional_forward_args, target, baselines, feature_mask, feature_permutation, perturbations_per_eval, ): if sum(torch.sum(mask).item() for mask in current_masks) == 0: warnings.warn( "Feature mask is missing some integers between 0 and " "num_features, for optimal performance, make sure each" " consecutive integer corresponds to a feature." 
) # modified_eval dimensions: 1D tensor with length # equal to #num_examples * #features in batch modified_eval = _run_forward( self.forward_func, current_inputs, current_target, current_add_args, ) if show_progress: attr_progress.update() if agg_output_mode: eval_diff = modified_eval - prev_results prev_results = modified_eval else: all_eval = torch.cat((prev_results, modified_eval), dim=0) eval_diff = all_eval[num_examples:] - all_eval[:-num_examples] prev_results = all_eval[-num_examples:] for j in range(len(total_attrib)): current_eval_diff = eval_diff if not agg_output_mode: # current_eval_diff dimensions: # (#features in batch, #num_examples, 1,.. 1) # (contains 1 more dimension than inputs). This adds extra # dimensions of 1 to make the tensor broadcastable with the # inputs tensor. current_eval_diff = current_eval_diff.reshape( (-1, num_examples) + (len(inputs[j].shape) - 1) * (1,) ) total_attrib[j] += ( current_eval_diff * current_masks[j].float() ).sum(dim=0) if show_progress: attr_progress.close() # Divide total attributions by number of random permutations and return # formatted attributions. attrib = tuple( tensor_attrib_total / iter_count for tensor_attrib_total in total_attrib ) formatted_attr = _format_output(is_inputs_tuple, attrib) return formatted_attr
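# Hedged sketch of the "scalar per batch" mode described in the docstring
# above: when the forward function returns a single scalar for the whole
# batch, perturbations_per_eval must stay at 1 and every feature_mask tensor
# must have first dimension 1, so the attributions are returned batch-wise.
# The toy network, the aggregate forward, and the 2x2 feature grouping are
# assumptions made up for illustration (the grouping mirrors the docstring
# example).
def _example_shapley_scalar_per_batch():
    import torch
    import torch.nn as nn
    from captum.attr import ShapleyValueSampling

    net = nn.Sequential(nn.Flatten(), nn.Linear(16, 3))

    def batch_score(inp):
        # Aggregate forward: sum of class-1 scores over the whole batch.
        return net(inp)[:, 1].sum()

    inputs = torch.randn(2, 4, 4)
    # Group the 4x4 grid into four 2x2 feature blocks; first dimension is 1.
    feature_mask = torch.tensor(
        [[[0, 0, 1, 1], [0, 0, 1, 1], [2, 2, 3, 3], [2, 2, 3, 3]]]
    )

    svs = ShapleyValueSampling(batch_score)
    attr = svs.attribute(
        inputs,
        feature_mask=feature_mask,
        n_samples=25,
        perturbations_per_eval=1,
    )
    # One attribution row for the whole batch: shape (1, 4, 4).
    assert list(attr.shape) == [1, 4, 4]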
def _compute_attribution_and_evaluate(
    self,
    model,
    inputs,
    baselines=None,
    target=None,
    additional_forward_args=None,
    type="vanilla",
):
    r"""
    attrib_type: 'vanilla', 'smoothgrad', 'smoothgrad_sq', 'vargrad'
    """
    ig = IntegratedGradients(model)
    if not isinstance(inputs, tuple):
        inputs = (inputs,)
    if baselines is not None and not isinstance(baselines, tuple):
        baselines = (baselines,)
    if baselines is None:
        baselines = _tensorize_baseline(inputs, _zeros(inputs))

    for method in [
        "riemann_right",
        "riemann_left",
        "riemann_middle",
        "riemann_trapezoid",
        "gausslegendre",
    ]:
        if type == "vanilla":
            attributions, delta = ig.attribute(
                inputs,
                baselines,
                additional_forward_args=additional_forward_args,
                method=method,
                n_steps=100,
                target=target,
                return_convergence_delta=True,
            )
            model.zero_grad()
            attributions_without_delta, delta = ig.attribute(
                inputs,
                baselines,
                additional_forward_args=additional_forward_args,
                method=method,
                n_steps=100,
                target=target,
                return_convergence_delta=True,
            )
            model.zero_grad()
            self.assertEqual([inputs[0].shape[0]], list(delta.shape))
            delta_external = ig.compute_convergence_delta(
                attributions,
                baselines,
                inputs,
                target=target,
                additional_forward_args=additional_forward_args,
            )
            assertArraysAlmostEqual(delta, delta_external, 0.0)
        else:
            nt = NoiseTunnel(ig)
            n_samples = 5
            attributions, delta = nt.attribute(
                inputs,
                nt_type=type,
                n_samples=n_samples,
                stdevs=0.00000002,
                baselines=baselines,
                target=target,
                additional_forward_args=additional_forward_args,
                method=method,
                n_steps=100,
                return_convergence_delta=True,
            )
            attributions_without_delta = nt.attribute(
                inputs,
                nt_type=type,
                n_samples=n_samples,
                stdevs=0.00000002,
                baselines=baselines,
                target=target,
                additional_forward_args=additional_forward_args,
                method=method,
                n_steps=100,
            )
            self.assertEqual(
                [inputs[0].shape[0] * n_samples], list(delta.shape)
            )

        for input, attribution in zip(inputs, attributions):
            self.assertEqual(attribution.shape, input.shape)
        # TODO (T57097503): Separate tests for different
        # integration methods and decrease threshold.
        self.assertTrue(all(abs(delta.numpy().flatten()) < 0.4))
        # compare attributions retrieved with and without
        # `return_convergence_delta` flag
        for attribution, attribution_without_delta in zip(
            attributions, attributions_without_delta
        ):
            assertTensorAlmostEqual(
                self, attribution, attribution_without_delta, delta=0.05
            )

    return attributions