def test_undo_gradient_reqs(self):
    initial_grads = [False, True, False]
    test_tensor = torch.tensor([[6.0]], requires_grad=True)
    test_tensor.grad = torch.tensor([[7.0]])
    test_tensor_tuple = (
        torch.tensor([[6.0]], requires_grad=True),
        test_tensor,
        torch.tensor([[7.0]], requires_grad=True),
    )
    undo_gradient_requirements(test_tensor_tuple, initial_grads)
    for i in range(len(test_tensor_tuple)):
        # requires_grad should be restored to its recorded initial value.
        self.assertEqual(test_tensor_tuple[i].requires_grad, initial_grads[i])
        # Any pre-existing gradient should have been zeroed out.
        if test_tensor_tuple[i].grad is not None:
            self.assertAlmostEqual(
                torch.sum(test_tensor_tuple[i].grad).item(), 0.0
            )
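# For reference, a minimal sketch of the behavior the test above expects from
# `undo_gradient_requirements`: restore each tensor's original requires_grad
# flag and zero out any accumulated .grad. This is an illustrative stand-in
# (note the _sketch suffix), not the library's actual implementation.
def _undo_gradient_requirements_sketch(tensors, grad_required):
    # Assumes `tensors` and `grad_required` are aligned element-wise.
    for tensor, required in zip(tensors, grad_required):
        if tensor.grad is not None:
            tensor.grad.zero_()  # the test checks that grads sum to 0.0
        tensor.requires_grad_(required)  # restore the recorded flag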
def attribute(
    self,
    inputs,
    baselines=None,
    target=None,
    n_steps=500,
    method="riemann_trapezoid",
):
    r"""
    Computes conductance using gradients along the path, applying
    Riemann's method or Gauss-Legendre quadrature.
    The details of the approach can be found here:
    https://arxiv.org/abs/1805.12233

    Args:

        inputs:     A single high-dimensional input tensor, in which
                    dimension 0 corresponds to the number of examples.
        baselines:  A single high-dimensional baseline tensor, which has
                    the same shape as the input.
        target:     Predicted class index. This is necessary only for
                    classification use cases.
        n_steps:    The number of steps used by the approximation method.
        method:     Method for integral approximation, one of
                    `riemann_right`, `riemann_middle`, `riemann_trapezoid`
                    or `gausslegendre`.

    Returns:

        attributions: Total conductance with respect to each neuron in
                      the output of the given layer.
    """
    if baselines is None:
        baselines = 0
    gradient_mask = apply_gradient_requirements((inputs,))
    # retrieve step size and scaling factor for specified approximation method
    step_sizes_func, alphas_func = approximation_parameters(method)
    step_sizes, alphas = step_sizes_func(n_steps), alphas_func(n_steps)

    # compute scaled inputs from baseline to final input.
    scaled_features = torch.cat(
        [baselines + alpha * (inputs - baselines) for alpha in alphas], dim=0
    )

    # Conductance Gradients - Returns gradient of output with respect to
    # hidden layer, gradient of hidden layer with respect to input,
    # and number of hidden units.
    input_gradients, mid_layer_gradients, hidden_units = self._conductance_grads(
        self.forward_func, scaled_features, target
    )
    # Multiply gradient of hidden layer with respect to input by input - baseline
    scaled_input_gradients = torch.repeat_interleave(
        inputs - baselines, hidden_units, dim=0
    )
    scaled_input_gradients = input_gradients * scaled_input_gradients.repeat(
        *([len(alphas)] + [1] * (len(scaled_input_gradients.shape) - 1))
    )

    # Sum gradients for each input neuron in order to have total
    # for each hidden unit and reshape to match hidden layer shape
    summed_input_grads = torch.sum(
        scaled_input_gradients, tuple(range(1, len(scaled_input_gradients.shape)))
    ).view_as(mid_layer_gradients)

    # Rescale gradients of hidden layer by step size.
    scaled_grads = mid_layer_gradients.contiguous().view(
        n_steps, -1
    ) * torch.tensor(step_sizes).view(n_steps, 1).to(mid_layer_gradients.device)

    undo_gradient_requirements((inputs,), gradient_mask)

    # Element-wise multiply gradient of output with respect to hidden layer
    # and summed gradients with respect to input (chain rule), and sum
    # across stepped inputs.
    return _reshape_and_sum(
        scaled_grads.view(mid_layer_gradients.shape) * summed_input_grads,
        n_steps,
        inputs.shape[0],
        mid_layer_gradients.shape[1:],
    )
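# To make the path construction in `attribute` concrete, here is a small
# standalone sketch of how `scaled_features` interpolates from baseline to
# input. The equally spaced alphas below are a stand-in for `alphas_func`;
# the actual spacing depends on the chosen approximation method.
def _scaled_features_sketch(inputs, baselines, n_steps):
    alphas = [i / (n_steps - 1) for i in range(n_steps)]  # trapezoid-like spacing
    # Stack one scaled copy of the whole batch per alpha along dimension 0,
    # giving a tensor of shape (n_steps * batch_size, *input_shape).
    return torch.cat(
        [baselines + alpha * (inputs - baselines) for alpha in alphas], dim=0
    )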
def attribute(
    self,
    inputs: Union[Tensor, Tuple[Tensor, ...]],
    baselines: BaselineType = None,
    target: TargetType = None,
    additional_forward_args: Any = None,
    return_convergence_delta: bool = False,
    attribute_to_layer_input: bool = False,
    custom_attribution_func: Union[None, Callable[..., Tuple[Tensor, ...]]] = None,
) -> Union[
    Tensor,
    Tuple[Tensor, ...],
    Tuple[Union[Tensor, Tuple[Tensor, ...]], Tensor],
]:
    r"""
    Args:

        inputs (tensor or tuple of tensors):  Input for which layer
                    attributions are computed. If forward_func takes a single
                    tensor as input, a single input tensor should be provided.
                    If forward_func takes multiple tensors as input, a tuple
                    of the input tensors should be provided. It is assumed
                    that for all given input tensors, dimension 0 corresponds
                    to the number of examples (aka batch size), and if
                    multiple input tensors are provided, the examples must
                    be aligned appropriately.
        baselines (scalar, tensor, tuple of scalars or tensors, optional):
                    Baselines define reference samples that are compared with
                    the inputs. In order to assign attribution scores DeepLift
                    computes the differences between the inputs/outputs and
                    corresponding references.
                    Baselines can be provided as:

                    - a single tensor, if inputs is a single tensor, with
                      exactly the same dimensions as inputs, or with the first
                      dimension equal to one and the remaining dimensions
                      matching those of inputs.

                    - a single scalar, if inputs is a single tensor, which
                      will be broadcast over each value in the input tensor.

                    - a tuple of tensors or scalars, where the baseline
                      corresponding to each tensor in the inputs' tuple can be:

                      - either a tensor with dimensions matching the
                        corresponding tensor in the inputs' tuple, or with the
                        first dimension equal to one and the remaining
                        dimensions matching those of the corresponding input
                        tensor.

                      - or a scalar, corresponding to a tensor in the inputs'
                        tuple. This scalar value is broadcast over the
                        corresponding input tensor.

                    In the case when `baselines` is not provided, we
                    internally use a zero scalar corresponding to each input
                    tensor.
                    Default: None
        target (int, tuple, tensor or list, optional):  Output indices for
                    which gradients are computed (for classification cases,
                    this is usually the target class).
                    If the network returns a scalar value per example, no
                    target index is necessary.
                    For general 2D outputs, targets can be either:

                    - a single integer or a tensor containing a single
                      integer, which is applied to all input examples

                    - a list of integers or a 1D tensor, with length matching
                      the number of examples in inputs (dim 0). Each integer
                      is applied as the target for the corresponding example.

                    For outputs with > 2 dimensions, targets can be either:

                    - A single tuple, which contains #output_dims - 1
                      elements. This target index is applied to all examples.

                    - A list of tuples with length equal to the number of
                      examples in inputs (dim 0), and each tuple containing
                      #output_dims - 1 elements. Each tuple is applied as the
                      target for the corresponding example.

                    Default: None
        additional_forward_args (any, optional): If the forward function
                    requires additional arguments other than the inputs for
                    which attributions should not be computed, this argument
                    can be provided. It must be either a single additional
                    argument of a Tensor or arbitrary (non-tuple) type, or a
                    tuple containing multiple additional arguments including
                    tensors or any arbitrary python types. These arguments
                    are provided to forward_func in order, following the
                    arguments in inputs.
                    Note that attributions are not computed with respect to
                    these arguments.
                    Default: None
        return_convergence_delta (bool, optional): Indicates whether to
                    return convergence delta or not. If
                    `return_convergence_delta` is set to True, the
                    convergence delta will be returned in a tuple following
                    the attributions.
                    Default: False
        attribute_to_layer_input (bool, optional): Indicates whether to
                    compute the attribution with respect to the layer input
                    or output. If `attribute_to_layer_input` is set to True
                    then the attributions will be computed with respect to
                    the layer input, otherwise they will be computed with
                    respect to the layer output.
                    Note that currently it is assumed that either the input
                    or the output of the internal layer, depending on whether
                    we attribute to the input or output, is a single tensor.
                    Support for multiple tensors will be added later.
                    Default: False
        custom_attribution_func (callable, optional): A custom function for
                    computing final attribution scores. This function can
                    take at least one and at most three arguments with the
                    following signature:

                    - custom_attribution_func(multipliers)
                    - custom_attribution_func(multipliers, inputs)
                    - custom_attribution_func(multipliers, inputs, baselines)

                    In case this function is not provided, we use the default
                    logic defined as: multipliers * (inputs - baselines).
                    It is assumed that all input arguments, `multipliers`,
                    `inputs` and `baselines`, are provided in tuples of the
                    same length. `custom_attribution_func` returns a tuple of
                    attribution tensors that has the same length as
                    `inputs`.
                    Default: None

    Returns:
        **attributions** or 2-element tuple of **attributions**, **delta**:
        - **attributions** (*tensor* or tuple of *tensors*):
                Attribution score computed based on DeepLift's rescale rule
                with respect to the layer's inputs or outputs. Attributions
                will always be the same size as the provided layer's inputs
                or outputs, depending on whether we attribute to the inputs
                or outputs of the layer.
                If the layer input / output is a single tensor, then just a
                tensor is returned; if the layer input / output has multiple
                tensors, then a corresponding tuple of tensors is returned.
        - **delta** (*tensor*, returned if return_convergence_delta=True):
                This is computed using the property that the total sum of
                forward_func(inputs) - forward_func(baselines) must equal
                the total sum of the attributions computed based on
                DeepLift's rescale rule.
                Delta is calculated per example, meaning that the number of
                elements in the returned delta tensor is equal to the number
                of examples in the input.
                Note that the logic described for deltas is guaranteed only
                when the default logic for attribution computations is used,
                meaning that `custom_attribution_func=None`; otherwise it is
                not guaranteed and depends on the specifics of the
                `custom_attribution_func`.

    Examples::

        >>> # ImageClassifier takes a single input tensor of images Nx3x32x32,
        >>> # and returns an Nx10 tensor of class probabilities.
        >>> net = ImageClassifier()
        >>> # creates an instance of LayerDeepLift to interpret target
        >>> # class 1 with respect to conv4 layer.
        >>> dl = LayerDeepLift(net, net.conv4)
        >>> input = torch.randn(1, 3, 32, 32, requires_grad=True)
        >>> # Computes deeplift attribution scores for conv4 layer and class 1.
        >>> attribution = dl.attribute(input, target=1)
    """
    inputs = _format_input(inputs)
    baselines = _format_baseline(baselines, inputs)
    gradient_mask = apply_gradient_requirements(inputs)
    _validate_input(inputs, baselines)

    baselines = _tensorize_baseline(inputs, baselines)
    main_model_pre_hook = self._pre_hook_main_model()
    self.model.apply(self._register_hooks)

    additional_forward_args = _format_additional_forward_args(
        additional_forward_args
    )
    input_base_additional_args = _expand_additional_forward_args(
        additional_forward_args, 2, ExpansionTypes.repeat
    )
    expanded_target = _expand_target(
        target, 2, expansion_type=ExpansionTypes.repeat
    )
    wrapped_forward_func = self._construct_forward_func(
        self.model,
        (inputs, baselines),
        expanded_target,
        input_base_additional_args,
    )

    def chunk_output_fn(out: TensorOrTupleOfTensorsGeneric) -> Sequence:
        # Inputs and baselines were stacked into one batch of size 2N,
        # so each output tensor is split back into its two halves.
        if isinstance(out, Tensor):
            return out.chunk(2)
        return tuple(out_sub.chunk(2) for out_sub in out)

    gradients, attrs, is_layer_tuple = compute_layer_gradients_and_eval(
        wrapped_forward_func,
        self.layer,
        inputs,
        attribute_to_layer_input=attribute_to_layer_input,
        output_fn=lambda out: chunk_output_fn(out),
    )
    attr_inputs = tuple(map(lambda attr: attr[0], attrs))
    attr_baselines = tuple(map(lambda attr: attr[1], attrs))
    gradients = tuple(map(lambda grad: grad[0], gradients))

    if custom_attribution_func is None:
        attributions = tuple(
            (input - baseline) * gradient
            for input, baseline, gradient in zip(
                attr_inputs, attr_baselines, gradients
            )
        )
    else:
        attributions = _call_custom_attribution_func(
            custom_attribution_func, gradients, attr_inputs, attr_baselines
        )
    # remove hooks from all activations
    main_model_pre_hook.remove()
    self._remove_hooks()

    undo_gradient_requirements(inputs, gradient_mask)
    return _compute_conv_delta_and_format_attrs(
        self,
        return_convergence_delta,
        attributions,
        baselines,
        inputs,
        additional_forward_args,
        target,
        is_layer_tuple,
    )
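# A small sketch of the chunking trick used in `attribute` above: inputs and
# baselines are stacked into one batch of size 2N so that a single forward
# pass produces both sets of activations, and chunk(2) splits them back
# apart. Shapes below are illustrative only.
def _chunking_demo_sketch():
    batch = torch.randn(3, 5)                     # N = 3 examples
    base = torch.zeros(3, 5)                      # matching baselines
    stacked = torch.cat([batch, base])            # combined batch of size 2N = 6
    out_inputs, out_baselines = stacked.chunk(2)  # first half / second half
    assert torch.equal(out_inputs, batch)
    assert torch.equal(out_baselines, base)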
def attribute(
    self,
    inputs: Union[Tensor, Tuple[Tensor, ...]],
    target: TargetType = None,
    additional_forward_args: Any = None,
    attribute_to_layer_input: bool = False,
    relu_attributions: bool = False,
) -> Union[Tensor, Tuple[Tensor, ...]]:
    r"""
    Args:

        inputs (tensor or tuple of tensors):  Input for which attributions
                    are computed. If forward_func takes a single tensor as
                    input, a single input tensor should be provided. If
                    forward_func takes multiple tensors as input, a tuple of
                    the input tensors should be provided. It is assumed that
                    for all given input tensors, dimension 0 corresponds to
                    the number of examples, and if multiple input tensors are
                    provided, the examples must be aligned appropriately.
        target (int, tuple, tensor or list, optional):  Output indices for
                    which gradients are computed (for classification cases,
                    this is usually the target class).
                    If the network returns a scalar value per example, no
                    target index is necessary.
                    For general 2D outputs, targets can be either:

                    - a single integer or a tensor containing a single
                      integer, which is applied to all input examples

                    - a list of integers or a 1D tensor, with length matching
                      the number of examples in inputs (dim 0). Each integer
                      is applied as the target for the corresponding example.

                    For outputs with > 2 dimensions, targets can be either:

                    - A single tuple, which contains #output_dims - 1
                      elements. This target index is applied to all examples.

                    - A list of tuples with length equal to the number of
                      examples in inputs (dim 0), and each tuple containing
                      #output_dims - 1 elements. Each tuple is applied as the
                      target for the corresponding example.

                    Default: None
        additional_forward_args (any, optional): If the forward function
                    requires additional arguments other than the inputs for
                    which attributions should not be computed, this argument
                    can be provided. It must be either a single additional
                    argument of a Tensor or arbitrary (non-tuple) type, or a
                    tuple containing multiple additional arguments including
                    tensors or any arbitrary python types. These arguments
                    are provided to forward_func in order, following the
                    arguments in inputs.
                    Note that attributions are not computed with respect to
                    these arguments.
                    Default: None
        attribute_to_layer_input (bool, optional): Indicates whether to
                    compute the attributions with respect to the layer input
                    or output. If `attribute_to_layer_input` is set to True
                    then the attributions will be computed with respect to
                    the layer input, otherwise they will be computed with
                    respect to the layer output.
                    Note that currently it is assumed that either the input
                    or the outputs of internal layers, depending on whether
                    we attribute to the input or output, are single tensors.
                    Support for multiple tensors will be added later.
                    Default: False
        relu_attributions (bool, optional): Indicates whether to apply a
                    ReLU operation on the final attribution, returning only
                    non-negative attributions. Setting this flag to True
                    matches the original GradCAM algorithm; otherwise, by
                    default, both positive and negative attributions are
                    returned.
                    Default: False

    Returns:
        *tensor* or tuple of *tensors* of **attributions**:
        - **attributions** (*tensor* or tuple of *tensors*):
                Attributions based on GradCAM method. Attributions will be
                the same size as the output of the given layer, except for
                dimension 1 (the channel dimension), which will be 1 due to
                summing over channels.
                Attributions are returned in a tuple based on whether the
                layer inputs / outputs are contained in a tuple from a
                forward hook. For standard modules, inputs of a single
                tensor are usually wrapped in a tuple, while outputs of a
                single tensor are not.

    Examples::

        >>> # ImageClassifier takes a single input tensor of images Nx3x32x32,
        >>> # and returns an Nx10 tensor of class probabilities.
        >>> # It contains a layer conv4, which is an instance of nn.Conv2d,
        >>> # and the output of this layer has dimensions Nx50x8x8.
        >>> # It is the last convolution layer, which is the recommended
        >>> # use case for GradCAM.
        >>> net = ImageClassifier()
        >>> layer_gc = LayerGradCam(net, net.conv4)
        >>> input = torch.randn(2, 3, 32, 32, requires_grad=True)
        >>> # Computes layer GradCAM for class 3.
        >>> # attribution size matches layer output except for dimension
        >>> # 1, so dimensions of attr would be Nx1x8x8.
        >>> attr = layer_gc.attribute(input, 3)
        >>> # GradCAM attributions are often upsampled and viewed as a
        >>> # mask to the input, since the convolutional layer output
        >>> # spatially matches the original input image.
        >>> # This can be done with LayerAttribution's interpolate method.
        >>> upsampled_attr = LayerAttribution.interpolate(attr, (32, 32))
    """
    inputs = _format_input(inputs)
    additional_forward_args = _format_additional_forward_args(
        additional_forward_args
    )
    gradient_mask = apply_gradient_requirements(inputs)
    # Returns gradient of output with respect to the hidden layer,
    # and the hidden layer evaluated at each input.
    layer_gradients, layer_evals, is_layer_tuple = compute_layer_gradients_and_eval(
        self.forward_func,
        self.layer,
        inputs,
        target,
        additional_forward_args,
        device_ids=self.device_ids,
        attribute_to_layer_input=attribute_to_layer_input,
    )
    undo_gradient_requirements(inputs, gradient_mask)
    # Gradient calculation ends here.

    # PyG adaptation (added here): transpose node-level activations from
    # (num_nodes, num_channels) to (num_channels, num_nodes) and add a batch
    # dimension, matching the (N, C, ...) layout standard GradCAM expects.
    layer_gradients = tuple(
        layer_grad.transpose(0, 1).unsqueeze(0) for layer_grad in layer_gradients
    )
    layer_evals = tuple(
        layer_eval.transpose(0, 1).unsqueeze(0) for layer_eval in layer_evals
    )

    # Average gradients over all spatial dimensions to get per-channel weights.
    summed_grads = tuple(
        torch.mean(
            layer_grad,
            dim=tuple(x for x in range(2, len(layer_grad.shape))),
            keepdim=True,
        )
        for layer_grad in layer_gradients
    )
    # Weight the layer activations by the averaged gradients and sum over
    # channels (dim 1).
    scaled_acts = tuple(
        torch.sum(summed_grad * layer_eval, dim=1, keepdim=True)
        for summed_grad, layer_eval in zip(summed_grads, layer_evals)
    )
    if relu_attributions:
        scaled_acts = tuple(F.relu(scaled_act) for scaled_act in scaled_acts)

    # PyG adaptation (added here): undo the reshape, returning attributions
    # in the original (num_nodes, num_channels) node-feature layout.
    scaled_acts = tuple(
        scaled_act.squeeze(0).transpose(0, 1) for scaled_act in scaled_acts
    )
    return _format_attributions(is_layer_tuple, scaled_acts)
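# A quick shape check for the PyG reshaping above: node features of shape
# (num_nodes, num_channels) are viewed as a single "image" of shape
# (1, num_channels, num_nodes) for GradCAM, then mapped back. The sizes are
# illustrative; only the transpose/unsqueeze round trip is being shown.
def _pyg_shape_roundtrip_sketch():
    node_feats = torch.randn(100, 16)                 # (num_nodes, num_channels)
    as_cnn = node_feats.transpose(0, 1).unsqueeze(0)  # (1, 16, 100) == (N, C, ...)
    restored = as_cnn.squeeze(0).transpose(0, 1)      # back to (100, 16)
    assert torch.equal(restored, node_feats)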