Example #1
def neuron_forward_func(*args: Any):
    with torch.no_grad():
        # run the forward pass and capture the activation of self.layer
        # (or its input, when attribute_to_neuron_input is set)
        layer_eval = _forward_layer_eval(
            self.forward_func,
            args,
            self.layer,
            device_ids=self.device_ids,
            attribute_to_layer_input=attribute_to_neuron_input,
        )
        # reduce the layer activation to the selected neuron's value(s)
        return _verify_select_neuron(layer_eval, neuron_selector)
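The helper _verify_select_neuron used above reduces a full layer activation to the value(s) of one selected neuron. The following standalone sketch illustrates that selection logic; select_neuron_sketch is a hypothetical name and makes no claim to match Captum's internals:

import torch

def select_neuron_sketch(layer_output: torch.Tensor, selector):
    # a callable selector receives the full layer output and returns the value(s)
    if callable(selector):
        return selector(layer_output)
    if isinstance(selector, int):
        selector = (selector,)
    # index every non-batch dimension, keeping the batch dimension intact
    return layer_output[(slice(None), *selector)]

activations = torch.randn(8, 16, 4, 4)                      # (batch, channels, h, w)
print(select_neuron_sketch(activations, (3, 0, 0)).shape)   # torch.Size([8])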
Example #2
def _neuron_gradients(
    inputs: Union[Tensor, Tuple[Tensor, ...]],
    saved_layer: Dict[device, Tuple[Tensor, ...]],
    key_list: List[device],
    gradient_neuron_selector: Union[int, Tuple[Union[int, slice], ...],
                                    Callable],
) -> Tuple[Tensor, ...]:
    with torch.autograd.set_grad_enabled(True):
        gradient_tensors = []
        for key in key_list:
            # select the target neuron from the layer output saved for this device
            current_out_tensor = _verify_select_neuron(
                saved_layer[key], gradient_neuron_selector)
            # differentiate the selected neuron(s) with respect to the inputs;
            # a non-scalar selection is unbound so every element is differentiated
            gradient_tensors.append(
                torch.autograd.grad(
                    torch.unbind(current_out_tensor)
                    if current_out_tensor.numel() > 1 else current_out_tensor,
                    inputs,
                ))
        # combine the per-device gradients into a single tuple of tensors
        _total_gradients = _reduce_list(gradient_tensors, sum)
    return _total_gradients
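The same pattern can be reproduced outside Captum: pick one neuron of a hidden activation and differentiate it with respect to the inputs using torch.autograd.grad. The names below are illustrative only:

import torch

inputs = torch.randn(4, 3, requires_grad=True)
weight = torch.randn(3, 5)
layer_out = inputs @ weight                  # stands in for a saved layer activation

selected = layer_out[:, 2]                   # neuron index 2 for every example
grads = torch.autograd.grad(
    # unbind so each example's scalar is differentiated, mirroring the code above
    torch.unbind(selected) if selected.numel() > 1 else selected,
    inputs,
)
print(grads[0].shape)                        # torch.Size([4, 3])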
Example #3
    def _attribute(
        self,
        inputs: Tuple[Tensor, ...],
        neuron_selector: Union[int, Tuple[int, ...], Callable],
        baselines: Tuple[Union[Tensor, int, float], ...],
        target: TargetType = None,
        additional_forward_args: Any = None,
        n_steps: int = 50,
        method: str = "riemann_trapezoid",
        attribute_to_neuron_input: bool = False,
        step_sizes_and_alphas: Union[None, Tuple[List[float],
                                                 List[float]]] = None,
    ) -> Tuple[Tensor, ...]:

        num_examples = inputs[0].shape[0]
        total_batch = num_examples * n_steps

        if step_sizes_and_alphas is None:
            # retrieve step size and scaling factor for specified approximation method
            step_sizes_func, alphas_func = approximation_parameters(method)
            step_sizes, alphas = step_sizes_func(n_steps), alphas_func(n_steps)
        else:
            step_sizes, alphas = step_sizes_and_alphas

        # Compute scaled inputs from baseline to final input.
        scaled_features_tpl = tuple(
            torch.cat(
                [baseline + alpha * (input - baseline) for alpha in alphas],
                dim=0).requires_grad_()
            for input, baseline in zip(inputs, baselines))

        additional_forward_args = _format_additional_forward_args(
            additional_forward_args)
        # Expand additional forward args to match the number of steps. Currently
        # this expansion is applied only to additional forward arguments that are
        # nd-tensors; their first dimension is assumed to be the batch dimension.
        # dim -> (#examples * #steps x additional_forward_args[0].shape[1:], ...)
        input_additional_args = (_expand_additional_forward_args(
            additional_forward_args, n_steps) if additional_forward_args
                                 is not None else None)
        expanded_target = _expand_target(target, n_steps)

        # Conductance Gradients - Returns gradient of output with respect to
        # hidden layer and hidden layer evaluated at each input.
        layer_gradients, layer_eval, input_grads = compute_layer_gradients_and_eval(
            forward_fn=self.forward_func,
            layer=self.layer,
            inputs=scaled_features_tpl,
            target_ind=expanded_target,
            additional_forward_args=input_additional_args,
            gradient_neuron_selector=neuron_selector,
            device_ids=self.device_ids,
            attribute_to_layer_input=attribute_to_neuron_input,
        )

        mid_grads = _verify_select_neuron(layer_gradients, neuron_selector)
        scaled_input_gradients = tuple(
            input_grad * mid_grads.reshape((total_batch, ) + (1, ) *
                                           (len(input_grad.shape) - 1))
            for input_grad in input_grads)

        # Multiplies by the appropriate step size.
        scaled_grads = tuple(
            scaled_input_gradient.contiguous().view(n_steps, -1) *
            torch.tensor(step_sizes).view(n_steps, 1).to(
                scaled_input_gradient.device)
            for scaled_input_gradient in scaled_input_gradients)

        # Aggregates across all steps for each tensor in the input tuple
        total_grads = tuple(
            _reshape_and_sum(scaled_grad, n_steps, num_examples,
                             input_grad.shape[1:])
            for (scaled_grad, input_grad) in zip(scaled_grads, input_grads))

        if self.multiplies_by_inputs:
            # computes attribution for each tensor in input tuple
            # attributions has the same dimensionality as inputs
            attributions = tuple(total_grad * (input - baseline)
                                 for total_grad, input, baseline in zip(
                                     total_grads, inputs, baselines))
        else:
            attributions = total_grads

        return attributions
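For context, this private _attribute method is normally reached through Captum's public NeuronConductance API rather than called directly. A hedged usage sketch follows; exact defaults and accepted arguments may vary between Captum versions:

import torch
import torch.nn as nn
from captum.attr import NeuronConductance

model = nn.Sequential(nn.Linear(3, 8), nn.ReLU(), nn.Linear(8, 2))
neuron_cond = NeuronConductance(model, model[0])   # attribute w.r.t. the first Linear layer

inputs = torch.randn(4, 3)
# conductance of each input feature for neuron 5 of that layer, output class 1
attributions = neuron_cond.attribute(inputs, neuron_selector=5, target=1, n_steps=50)
print(attributions.shape)                          # torch.Size([4, 3])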