Example #1
 def test_error_ablations_per_eval_limit_batch_scalar(self):
     net = BasicModel_MultiLayer()
     inp = torch.tensor([[2.0, 10.0, 3.0], [20.0, 50.0, 30.0]],
                        requires_grad=True)
     ablation = FeatureAblation(lambda inp: torch.sum(net(inp)).item())
     with self.assertRaises(AssertionError):
         _ = ablation.attribute(inp, ablations_per_eval=2)
Example #2
 def _ablation_test_assert(
     self,
     model: Callable,
     test_input: TensorOrTupleOfTensors,
     expected_ablation: Union[
         List[float],
         List[List[float]],
         Tuple[List[List[float]], ...],
         Tuple[Tensor, ...],
     ],
     feature_mask: Optional[TensorOrTupleOfTensors] = None,
     additional_input: Any = None,
     ablations_per_eval: Tuple[int, ...] = (1, ),
     baselines: Optional[
         Union[Tensor, int, float, Tuple[Union[Tensor, int, float], ...]]
     ] = None,
     target: Optional[
         Union[int, Tuple[int, ...], Tensor, List[Tuple[int, ...]]]
     ] = 0,
 ) -> None:
     for batch_size in ablations_per_eval:
         ablation = FeatureAblation(model)
         attributions = ablation.attribute(
             test_input,
             target=target,
             feature_mask=feature_mask,
             additional_forward_args=additional_input,
             baselines=baselines,
             ablations_per_eval=batch_size,
         )
         if isinstance(expected_ablation, tuple):
             for i in range(len(expected_ablation)):
                 assertTensorAlmostEqual(self, attributions[i],
                                         expected_ablation[i])
         else:
             assertTensorAlmostEqual(self, attributions, expected_ablation)
Example #3
 def _ablation_test_assert(
         self,
         model,
         test_input,
         expected_ablation,
         feature_mask=None,
         additional_input=None,
         ablations_per_eval=(1, ),
         baselines=None,
         target=0,
 ):
     for batch_size in ablations_per_eval:
         ablation = FeatureAblation(model)
         attributions = ablation.attribute(
             test_input,
             target=target,
             feature_mask=feature_mask,
             additional_forward_args=additional_input,
             baselines=baselines,
             ablations_per_eval=batch_size,
         )
         if isinstance(expected_ablation, tuple):
             for i in range(len(expected_ablation)):
                 assertTensorAlmostEqual(self, attributions[i],
                                         expected_ablation[i])
         else:
             assertTensorAlmostEqual(self, attributions, expected_ablation)
Example #4
    def __init__(self, forward_func: Callable) -> None:
        r"""
        Args:

            forward_func (callable): The forward function of the model or
                        any modification of it
        """
        FeatureAblation.__init__(self, forward_func)
        self.use_weights = True
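
This constructor enables use_weights, so perturbation results are averaged by their accumulated weights; in Captum this is the pattern used by Occlusion, which subclasses FeatureAblation in exactly this way. Below is a minimal hedged usage sketch, assuming the excerpt above is indeed captum.attr.Occlusion; the tiny Sequential model and window shape are illustrative only.

import torch
import torch.nn as nn
from captum.attr import Occlusion

# Small conv classifier: (N, 1, 4, 4) -> (N, 3)
net = nn.Sequential(nn.Conv2d(1, 2, 3), nn.Flatten(), nn.Linear(2 * 2 * 2, 3))
inp = torch.randn(1, 1, 4, 4)

occ = Occlusion(net)
# Slide a 1x2x2 occlusion window over the input; because windows overlap,
# use_weights=True averages each position's contribution by its weight.
attr = occ.attribute(inp, sliding_window_shapes=(1, 2, 2), target=0)
print(attr.shape)  # torch.Size([1, 1, 4, 4])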
Example #5
    def test_error_agg_mode_incorrect_fm(self) -> None:
        def forward_func(inp):
            return inp[0].unsqueeze(0)

        inp = torch.tensor([[1, 2, 3], [4, 5, 6]])
        mask = torch.tensor([[0, 1, 2], [0, 0, 1]])

        ablation = FeatureAblation(forward_func)
        with self.assertRaises(AssertionError):
            _ = ablation.attribute(inp, perturbations_per_eval=1, feature_mask=mask)
Example #6
    def test_error_agg_mode_arbitrary_output(self) -> None:
        net = BasicModel_MultiLayer()

        # output 3 numbers for the entire batch
        # note that the batch size == 2
        def forward_func(inp):
            pred = net(inp)
            return torch.stack([pred.sum(), pred.max(), pred.min()])

        inp = torch.tensor([[2.0, 10.0, 3.0], [20.0, 50.0, 30.0]], requires_grad=True)
        ablation = FeatureAblation(forward_func)
        with self.assertRaises(AssertionError):
            _ = ablation.attribute(inp, perturbations_per_eval=2)
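
For contrast, the same kind of multi-output forward function is accepted when perturbations_per_eval stays at 1: FeatureAblation then falls back to its aggregation mode, which the passing tests further below exercise. A self-contained hedged sketch, with a plain nn.Linear standing in for BasicModel_MultiLayer; the expected shape assumes a Captum version with this aggregation behavior.

import torch
import torch.nn as nn
from captum.attr import FeatureAblation

net = nn.Linear(3, 4)
inp = torch.randn(2, 3)

def forward_func(x):
    pred = net(x)
    # 3 numbers for the entire batch, regardless of batch size
    return torch.stack([pred.sum(), pred.max(), pred.min()])

ablation = FeatureAblation(forward_func)
# Accepted because only one perturbation is evaluated per forward call.
attr = ablation.attribute(inp, perturbations_per_eval=1)
print(attr.shape)  # expected: torch.Size([3, 3]), one row per output element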
Example #7
    def _ablation_test_assert(
        self,
        model: Callable,
        test_input: TensorOrTupleOfTensorsGeneric,
        expected_ablation: Union[
            Tensor,
            Tuple[Tensor, ...],
            # NOTE: mypy doesn't support recursive types
            # would do a List[NestedList[Union[int, float]]]
            # or Tuple[NestedList[Union[int, float]], ...]
            # but... we can't.
            #
            # See https://github.com/python/mypy/issues/731
            List[Any],
            Tuple[List[Any], ...],
        ],
        feature_mask: Union[None, TensorOrTupleOfTensorsGeneric] = None,
        additional_input: Any = None,
        perturbations_per_eval: Tuple[int, ...] = (1,),
        baselines: BaselineType = None,
        target: TargetType = 0,
    ) -> None:
        for batch_size in perturbations_per_eval:
            ablation = FeatureAblation(model)
            self.assertTrue(ablation.multiplies_by_inputs)
            attributions = ablation.attribute(
                test_input,
                target=target,
                feature_mask=feature_mask,
                additional_forward_args=additional_input,
                baselines=baselines,
                perturbations_per_eval=batch_size,
            )
            if isinstance(expected_ablation, tuple):
                for i in range(len(expected_ablation)):
                    expected = expected_ablation[i]
                    if not isinstance(expected, torch.Tensor):
                        expected = torch.tensor(expected)

                    self.assertEqual(attributions[i].shape, expected.shape)
                    self.assertEqual(attributions[i].dtype, expected.dtype)
                    assertTensorAlmostEqual(self, attributions[i], expected)
            else:
                if not isinstance(expected_ablation, torch.Tensor):
                    expected_ablation = torch.tensor(expected_ablation)

                self.assertEqual(attributions.shape, expected_ablation.shape)
                self.assertEqual(attributions.dtype, expected_ablation.dtype)
                assertTensorAlmostEqual(self, attributions, expected_ablation)
Example #8
    def test_simple_ablation_with_mask_and_show_progress(self,
                                                         mock_stderr) -> None:
        ablation_algo = FeatureAblation(BasicModel_MultiLayer())
        inp = torch.tensor([[20.0, 50.0, 30.0]], requires_grad=True)

        # test progress output for each batch size
        for bsz in (1, 2, 3):
            self._ablation_test_assert(
                ablation_algo,
                inp,
                [[280.0, 280.0, 120.0]],
                feature_mask=torch.tensor([[0, 0, 1]]),
                perturbations_per_eval=(bsz, ),
                show_progress=True,
            )

            output = mock_stderr.getvalue()

            # to test if progress calculation aligns with the actual iteration
            # all perturbations_per_eval should reach progress of 100%
            assert ("Feature Ablation attribution: 100%"
                    in output), f"Error progress output: {repr(output)}"

            mock_stderr.seek(0)
            mock_stderr.truncate(0)
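
Outside the mocked-stderr harness, the same progress bar can be requested directly. A minimal hedged sketch, assuming a Captum version whose FeatureAblation.attribute accepts show_progress (as the test above does); the nn.Linear model is illustrative only.

import torch
import torch.nn as nn
from captum.attr import FeatureAblation

net = nn.Linear(3, 2)
inp = torch.randn(4, 3)

ablation = FeatureAblation(net)
# Writes "Feature Ablation attribution: ...%" progress to stderr.
attr = ablation.attribute(inp, target=0, show_progress=True)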
Example #9
 def test_simple_multi_input_conv(self) -> None:
     ablation_algo = FeatureAblation(BasicModel_ConvNet_One_Conv())
     inp = torch.arange(16, dtype=torch.float).view(1, 1, 4, 4)
     inp2 = torch.ones((1, 1, 4, 4))
     self._ablation_test_assert(
         ablation_algo,
         (inp, inp2),
         (67 * torch.ones_like(inp), 13 * torch.ones_like(inp2)),
         feature_mask=(torch.tensor(0), torch.tensor(1)),
         perturbations_per_eval=(1, 2, 4, 8, 12, 16),
     )
     self._ablation_test_assert(
         ablation_algo,
         (inp, inp2),
         (
             [[[
                 [0.0, 2.0, 4.0, 3.0],
                 [4.0, 9.0, 10.0, 7.0],
                 [4.0, 13.0, 14.0, 11.0],
                 [0.0, 0.0, 0.0, 0.0],
             ]]],
             [[[
                 [1.0, 2.0, 2.0, 1.0],
                 [1.0, 2.0, 2.0, 1.0],
                 [1.0, 2.0, 2.0, 1.0],
                 [0.0, 0.0, 0.0, 0.0],
             ]]],
         ),
         perturbations_per_eval=(1, 3, 7, 14),
     )
Example #10
 def test_multi_input_ablation(self) -> None:
     ablation_algo = FeatureAblation(BasicModel_MultiLayer_MultiInput())
     inp1 = torch.tensor([[23.0, 100.0, 0.0], [20.0, 50.0, 30.0]])
     inp2 = torch.tensor([[20.0, 50.0, 30.0], [0.0, 100.0, 0.0]])
     inp3 = torch.tensor([[0.0, 100.0, 10.0], [2.0, 10.0, 3.0]])
     baseline1 = torch.tensor([[3.0, 0.0, 0.0]])
     baseline2 = torch.tensor([[0.0, 1.0, 0.0]])
     baseline3 = torch.tensor([[1.0, 2.0, 3.0]])
     self._ablation_test_assert(
         ablation_algo,
         (inp1, inp2, inp3),
         (
             [[80.0, 400.0, 0.0], [68.0, 200.0, 120.0]],
             [[80.0, 196.0, 120.0], [0.0, 396.0, 0.0]],
             [[-4.0, 392.0, 28.0], [4.0, 32.0, 0.0]],
         ),
         additional_input=(1, ),
         baselines=(baseline1, baseline2, baseline3),
         perturbations_per_eval=(1, 2, 3),
     )
     baseline1_exp = torch.tensor([[3.0, 0.0, 0.0], [3.0, 0.0, 2.0]])
     baseline2_exp = torch.tensor([[0.0, 1.0, 0.0], [0.0, 1.0, 4.0]])
     baseline3_exp = torch.tensor([[3.0, 2.0, 4.0], [1.0, 2.0, 3.0]])
     self._ablation_test_assert(
         ablation_algo,
         (inp1, inp2, inp3),
         (
             [[80.0, 400.0, 0.0], [68.0, 200.0, 112.0]],
             [[80.0, 196.0, 120.0], [0.0, 396.0, -16.0]],
             [[-12.0, 392.0, 24.0], [4.0, 32.0, 0.0]],
         ),
         additional_input=(1, ),
         baselines=(baseline1_exp, baseline2_exp, baseline3_exp),
         perturbations_per_eval=(1, 2, 3),
     )
Example #11
 def test_multi_sample_ablation_batch_scalar_float(self) -> None:
     net = BasicModel_MultiLayer()
     ablation_algo = FeatureAblation(lambda inp: torch.sum(net(inp)).item())
     self._single_input_multi_sample_batch_scalar_ablation_assert(
         ablation_algo,
         dtype=torch.float64,
     )
Example #12
 def test_sparse_features(self) -> None:
     ablation_algo = FeatureAblation(BasicModelWithSparseInputs())
     inp1 = torch.tensor([[1.0, -2.0, 3.0], [2.0, -1.0, 3.0]])
     # Length of sparse index list may not match # of examples
     inp2 = torch.tensor([1, 7, 2, 4, 5, 3, 6])
     self._ablation_test_assert(ablation_algo, (inp1, inp2),
                                ([[9.0, -3.0, 12.0]], [2.0]),
                                target=None)
Example #13
 def test_multi_inp_ablation_batch_scalar_float(self) -> None:
     net = BasicModel_MultiLayer_MultiInput()
     ablation_algo = FeatureAblation(
         lambda *inp: torch.sum(net(*inp)).item())
     self._multi_input_batch_scalar_ablation_assert(
         ablation_algo,
         dtype=torch.float64,
     )
Example #14
    def __init__(self,
                 forward_func: Callable,
                 perm_func: Callable = _permute_feature) -> None:
        r"""
        Args:

            forward_func (callable): The forward function of the model or
                any modification of it
            perm_func (callable, optional): A function that accepts a batch of
                inputs and a feature mask, and "permutes" the feature using
                feature mask across the batch. This defaults to a function
                which applies a random permutation; this argument only needs
                to be provided if custom permutation behavior is desired.
                Default: `_permute_feature`
        """
        FeatureAblation.__init__(self, forward_func=forward_func)
        self.perm_func = perm_func
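
A custom perm_func only has to mirror the default's calling convention: it receives one input tensor plus a boolean mask selecting the feature group, and returns a tensor of the same shape with the masked values shuffled across the batch. A hedged sketch, assuming the class above is captum.attr.FeaturePermutation; the deterministic roll is purely illustrative.

import torch
from torch import Tensor
from captum.attr import FeaturePermutation

def roll_feature(x: Tensor, feature_mask: Tensor) -> Tensor:
    # Deterministic "permutation": shift the masked feature values by one
    # position along the batch dimension; leave everything else untouched.
    rolled = torch.roll(x, shifts=1, dims=0)
    mask = feature_mask.to(dtype=x.dtype)
    return x * (1 - mask) + rolled * mask

net = torch.nn.Linear(3, 2)
inp = torch.randn(4, 3)
permuter = FeaturePermutation(net, perm_func=roll_feature)
attr = permuter.attribute(inp, target=0)
print(attr.shape)  # torch.Size([4, 3])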
Example #15
 def test_empty_sparse_features(self) -> None:
     ablation_algo = FeatureAblation(BasicModelWithSparseInputs())
     inp1 = torch.tensor([[1.0, -2.0, 3.0], [2.0, -1.0, 3.0]])
     inp2 = torch.tensor([])
     exp: Tuple[List[List[float]], List[float]] = ([[9.0, -3.0, 12.0]], [0.0])
     self._ablation_test_assert(ablation_algo, (inp1, inp2),
                                exp,
                                target=None)
Example #16
 def test_simple_ablation_int_to_int_nt(self) -> None:
     ablation_algo = NoiseTunnel(FeatureAblation(BasicModel()))
     inp = torch.tensor([[-3, 1, 2]]).float()
     self._ablation_test_assert(
         ablation_algo,
         inp,
         [[-3.0, 0.0, 0.0]],
         perturbations_per_eval=(1, 2, 3),
         stdevs=1e-10,
     )
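
Called directly (outside the _ablation_test_assert helper), the NoiseTunnel-wrapped ablation looks like this. A hedged sketch assuming captum.attr.NoiseTunnel's usual attribute signature, where extra keyword arguments such as target and perturbations_per_eval are forwarded to the wrapped FeatureAblation.

import torch
import torch.nn as nn
from captum.attr import FeatureAblation, NoiseTunnel

net = nn.Linear(3, 2)
inp = torch.tensor([[-3.0, 1.0, 2.0]])

nt = NoiseTunnel(FeatureAblation(net))
# With a near-zero stdev the smoothgrad averaging reduces to plain ablation.
attr = nt.attribute(inp, nt_type="smoothgrad", stdevs=1e-10,
                    target=0, perturbations_per_eval=2)
print(attr.shape)  # torch.Size([1, 3])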
Example #17
 def test_simple_ablation_boolean(self) -> None:
     ablation_algo = FeatureAblation(BasicModelBoolInput())
     inp = torch.tensor([[True, False, True]])
     self._ablation_test_assert(
         ablation_algo,
         inp,
         [[40.0, 40.0, 40.0]],
         feature_mask=torch.tensor([[0, 0, 1]]),
         perturbations_per_eval=(1, 2, 3),
     )
Example #18
 def test_multi_sample_ablation(self) -> None:
     ablation_algo = FeatureAblation(BasicModel_MultiLayer())
     inp = torch.tensor([[2.0, 10.0, 3.0], [20.0, 50.0, 30.0]],
                        requires_grad=True)
     self._ablation_test_assert(
         ablation_algo,
         inp,
         [[8.0, 35.0, 12.0], [80.0, 200.0, 120.0]],
         perturbations_per_eval=(1, 2, 3),
     )
Example #19
 def test_simple_ablation_with_mask(self) -> None:
     ablation_algo = FeatureAblation(BasicModel_MultiLayer())
     inp = torch.tensor([[20.0, 50.0, 30.0]], requires_grad=True)
     self._ablation_test_assert(
         ablation_algo,
         inp,
         [[280.0, 280.0, 120.0]],
         feature_mask=torch.tensor([[0, 0, 1]]),
         perturbations_per_eval=(1, 2, 3),
     )
Example #20
    def test_simple_ablation_int_to_float(self) -> None:
        net = BasicModel()

        def wrapper_func(inp):
            return net(inp).float()

        ablation_algo = FeatureAblation(wrapper_func)

        inp = torch.tensor([[-3, 1, 2]])
        self._ablation_test_assert(ablation_algo,
                                   inp, [[-3.0, 0.0, 0.0]],
                                   perturbations_per_eval=(1, 2, 3))
Example #21
    def test_unassociated_output_3d_tensor(self) -> None:
        def forward_func(inp):
            return torch.ones(1, 5, 3, 2)

        inp = torch.randn(10, 5)
        mask = torch.arange(5).unsqueeze(0)
        self._ablation_test_assert(
            ablation_algo=FeatureAblation(forward_func),
            test_input=inp,
            baselines=None,
            target=None,
            feature_mask=mask,
            perturbations_per_eval=(1, ),
            expected_ablation=torch.zeros((5 * 3 * 2, ) + inp[0].shape),
        )
Example #22
    def test_single_inp_ablation_multi_output_aggr_non_standard(self) -> None:
        def forward_func(inp):
            return inp[0].unsqueeze(0)

        inp = torch.tensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
        mask = torch.tensor([[0, 0, 1]])
        self._ablation_test_assert(
            ablation_algo=FeatureAblation(forward_func),
            test_input=inp,
            feature_mask=mask,
            baselines=None,
            target=None,
            perturbations_per_eval=(1, ),
            expected_ablation=[[1.0, 1.0, 0.0], [2.0, 2.0, 0.0],
                               [0.0, 0.0, 3.0]],
        )
Example #23
    def test_single_inp_ablation_multi_output_aggr_mask_none(self) -> None:
        def forward_func(inp):
            return inp[0].unsqueeze(0)

        inp = torch.tensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
        self._ablation_test_assert(
            ablation_algo=FeatureAblation(forward_func),
            test_input=inp,
            feature_mask=None,
            baselines=None,
            target=None,
            perturbations_per_eval=(1, ),
            # should just be the first input spread across each feature
            expected_ablation=[[1.0, 0.0, 0.0], [0.0, 2.0, 0.0],
                               [0.0, 0.0, 3.0]],
        )
Example #24
 def test_multi_input_ablation_with_mask_nt(self) -> None:
     ablation_algo = NoiseTunnel(
         FeatureAblation(BasicModel_MultiLayer_MultiInput()))
     inp1 = torch.tensor([[23.0, 100.0, 0.0], [20.0, 50.0, 30.0]])
     inp2 = torch.tensor([[20.0, 50.0, 30.0], [0.0, 100.0, 0.0]])
     inp3 = torch.tensor([[0.0, 100.0, 10.0], [2.0, 10.0, 3.0]])
     mask1 = torch.tensor([[1, 1, 1], [0, 1, 0]])
     mask2 = torch.tensor([[0, 1, 2]])
     mask3 = torch.tensor([[0, 1, 2], [0, 0, 0]])
     expected = (
         [[492.0, 492.0, 492.0], [200.0, 200.0, 200.0]],
         [[80.0, 200.0, 120.0], [0.0, 400.0, 0.0]],
         [[0.0, 400.0, 40.0], [60.0, 60.0, 60.0]],
     )
     self._ablation_test_assert(
         ablation_algo,
         (inp1, inp2, inp3),
         expected,
         additional_input=(1, ),
         feature_mask=(mask1, mask2, mask3),
         stdevs=1e-10,
     )
     self._ablation_test_assert(
         ablation_algo,
         (inp1, inp2),
         expected[0:1],
         additional_input=(inp3, 1),
         feature_mask=(mask1, mask2),
         perturbations_per_eval=(1, 2, 3),
         stdevs=1e-10,
     )
     expected_with_baseline = (
         [[468.0, 468.0, 468.0], [184.0, 192.0, 184.0]],
         [[68.0, 188.0, 108.0], [-12.0, 388.0, -12.0]],
         [[-16.0, 384.0, 24.0], [12.0, 12.0, 12.0]],
     )
     self._ablation_test_assert(
         ablation_algo,
         (inp1, inp2, inp3),
         expected_with_baseline,
         additional_input=(1, ),
         feature_mask=(mask1, mask2, mask3),
         baselines=(2, 3.0, 4),
         perturbations_per_eval=(1, 2, 3),
         stdevs=1e-10,
     )
Example #25
 def test_single_ablation_batch_scalar_tensor_int(self) -> None:
     net = BasicModel_MultiLayer()
     ablation_algo = FeatureAblation(
         lambda inp: int(torch.sum(net(inp)).item()))
     self._single_input_one_sample_batch_scalar_ablation_assert(
         ablation_algo, dtype=torch.int64)
Example #26
    def attribute(
        self,
        inputs: Union[Tensor, Tuple[Tensor, ...]],
        layer_baselines: BaselineType = None,
        target: TargetType = None,
        additional_forward_args: Any = None,
        layer_mask: Union[None, Tensor, Tuple[Tensor, ...]] = None,
        attribute_to_layer_input: bool = False,
        perturbations_per_eval: int = 1,
    ) -> Union[Tensor, Tuple[Tensor, ...]]:
        r"""
        Args:

            inputs (tensor or tuple of tensors):  Input for which layer
                        attributions are computed. If forward_func takes a single
                        tensor as input, a single input tensor should be provided.
                        If forward_func takes multiple tensors as input, a tuple
                        of the input tensors should be provided. It is assumed
                        that for all given input tensors, dimension 0 corresponds
                        to the number of examples, and if multiple input tensors
                        are provided, the examples must be aligned appropriately.
            layer_baselines (scalar, tensor, tuple of scalars or tensors, optional):
                        Layer baselines define reference values which replace each
                        layer input / output value when ablated.
                        Layer baselines should be a single tensor with dimensions
                        matching the input / output of the target layer (or
                        broadcastable to match it), based
                        on whether we are attributing to the input or output
                        of the target layer.
                        When `baselines` is not provided, we internally
                        use zero as the baseline for each neuron.
                        Default: None
            target (int, tuple, tensor or list, optional):  Output indices for
                        which gradients are computed (for classification cases,
                        this is usually the target class).
                        If the network returns a scalar value per example,
                        no target index is necessary.
                        For general 2D outputs, targets can be either:

                        - a single integer or a tensor containing a single
                          integer, which is applied to all input examples

                        - a list of integers or a 1D tensor, with length matching
                          the number of examples in inputs (dim 0). Each integer
                          is applied as the target for the corresponding example.

                        For outputs with > 2 dimensions, targets can be either:

                        - A single tuple, which contains #output_dims - 1
                          elements. This target index is applied to all examples.

                        - A list of tuples with length equal to the number of
                          examples in inputs (dim 0), and each tuple containing
                          #output_dims - 1 elements. Each tuple is applied as the
                          target for the corresponding example.

                        Default: None
            additional_forward_args (any, optional): If the forward function
                        requires additional arguments other than the inputs for
                        which attributions should not be computed, this argument
                        can be provided. It must be either a single additional
                        argument of a Tensor or arbitrary (non-tuple) type or a
                        tuple containing multiple additional arguments including
                        tensors or any arbitrary python types. These arguments
                        are provided to forward_func in order following the
                        arguments in inputs.
                        Note that attributions are not computed with respect
                        to these arguments.
                        Default: None
            layer_mask (tensor or tuple of tensors, optional):
                        layer_mask defines a mask for the layer, grouping
                        elements of the layer input / output which should be
                        ablated together.
                        layer_mask should be a single tensor with dimensions
                        matching the input / output of the target layer (or
                        broadcastable to match it), based
                        on whether we are attributing to the input or output
                        of the target layer. layer_mask
                        should contain integers in the range 0 to num_groups
                        - 1, and all elements with the same value are
                        considered to be in the same group.
                        If None, then a layer mask is constructed which assigns
                        each neuron within the layer as a separate group, which
                        is ablated independently.
                        Default: None
            attribute_to_layer_input (bool, optional): Indicates whether to
                        compute the attributions with respect to the layer input
                        or output. If `attribute_to_layer_input` is set to True,
                        the attributions will be computed with respect to the
                        layer's inputs; otherwise they will be computed with respect
                        to the layer's outputs.
                        Note that currently it is assumed that either the input
                        or the output of the layer, depending on whether we
                        attribute to the input or output, is a single tensor.
                        Support for multiple tensors will be added later.
                        Default: False
            perturbations_per_eval (int, optional): Allows ablation of multiple
                        neurons (or groups) to be processed simultaneously in one
                        call to forward_fn.
                        Each forward pass will contain a maximum of
                        perturbations_per_eval * #examples samples.
                        For DataParallel models, each batch is split among the
                        available devices, so evaluations on each available
                        device contain at most
                        (perturbations_per_eval * #examples) / num_devices
                        samples.
                        Default: 1

        Returns:
            *tensor* or tuple of *tensors* of **attributions**:
            - **attributions** (*tensor* or tuple of *tensors*):
                        Attribution of each neuron in given layer input or
                        output. Attributions will always be the same size as
                        the input or output of the given layer, depending on
                        whether we attribute to the inputs or outputs
                        of the layer, which is decided by the input flag
                        `attribute_to_layer_input`.
                        Attributions are returned in a tuple if
                        the layer inputs / outputs contain multiple tensors,
                        otherwise a single tensor is returned.


        Examples::

        >>> # SimpleClassifier takes a single input tensor of size Nx4x4,
        >>> # and returns an Nx3 tensor of class probabilities.
        >>> # It contains an attribute conv1, which is an instance of nn.conv2d,
        >>> # and the output of this layer has dimensions Nx12x3x3.
        >>> net = SimpleClassifier()
        >>> # Generating random input with size 2 x 4 x 4
        >>> input = torch.randn(2, 4, 4)
        >>> # Defining LayerFeatureAblation interpreter
        >>> ablator = LayerFeatureAblation(net, net.conv1)
        >>> # Computes ablation attribution, ablating each of the 108
        >>> # neurons independently.
        >>> attr = ablator.attribute(input, target=1)

        >>> # Alternatively, we may want to ablate neurons in groups, e.g.
        >>> # grouping all the layer outputs in the same row.
        >>> # This can be done by creating a layer mask as follows, which
        >>> # defines the groups of layer inputs / outputs, e.g.:
        >>> # +---+---+---+
        >>> # | 0 | 0 | 0 |
        >>> # +---+---+---+
        >>> # | 1 | 1 | 1 |
        >>> # +---+---+---+
        >>> # | 2 | 2 | 2 |
        >>> # +---+---+---+
        >>> # With this mask, all the 36 neurons in a row / channel are ablated
        >>> # simultaneously, and the attribution for each neuron in the same
        >>> # group (0 - 2) per example are the same.
        >>> # The attributions can be calculated as follows:
        >>> # layer mask has dimensions 1 x 3 x 3
        >>> layer_mask = torch.tensor([[[0,0,0],[1,1,1],
        >>>                             [2,2,2]]])
        >>> attr = ablator.attribute(input, target=1,
        >>>                          layer_mask=layer_mask)
        """
        def layer_forward_func(*args):
            layer_length = args[-1]
            layer_input = args[:layer_length]
            original_inputs = args[layer_length:-1]

            device_ids = self.device_ids
            if device_ids is None:
                device_ids = getattr(self.forward_func, "device_ids", None)

            all_layer_inputs = {}
            if device_ids is not None:
                scattered_layer_input = scatter(layer_input,
                                                target_gpus=device_ids)
                for device_tensors in scattered_layer_input:
                    all_layer_inputs[device_tensors[0].device] = device_tensors
            else:
                all_layer_inputs[layer_input[0].device] = layer_input

            def forward_hook(module, inp, out=None):
                device = _extract_device(module, inp, out)
                is_layer_tuple = (isinstance(out, tuple) if out is not None
                                  else isinstance(inp, tuple))
                if device not in all_layer_inputs:
                    raise AssertionError(
                        "Layer input not placed on appropriate "
                        "device. If using a DataParallel model, either provide the "
                        "DataParallel model as forward_func or provide device ids"
                        " to the constructor.")
                if not is_layer_tuple:
                    return all_layer_inputs[device][0]
                return all_layer_inputs[device]

            hook = None
            try:
                if attribute_to_layer_input:
                    hook = self.layer.register_forward_pre_hook(forward_hook)
                else:
                    hook = self.layer.register_forward_hook(forward_hook)
                eval = _run_forward(self.forward_func,
                                    original_inputs,
                                    target=target)
            finally:
                if hook is not None:
                    hook.remove()
            return eval

        with torch.no_grad():
            inputs = _format_tensor_into_tuples(inputs)
            additional_forward_args = _format_additional_forward_args(
                additional_forward_args)
            layer_eval = _forward_layer_eval(
                self.forward_func,
                inputs,
                self.layer,
                additional_forward_args,
                device_ids=self.device_ids,
                attribute_to_layer_input=attribute_to_layer_input,
            )
            layer_eval_len = (len(layer_eval), )
            all_inputs = ((inputs + additional_forward_args + layer_eval_len)
                          if additional_forward_args is not None else inputs +
                          layer_eval_len)

            ablator = FeatureAblation(layer_forward_func)

            layer_attribs = ablator.attribute.__wrapped__(
                ablator,  # self
                layer_eval,
                baselines=layer_baselines,
                additional_forward_args=all_inputs,
                feature_mask=layer_mask,
                perturbations_per_eval=perturbations_per_eval,
            )
            _attr = _format_output(len(layer_attribs) > 1, layer_attribs)
        return _attr
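
A minimal runnable sketch of the method above, assuming it belongs to captum.attr.LayerFeatureAblation (constructed from a model / forward function and a target layer, as in the docstring example); the two-layer network is illustrative only.

import torch
import torch.nn as nn
from captum.attr import LayerFeatureAblation

class TinyNet(nn.Module):
    def __init__(self):
        super().__init__()
        self.lin1 = nn.Linear(4, 6)
        self.lin2 = nn.Linear(6, 3)

    def forward(self, x):
        return self.lin2(torch.relu(self.lin1(x)))

net = TinyNet()
inp = torch.randn(2, 4)
ablator = LayerFeatureAblation(net, net.lin1)

# Ablate each of the 6 lin1 outputs independently (layer_mask=None) and
# attribute the target-1 logit to them; the result matches the layer size.
attr = ablator.attribute(inp, target=1)
print(attr.shape)  # torch.Size([2, 6])

# Group hidden units 0-2 and 3-5 so each group is ablated together.
layer_mask = torch.tensor([[0, 0, 0, 1, 1, 1]])
attr_grouped = ablator.attribute(inp, target=1, layer_mask=layer_mask)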
Example #27
    def attribute(
        self,
        inputs: TensorOrTupleOfTensorsGeneric,
        neuron_selector: Union[int, Tuple[Union[int, slice], ...], Callable],
        baselines: BaselineType = None,
        additional_forward_args: Any = None,
        feature_mask: Union[None, TensorOrTupleOfTensorsGeneric] = None,
        attribute_to_neuron_input: bool = False,
        perturbations_per_eval: int = 1,
    ) -> TensorOrTupleOfTensorsGeneric:
        r"""
        Args:

            inputs (tensor or tuple of tensors):  Input for which neuron
                        attributions are computed. If forward_func takes a single
                        tensor as input, a single input tensor should be provided.
                        If forward_func takes multiple tensors as input, a tuple
                        of the input tensors should be provided. It is assumed
                        that for all given input tensors, dimension 0 corresponds
                        to the number of examples, and if multiple input tensors
                        are provided, the examples must be aligned appropriately.
            neuron_selector (int, callable, or tuple of ints or slices):
                        Selector for neuron
                        in given layer for which attribution is desired.
                        Neuron selector can be provided as:

                        - a single integer, if the layer output is 2D. This integer
                          selects the appropriate neuron column in the layer input
                          or output

                        - a tuple of integers or slice objects. Length of this
                          tuple must be one less than the number of dimensions
                          in the input / output of the given layer (since
                          dimension 0 corresponds to number of examples).
                          The elements of the tuple can be either integers or
                          slice objects (a slice object allows indexing a
                          range of neurons rather than individual ones).

                          If any of the tuple elements is a slice object, the
                          indexed output tensor is used for attribution. Note
                          that specifying a slice of a tensor would amount to
                          computing the attribution of the sum of the specified
                          neurons, and not the individual neurons independently.

                        - a callable, which should
                          take the target layer as input (single tensor or tuple
                          if multiple tensors are in layer) and return a neuron or
                          aggregate of the layer's neurons for attribution.
                          For example, this function could return the
                          sum of the neurons in the layer or sum of neurons with
                          activations in a particular range. It is expected that
                          this function returns either a tensor with one element
                          or a 1D tensor with length equal to batch_size (one scalar
                          per input example)
            baselines (scalar, tensor, tuple of scalars or tensors, optional):
                        Baselines define reference value which replaces each
                        feature when ablated.
                        Baselines can be provided as:

                        - a single tensor, if inputs is a single tensor, with
                          exactly the same dimensions as inputs or
                          broadcastable to match the dimensions of inputs

                        - a single scalar, if inputs is a single tensor, which will
                          be broadcast for each input value in the input tensor.

                        - a tuple of tensors or scalars, the baseline corresponding
                          to each tensor in the inputs' tuple can be:

                          - either a tensor with matching dimensions to
                            corresponding tensor in the inputs' tuple
                            or the first dimension is one and the remaining
                            dimensions match with the corresponding
                            input tensor.

                          - or a scalar, corresponding to a tensor in the
                            inputs' tuple. This scalar value is broadcast
                            for the corresponding input tensor.
                        When `baselines` is not provided, we internally
                        use a zero scalar for each input tensor.
                        Default: None
            additional_forward_args (any, optional): If the forward function
                        requires additional arguments other than the inputs for
                        which attributions should not be computed, this argument
                        can be provided. It must be either a single additional
                        argument of a Tensor or arbitrary (non-tuple) type or a
                        tuple containing multiple additional arguments including
                        tensors or any arbitrary python types. These arguments
                        are provided to forward_func in order following the
                        arguments in inputs.
                        Note that attributions are not computed with respect
                        to these arguments.
                        Default: None
            feature_mask (tensor or tuple of tensors, optional):
                        feature_mask defines a mask for the input, grouping
                        features which should be ablated together. feature_mask
                        should contain the same number of tensors as inputs.
                        Each tensor should
                        be the same size as the corresponding input or
                        broadcastable to match the input tensor. Each tensor
                        should contain integers in the range 0 to num_features
                        - 1, and indices corresponding to the same feature should
                        have the same value.
                        Note that features within each input tensor are ablated
                        independently (not across tensors).
                        If None, then a feature mask is constructed which assigns
                        each scalar within a tensor as a separate feature, which
                        is ablated independently.
                        Default: None
            attribute_to_neuron_input (bool, optional): Indicates whether to
                        compute the attributions with respect to the neuron input
                        or output. If `attribute_to_neuron_input` is set to True,
                        the attributions will be computed with respect to the
                        neuron's inputs; otherwise they will be computed with respect
                        to the neuron's outputs.
                        Note that currently it is assumed that either the input
                        or the output of internal neurons, depending on whether we
                        attribute to the input or output, is a single tensor.
                        Support for multiple tensors will be added later.
                        Default: False
            perturbations_per_eval (int, optional): Allows ablation of multiple
                        features to be processed simultaneously in one call to
                        forward_fn.
                        Each forward pass will contain a maximum of
                        perturbations_per_eval * #examples samples.
                        For DataParallel models, each batch is split among the
                        available devices, so evaluations on each available
                        device contain at most
                        (perturbations_per_eval * #examples) / num_devices
                        samples.
                        Default: 1

        Returns:
            *tensor* or tuple of *tensors* of **attributions**:
            - **attributions** (*tensor* or tuple of *tensors*):
                        Attributions of particular neuron with respect to each input
                        feature. Attributions will always be the same size as the
                        provided inputs, with each value providing the attribution
                        of the corresponding input index.
                        If a single tensor is provided as inputs, a single tensor is
                        returned. If a tuple is provided for inputs, a tuple of
                        corresponding sized tensors is returned.

        Examples::

            >>> # SimpleClassifier takes a single input tensor of size Nx4x4,
            >>> # and returns an Nx3 tensor of class probabilities.
            >>> # It contains an attribute conv1, which is an instance of nn.conv2d,
            >>> # and the output of this layer has dimensions Nx12x3x3.
            >>> net = SimpleClassifier()
            >>> # Generating random input with size 2 x 4 x 4
            >>> input = torch.randn(2, 4, 4)
            >>> # Defining NeuronFeatureAblation interpreter
            >>> ablator = NeuronFeatureAblation(net, net.conv1)
            >>> # To compute neuron attribution, we need to provide the neuron
            >>> # index for which attribution is desired. Since the layer output
            >>> # is Nx12x3x3, we need a tuple in the form (0..11,0..2,0..2)
            >>> # which indexes a particular neuron in the layer output.
            >>> # For this example, we choose the index (4,1,2).
            >>> # Computes ablation attribution, ablating each of the 16
            >>> # scalar inputs independently.
            >>> attr = ablator.attribute(input, neuron_selector=(4,1,2))

            >>> # Alternatively, we may want to ablate features in groups, e.g.
            >>> # grouping each 2x2 square of the inputs and ablating them together.
            >>> # This can be done by creating a feature mask as follows, which
            >>> # defines the feature groups, e.g.:
            >>> # +---+---+---+---+
            >>> # | 0 | 0 | 1 | 1 |
            >>> # +---+---+---+---+
            >>> # | 0 | 0 | 1 | 1 |
            >>> # +---+---+---+---+
            >>> # | 2 | 2 | 3 | 3 |
            >>> # +---+---+---+---+
            >>> # | 2 | 2 | 3 | 3 |
            >>> # +---+---+---+---+
            >>> # With this mask, all inputs with the same value are ablated
            >>> # simultaneously, and the attribution for each input in the same
            >>> # group (0, 1, 2, and 3) per example are the same.
            >>> # The attributions can be calculated as follows:
            >>> # feature mask has dimensions 1 x 4 x 4
            >>> feature_mask = torch.tensor([[[0,0,1,1],[0,0,1,1],
            >>>                             [2,2,3,3],[2,2,3,3]]])
            >>> attr = ablator.attribute(input, neuron_selector=(4,1,2),
            >>>                          feature_mask=feature_mask)
        """
        def neuron_forward_func(*args: Any):
            with torch.no_grad():
                layer_eval = _forward_layer_eval(
                    self.forward_func,
                    args,
                    self.layer,
                    device_ids=self.device_ids,
                    attribute_to_layer_input=attribute_to_neuron_input,
                )
                return _verify_select_neuron(layer_eval, neuron_selector)

        ablator = FeatureAblation(neuron_forward_func)

        # NOTE: using __wrapped__ to not log
        return ablator.attribute.__wrapped__(
            ablator,  # self
            inputs,
            baselines=baselines,
            additional_forward_args=additional_forward_args,
            feature_mask=feature_mask,
            perturbations_per_eval=perturbations_per_eval,
        )
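
And a matching hedged sketch for the neuron variant, assuming the method above is captum.attr.NeuronFeatureAblation (constructed from a model / forward function and a layer, as in the docstring example); attributions come back at the input size.

import torch
import torch.nn as nn
from captum.attr import NeuronFeatureAblation

class TinyNet(nn.Module):
    def __init__(self):
        super().__init__()
        self.lin1 = nn.Linear(4, 6)
        self.lin2 = nn.Linear(6, 3)

    def forward(self, x):
        return self.lin2(torch.relu(self.lin1(x)))

net = TinyNet()
inp = torch.randn(2, 4)
ablator = NeuronFeatureAblation(net, net.lin1)

# lin1's output is Nx6, so a single integer selects hidden neuron 3.
# Input features {0, 1} and {2, 3} are grouped and ablated together.
mask = torch.tensor([[0, 0, 1, 1]])
attr = ablator.attribute(inp, neuron_selector=3, feature_mask=mask)
print(attr.shape)  # torch.Size([2, 4])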
Example #28
 def test_multi_sample_ablation_batch_scalar_tensor_1d(self) -> None:
     net = BasicModel_MultiLayer()
     ablation_algo = FeatureAblation(
         lambda inp: torch.sum(net(inp)).reshape(1))
     self._single_input_multi_sample_batch_scalar_ablation_assert(
         ablation_algo)
Example #29
 def test_simple_ablation_int_to_int(self) -> None:
     ablation_algo = FeatureAblation(BasicModel())
     inp = torch.tensor([[-3, 1, 2]])
     self._ablation_test_assert(ablation_algo,
                                inp, [[-3, 0, 0]],
                                perturbations_per_eval=(1, 2, 3))
Example #30
 def test_multi_inp_ablation_batch_scalar_tensor_1d(self) -> None:
     net = BasicModel_MultiLayer_MultiInput()
     ablation_algo = FeatureAblation(
         lambda *inp: torch.sum(net(*inp)).reshape(1))
     self._multi_input_batch_scalar_ablation_assert(ablation_algo)