예제 #1
0
    def test_basic_infidelity_multiple_with_normalize(self) -> None:
        """Normalized infidelity must be invariant to uniform attribution scaling."""
        first = torch.tensor([3.0] * 3)
        second = torch.tensor([1.0] * 3)
        model_inputs = (first, second)
        expected = torch.zeros(3)

        model = BasicModel2()
        attrs = IntegratedGradients(model).attribute(model_inputs)
        # Blow the attributions up by a constant factor; normalization
        # is expected to cancel this out.
        scaled_attrs = tuple(a * 100 for a in attrs)

        infid = self.infidelity_assert(
            model, attrs, model_inputs, expected, normalize=True
        )
        scaled_infid = self.infidelity_assert(
            model, scaled_attrs, model_inputs, expected, normalize=True
        )

        # scaling attr should not change normalized infidelity
        assertTensorAlmostEqual(self, infid, scaled_infid)
예제 #2
0
 def infidelity_assert(self,
                       model: Module,
                       attributions: TensorOrTupleOfTensorsGeneric,
                       inputs: TensorOrTupleOfTensorsGeneric,
                       expected: Tensor,
                       additional_args: Any = None,
                       baselines: BaselineType = None,
                       n_perturb_samples: int = 10,
                       target: TargetType = None,
                       max_batch_size: int = None,
                       multi_input: bool = True,
                       perturb_func: Callable = _local_perturb_func,
                       normalize: bool = False,
                       **kwargs: Any) -> Tensor:
     """Compute the infidelity metric for ``attributions`` and assert the
     result matches ``expected`` within a 0.05 tolerance.

     Returns the computed infidelity tensor so callers can compare the
     results of several configurations against each other.

     NOTE(review): ``max_batch_size`` defaults to None, so its annotation
     should arguably be ``Optional[int]`` — left as-is to avoid a new
     import. ``multi_input`` and ``**kwargs`` are accepted but never used
     in this body; presumably kept for signature compatibility with
     callers/perturb functions — confirm before removing.
     """
     infid = infidelity(
         model,
         perturb_func,
         inputs,
         attributions,
         additional_forward_args=additional_args,
         target=target,
         baselines=baselines,
         n_perturb_samples=n_perturb_samples,
         max_examples_per_batch=max_batch_size,
         normalize=normalize,
     )
     # 0.05 is the absolute tolerance used across these infidelity tests.
     assertTensorAlmostEqual(self, infid, expected, 0.05)
     return infid
예제 #3
0
 def _occlusion_test_assert(
     self,
     model: Callable,
     test_input: TensorOrTupleOfTensorsGeneric,
     expected_ablation: Union[float, List[float], List[List[float]],
                              Tuple[Union[Tensor, List[float],
                                          List[List[float]]], ...], ],
     sliding_window_shapes: Union[Tuple[int, ...], Tuple[Tuple[int, ...],
                                                         ...]],
     target: TargetType = 0,
     additional_input: Any = None,
     perturbations_per_eval: Tuple[int, ...] = (1, ),
     baselines: BaselineType = None,
     strides: Union[None, int, Tuple[Union[int, Tuple[int, ...]],
                                     ...]] = None,
     show_progress: bool = False,
 ) -> None:
     """Run Occlusion under each batch size and compare attributions to
     ``expected_ablation``."""
     for per_eval in perturbations_per_eval:
         occlusion = Occlusion(model)
         attrs = occlusion.attribute(
             test_input,
             sliding_window_shapes=sliding_window_shapes,
             target=target,
             additional_forward_args=additional_input,
             baselines=baselines,
             perturbations_per_eval=per_eval,
             strides=strides,
             show_progress=show_progress,
         )
         # Tuple expectations are compared per input tensor.
         if isinstance(expected_ablation, tuple):
             for idx, expected in enumerate(expected_ablation):
                 assertTensorAlmostEqual(self, attrs[idx], expected)
         else:
             assertTensorAlmostEqual(self, attrs, expected_ablation)
예제 #4
0
 def _guided_grad_cam_test_assert(
     self,
     model: Module,
     target_layer: Module,
     test_input: TensorOrTupleOfTensorsGeneric,
     expected,
     additional_input: Any = None,
     interpolate_mode: str = "nearest",
     attribute_to_layer_input: bool = False,
 ) -> None:
     """Run GuidedGradCam on ``target_layer`` and compare to ``expected``."""
     guided_gc = GuidedGradCam(model, target_layer)
     # GuidedGradCam never multiplies attributions by the inputs.
     self.assertFalse(guided_gc.multiplies_by_inputs)
     attributions = guided_gc.attribute(
         test_input,
         target=0,
         additional_forward_args=additional_input,
         interpolate_mode=interpolate_mode,
         attribute_to_layer_input=attribute_to_layer_input,
     )
     if isinstance(test_input, tuple):
         # One attribution tensor per input tensor.
         for idx, _ in enumerate(test_input):
             assertTensorAlmostEqual(self, attributions[idx], expected[idx],
                                     delta=0.01)
     else:
         assertTensorAlmostEqual(self, attributions, expected, delta=0.01)
예제 #5
0
 def _assert_compare_with_layer_conductance(
         self,
         model: Module,
         input: Tensor,
         attribute_to_layer_input: bool = False):
     """LayerConductance and LayerIntegratedGradients should converge to
     nearly identical values for a large number of steps."""
     lc = LayerConductance(model, cast(Module, model.linear2))
     lc_attr, lc_delta = lc.attribute(
         input,
         target=0,
         n_steps=1500,
         return_convergence_delta=True,
         attribute_to_layer_input=attribute_to_layer_input,
     )
     lig = LayerIntegratedGradients(model, cast(Module, model.linear2))
     lig_attr, lig_delta = lig.attribute(
         input,
         target=0,
         n_steps=1500,
         return_convergence_delta=True,
         attribute_to_layer_input=attribute_to_layer_input,
     )
     # Attributions should match tightly; deltas are noisier.
     assertTensorAlmostEqual(self, lc_attr, lig_attr, delta=0.01, mode="max")
     assertTensorAlmostEqual(self, lc_delta, lig_delta, delta=0.5, mode="max")
예제 #6
0
    def _compare_sample_grads_per_sample(
        self,
        model: Module,
        inputs: Tuple[Tensor, ...],
        loss_fn: Callable,
        loss_type: str = "mean",
    ):
        """Per-sample gradients from SampleGradientWrapper must match
        gradients computed one example at a time with autograd."""
        wrapper = SampleGradientWrapper(model)
        wrapper.add_hooks()
        out = model(*inputs)
        wrapper.compute_param_sample_gradients(loss_fn(out), loss_type)

        supported = tuple(SUPPORTED_MODULES.keys())
        n_examples = inputs[0].shape[0]
        for idx in range(n_examples):
            # Recompute the gradient for just this one example.
            model.zero_grad()
            single = tuple(t[idx:idx + 1] for t in inputs)
            loss_fn(model(*single)).backward()
            for module in model.modules():
                if not isinstance(module, supported):
                    continue
                assertTensorAlmostEqual(
                    self,
                    module.weight.grad,
                    module.weight.sample_grad[idx],  # type: ignore
                    mode="max",
                )
                assertTensorAlmostEqual(
                    self,
                    module.bias.grad,
                    module.bias.sample_grad[idx],  # type: ignore
                    mode="max",
                )
예제 #7
0
파일: test_PGD.py 프로젝트: pytorch/captum
 def test_attack_random_start(self) -> None:
     """PGD with random_start must keep the perturbation inside the
     epsilon ball, for both Linf and L2 norms."""
     model = BasicModel()
     input = torch.tensor([[2.0, -9.0, 9.0, 1.0, -3.0]])
     adv = PGD(model)

     # Linf attack: each coordinate stays within 0.25 of the original.
     perturbed = adv.perturb(input, 0.25, 0.1, 0, 4, random_start=True)
     assertTensorAlmostEqual(
         self,
         perturbed,
         [[2.0, -9.0, 9.0, 1.0, -3.0]],
         delta=0.25,
         mode="max",
     )

     # L2 attack: the overall perturbation norm must not exceed 0.25.
     perturbed = adv.perturb(
         input, 0.25, 0.1, 0, 4, norm="L2", random_start=True
     )
     norm = torch.norm((perturbed - input).squeeze()).numpy()
     self.assertLessEqual(norm, 0.25)
예제 #8
0
    def test_classification_infidelity_tpl_target(self) -> None:
        """Infidelity with tuple targets must be stable across batch caps.

        Saliency attributions are deterministic for a fixed input, so they
        are computed once and reused for both infidelity runs — the original
        recomputed the identical attribution twice.
        """
        model = BasicModel_MultiLayer()
        input = torch.arange(1.0, 13.0).view(4, 3)
        additional_forward_args = (torch.arange(1, 13).view(4,
                                                            3).float(), True)
        targets: List = [(0, 1, 1), (0, 1, 1), (1, 1, 1), (0, 1, 1)]
        sa = Saliency(model)

        # Deterministic attribution: compute once, reuse below.
        attributions = sa.attribute(
            input,
            target=targets,
            additional_forward_args=additional_forward_args)

        infid1 = self.infidelity_assert(
            model,
            attributions,
            input,
            torch.zeros(4),
            additional_args=additional_forward_args,
            target=targets,
            multi_input=False,
        )

        # Same run with a capped batch size — result must be near-identical.
        infid2 = self.infidelity_assert(
            model,
            attributions,
            input,
            torch.zeros(4),
            additional_args=additional_forward_args,
            target=targets,
            max_batch_size=2,
            multi_input=False,
        )
        assertTensorAlmostEqual(self, infid1, infid2, delta=1e-05, mode="max")
예제 #9
0
    def _assert_attributions(
        self,
        model: Module,
        layer: Module,
        inputs: Tensor,
        baselines: Union[Tensor, Callable[..., Tensor]],
        neuron_ind: Union[int, Tuple[Union[int, slice], ...], Callable],
        n_samples: int = 5,
    ) -> None:
        """NeuronGradientShap should approximate NeuronIntegratedGradients
        averaged over the individual baselines."""
        ngs = NeuronGradientShap(model, layer)
        nig = NeuronIntegratedGradients(model, layer)
        attrs_gs = ngs.attribute(inputs,
                                 neuron_ind,
                                 baselines=baselines,
                                 n_samples=n_samples,
                                 stdevs=0.09)

        # Materialize callable baselines before unbinding them.
        if callable(baselines):
            baselines = baselines(inputs)

        # IG per baseline, then averaged — the GradientShap expectation.
        per_baseline = [
            nig.attribute(inputs, neuron_ind, baselines=b.unsqueeze(0))
            for b in torch.unbind(baselines)
        ]
        mean_ig = torch.stack(per_baseline, dim=0).mean(dim=0)
        self.assertTrue(ngs.multiplies_by_inputs)
        assertTensorAlmostEqual(self, attrs_gs, mean_ig, 0.5)
예제 #10
0
    def test_nested_multi_embeddings(self):
        """Wrapping a nested embedding layer in InterpretableEmbeddingBase
        must not change model outputs, and removing the wrapper must
        restore the original TextModule layer."""
        input1 = torch.tensor([[3, 2, 0], [1, 2, 4]])
        input2 = torch.tensor([[0, 1, 0], [2, 6, 8]])
        input3 = torch.tensor([[4, 1, 0], [2, 2, 8]])
        model = BasicEmbeddingModel(nested_second_embedding=True)
        # Reference output/embeddings captured before the layer is wrapped.
        output = model(input1, input2, input3)
        expected = model.embedding2(input=input2, another_input=input3)
        # in this case we make interpretable the custom embedding layer - TextModule
        interpretable_embedding2 = configure_interpretable_embedding_layer(
            model, "embedding2")
        actual = interpretable_embedding2.indices_to_embeddings(
            input=input2, another_input=input3)
        # Feeding precomputed embeddings through the wrapped model should
        # reproduce the original output.
        output_interpretable_models = model(input1, actual)
        assertTensorAlmostEqual(self,
                                output,
                                output_interpretable_models,
                                delta=0.05,
                                mode="max")

        assertTensorAlmostEqual(self, expected, actual, delta=0.0, mode="max")
        # While wrapped, embedding2 is the InterpretableEmbeddingBase;
        # removing the wrapper must swap the original TextModule back in.
        self.assertTrue(
            model.embedding2.__class__ is InterpretableEmbeddingBase)
        remove_interpretable_embedding_layer(model, interpretable_embedding2)
        self.assertTrue(model.embedding2.__class__ is TextModule)
        self._assert_embeddings_equal(input2, output, interpretable_embedding2)
예제 #11
0
    def test_sensitivity_additional_forward_args_multi_args(self) -> None:
        """DeepLift sensitivity with extra forward args should not depend
        on the perturbation-sample count or batching configuration."""
        model = BasicModel4_MultiArgs()

        inputs = (
            torch.tensor([[1.5, 2.0, 3.3]]),
            torch.tensor([[3.0, 3.5, 2.2]]),
        )
        args = torch.tensor([[1.0, 3.0, 4.0]])
        # Renamed local (was misleadingly called `ig`): this is DeepLift.
        dl = DeepLift(model)

        sens_single = self.sensitivity_max_assert(
            dl.attribute,
            inputs,
            torch.zeros(1),
            additional_forward_args=args,
            n_perturb_samples=1,
            max_examples_per_batch=1,
            perturb_func=_perturb_func,
        )

        sens_batched = self.sensitivity_max_assert(
            dl.attribute,
            inputs,
            torch.zeros(1),
            additional_forward_args=args,
            n_perturb_samples=4,
            max_examples_per_batch=2,
            perturb_func=_perturb_func,
        )
        assertTensorAlmostEqual(self, sens_single, sens_batched, 0.0)
예제 #12
0
    def test_mulitple_perturbations_per_eval(self) -> None:
        """FeaturePermutation with several perturbations per eval: only the
        target feature may receive a non-zero attribution."""
        n_perturbations = 4
        batch_size = 2
        input_size = (4, )
        inp = torch.randn((batch_size, ) + input_size)

        def forward_func(x):
            return 1 - x

        target = 1
        feature_importance = FeaturePermutation(forward_func=forward_func)
        attribs = feature_importance.attribute(
            inp, perturbations_per_eval=n_perturbations, target=target)
        self.assertTrue(attribs.size() == (batch_size, ) + input_size)

        # Every non-target feature must be untouched (zero attribution).
        for col in range(inp.size(1)):
            if col != target:
                assertTensorAlmostEqual(self, attribs[:, col], 0)

        # The target column equals the output change when the two rows of
        # the batch are permuted into each other.
        y = forward_func(inp)
        swapped_diff = torch.stack([(y[0] - y[1])[target],
                                    (y[1] - y[0])[target]])
        assertTensorAlmostEqual(self, attribs[:, target], swapped_diff)
예제 #13
0
    def test_convnet_multi_target_and_default_pert_func(self) -> None:
        r"""
        GuidedBackprop sensitivity with the default perturbation function
        must not depend on how examples are split into evaluation batches.
        """
        model = BasicModel_ConvNet_One_Conv()
        gbp = GuidedBackprop(model)

        # 20 identical 1x4x4 examples built from the values 1..16.
        input = torch.stack([torch.arange(1, 17).float()] * 20,
                            dim=0).view(20, 1, 4, 4)

        sens_big_batches = self.sensitivity_max_assert(
            gbp.attribute,
            input,
            torch.zeros(20),
            perturb_func=default_perturb_func,
            target=torch.tensor([1] * 20),
            n_perturb_samples=10,
            max_examples_per_batch=40,
        )

        sens_small_batches = self.sensitivity_max_assert(
            gbp.attribute,
            input,
            torch.zeros(20),
            perturb_func=default_perturb_func,
            target=torch.tensor([1] * 20),
            n_perturb_samples=10,
            max_examples_per_batch=5,
        )
        assertTensorAlmostEqual(self, sens_big_batches, sens_small_batches)
예제 #14
0
파일: test_FGSM.py 프로젝트: pytorch/captum
 def _FGSM_assert(
         self,
         model: Callable,
         inputs: TensorOrTupleOfTensorsGeneric,
         target: Any,
         epsilon: float,
         answer: Union[TensorLikeList, Tuple[TensorLikeList, ...]],
         targeted=False,
         additional_inputs: Any = None,
         lower_bound: float = float("-inf"),
         upper_bound: float = float("inf"),
 ) -> None:
     """Run an FGSM attack and check the perturbed input matches ``answer``."""
     adv = FGSM(model, lower_bound=lower_bound, upper_bound=upper_bound)
     perturbed = adv.perturb(inputs, epsilon, target, additional_inputs,
                             targeted)
     # Single tensor vs tuple of tensors: compare element-wise either way.
     if isinstance(perturbed, Tensor):
         assertTensorAlmostEqual(self, perturbed, answer, delta=0.01,
                                 mode="max")
     else:
         for i, actual in enumerate(perturbed):
             assertTensorAlmostEqual(self, actual, answer[i], delta=0.01,
                                     mode="max")
예제 #15
0
 def _ablation_test_assert(
     self,
     model: Module,
     layer: Module,
     test_input: TensorOrTupleOfTensorsGeneric,
     expected_ablation: Union[List[float], List[List[float]],
                              Tuple[Tensor, ...], Tuple[List[List[float]],
                                                        ...], ],
     feature_mask: Union[None, TensorOrTupleOfTensorsGeneric] = None,
     additional_input: Any = None,
     perturbations_per_eval: Tuple[int, ...] = (1, ),
     baselines: BaselineType = None,
     neuron_selector: Union[int, Tuple[Union[int, slice], ...],
                            Callable] = 0,
     attribute_to_neuron_input: bool = False,
 ) -> None:
     """Run NeuronFeatureAblation for every batch size and compare the
     attributions to ``expected_ablation``."""
     for per_eval in perturbations_per_eval:
         ablation = NeuronFeatureAblation(model, layer)
         # Neuron feature ablation multiplies by inputs by default.
         self.assertTrue(ablation.multiplies_by_inputs)
         attrs = ablation.attribute(
             test_input,
             neuron_selector=neuron_selector,
             feature_mask=feature_mask,
             additional_forward_args=additional_input,
             baselines=baselines,
             perturbations_per_eval=per_eval,
             attribute_to_neuron_input=attribute_to_neuron_input,
         )
         # Tuple expectations are compared per input tensor.
         if isinstance(expected_ablation, tuple):
             for idx, expected in enumerate(expected_ablation):
                 assertTensorAlmostEqual(self, attrs[idx], expected)
         else:
             assertTensorAlmostEqual(self, attrs, expected_ablation)
예제 #16
0
    def test_lrp_skip_connection(self) -> None:
        """LRP through a skip connection.

        A custom addition module is needed so that relevance is propagated
        correctly: a bare ``+`` inside ``forward`` is not a Module and
        cannot carry an LRP rule.
        """
        class Addition_Module(nn.Module):
            """Wraps tensor addition so an LRP rule can be attached to it."""

            def forward(self, x1: Tensor, x2: Tensor) -> Tensor:
                return x1 + x2

        class SkipConnection(nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.linear = nn.Linear(2, 2, bias=False)
                self.linear.weight.data.fill_(5)
                self.add = Addition_Module()

            # Fixed return annotation: forward returns a Tensor, not Module.
            def forward(self, input: Tensor) -> Tensor:
                x = self.add(self.linear(input), input)
                return x

        model = SkipConnection()
        input = torch.Tensor([[2, 3]])
        model.add.rule = EpsilonRule()  # type: ignore
        lrp = LRP(model)
        relevance = lrp.attribute(input, target=1)
        assertTensorAlmostEqual(self, relevance, torch.Tensor([[10, 18]]))
예제 #17
0
 def sensitivity_max_assert(
     self,
     expl_func: Callable,
     inputs: TensorOrTupleOfTensorsGeneric,
     expected_sensitivity: Tensor,
     perturb_func: Callable = _perturb_func,
     n_perturb_samples: int = 5,
     max_examples_per_batch: int = None,
     baselines: BaselineType = None,
     target: TargetType = None,
     additional_forward_args: Any = None,
 ) -> Tensor:
     """Compute sensitivity_max and assert it matches ``expected_sensitivity``
     within a 0.05 tolerance; the computed tensor is returned so callers
     can compare several runs.

     The original duplicated the entire ``sensitivity_max`` call in two
     branches differing only in ``baselines``; this builds the kwargs once
     and adds ``baselines`` only when provided, preserving the original
     behavior of omitting the kwarg entirely when it is None.

     NOTE(review): ``max_examples_per_batch`` defaults to None, so its
     annotation should arguably be ``Optional[int]`` — left unchanged to
     avoid a new import.
     """
     sens_kwargs = dict(
         perturb_func=perturb_func,
         target=target,
         additional_forward_args=additional_forward_args,
         n_perturb_samples=n_perturb_samples,
         max_examples_per_batch=max_examples_per_batch,
     )
     # Only forward `baselines` when explicitly given, matching the
     # original two-branch call.
     if baselines is not None:
         sens_kwargs["baselines"] = baselines
     sens = sensitivity_max(expl_func, inputs, **sens_kwargs)
     assertTensorAlmostEqual(self, sens, expected_sensitivity, 0.05)
     return sens
예제 #18
0
    def test_linear_layer_deeplift_batch(self) -> None:
        """LayerDeepLift on layer l3 for a batched input, attributing both
        to the layer's inputs and to its outputs."""
        model = ReLULinearModel(inplace=True)
        _, baselines = _create_inps_and_base_for_deeplift_neuron_layer_testing(
        )
        # Three identical rows per input tensor.
        x1 = torch.tensor([[-10.0, 1.0, -5.0]] * 3, requires_grad=True)
        x2 = torch.tensor([[3.0, 3.0, 1.0]] * 3, requires_grad=True)
        inputs = (x1, x2)

        layer_dl = LayerDeepLift(model, model.l3)

        # Attributions w.r.t. the layer's inputs.
        attributions, delta = layer_dl.attribute(
            inputs,
            baselines,
            attribute_to_layer_input=True,
            return_convergence_delta=True,
        )
        assertTensorAlmostEqual(self, attributions[0], [[0.0, 15.0]])
        assert_delta(self, delta)

        # Attributions w.r.t. the layer's outputs.
        attributions, delta = layer_dl.attribute(
            inputs,
            baselines,
            attribute_to_layer_input=False,
            return_convergence_delta=True,
        )
        assertTensorAlmostEqual(self, attributions, [[15.0]])
        assert_delta(self, delta)
예제 #19
0
    def test_classification_sensitivity_tpl_target_w_baseline(self) -> None:
        """DeepLift sensitivity with tuple targets and baselines should be
        stable when max_examples_per_batch changes."""
        model = BasicModel_MultiLayer()
        input = torch.arange(1.0, 13.0).view(4, 3)
        baseline = torch.ones(4, 3)
        additional_forward_args = (torch.arange(1, 13).view(4, 3).float(), True)
        targets: List = [(0, 1, 1), (0, 1, 1), (1, 1, 1), (0, 1, 1)]
        dl = DeepLift(model)

        sens_unbatched = self.sensitivity_max_assert(
            dl.attribute,
            input,
            torch.tensor([0.01, 0.003, 0.001, 0.001]),
            additional_forward_args=additional_forward_args,
            baselines=baseline,
            target=targets,
            n_perturb_samples=10,
            perturb_func=_perturb_func,
        )
        # Same computation with batching enabled must agree.
        sens_batched = self.sensitivity_max_assert(
            dl.attribute,
            input,
            torch.zeros(4),
            additional_forward_args=additional_forward_args,
            baselines=baseline,
            target=targets,
            n_perturb_samples=10,
            perturb_func=_perturb_func,
            max_examples_per_batch=30,
        )
        assertTensorAlmostEqual(self, sens_unbatched, sens_batched)
예제 #20
0
    def test_sensitivity_max_multi_dim_batching(self) -> None:
        """Saliency sensitivity on a multi-row input must be independent of
        how perturbation samples are batched."""
        model = BasicModel_MultiLayer()

        input = torch.arange(1.0, 16.0).view(5, 3)

        additional_forward_args = (torch.ones(5, 3).float(), False)
        targets: List = [0, 0, 0, 0, 0]

        sa = Saliency(model)

        sens_unbatched = self.sensitivity_max_assert(
            sa.attribute,
            input,
            torch.zeros(5),
            n_perturb_samples=1,
            max_examples_per_batch=None,
            perturb_func=_perturb_func,
            target=targets,
            additional_forward_args=additional_forward_args,
        )
        sens_batched = self.sensitivity_max_assert(
            sa.attribute,
            input,
            torch.zeros(5),
            n_perturb_samples=10,
            max_examples_per_batch=10,
            perturb_func=_perturb_func,
            target=targets,
            additional_forward_args=additional_forward_args,
        )
        assertTensorAlmostEqual(self, sens_unbatched, sens_batched, 0.0)
    def test_batched_input_smoothgrad_wo_mutliplying_by_inputs(self) -> None:
        """NoiseTunnel smoothgrad with zero stdev: attributions computed
        without multiplying by inputs, times the inputs, must equal the
        default attributions. Also checks the deprecated ``n_samples``
        kwarg raises a DeprecationWarning."""
        model = BasicModel_MultiLayer()
        inputs = torch.tensor(
            [[1.5, 2.0, 1.3], [0.5, 0.1, 2.3], [1.5, 2.0, 1.3]], requires_grad=True
        )
        ig_wo_mutliplying_by_inputs = IntegratedGradients(
            model, multiply_by_inputs=False
        )
        nt_wo_mutliplying_by_inputs = NoiseTunnel(ig_wo_mutliplying_by_inputs)

        ig = IntegratedGradients(model)
        nt = NoiseTunnel(ig)
        n_samples = 5
        target = 0
        # Renamed from `type` to avoid shadowing the builtin.
        nt_type = "smoothgrad"
        attributions_wo_mutliplying_by_inputs = nt_wo_mutliplying_by_inputs.attribute(
            inputs,
            nt_type=nt_type,
            nt_samples=n_samples,
            stdevs=0.0,
            target=target,
            n_steps=500,
        )
        # `n_samples` is the deprecated alias of `nt_samples` and must warn.
        with self.assertWarns(DeprecationWarning):
            attributions = nt.attribute(
                inputs,
                nt_type=nt_type,
                n_samples=n_samples,
                stdevs=0.0,
                target=target,
                n_steps=500,
            )
        assertTensorAlmostEqual(
            self, attributions_wo_mutliplying_by_inputs * inputs, attributions
        )
예제 #22
0
    def test_var_defin(self):
        """
        Variance is the average squared distance to the mean, so it must be
        positive for these non-constant samples.

        We build skewed distributions leaning toward one end (mostly very
        large or mostly very small values) and compare the streaming
        variance against torch's batch computation (population variance,
        ``unbiased=False``).
        """
        SMALL_VAL = -10000
        BIG_VAL = 10000
        for n_small, n_big in [(100, 10), (10, 100)]:
            summ = Summarizer([Var()])
            values = [SMALL_VAL] * n_small + [BIG_VAL] * n_big
            # Feed samples one at a time to exercise the streaming update.
            for v in values:
                summ.update(torch.tensor(v, dtype=torch.float64))

            expected_var = torch.var(torch.tensor(values).double(),
                                     unbiased=False)

            var = summ.summary["variance"]

            assertTensorAlmostEqual(self, var, expected_var)
            self.assertTrue((var > 0).all())
예제 #23
0
 def _assert_attribution(self, expected_grad, input, attribution):
     """Attribution should equal gradient times input (max-diff check)."""
     expected = expected_grad * input
     assertTensorAlmostEqual(self, attribution, expected, delta=0.05,
                             mode="max")
예제 #24
0
 def test_reduce_list_tuples(self):
     """_reduce_list should stack matching positions of a list of tuples."""
     def make_pair():
         return (torch.tensor([[3, 4, 5]]), torch.tensor([[0, 1, 2]]))

     reduced = _reduce_list([make_pair(), make_pair()])
     # Each output position holds both rows from the corresponding inputs.
     assertTensorAlmostEqual(self, reduced[0], [[3, 4, 5], [3, 4, 5]])
     assertTensorAlmostEqual(self, reduced[1], [[0, 1, 2], [0, 1, 2]])
예제 #25
0
 def test_lrp_simple_attributions(self) -> None:
     """Epsilon-rule LRP on the simple model yields known relevances."""
     model, inputs = _get_simple_model()
     model.eval()
     # Attach the epsilon rule to both linear layers.
     for layer in (model.linear, model.linear2):
         layer.rule = EpsilonRule()  # type: ignore
     relevance = LRP(model).attribute(inputs)
     assertTensorAlmostEqual(self, relevance,
                             torch.tensor([18.0, 36.0, 54.0]))
예제 #26
0
 def test_lrp_ixg_equivalency(self) -> None:
     """Without custom rules, LRP matches Input x Gradient on this model."""
     model, inputs = _get_simple_model()
     attributions_lrp = LRP(model).attribute(inputs)
     attributions_ixg = InputXGradient(model).attribute(inputs)
     # Divide by score because LRP relevance is normalized.
     assertTensorAlmostEqual(self, attributions_lrp, attributions_ixg)
예제 #27
0
    def test_tracin_regression_1D_numerical(
            self, reduction: str, tracin_constructor: Callable) -> None:
        """Numerically verify TracIn influence scores on a 1-D regression.

        No training happens: five checkpoints with known constant weights
        are written to disk, then every pairwise influence score returned
        by ``tracin.influence`` is compared against the analytically
        derived value (see the in-body derivation).
        """
        low = 1
        high = 17
        features = 1
        dataset = RangeDataset(low, high, features)
        net = CoefficientNet()
        self.assertTrue(isinstance(reduction, str))
        criterion = nn.MSELoss(reduction=cast(str, reduction))
        batch_size = 4
        weights = [0.4379, 0.1653, 0.5132, 0.3651, 0.9992]

        train_inputs = dataset.samples
        train_labels = dataset.labels

        with tempfile.TemporaryDirectory() as tmpdir:

            # Write one checkpoint per known constant weight value.
            for i, weight in enumerate(weights):
                net.fc1.weight.data.fill_(weight)
                checkpoint_name = "-".join(
                    ["checkpoint-reg", str(i + 1) + ".pt"])
                torch.save(net.state_dict(),
                           os.path.join(tmpdir, checkpoint_name))

            self.assertTrue(callable(tracin_constructor))
            tracin = tracin_constructor(
                net,
                dataset,
                tmpdir,
                batch_size,
                criterion,
            )

            train_scores = tracin.influence(train_inputs, train_labels, k=None)
            r"""
            Derivation for gradient / resulting TracIn score:

            For each checkpoint: $y = Wx,$ and $loss = (y - label)^2.$ Recall for this
            test case, there is no activation on y. For this example, $label = x.$

            Fast Rand Proj gives $\nabla_W loss = \nabla_y loss (x^T).$ We have $x$ and
            y as scalars so we can simply multiply. So then,

            \[\nabla_y loss * x = 2(y-x)*x = 2(Wx -x)*x = 2x^2 (w - 1).\]

            And we simply multiply these for x, x'. In this case, $x, x' \in [1..16]$.
            """
            # Hoisted out of the double loop: the weight tensor is
            # loop-invariant and was previously rebuilt on every iteration.
            _weights = torch.Tensor(weights)
            for i in range(train_scores.shape[0]):
                for j in range(len(train_scores[0])):
                    num = 2 * (i + 1) * (i + 1) * (_weights - 1)
                    num *= 2 * (j + 1) * (j + 1) * (_weights - 1)
                    assertTensorAlmostEqual(self,
                                            torch.sum(num),
                                            train_scores[i][j],
                                            delta=0.1)
예제 #28
0
 def test_tuple_splice_range_3d(self):
     """_tuple_splice_range slices tensor elements along dim 0 and passes
     non-tensor elements through unchanged."""
     tensor_3d = torch.tensor([[[0, 1, 2], [3, 4, 5]],
                               [[6, 7, 8], [6, 7, 8]]])
     spliced = _tuple_splice_range((tensor_3d, "test"), 1, 2)
     # The tensor is sliced to rows [1, 2) ...
     assertTensorAlmostEqual(self, spliced[0], [[[6, 7, 8], [6, 7, 8]]])
     # ... while the string element passes through untouched.
     self.assertEqual(spliced[1], "test")
예제 #29
0
    def test_basic_multilayer(self) -> None:
        """NeuronGradientShap without multiplying by inputs: with zero
        stdev the attribution reduces to the plain gradient (all ones for
        this linear layer)."""
        model = BasicModel_MultiLayer(inplace=True)
        model.eval()

        inputs = torch.tensor([[1.0, 20.0, 10.0]])
        baselines = torch.zeros(2, 3)
        neuron_gs = NeuronGradientShap(model, model.linear1,
                                       multiply_by_inputs=False)
        self.assertFalse(neuron_gs.multiplies_by_inputs)
        attr = neuron_gs.attribute(inputs, 0, baselines=baselines, stdevs=0.0)
        assertTensorAlmostEqual(self, attr, [1.0, 1.0, 1.0])
예제 #30
0
 def test_lrp_simple_repeat_attributions(self) -> None:
     """Running LRP must not mutate the model: outputs computed before and
     after attribution have to match."""
     model, inputs = _get_simple_model()
     model.eval()
     model.linear.rule = GammaRule()  # type: ignore
     model.linear2.rule = Alpha1_Beta0_Rule()  # type: ignore
     output_before = model(inputs)
     _ = LRP(model).attribute(inputs)
     output_after = model(inputs)
     assertTensorAlmostEqual(self, output_before, output_after)