Example #1
    def test_basic_infidelity_multiple(self) -> None:
        input1 = torch.tensor([3.0] * 3)
        input2 = torch.tensor([1.0] * 3)
        inputs = (input1, input2)
        expected = torch.zeros(3)

        infid = self.basic_model_assert(BasicModel2(), inputs, expected)
        infid_w_common_func = self.basic_model_assert(
            BasicModel2(),
            inputs,
            expected,
            perturb_func=_local_perturb_func_default,
            multiply_by_inputs=False,
        )
        assertTensorAlmostEqual(self, infid, infid_w_common_func)
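These snippets rely on test fixtures such as BasicModel2 that are defined elsewhere in the test suite. To make them easier to follow, here is a minimal sketch of a two-input toy model that is consistent with the expected attributions and gradients asserted in the examples below, f(x1, x2) = ReLU(ReLU(x1) - 1 - ReLU(x2)); this definition is an assumption for illustration, not the fixture's verbatim code.

import torch
import torch.nn as nn
import torch.nn.functional as F


class BasicModel2(nn.Module):
    # Hypothetical fixture: f(x1, x2) = ReLU(ReLU(x1) - 1 - ReLU(x2)),
    # applied elementwise, so the output has the same shape as the inputs.
    def forward(self, input1: torch.Tensor, input2: torch.Tensor) -> torch.Tensor:
        relu_out1 = F.relu(input1)
        relu_out2 = F.relu(input2)
        return F.relu(relu_out1 - 1 - relu_out2)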
Example #2
    def test_basic_relu_multi_input(self) -> None:
        model = BasicModel2()

        input1 = torch.tensor([[3.0]])
        input2 = torch.tensor([[1.0]], requires_grad=True)

        baseline1 = torch.tensor([[0.0]])
        baseline2 = torch.tensor([[0.0]])
        inputs = (input1, input2)
        baselines = (baseline1, baseline2)

        gs = GradientShap(model)
        n_samples = 30000
        attributions, delta = cast(
            Tuple[Tuple[Tensor, ...], Tensor],
            gs.attribute(
                inputs,
                baselines=baselines,
                n_samples=n_samples,
                return_convergence_delta=True,
            ),
        )
        _assert_attribution_delta(self, inputs, attributions, n_samples, delta)

        ig = IntegratedGradients(model)
        attributions_ig = ig.attribute(inputs, baselines=baselines)
        self._assert_shap_ig_comparision(attributions, attributions_ig)
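Both GradientShap and IntegratedGradients approximately satisfy completeness: the attributions sum to f(inputs) - f(baselines), and the returned convergence delta measures the per-example residual, which is roughly what the _assert_attribution_delta helper is verifying. A self-contained sketch of that property, assuming the BasicModel2 sketch above:

import torch
from captum.attr import IntegratedGradients

model = BasicModel2()  # hypothetical sketch above
inputs = (torch.tensor([[3.0]]), torch.tensor([[1.0]]))
baselines = (torch.tensor([[0.0]]), torch.tensor([[0.0]]))

ig = IntegratedGradients(model)
attributions = ig.attribute(inputs, baselines=baselines)

# Completeness: sum of attributions ~= f(inputs) - f(baselines).
total_attr = sum(attr.sum() for attr in attributions)
expected = model(*inputs).sum() - model(*baselines).sum()
assert torch.isclose(total_attr, expected, atol=0.05)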
Example #3
    def test_basic_sensitivity_max_multiple_gradshap(self) -> None:
        model = BasicModel2()
        gs = GradientShap(model)

        input1 = torch.tensor([0.0] * 5)
        input2 = torch.tensor([0.0] * 5)

        baseline1 = torch.arange(0, 2).float() / 1000
        baseline2 = torch.arange(0, 2).float() / 1000

        self.sensitivity_max_assert(
            gs.attribute,
            (input1, input2),
            torch.zeros(5),
            baselines=(baseline1, baseline2),
            max_examples_per_batch=2,
        )

        self.sensitivity_max_assert(
            gs.attribute,
            (input1, input2),
            torch.zeros(5),
            baselines=(baseline1, baseline2),
            max_examples_per_batch=20,
        )
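sensitivity_max_assert wraps Captum's sensitivity_max metric, which re-runs the attribution method on slightly perturbed inputs and reports the maximum change; the tests above compare the result against a zero tensor. Calling the metric directly looks roughly like the following sketch (keyword names follow my reading of captum.metrics.sensitivity_max; extra kwargs such as baselines are forwarded to the attribution method):

import torch
from captum.attr import Saliency
from captum.metrics import sensitivity_max

model = BasicModel2()  # hypothetical sketch above
sa = Saliency(model)

input1 = torch.tensor([[3.0]])
input2 = torch.tensor([[1.0]])

# Maximum change in the saliency map under small random input perturbations;
# one sensitivity score is returned per example in the batch.
sens = sensitivity_max(sa.attribute, (input1, input2), n_perturb_samples=10)
print(sens)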
Example #4
    def test_basic_infidelity_multiple_with_normalize(self) -> None:
        input1 = torch.tensor([3.0] * 3)
        input2 = torch.tensor([1.0] * 3)
        inputs = (input1, input2)
        expected = torch.zeros(3)

        model = BasicModel2()
        ig = IntegratedGradients(model)
        attrs = ig.attribute(inputs)
        scaled_attrs = tuple(attr * 100 for attr in attrs)

        infid = self.infidelity_assert(
            model,
            attrs,
            inputs,
            expected,
            normalize=True,
        )
        scaled_infid = self.infidelity_assert(
            model,
            scaled_attrs,
            inputs,
            expected,
            normalize=True,
        )

        # scaling attr should not change normalized infidelity
        assertTensorAlmostEqual(self, infid, scaled_infid)
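The scale-invariance asserted here comes from how normalization works: with normalize=True the infidelity metric first fits an optimal scalar beta minimizing E[(beta * I^T phi - delta_f)^2], and beta absorbs any constant factor applied to the attributions phi. A tiny standalone illustration of that cancellation (the closed-form beta below is my own sketch of the idea, not Captum's exact implementation):

import torch

torch.manual_seed(0)
dots = torch.randn(100)                       # I^T * attribution for 100 perturbations
deltas = 0.8 * dots + 0.1 * torch.randn(100)  # f(x) - f(x - I)


def normalized_infidelity(d: torch.Tensor, df: torch.Tensor) -> torch.Tensor:
    beta = (d * df).mean() / (d * d).mean()  # optimal scaling factor
    return ((beta * d - df) ** 2).mean()


# Scaling the attributions scales the dot products, but beta shrinks by the
# same factor, so the normalized infidelity is unchanged.
assert torch.isclose(
    normalized_infidelity(dots, deltas),
    normalized_infidelity(100 * dots, deltas),
    rtol=1e-4,
)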
Example #5
    def test_basic_infidelity_single(self) -> None:
        input1 = torch.tensor([3.0])
        input2 = torch.tensor([1.0])
        inputs = (input1, input2)
        expected = torch.zeros(1)

        self.basic_model_assert(BasicModel2(), inputs, expected)
Example #6
    def _assert_multi_variable(
        self,
        type: str,
        approximation_method: str = "gausslegendre",
        multiply_by_inputs: bool = True,
    ) -> None:
        model = BasicModel2()

        input1 = torch.tensor([3.0])
        input2 = torch.tensor([1.0], requires_grad=True)

        baseline1 = torch.tensor([0.0])
        baseline2 = torch.tensor([0.0])

        attributions1 = self._compute_attribution_and_evaluate(
            model,
            (input1, input2),
            (baseline1, baseline2),
            type=type,
            approximation_method=approximation_method,
            multiply_by_inputs=multiply_by_inputs,
        )
        if type == "vanilla":
            assertArraysAlmostEqual(
                attributions1[0].tolist(),
                [1.5] if multiply_by_inputs else [0.5],
                delta=0.05,
            )
            assertArraysAlmostEqual(
                attributions1[1].tolist(),
                [-0.5] if multiply_by_inputs else [-0.5],
                delta=0.05,
            )
        model = BasicModel3()
        attributions2 = self._compute_attribution_and_evaluate(
            model,
            (input1, input2),
            (baseline1, baseline2),
            type=type,
            approximation_method=approximation_method,
            multiply_by_inputs=multiply_by_inputs,
        )
        if type == "vanilla":
            assertArraysAlmostEqual(
                attributions2[0].tolist(),
                [1.5] if multiply_by_inputs else [0.5],
                delta=0.05,
            )
            assertArraysAlmostEqual(
                attributions2[1].tolist(),
                [-0.5] if multiply_by_inputs else [-0.5],
                delta=0.05,
            )
            # Verifies implementation invariance
            self.assertEqual(
                sum(attribution for attribution in attributions1),
                sum(attribution for attribution in attributions2),
            )
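The final assertEqual is the implementation-invariance check from the Integrated Gradients paper: BasicModel3 computes the same function as BasicModel2 through a different graph, so the total attribution must match. A matching sketch, again an assumption rather than the fixture's verbatim code, assuming f(x1, x2) = ReLU(ReLU(x1 - 1) - ReLU(x2)):

import torch
import torch.nn as nn
import torch.nn.functional as F


class BasicModel3(nn.Module):
    # Hypothetical fixture: same function as the BasicModel2 sketch above,
    # but computed as f(x1, x2) = ReLU(ReLU(x1 - 1) - ReLU(x2)).
    def forward(self, input1: torch.Tensor, input2: torch.Tensor) -> torch.Tensor:
        relu_out1 = F.relu(input1 - 1)
        relu_out2 = F.relu(input2)
        return F.relu(relu_out1 - relu_out2)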
Example #7
    def test_gradient_target_list(self) -> None:
        model = BasicModel2()
        input1 = torch.tensor([[4.0, -1.0], [3.0, 10.0]], requires_grad=True)
        input2 = torch.tensor([[2.0, -5.0], [-2.0, 1.0]], requires_grad=True)
        grads = compute_gradients(model, (input1, input2), target_ind=[0, 1])
        assertArraysAlmostEqual(
            torch.flatten(grads[0]).tolist(), [1.0, 0.0, 0.0, 1.0], delta=0.01
        )
        assertArraysAlmostEqual(
            torch.flatten(grads[1]).tolist(), [-1.0, 0.0, 0.0, -1.0], delta=0.01
        )
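compute_gradients is an internal Captum test utility; target_ind=[0, 1] selects output index 0 for the first example and index 1 for the second before differentiating. A plain-PyTorch sketch of the same computation, assuming the BasicModel2 sketch above:

import torch

model = BasicModel2()  # hypothetical sketch above
input1 = torch.tensor([[4.0, -1.0], [3.0, 10.0]], requires_grad=True)
input2 = torch.tensor([[2.0, -5.0], [-2.0, 1.0]], requires_grad=True)

out = model(input1, input2)                            # shape (2, 2)
selected = out[torch.arange(2), torch.tensor([0, 1])]  # one output index per example
grads = torch.autograd.grad(selected.sum(), (input1, input2))

print(grads[0])  # ~[[1., 0.], [0., 1.]] under the assumed model
print(grads[1])  # ~[[-1., 0.], [0., -1.]]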
Example #8
    def test_attack_label_list(self) -> None:
        model = BasicModel2()
        input1 = torch.tensor([[4.0, -1.0], [3.0, 10.0]], requires_grad=True)
        input2 = torch.tensor([[2.0, -5.0], [-2.0, 1.0]], requires_grad=True)
        self._FGSM_assert(
            model,
            (input1, input2),
            [0, 1],
            0.1,
            ([[3.9, -1.0], [3.0, 9.9]], [[2.1, -5.0], [-2.0, 1.1]]),
        )
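_FGSM_assert is a helper from the test class; the underlying operation is a single signed-gradient step of size epsilon on each input, evaluated at the given label(s). A direct sketch using captum.robust.FGSM, passing (inputs, epsilon, target) positionally as the helper's arguments suggest (treat the exact parameter order, names, and loss defaults as assumptions):

import torch
from captum.robust import FGSM

model = BasicModel2()  # hypothetical sketch above
input1 = torch.tensor([[4.0, -1.0], [3.0, 10.0]], requires_grad=True)
input2 = torch.tensor([[2.0, -5.0], [-2.0, 1.0]], requires_grad=True)

fgsm = FGSM(model)
# Untargeted attack with epsilon=0.1 at labels [0, 1]: each input moves one
# 0.1-sized step in the signed gradient direction, which should match the
# expected tensors asserted in the test above.
perturbed = fgsm.perturb((input1, input2), 0.1, [0, 1])
print([p.tolist() for p in perturbed])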
Example #9
    def test_attack_multiinput(self) -> None:
        model = BasicModel2()
        input1 = torch.tensor([[4.0, -1.0], [3.0, 10.0]], requires_grad=True)
        input2 = torch.tensor([[2.0, -5.0], [-2.0, 1.0]], requires_grad=True)
        self._FGSM_assert(
            model,
            (input1, input2),
            0,
            0.25,
            ([[3.75, -1.0], [2.75, 10.0]], [[2.25, -5.0], [-2.0, 1.0]]),
        )
Example #10
    def test_basic_infidelity_multiple_with_batching(self) -> None:
        input1 = torch.tensor([3.0] * 20)
        input2 = torch.tensor([1.0] * 20)
        expected = torch.zeros(20)

        infid1 = self.basic_model_assert(
            BasicModel2(),
            (input1, input2),
            expected,
            n_perturb_samples=5,
            max_batch_size=21,
        )
        infid2 = self.basic_model_assert(
            BasicModel2(),
            (input1, input2),
            expected,
            n_perturb_samples=5,
            max_batch_size=60,
        )
        assertArraysAlmostEqual(infid1, infid2, 0.01)
Example #11
    def test_basic_sensitivity_max_multiple(self) -> None:
        model = BasicModel2()
        sa = Saliency(model)

        input1 = torch.tensor([3.0] * 20)
        input2 = torch.tensor([1.0] * 20)
        self.sensitivity_max_assert(
            sa.attribute, (input1, input2), torch.zeros(20), max_examples_per_batch=21
        )
        self.sensitivity_max_assert(
            sa.attribute, (input1, input2), torch.zeros(20), max_examples_per_batch=60
        )
Example #12
    def test_basic_sensitivity_max_single(self) -> None:
        model = BasicModel2()
        sa = Saliency(model)

        input1 = torch.tensor([3.0])
        input2 = torch.tensor([1.0])
        self.sensitivity_max_assert(
            sa.attribute,
            (input1, input2),
            torch.zeros(1),
            perturb_func=default_perturb_func,
        )
Example #13
    def test_attack_label_tensor(self) -> None:
        model = BasicModel2()
        input1 = torch.tensor([[4.0, -1.0], [3.0, 10.0]], requires_grad=True)
        input2 = torch.tensor([[2.0, -5.0], [-2.0, 1.0]], requires_grad=True)
        labels = torch.tensor([0, 1])
        self._FGSM_assert(
            model,
            (input1, input2),
            labels,
            0.1,
            ([[4.1, -1.0], [3.0, 10.1]], [[1.9, -5.0], [-2.0, 0.9]]),
            targeted=True,
        )
Example #14
    def test_gradient_target_int(self) -> None:
        model = BasicModel2()
        input1 = torch.tensor([[4.0, -1.0]], requires_grad=True)
        input2 = torch.tensor([[2.0, 5.0]], requires_grad=True)
        grads0 = compute_gradients(model, (input1, input2), target_ind=0)
        grads1 = compute_gradients(model, (input1, input2), target_ind=1)
        assertArraysAlmostEqual(grads0[0].squeeze(0).tolist(), [1.0, 0.0], delta=0.01)
        assertArraysAlmostEqual(grads0[1].squeeze(0).tolist(), [-1.0, 0.0], delta=0.01)
        assertArraysAlmostEqual(grads1[0].squeeze(0).tolist(), [0.0, 0.0], delta=0.01)
        assertArraysAlmostEqual(grads1[1].squeeze(0).tolist(), [0.0, 0.0], delta=0.01)
Example #15
    def test_attack_multiinput(self) -> None:
        model = BasicModel2()
        input1 = torch.tensor([[4.0, -1.0], [3.0, 10.0]], requires_grad=True)
        input2 = torch.tensor([[2.0, -5.0], [-2.0, 1.0]], requires_grad=True)
        adv = PGD(model)
        perturbed_input = adv.perturb((input1, input2), 0.25, 0.1, 3, 0, norm="L2")
        answer = ([3.75, -1.0, 2.75, 10.0], [2.25, -5.0, -2.0, 1.0])
        for i in range(len(perturbed_input)):
            assertArraysAlmostEqual(
                torch.flatten(perturbed_input[i]).tolist(), answer[i], delta=0.01
            )
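The bare positional arguments to adv.perturb are easy to misread, so here is the same call restated with the role of each value spelled out in comments; the roles are inferred from the values and from PGD's documented interface, so treat them as my reading rather than the definitive signature.

import torch
from captum.robust import PGD

model = BasicModel2()  # hypothetical sketch above
input1 = torch.tensor([[4.0, -1.0], [3.0, 10.0]], requires_grad=True)
input2 = torch.tensor([[2.0, -5.0], [-2.0, 1.0]], requires_grad=True)

adv = PGD(model)
perturbed_input = adv.perturb(
    (input1, input2),  # inputs to attack
    0.25,              # radius of the ball the perturbation is projected into
    0.1,               # step size of each projected-gradient step
    3,                 # number of steps
    0,                 # output index / label used to compute the loss
    norm="L2",         # project with respect to the L2 norm
)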