def test_relu_linear_deeplift_compare_inplace(self) -> None:
        model1 = ReLULinearModel(inplace=True)
        x1 = torch.tensor([[-10.0, 1.0, -5.0], [2.0, 3.0, 4.0]], requires_grad=True)
        x2 = torch.tensor([[3.0, 3.0, 1.0], [2.3, 5.0, 4.0]], requires_grad=True)
        inputs = (x1, x2)
        attributions1 = DeepLift(model1).attribute(inputs)

        model2 = ReLULinearModel()
        attributions2 = DeepLift(model2).attribute(inputs)
        assertTensorAlmostEqual(self, attributions1[0], attributions2[0])
        assertTensorAlmostEqual(self, attributions1[1], attributions2[1])
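
Every example in this listing drives the same small two-branch toy network, ReLULinearModel, whose definition is not reproduced here. Below is a minimal sketch of what it plausibly looks like: the layer names l1, l2, relu and l3 are taken from the tests themselves, but the fixed weights and the optional scalar forward argument are assumptions chosen to stay consistent with the attribution values asserted in these tests.

import torch
import torch.nn as nn


class ReLULinearModel(nn.Module):
    # Sketch only: two independent linear branches whose outputs are
    # concatenated, passed through a shared ReLU, and combined by a final
    # linear layer l3. The weight values are assumptions.
    def __init__(self, inplace: bool = False) -> None:
        super().__init__()
        self.l1 = nn.Linear(3, 1, bias=False)
        self.l2 = nn.Linear(3, 1, bias=False)
        self.l1.weight = nn.Parameter(torch.tensor([[3.0, 1.0, 0.0]]))
        self.l2.weight = nn.Parameter(torch.tensor([[2.0, 3.0, 0.0]]))
        self.relu = nn.ReLU(inplace=inplace)
        self.l3 = nn.Linear(2, 1, bias=False)
        self.l3.weight = nn.Parameter(torch.tensor([[1.0, 1.0]]))

    def forward(self, x1, x2, x3=1.0):
        # x3 plays the role of the scalar passed via additional_forward_args
        # in test_relu_layer_deeplift_add_args.
        return self.l3(self.relu(torch.cat([self.l1(x1), self.l2(x2)], dim=1)) * x3)

Under these assumed weights, x1 = [-10.0, 1.0, -5.0] drives its branch to 3*(-10) + 1*1 = -29, which the ReLU clamps to 0, while x2 = [3.0, 3.0, 1.0] contributes 2*3 + 3*3 = 15; that is why the expected attributions in these tests are zero for the first input and 15 (or its per-feature split 6.0, 9.0, 0.0) for the second.
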
Example #2
    def test_linear_layer_deeplift_batch(self) -> None:
        model = ReLULinearModel(inplace=True)
        _, baselines = _create_inps_and_base_for_deeplift_neuron_layer_testing()
        x1 = torch.tensor(
            [[-10.0, 1.0, -5.0], [-10.0, 1.0, -5.0], [-10.0, 1.0, -5.0]],
            requires_grad=True,
        )
        x2 = torch.tensor([[3.0, 3.0, 1.0], [3.0, 3.0, 1.0], [3.0, 3.0, 1.0]],
                          requires_grad=True)
        inputs = (x1, x2)

        layer_dl = LayerDeepLift(model, model.l3)
        attributions, delta = layer_dl.attribute(
            inputs,
            baselines,
            attribute_to_layer_input=True,
            return_convergence_delta=True,
        )
        assertTensorAlmostEqual(self, attributions[0], [[0.0, 15.0]])
        assert_delta(self, delta)

        attributions, delta = layer_dl.attribute(
            inputs,
            baselines,
            attribute_to_layer_input=False,
            return_convergence_delta=True,
        )
        assertTensorAlmostEqual(self, attributions, [[15.0]])
        assert_delta(self, delta)
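
Several tests rely on the helper _create_inps_and_base_for_deeplift_neuron_layer_testing, which is not shown in this listing. A plausible sketch, assuming it returns the same single-row inputs used by the standalone tests here together with all-zero baselines of matching shape, is:

from typing import Tuple

import torch
from torch import Tensor


def _create_inps_and_base_for_deeplift_neuron_layer_testing() -> Tuple[
    Tuple[Tensor, Tensor], Tuple[Tensor, Tensor]
]:
    # Assumed values: one row per input tensor, zero baselines of the same shape.
    x1 = torch.tensor([[-10.0, 1.0, -5.0]], requires_grad=True)
    x2 = torch.tensor([[3.0, 3.0, 1.0]], requires_grad=True)
    b1 = torch.zeros(1, 3)
    b2 = torch.zeros(1, 3)
    return (x1, x2), (b1, b2)
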
    def test_linear_neuron_deeplift_shap_wo_inp_marginal_effects(self) -> None:
        model = ReLULinearModel()
        (
            inputs,
            baselines,
        ) = _create_inps_and_base_for_deepliftshap_neuron_layer_testing()

        neuron_dl = NeuronDeepLiftShap(model,
                                       model.l3,
                                       multiply_by_inputs=False)
        attributions = neuron_dl.attribute(inputs,
                                           0,
                                           baselines,
                                           attribute_to_neuron_input=False)

        assertTensorAlmostEqual(self, attributions[0], [[-0.0, 0.0, -0.0]])
        assertTensorAlmostEqual(self, attributions[1], [[2.0, 3.0, 0.0]])

        attributions = neuron_dl.attribute(inputs,
                                           lambda x: x[:, 0],
                                           baselines,
                                           attribute_to_neuron_input=False)

        assertTensorAlmostEqual(self, attributions[0], [[-0.0, 0.0, -0.0]])
        assertTensorAlmostEqual(self, attributions[1], [[2.0, 3.0, 0.0]])
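
The DeepLiftShap tests use a second helper, _create_inps_and_base_for_deepliftshap_neuron_layer_testing. DeepLiftShap averages DeepLift attributions over a distribution of baselines, so the baselines presumably carry several rows; the exact values and the baseline batch size below are assumptions.

from typing import Tuple

import torch
from torch import Tensor


def _create_inps_and_base_for_deepliftshap_neuron_layer_testing() -> Tuple[
    Tuple[Tensor, Tensor], Tuple[Tensor, Tensor]
]:
    # Assumed: same single-row inputs as the DeepLift helper, but a small
    # batch of zero baselines to act as the baseline distribution.
    x1 = torch.tensor([[-10.0, 1.0, -5.0]], requires_grad=True)
    x2 = torch.tensor([[3.0, 3.0, 1.0]], requires_grad=True)
    b1 = torch.zeros(3, 3)
    b2 = torch.zeros(3, 3)
    return (x1, x2), (b1, b2)
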
Example #4
    def test_relu_deeplift_with_custom_attr_func(self) -> None:
        model = ReLULinearModel()
        inputs, baselines = _create_inps_and_base_for_deeplift_neuron_layer_testing()
        attr_method = LayerDeepLift(model, model.l3)
        self._relu_custom_attr_func_assert(attr_method, inputs, baselines, [[2.0]])

    def test_relu_deeplift_with_custom_attr_func(self) -> None:
        model = ReLULinearModel()
        inputs, baselines = _create_inps_and_base_for_deeplift_neuron_layer_testing()
        neuron_dl = NeuronDeepLift(model, model.l3)
        expected = ([0.0, 0.0, 0.0], [0.0, 0.0, 0.0])
        self._relu_custom_attr_func_assert(neuron_dl, inputs, baselines, expected)
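
Both tests above defer to a _relu_custom_attr_func_assert helper that is not included in the listing; they most likely come from different test classes, each with its own version of that helper, which would explain the differing expected values. A minimal sketch of the neuron-side version, assuming it reuses the multiplier-zeroing custom attribution function that appears verbatim in the DeepLiftShap example near the end of this listing, is:

def _relu_custom_attr_func_assert(self, attr_method, inputs, baselines, expected) -> None:
    # Assumed: a custom_attribution_func that zeroes every multiplier, so all
    # attributions collapse to zero regardless of inputs and baselines.
    def custom_attr_func(multipliers, inputs, baselines):
        return tuple(multiplier * 0.0 for multiplier in multipliers)

    attrs = attr_method.attribute(
        inputs, 0, baselines, custom_attribution_func=custom_attr_func
    )
    assertTensorAlmostEqual(self, attrs[0], expected[0])
    assertTensorAlmostEqual(self, attrs[1], expected[1])
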
    def test_relu_linear_deeplift_batch(self) -> None:
        model = ReLULinearModel(inplace=True)
        x1 = torch.tensor([[-10.0, 1.0, -5.0], [2.0, 3.0, 4.0]], requires_grad=True)
        x2 = torch.tensor([[3.0, 3.0, 1.0], [2.3, 5.0, 4.0]], requires_grad=True)

        inputs = (x1, x2)
        baselines = (torch.zeros(1, 3), torch.rand(1, 3) * 0.001)
        # expected = [[[0.0, 0.0]], [[6.0, 2.0]]]
        self._deeplift_assert(model, DeepLift(model), inputs, baselines)
    def test_deeplift_compare_with_and_without_inplace(self) -> None:
        model1 = ReLULinearModel(inplace=True)
        model2 = ReLULinearModel()
        x1 = torch.tensor([[-10.0, 1.0, -5.0]], requires_grad=True)
        x2 = torch.tensor([[3.0, 3.0, 1.0]], requires_grad=True)
        inputs = (x1, x2)
        neuron_dl1 = NeuronDeepLift(model1, model1.relu)
        attributions1 = neuron_dl1.attribute(inputs,
                                             0,
                                             attribute_to_neuron_input=False)

        neuron_dl2 = NeuronDeepLift(model2, model2.relu)
        attributions2 = neuron_dl2.attribute(inputs,
                                             0,
                                             attribute_to_neuron_input=False)

        assertTensorAlmostEqual(self, attributions1[0], attributions2[0])
        assertTensorAlmostEqual(self, attributions1[1], attributions2[1])
    def test_relu_linear_deeplift(self) -> None:
        model = ReLULinearModel(inplace=False)
        x1 = torch.tensor([[-10.0, 1.0, -5.0]], requires_grad=True)
        x2 = torch.tensor([[3.0, 3.0, 1.0]], requires_grad=True)

        inputs = (x1, x2)
        baselines = (0, 0.0001)

        # expected = [[[0.0, 0.0]], [[6.0, 2.0]]]
        self._deeplift_assert(model, DeepLift(model), inputs, baselines)
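
self._deeplift_assert is a shared helper whose body is also not shown here; its job is to run the attribution method and verify DeepLift's completeness property, i.e. that the attributions sum to the difference between the model output at the inputs and at the baselines, which is what the convergence delta measures. A minimal sketch, assuming that behaviour and support for the callable baseline generators defined further down, is:

from inspect import signature


def _deeplift_assert(self, model, attr_method, inputs, baselines) -> None:
    # Baselines may be given directly or as a callable that generates them,
    # optionally from the inputs (see gen_baselines_with_inputs below).
    if callable(baselines):
        if len(signature(baselines).parameters) > 0:
            baselines = baselines(inputs)
        else:
            baselines = baselines()

    model.zero_grad()
    attributions, delta = attr_method.attribute(
        inputs, baselines, return_convergence_delta=True
    )
    # Completeness check: the convergence delta should be close to zero.
    assert_delta(self, delta)
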
    def test_relu_deepliftshap_with_custom_attr_func(self) -> None:
        model = ReLULinearModel()
        (
            inputs,
            baselines,
        ) = _create_inps_and_base_for_deepliftshap_neuron_layer_testing()
        neuron_dl = NeuronDeepLiftShap(model, model.l3)
        expected = (torch.zeros(3, 3), torch.zeros(3, 3))
        self._relu_custom_attr_func_assert(neuron_dl, inputs, baselines, expected)
Example #10
    def test_relu_layer_deeplift_wo_multiplying_by_inputs(self) -> None:
        model = ReLULinearModel(inplace=True)
        inputs, baselines = _create_inps_and_base_for_deeplift_neuron_layer_testing()

        layer_dl = LayerDeepLift(model, model.relu, multiply_by_inputs=False)
        attributions = layer_dl.attribute(
            inputs,
            baselines,
            attribute_to_layer_input=True,
        )
        assertTensorAlmostEqual(self, attributions[0], [[0.0, 1.0]])
Example #11
    def test_linear_layer_deeplift(self) -> None:
        model = ReLULinearModel(inplace=True)
        inputs, baselines = _create_inps_and_base_for_deeplift_neuron_layer_testing()

        layer_dl = LayerDeepLift(model, model.l3)
        attributions, delta = layer_dl.attribute(
            inputs,
            baselines,
            attribute_to_layer_input=True,
            return_convergence_delta=True,
        )
        assertTensorAlmostEqual(self, attributions[0], [[0.0, 15.0]])
        assert_delta(self, delta)
    def test_relu_neuron_deeplift(self) -> None:
        model = ReLULinearModel(inplace=True)

        x1 = torch.tensor([[-10.0, 1.0, -5.0]], requires_grad=True)
        x2 = torch.tensor([[3.0, 3.0, 1.0]], requires_grad=True)

        inputs = (x1, x2)

        neuron_dl = NeuronDeepLift(model, model.relu)
        attributions = neuron_dl.attribute(inputs,
                                           0,
                                           attribute_to_neuron_input=False)
        assertTensorAlmostEqual(self, attributions[0], [[0.0, 0.0, 0.0]])
        assertTensorAlmostEqual(self, attributions[1], [[0.0, 0.0, 0.0]])
Example #13
    def test_relu_layer_deeplift_add_args(self) -> None:
        model = ReLULinearModel()
        inputs, baselines = _create_inps_and_base_for_deeplift_neuron_layer_testing()

        layer_dl = LayerDeepLift(model, model.relu)
        attributions, delta = layer_dl.attribute(
            inputs,
            baselines,
            additional_forward_args=3.0,
            attribute_to_layer_input=True,
            return_convergence_delta=True,
        )
        assertTensorAlmostEqual(self, attributions[0], [[0.0, 45.0]])
        assert_delta(self, delta)
Example #14
    def test_relu_layer_deepliftshap(self) -> None:
        model = ReLULinearModel()
        (
            inputs,
            baselines,
        ) = _create_inps_and_base_for_deepliftshap_neuron_layer_testing()
        layer_dl_shap = LayerDeepLiftShap(model, model.relu)
        attributions, delta = layer_dl_shap.attribute(
            inputs,
            baselines,
            attribute_to_layer_input=True,
            return_convergence_delta=True,
        )
        assertTensorAlmostEqual(self, attributions[0], [[0.0, 15.0]])
        assert_delta(self, delta)
    def test_relu_neuron_deeplift_shap(self) -> None:
        model = ReLULinearModel()
        (
            inputs,
            baselines,
        ) = _create_inps_and_base_for_deepliftshap_neuron_layer_testing()

        neuron_dl = NeuronDeepLiftShap(model, model.relu)

        attributions = neuron_dl.attribute(inputs,
                                           0,
                                           baselines,
                                           attribute_to_neuron_input=False)
        assertTensorAlmostEqual(self, attributions[0], [[0.0, 0.0, 0.0]])
        assertTensorAlmostEqual(self, attributions[1], [[0.0, 0.0, 0.0]])
Example #16
    def test_relu_deepliftshap_baselines_as_func(self) -> None:
        model = ReLULinearModel(inplace=False)
        x1 = torch.tensor([[-10.0, 1.0, -5.0]])
        x2 = torch.tensor([[3.0, 3.0, 1.0]])

        def gen_baselines() -> Tuple[Tensor, ...]:
            b1 = torch.tensor([[0.0, 0.0, 0.0], [1.0, 1.0, 1.0]])
            b2 = torch.tensor([[0.0, 0.0, 0.0], [1.0, 1.0, 1.0]])
            return (b1, b2)

        def gen_baselines_scalar() -> Tuple[float, ...]:
            return (0.0, 0.0001)

        def gen_baselines_with_inputs(
                inputs: Tuple[Tensor, ...]) -> Tuple[Tensor, ...]:
            b1 = torch.cat([inputs[0], inputs[0] - 10])
            b2 = torch.cat([inputs[1], inputs[1] - 10])
            return (b1, b2)

        def gen_baselines_returns_array() -> Tuple[List[List[float]], ...]:
            b1 = [[0.0, 0.0, 0.0], [1.0, 1.0, 1.0]]
            b2 = [[0.0, 0.0, 0.0], [1.0, 1.0, 1.0]]
            return (b1, b2)

        inputs = (x1, x2)

        dl_shap = DeepLiftShap(model)
        self._deeplift_assert(model, dl_shap, inputs, gen_baselines)
        self._deeplift_assert(model, dl_shap, inputs,
                              gen_baselines_with_inputs)
        with self.assertRaises(AssertionError):
            self._deeplift_assert(model, DeepLiftShap(model), inputs,
                                  gen_baselines_returns_array)
        with self.assertRaises(AssertionError):
            self._deeplift_assert(model, dl_shap, inputs, gen_baselines_scalar)

        baselines = gen_baselines()
        attributions = dl_shap.attribute(inputs, baselines)
        attributions_with_func = dl_shap.attribute(inputs, gen_baselines)
        assertTensorAlmostEqual(self, attributions[0],
                                attributions_with_func[0])
        assertTensorAlmostEqual(self, attributions[1],
                                attributions_with_func[1])
    def test_linear_neuron_deeplift(self) -> None:
        model = ReLULinearModel()
        inputs, baselines = _create_inps_and_base_for_deeplift_neuron_layer_testing()

        neuron_dl = NeuronDeepLift(model, model.l3)
        attributions = neuron_dl.attribute(inputs,
                                           0,
                                           baselines,
                                           attribute_to_neuron_input=True)
        assertTensorAlmostEqual(self, attributions[0], [[-0.0, 0.0, -0.0]])
        assertTensorAlmostEqual(self, attributions[1], [[0.0, 0.0, 0.0]])

        attributions = neuron_dl.attribute(inputs,
                                           0,
                                           baselines,
                                           attribute_to_neuron_input=False)
        self.assertTrue(neuron_dl.multiplies_by_inputs)
        assertTensorAlmostEqual(self, attributions[0], [[-0.0, 0.0, -0.0]])
        assertTensorAlmostEqual(self, attributions[1], [[6.0, 9.0, 0.0]])
    def test_relu_deepliftshap_with_custom_attr_func(self) -> None:
        def custom_attr_func(
            multipliers: Tuple[Tensor, ...],
            inputs: Tuple[Tensor, ...],
            baselines: Tuple[Tensor, ...],
        ) -> Tuple[Tensor, ...]:
            return tuple(multiplier * 0.0 for multiplier in multipliers)

        model = ReLULinearModel(inplace=True)
        x1 = torch.tensor([[-10.0, 1.0, -5.0]])
        x2 = torch.tensor([[3.0, 3.0, 1.0]])
        b1 = torch.tensor([[0.0, 0.0, 0.0], [1.0, 1.0, 1.0]])
        b2 = torch.tensor([[0.0, 0.0, 0.0], [1.0, 1.0, 1.0]])
        inputs = (x1, x2)
        baselines = (b1, b2)
        dls = DeepLiftShap(model)
        attr_w_func = dls.attribute(
            inputs, baselines, custom_attribution_func=custom_attr_func
        )

        assertTensorAlmostEqual(self, attr_w_func[0], [[0.0, 0.0, 0.0]], 0.0)
        assertTensorAlmostEqual(self, attr_w_func[1], [[0.0, 0.0, 0.0]], 0.0)
Example #19
 },
 {
     "name": "conv_occlusion_with_perturbations_per_eval",
     "algorithms": [Occlusion],
     "model": BasicModel_ConvNet(),
     "attribute_args": {
         "inputs": torch.arange(400).view(4, 1, 10, 10).float(),
         "perturbations_per_eval": 8,
         "sliding_window_shapes": (1, 4, 2),
         "target": 0,
     },
 },
 {
     "name": "basic_multi_input_with_perturbations_per_eval_occlusion",
     "algorithms": [Occlusion],
     "model": ReLULinearModel(),
     "attribute_args": {
         "inputs": (torch.randn(4, 3), torch.randn(4, 3)),
         "perturbations_per_eval": 2,
         "sliding_window_shapes": ((2,), (1,)),
     },
 },
 {
     "name": "basic_multiple_tuple_target_occlusion",
     "algorithms": [Occlusion],
     "model": BasicModel_MultiLayer(),
     "attribute_args": {
         "inputs": torch.randn(4, 3),
         "target": [(1, 0, 0), (0, 1, 1), (1, 1, 1), (0, 0, 0)],
         "additional_forward_args": (None, True),
         "sliding_window_shapes": (2,),