Example #1
 def _internal_influence_test_assert(
     self,
     model: Module,
     target_layer: Module,
     test_input: Union[Tensor, Tuple[Tensor, ...]],
     expected_activation: Union[
         float,
         List[List[float]],
         Tuple[List[float], ...],
         Tuple[List[List[float]], ...],
     ],
     baseline: BaselineType = None,
     additional_args: Any = None,
     attribute_to_layer_input: bool = False,
 ):
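     # Attributions should not depend on internal batching, so repeat the check for several internal_batch_size values.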
     for internal_batch_size in [None, 5, 20]:
         int_inf = InternalInfluence(model, target_layer)
         self.assertFalse(int_inf.multiplies_by_inputs)
         attributions = int_inf.attribute(
             test_input,
             baselines=baseline,
             target=0,
             n_steps=500,
             method="riemann_trapezoid",
             additional_forward_args=additional_args,
             internal_batch_size=internal_batch_size,
             attribute_to_layer_input=attribute_to_layer_input,
         )
         assertTensorTuplesAlmostEqual(
             self, attributions, expected_activation, delta=0.01, mode="max"
         )
Example #2
    def _conductance_test_assert(
        self,
        model: Module,
        target_layer: Module,
        test_input: Union[Tensor, Tuple[Tensor, ...]],
        expected_conductance: Union[List[List[float]], Tuple[List[List[float]], ...]],
        baselines: BaselineType = None,
        additional_args: Any = None,
    ) -> None:
        cond = LayerConductance(model, target_layer)
        self.assertTrue(cond.multiplies_by_inputs)
        for internal_batch_size in (None, 4, 20):
            attributions, delta = cond.attribute(
                test_input,
                baselines=baselines,
                target=0,
                n_steps=500,
                method="gausslegendre",
                additional_forward_args=additional_args,
                internal_batch_size=internal_batch_size,
                return_convergence_delta=True,
            )
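            # Convergence delta measures completeness: the summed attributions should match F(input) - F(baselines) for the target.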
            delta_condition = all(abs(delta.numpy().flatten()) < 0.01)
            self.assertTrue(
                delta_condition,
                "Sum of attributions {} does not match"
                " the difference of endpoints.".format(delta),
            )

            assertTensorTuplesAlmostEqual(
                self, attributions, expected_conductance, delta=0.1
            )
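Example #3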
 def test_multiple_tensors_compare_with_exp_wo_mult_by_inputs(self) -> None:
     net = BasicModel_MultiLayer(multi_input_module=True)
     inp = torch.tensor([[0.0, 100.0, 0.0]])
     base = torch.tensor([[0.0, 0.0, 0.0]])
     target_layer = net.multi_relu
     layer_ig = LayerIntegratedGradients(net, target_layer)
     layer_ig_wo_mult_by_inputs = LayerIntegratedGradients(
         net, target_layer, multiply_by_inputs=False
     )
     layer_act = LayerActivation(net, target_layer)
     attributions = layer_ig.attribute(inp, target=0)
     attributions_wo_mult_by_inputs = layer_ig_wo_mult_by_inputs.attribute(
         inp, target=0
     )
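     # Scaling the unmultiplied attributions by (layer activation at input minus at baseline) should recover the default layer IG result.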
     inp_minus_baseline_activ = tuple(
         inp_act - base_act
         for inp_act, base_act in zip(
             layer_act.attribute(inp), layer_act.attribute(base)
         )
     )
     assertTensorTuplesAlmostEqual(
         self,
         tuple(
             attr_wo_mult * inp_min_base
             for attr_wo_mult, inp_min_base in zip(
                 attributions_wo_mult_by_inputs, inp_minus_baseline_activ
             )
         ),
         attributions,
     )
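Example #4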
 def _ig_input_test_assert(
     self,
     model: Module,
     target_layer: Module,
     test_input: TensorOrTupleOfTensorsGeneric,
     test_neuron: Union[int, Tuple[Union[int, slice], ...], Callable],
     expected_input_ig: Union[List[float], Tuple[List[List[float]], ...]],
     additional_input: Any = None,
     multiply_by_inputs: bool = True,
 ) -> None:
     for internal_batch_size in [None, 5, 20]:
         grad = NeuronIntegratedGradients(
             model, target_layer, multiply_by_inputs=multiply_by_inputs)
          self.assertEqual(grad.multiplies_by_inputs, multiply_by_inputs)
         attributions = grad.attribute(
             test_input,
             test_neuron,
             n_steps=200,
             method="gausslegendre",
             additional_forward_args=additional_input,
             internal_batch_size=internal_batch_size,
         )
         assertTensorTuplesAlmostEqual(self,
                                       attributions,
                                       expected_input_ig,
                                       delta=0.1)
Example #5
 def test_neuron_index_deprecated_warning(self) -> None:
     net = BasicModel_MultiLayer()
     grad = NeuronGradient(net, net.linear2)
     inp = torch.tensor([[0.0, 100.0, 0.0]], requires_grad=True)
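     # The deprecated neuron_index keyword should raise a DeprecationWarning but still compute attributions.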
     with self.assertWarns(DeprecationWarning):
         attributions = grad.attribute(
             inp,
             neuron_index=(0, ),
         )
     assertTensorTuplesAlmostEqual(self, attributions, [4.0, 4.0, 4.0])
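For comparison, a minimal sketch of the same check written against the current neuron_selector argument; the keyword name follows the selector used in Example #15 and the expected values are those asserted above, so this is an illustration rather than a test from the suite.

 def test_neuron_selector_equivalent(self) -> None:
     net = BasicModel_MultiLayer()
     grad = NeuronGradient(net, net.linear2)
     inp = torch.tensor([[0.0, 100.0, 0.0]], requires_grad=True)
     # neuron_selector supersedes the deprecated neuron_index keyword
     attributions = grad.attribute(inp, neuron_selector=(0,))
     assertTensorTuplesAlmostEqual(self, attributions, [4.0, 4.0, 4.0])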
Example #6
 def test_saliency_test_basic_multivar_sg_n_samples_batch_size_3(self) -> None:
     attributions_batch_size = self._saliency_base_assert(
         *_get_multiargs_basic_config_large(),
         nt_type="smoothgrad_sq",
         n_samples_batch_size=3,
     )
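     # The same smoothgrad_sq run without n_samples_batch_size should produce identical attributions.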
     attributions = self._saliency_base_assert(
         *_get_multiargs_basic_config_large(),
         nt_type="smoothgrad_sq",
     )
     assertTensorTuplesAlmostEqual(self, attributions_batch_size, attributions)
Example #7
    def _kernel_shap_test_assert(
        self,
        model: Callable,
        test_input: TensorOrTupleOfTensorsGeneric,
        expected_attr,
        feature_mask: Union[None, TensorOrTupleOfTensorsGeneric] = None,
        additional_input: Any = None,
        perturbations_per_eval: Tuple[int, ...] = (1, ),
        baselines: BaselineType = None,
        target: Union[None, int] = 0,
        n_samples: int = 100,
        delta: float = 1.0,
        expected_coefs: Union[None, List[float], List[List[float]]] = None,
        show_progress: bool = False,
    ) -> None:
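        # KernelShap attributions should be invariant to the perturbations_per_eval batch size.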
        for batch_size in perturbations_per_eval:
            kernel_shap = KernelShap(model)
            attributions = kernel_shap.attribute(
                test_input,
                target=target,
                feature_mask=feature_mask,
                additional_forward_args=additional_input,
                baselines=baselines,
                perturbations_per_eval=batch_size,
                n_samples=n_samples,
                show_progress=show_progress,
            )

            assertTensorTuplesAlmostEqual(self,
                                          attributions,
                                          expected_attr,
                                          delta=delta,
                                          mode="max")

            if expected_coefs is not None:
                # Test with return_input_shape = False
                attributions = kernel_shap.attribute(
                    test_input,
                    target=target,
                    feature_mask=feature_mask,
                    additional_forward_args=additional_input,
                    baselines=baselines,
                    perturbations_per_eval=batch_size,
                    n_samples=n_samples,
                    return_input_shape=False,
                    show_progress=show_progress,
                )
                assertTensorAlmostEqual(self,
                                        attributions,
                                        expected_coefs,
                                        delta=delta,
                                        mode="max")
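Example #8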
 def _assert_compare_with_expected(
     self,
     model: Module,
     target_layer: Module,
     test_input: Union[Tensor, Tuple[Tensor, ...]],
     expected_ig: Tuple[List[List[float]], ...],
     additional_input: Any = None,
 ):
     layer_ig = LayerIntegratedGradients(model, target_layer)
     attributions = layer_ig.attribute(
         test_input, target=0, additional_forward_args=additional_input
     )
     assertTensorTuplesAlmostEqual(self, attributions, expected_ig, delta=0.01)
Example #9
 def _shapley_test_assert(
     self,
     model: Callable,
     test_input: TensorOrTupleOfTensorsGeneric,
     expected_attr,
     feature_mask: Union[None, TensorOrTupleOfTensorsGeneric] = None,
     additional_input: Any = None,
     perturbations_per_eval: Tuple[int, ...] = (1, ),
     baselines: BaselineType = None,
     target: Union[None, int] = 0,
     n_samples: int = 100,
     delta: float = 1.0,
     test_true_shapley: bool = True,
     show_progress: bool = False,
 ) -> None:
     for batch_size in perturbations_per_eval:
         shapley_samp = ShapleyValueSampling(model)
         attributions = shapley_samp.attribute(
             test_input,
             target=target,
             feature_mask=feature_mask,
             additional_forward_args=additional_input,
             baselines=baselines,
             perturbations_per_eval=batch_size,
             n_samples=n_samples,
             show_progress=show_progress,
         )
         assertTensorTuplesAlmostEqual(self,
                                       attributions,
                                       expected_attr,
                                       delta=delta,
                                       mode="max")
         if test_true_shapley:
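             # Exact Shapley values should agree with the sampled approximation within a tighter tolerance.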
             shapley_val = ShapleyValues(model)
             attributions = shapley_val.attribute(
                 test_input,
                 target=target,
                 feature_mask=feature_mask,
                 additional_forward_args=additional_input,
                 baselines=baselines,
                 perturbations_per_eval=batch_size,
                 show_progress=show_progress,
             )
             assertTensorTuplesAlmostEqual(self,
                                           attributions,
                                           expected_attr,
                                           mode="max",
                                           delta=0.001)
Example #10
    def test_multiple_layers_multiple_inputs_shared_input(self) -> None:
        input1 = torch.randn(5, 3)
        input2 = torch.randn(5, 3)
        input3 = torch.randn(5, 3)
        inputs = (input1, input2, input3)
        baseline = tuple(torch.zeros_like(inp) for inp in inputs)

        net = BasicModel_MultiLayer_TrueMultiInput()

        lig = LayerIntegratedGradients(net, layer=[net.m1, net.m234])
        ig = IntegratedGradients(net)

        # test layer inputs
        attribs_inputs = lig.attribute(inputs,
                                       baseline,
                                       target=0,
                                       attribute_to_layer_input=True)
        attribs_inputs_regular_ig = ig.attribute(inputs, baseline, target=0)

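        # With two target layers the result is a list: a Tensor for m1 (single input) and a 3-tuple for m234.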
        self.assertIsInstance(attribs_inputs, list)
        self.assertEqual(len(attribs_inputs), 2)
        self.assertIsInstance(attribs_inputs[0], Tensor)
        self.assertIsInstance(attribs_inputs[1], tuple)
        self.assertEqual(len(attribs_inputs[1]), 3)

        assertTensorTuplesAlmostEqual(
            self,
            # last input for second layer is first input =>
            # add the attributions
            (attribs_inputs[0] + attribs_inputs[1][-1],)
            + attribs_inputs[1][0:-1],
            attribs_inputs_regular_ig,
            delta=1e-5,
        )

        # test layer outputs
        attribs = lig.attribute(inputs, baseline, target=0)
        ig = IntegratedGradients(lambda x, y: x + y)
        attribs_ig = ig.attribute(
            (net.m1(input1), net.m234(input2, input3, input1, 1)),
            (net.m1(baseline[0]),
             net.m234(baseline[1], baseline[2], baseline[1], 1)),
            target=0,
        )

        assertTensorTuplesAlmostEqual(self, attribs, attribs_ig, delta=1e-5)
Example #11
    def test_relu_layer_deeplift_multiple_output(self) -> None:
        model = BasicModel_MultiLayer(multi_input_module=True)
        inputs, baselines = _create_inps_and_base_for_deeplift_neuron_layer_testing()

        layer_dl = LayerDeepLift(model, model.multi_relu)
        attributions, delta = layer_dl.attribute(
            inputs[0],
            baselines[0],
            target=0,
            attribute_to_layer_input=False,
            return_convergence_delta=True,
        )
        assertTensorTuplesAlmostEqual(
            self, attributions,
            ([[0.0, -1.0, -1.0, -1.0]], [[0.0, -1.0, -1.0, -1.0]]))
        assert_delta(self, delta)
Example #12
    def test_multiple_layers_multiple_input_outputs(self) -> None:
        # test with multiple layers, where one layer accepts multiple inputs
        input1 = torch.randn(5, 3)
        input2 = torch.randn(5, 3)
        input3 = torch.randn(5, 3)
        input4 = torch.randn(5, 3)
        inputs = (input1, input2, input3, input4)
        baseline = tuple(torch.zeros_like(inp) for inp in inputs)

        net = BasicModel_MultiLayer_TrueMultiInput()

        lig = LayerIntegratedGradients(net, layer=[net.m1, net.m234])
        ig = IntegratedGradients(net)

        # test layer inputs
        attribs_inputs = lig.attribute(inputs,
                                       baseline,
                                       target=0,
                                       attribute_to_layer_input=True)
        attribs_inputs_regular_ig = ig.attribute(inputs, baseline, target=0)

        self.assertIsInstance(attribs_inputs, list)
        self.assertEqual(len(attribs_inputs), 2)
        self.assertIsInstance(attribs_inputs[0], Tensor)
        self.assertIsInstance(attribs_inputs[1], tuple)
        self.assertEqual(len(attribs_inputs[1]), 3)

        assertTensorTuplesAlmostEqual(
            self,
            (attribs_inputs[0], ) + attribs_inputs[1],
            attribs_inputs_regular_ig,
            delta=1e-7,
        )

        # test layer outputs
        attribs = lig.attribute(inputs, baseline, target=0)
        ig = IntegratedGradients(lambda x, y: x + y)
        attribs_ig = ig.attribute(
            (net.m1(input1), net.m234(input2, input3, input4, 1)),
            (net.m1(baseline[0]),
             net.m234(baseline[1], baseline[2], baseline[3], 1)),
            target=0,
        )

        assertTensorTuplesAlmostEqual(self, attribs, attribs_ig, delta=1e-7)
Example #13
    def layer_method_with_input_layer_patches(
        self,
        layer_method_class: Callable,
        equiv_method_class: Callable,
        multi_layer: bool,
    ) -> None:
        model = (
            BasicModel_MultiLayer_TrueMultiInput() if multi_layer else BasicModel()
        )

        input_names = ["x1", "x2", "x3", "x4"] if multi_layer else ["input"]
        model = ModelInputWrapper(model)
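        # ModelInputWrapper exposes each named input through an identity layer (input_maps), so layer methods can attribute directly to the inputs.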

        layers = [model.input_maps[inp] for inp in input_names]
        layer_method = layer_method_class(
            model, layer=layers if multi_layer else layers[0])
        equivalent_method = equiv_method_class(model)

        inputs = tuple(torch.rand(5, 3) for _ in input_names)
        baseline = tuple(torch.zeros(5, 3) for _ in input_names)

        args = inspect.getfullargspec(
            equivalent_method.attribute.__wrapped__).args

        args_to_use = [inputs]
        if "baselines" in args:
            args_to_use += [baseline]

        a1 = layer_method.attribute(*args_to_use, target=0)
        a2 = layer_method.attribute(*args_to_use,
                                    target=0,
                                    attribute_to_layer_input=True)

        real_attributions = equivalent_method.attribute(*args_to_use, target=0)

        if not isinstance(a1, tuple):
            a1 = (a1, )
            a2 = (a2, )

        if not isinstance(real_attributions, tuple):
            real_attributions = (real_attributions, )

        assertTensorTuplesAlmostEqual(self, a1, a2)
        assertTensorTuplesAlmostEqual(self, a1, real_attributions)
Example #14
 def _layer_activation_test_assert(
     self,
     model: Module,
     target_layer: Module,
     test_input: Union[Tensor, Tuple[Tensor, ...]],
     expected_activation: Union[List[float], Tuple[List[float], ...]],
     additional_input: Any = None,
     attribute_to_layer_input: bool = False,
 ):
     layer_act = LayerActivation(model, target_layer)
     self.assertTrue(layer_act.multiplies_by_inputs)
     attributions = layer_act.attribute(
         test_input,
         additional_forward_args=additional_input,
         attribute_to_layer_input=attribute_to_layer_input,
     )
     assertTensorTuplesAlmostEqual(self,
                                   attributions,
                                   expected_activation,
                                   delta=0.01)
Example #15
 def _gradient_input_test_assert(
     self,
     model: Module,
     target_layer: Module,
     test_input: TensorOrTupleOfTensorsGeneric,
     test_neuron_selector: Union[int, Tuple[Union[int, slice], ...],
                                 Callable],
     expected_input_gradient: Union[List[float], Tuple[List[float], ...]],
     additional_input: Any = None,
     attribute_to_neuron_input: bool = False,
 ) -> None:
     grad = NeuronGradient(model, target_layer)
     attributions = grad.attribute(
         test_input,
         test_neuron_selector,
         additional_forward_args=additional_input,
         attribute_to_neuron_input=attribute_to_neuron_input,
     )
     assertTensorTuplesAlmostEqual(self, attributions,
                                   expected_input_gradient)
Example #16
 def _layer_activation_test_assert(
     self,
     model: Module,
     target_layer: ModuleOrModuleList,
     test_input: Union[Tensor, Tuple[Tensor, ...]],
     expected_activation: Union[List, Tuple[List[float], ...]],
     additional_input: Any = None,
 ) -> None:
     layer_act = LayerGradientXActivation(model, target_layer)
     self.assertTrue(layer_act.multiplies_by_inputs)
     attributions = layer_act.attribute(
         test_input, target=0, additional_forward_args=additional_input)
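     # target_layer may be a single Module or a list of Modules; in the latter case attributions are indexed per layer.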
     if isinstance(target_layer, Module):
         assertTensorTuplesAlmostEqual(self,
                                       attributions,
                                       expected_activation,
                                       delta=0.01)
     else:
         for i in range(len(target_layer)):
             assertTensorTuplesAlmostEqual(self,
                                           attributions[i],
                                           expected_activation[i],
                                           delta=0.01)
     # test Layer Gradient without multiplying with activations
     layer_grads = LayerGradientXActivation(model,
                                            target_layer,
                                            multiply_by_inputs=False)
     layer_act = LayerActivation(model, target_layer)
     self.assertFalse(layer_grads.multiplies_by_inputs)
     grads = layer_grads.attribute(test_input,
                                   target=0,
                                   additional_forward_args=additional_input)
     acts = layer_act.attribute(test_input,
                                additional_forward_args=additional_input)
     if isinstance(target_layer, Module):
         assertTensorTuplesAlmostEqual(
             self,
             attributions,
             tuple(act * grad for act, grad in zip(acts, grads)),
             delta=0.01,
         )
     else:
         for i in range(len(target_layer)):
             assertTensorTuplesAlmostEqual(
                 self,
                 attributions[i],
                 tuple(act * grad for act, grad in zip(acts[i], grads[i])),
                 delta=0.01,
             )
Example #17
 def _assert_attributions(
     self,
     model: Module,
     layer: Module,
     inputs: TensorOrTupleOfTensorsGeneric,
     baselines: Union[TensorOrTupleOfTensorsGeneric, Callable],
     target: TargetType,
     expected: Union[Tensor, Tuple[Tensor, ...], List[float],
                     List[List[float]], Tuple[List[float], ...],
                     Tuple[List[List[float]], ...], ],
     expected_delta: Union[None, Tensor] = None,
     n_samples: int = 5,
     attribute_to_layer_input: bool = False,
     add_args: Any = None,
 ) -> None:
     lgs = LayerGradientShap(model, layer)
     attrs, delta = lgs.attribute(
         inputs,
         baselines,
         target=target,
         additional_forward_args=add_args,
         n_samples=n_samples,
         stdevs=0.0009,
         return_convergence_delta=True,
         attribute_to_layer_input=attribute_to_layer_input,
     )
     assertTensorTuplesAlmostEqual(self, attrs, expected, delta=0.005)
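     # Without expected deltas, only run the generic attribution-delta sanity check; otherwise compare deltas element-wise.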
     if expected_delta is None:
         _assert_attribution_delta(self, inputs, attrs, n_samples, delta,
                                   True)
     else:
         for delta_i, expected_delta_i in zip(delta, expected_delta):
             assertTensorAlmostEqual(self,
                                     delta_i,
                                     expected_delta_i,
                                     delta=0.01)
Example #18
 def _grad_cam_test_assert(
     self,
     model: Module,
     target_layer: Module,
     test_input: Union[Tensor, Tuple[Tensor, ...]],
     expected_activation: Union[List[float], Tuple[List[float], ...],
                                List[List[float]], Tuple[Tensor, ...]],
     additional_input: Any = None,
     attribute_to_layer_input: bool = False,
     relu_attributions: bool = False,
 ):
     layer_gc = LayerGradCam(model, target_layer)
     self.assertFalse(layer_gc.multiplies_by_inputs)
     attributions = layer_gc.attribute(
         test_input,
         target=0,
         additional_forward_args=additional_input,
         attribute_to_layer_input=attribute_to_layer_input,
         relu_attributions=relu_attributions,
     )
     assertTensorTuplesAlmostEqual(self,
                                   attributions,
                                   expected_activation,
                                   delta=0.01)
Example #19
    def _lime_test_assert(
        self,
        model: Callable,
        test_input: TensorOrTupleOfTensorsGeneric,
        expected_attr,
        expected_coefs_only=None,
        feature_mask: Union[None, TensorOrTupleOfTensorsGeneric] = None,
        additional_input: Any = None,
        perturbations_per_eval: Tuple[int, ...] = (1, ),
        baselines: BaselineType = None,
        target: Union[None, int] = 0,
        n_perturb_samples: int = 100,
        alpha: float = 1.0,
        delta: float = 1.0,
        batch_attr: bool = False,
    ) -> None:
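        # As with KernelShap above, results should not depend on the perturbations_per_eval batch size.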
        for batch_size in perturbations_per_eval:
            lime = Lime(
                model,
                similarity_func=get_exp_kernel_similarity_function(
                    "cosine", 10.0),
            )
            attributions = lime.attribute(
                test_input,
                target=target,
                feature_mask=feature_mask,
                additional_forward_args=additional_input,
                baselines=baselines,
                perturbations_per_eval=batch_size,
                n_perturb_samples=n_perturb_samples,
                alpha=alpha,
            )
            assertTensorTuplesAlmostEqual(self,
                                          attributions,
                                          expected_attr,
                                          delta=delta,
                                          mode="max")
            if expected_coefs_only is not None:
                # Test with return_input_shape = False
                attributions = lime.attribute(
                    test_input,
                    target=target,
                    feature_mask=feature_mask,
                    additional_forward_args=additional_input,
                    baselines=baselines,
                    perturbations_per_eval=batch_size,
                    n_perturb_samples=n_perturb_samples,
                    alpha=alpha,
                    return_input_shape=False,
                )
                assertTensorAlmostEqual(self,
                                        attributions,
                                        expected_coefs_only,
                                        delta=delta,
                                        mode="max")

                lime_alt = LimeBase(
                    model,
                    lasso_interpretable_model_trainer,
                    get_exp_kernel_similarity_function("euclidean", 1000.0),
                    alt_perturb_func,
                    False,
                    None,
                    alt_to_interp_rep,
                )

                # Test with equivalent sampling in original input space
                formatted_inputs, baselines = _format_input_baseline(
                    test_input, baselines)
                if feature_mask is None:
                    (
                        formatted_feature_mask,
                        num_interp_features,
                    ) = _construct_default_feature_mask(formatted_inputs)
                else:
                    formatted_feature_mask = _format_input(feature_mask)
                    num_interp_features = int(
                        max(
                            torch.max(single_inp).item()
                            for single_inp in feature_mask) + 1)
                if batch_attr:
                    attributions = lime_alt.attribute(
                        test_input,
                        target=target,
                        feature_mask=formatted_feature_mask if isinstance(
                            test_input, tuple) else formatted_feature_mask[0],
                        additional_forward_args=additional_input,
                        baselines=baselines,
                        perturbations_per_eval=batch_size,
                        n_perturb_samples=n_perturb_samples,
                        alpha=alpha,
                        num_interp_features=num_interp_features,
                    )
                    assertTensorAlmostEqual(self,
                                            attributions,
                                            expected_coefs_only,
                                            delta=delta,
                                            mode="max")
                    return

                bsz = formatted_inputs[0].shape[0]
                for (
                        curr_inps,
                        curr_target,
                        curr_additional_args,
                        curr_baselines,
                        curr_feature_mask,
                        expected_coef_single,
                ) in _batch_example_iterator(
                        bsz,
                        test_input,
                        target,
                        additional_input,
                        baselines
                        if isinstance(test_input, tuple) else baselines[0],
                        formatted_feature_mask if isinstance(
                            test_input, tuple) else formatted_feature_mask[0],
                        expected_coefs_only,
                ):
                    attributions = lime_alt.attribute(
                        curr_inps,
                        target=curr_target,
                        feature_mask=curr_feature_mask,
                        additional_forward_args=curr_additional_args,
                        baselines=curr_baselines,
                        perturbations_per_eval=batch_size,
                        n_perturb_samples=n_perturb_samples,
                        alpha=alpha,
                        num_interp_features=num_interp_features,
                    )
                    assertTensorAlmostEqual(
                        self,
                        attributions,
                        expected_coef_single,
                        delta=delta,
                        mode="max",
                    )
Example #20
        def jit_test_assert(self) -> None:
            model_1 = model
            attr_args = args
            if (mode is JITCompareMode.data_parallel_jit_trace
                    or mode is JITCompareMode.data_parallel_jit_script):
                if not torch.cuda.is_available() or torch.cuda.device_count() == 0:
                    raise unittest.SkipTest(
                        "Skipping GPU test since CUDA not available.")
                # Construct cuda_args, moving all tensor inputs in args to CUDA device
                cuda_args = {}
                for key in args:
                    if isinstance(args[key], Tensor):
                        cuda_args[key] = args[key].cuda()
                    elif isinstance(args[key], tuple):
                        cuda_args[key] = tuple(
                            elem.cuda() if isinstance(elem, Tensor) else elem
                            for elem in args[key])
                    else:
                        cuda_args[key] = args[key]
                attr_args = cuda_args
                model_1 = model_1.cuda()

            # Initialize models based on JITCompareMode
            if (mode is JITCompareMode.cpu_jit_script
                    or mode is JITCompareMode.data_parallel_jit_script):
                model_2 = torch.jit.script(model_1)  # type: ignore
            elif (mode is JITCompareMode.cpu_jit_trace
                  or mode is JITCompareMode.data_parallel_jit_trace):
                all_inps = _format_input(args["inputs"]) + (
                    _format_additional_forward_args(
                        args["additional_forward_args"])
                    if "additional_forward_args" in args and
                    args["additional_forward_args"] is not None else tuple())
                model_2 = torch.jit.trace(model_1, all_inps)  # type: ignore
            else:
                raise AssertionError("JIT compare mode type is not valid.")

            attr_method_1 = algorithm(model_1)
            attr_method_2 = algorithm(model_2)

            if noise_tunnel:
                attr_method_1 = NoiseTunnel(attr_method_1)
                attr_method_2 = NoiseTunnel(attr_method_2)
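            # Methods with a convergence delta are compared on both attributions and deltas; others only on attributions.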
            if attr_method_1.has_convergence_delta():
                attributions_1, delta_1 = attr_method_1.attribute(
                    return_convergence_delta=True, **attr_args)
                self.setUp()
                attributions_2, delta_2 = attr_method_2.attribute(
                    return_convergence_delta=True, **attr_args)
                assertTensorTuplesAlmostEqual(self,
                                              attributions_1,
                                              attributions_2,
                                              mode="max")
                assertTensorTuplesAlmostEqual(self,
                                              delta_1,
                                              delta_2,
                                              mode="max")
            else:
                attributions_1 = attr_method_1.attribute(**attr_args)
                self.setUp()
                attributions_2 = attr_method_2.attribute(**attr_args)
                assertTensorTuplesAlmostEqual(self,
                                              attributions_1,
                                              attributions_2,
                                              mode="max")
Example #21
        def target_test_assert(self) -> None:
            attr_method: Attribution
            if target_layer:
                internal_algorithm = cast(Type[InternalAttribution], algorithm)
                attr_method = internal_algorithm(model, target_layer)
            else:
                attr_method = algorithm(model)

            if noise_tunnel:
                attr_method = NoiseTunnel(attr_method)
            attributions_orig = attr_method.attribute(**args)
            self.setUp()
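            # Re-attribute each example individually and compare against the corresponding slice of the batched attributions.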
            for i in range(num_examples):
                args["target"] = (original_targets[i] if len(original_targets)
                                  == num_examples else original_targets)
                args["inputs"] = (original_inputs[i:i + 1] if isinstance(
                    original_inputs, Tensor) else tuple(
                        original_inp[i:i + 1]
                        for original_inp in original_inputs))
                if original_additional_forward_args is not None:
                    args["additional_forward_args"] = tuple(
                        single_add_arg[i:i + 1] if isinstance(
                            single_add_arg, Tensor) else single_add_arg
                        for single_add_arg in original_additional_forward_args)
                if replace_baselines:
                    if isinstance(original_inputs, Tensor):
                        args["baselines"] = original_baselines[i:i + 1]
                    elif isinstance(original_baselines, tuple):
                        args["baselines"] = tuple(
                            single_baseline[i:i + 1] if isinstance(
                                single_baseline, Tensor) else single_baseline
                            for single_baseline in original_baselines)
                # Since Lime methods compute attributions for a batch
                # sequentially, random seed should not be reset after
                # each example after the first.
                if not issubclass(algorithm, Lime):
                    self.setUp()
                single_attr = attr_method.attribute(**args)
                current_orig_attributions = (
                    attributions_orig[i:i + 1] if isinstance(
                        attributions_orig, Tensor) else tuple(
                            single_attrib[i:i + 1]
                            for single_attrib in attributions_orig))
                assertTensorTuplesAlmostEqual(
                    self,
                    current_orig_attributions,
                    single_attr,
                    delta=target_delta,
                    mode="max",
                )
                if (not issubclass(algorithm, Lime)
                        and len(original_targets) == num_examples):
                    # If original_targets contained multiple elements, then
                    # we also compare with setting targets to a list with
                    # a single element.
                    args["target"] = original_targets[i:i + 1]
                    self.setUp()
                    single_attr_target_list = attr_method.attribute(**args)
                    assertTensorTuplesAlmostEqual(
                        self,
                        current_orig_attributions,
                        single_attr_target_list,
                        delta=target_delta,
                        mode="max",
                    )
Example #22
 def test_saliency_grad_unchanged(self) -> None:
     model, inp, grads, add_args = _get_basic_config()
     inp.grad = torch.randn_like(inp)
     grad = inp.grad.detach().clone()
     self._saliency_base_assert(model, inp, grads, add_args)
     assertTensorTuplesAlmostEqual(self, inp.grad, grad, delta=0.0)
Example #23
        def data_parallel_test_assert(self) -> None:
            # Construct cuda_args, moving all tensor inputs in args to CUDA device
            cuda_args = {}
            for key in args:
                if isinstance(args[key], Tensor):
                    cuda_args[key] = args[key].cuda()
                elif isinstance(args[key], tuple):
                    cuda_args[key] = tuple(
                        elem.cuda() if isinstance(elem, Tensor) else elem
                        for elem in args[key])
                else:
                    cuda_args[key] = args[key]

            alt_device_ids = None
            cuda_model = copy.deepcopy(model).cuda()
            # Initialize models based on DataParallelCompareMode
            if mode is DataParallelCompareMode.cpu_cuda:
                model_1, model_2 = model, cuda_model
                args_1, args_2 = args, cuda_args
            elif mode is DataParallelCompareMode.data_parallel_default:
                model_1, model_2 = (
                    cuda_model,
                    torch.nn.parallel.DataParallel(cuda_model),
                )
                args_1, args_2 = cuda_args, cuda_args
            elif mode is DataParallelCompareMode.data_parallel_alt_dev_ids:
                alt_device_ids = [0] + [
                    x for x in range(torch.cuda.device_count() - 1, 0, -1)
                ]
                model_1, model_2 = (
                    cuda_model,
                    torch.nn.parallel.DataParallel(cuda_model,
                                                   device_ids=alt_device_ids),
                )
                args_1, args_2 = cuda_args, cuda_args
            elif mode is DataParallelCompareMode.dist_data_parallel:

                model_1, model_2 = (
                    cuda_model,
                    torch.nn.parallel.DistributedDataParallel(cuda_model,
                                                              device_ids=[0],
                                                              output_device=0),
                )
                args_1, args_2 = cuda_args, cuda_args
            else:
                raise AssertionError(
                    "DataParallel compare mode type is not valid.")

            attr_method_1: Attribution
            attr_method_2: Attribution
            if target_layer:
                internal_algorithm = cast(Type[InternalAttribution], algorithm)
                attr_method_1 = internal_algorithm(
                    model_1, get_target_layer(model_1, target_layer))
                # cuda_model is used to obtain target_layer since DataParallel
                # adds additional wrapper.
                # model_2 is always either the CUDA model itself or DataParallel
                if alt_device_ids is None:
                    attr_method_2 = internal_algorithm(
                        model_2, get_target_layer(cuda_model, target_layer))
                else:
                    # LayerDeepLift and LayerDeepLiftShap do not take device ids
                    # as a parameter, since they must always have the DataParallel
                    # model object directly.
                    # Some neuron methods and GuidedGradCAM also require the
                    # model and cannot take a forward function.
                    if issubclass(
                            internal_algorithm,
                        (
                            LayerDeepLift,
                            LayerDeepLiftShap,
                            NeuronDeepLift,
                            NeuronDeepLiftShap,
                            NeuronDeconvolution,
                            NeuronGuidedBackprop,
                            GuidedGradCam,
                        ),
                    ):
                        attr_method_2 = internal_algorithm(
                            model_2, get_target_layer(cuda_model,
                                                      target_layer))
                    else:
                        attr_method_2 = internal_algorithm(
                            model_2.forward,
                            get_target_layer(cuda_model, target_layer),
                            device_ids=alt_device_ids,
                        )
            else:
                attr_method_1 = algorithm(model_1)
                attr_method_2 = algorithm(model_2)

            if noise_tunnel:
                attr_method_1 = NoiseTunnel(attr_method_1)
                attr_method_2 = NoiseTunnel(attr_method_2)
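            # Attributions (and convergence deltas, when available) from both configurations must agree within dp_delta.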
            if attr_method_1.has_convergence_delta():
                attributions_1, delta_1 = attr_method_1.attribute(
                    return_convergence_delta=True, **args_1)
                self.setUp()
                attributions_2, delta_2 = attr_method_2.attribute(
                    return_convergence_delta=True, **args_2)
                if isinstance(attributions_1, list):
                    for i in range(len(attributions_1)):
                        assertTensorTuplesAlmostEqual(
                            self,
                            attributions_1[i],
                            attributions_2[i],
                            mode="max",
                            delta=dp_delta,
                        )
                else:
                    assertTensorTuplesAlmostEqual(self,
                                                  attributions_1,
                                                  attributions_2,
                                                  mode="max",
                                                  delta=dp_delta)
                assertTensorTuplesAlmostEqual(self,
                                              delta_1,
                                              delta_2,
                                              mode="max",
                                              delta=dp_delta)
            else:
                attributions_1 = attr_method_1.attribute(**args_1)
                self.setUp()
                attributions_2 = attr_method_2.attribute(**args_2)
                if isinstance(attributions_1, list):
                    for i in range(len(attributions_1)):
                        assertTensorTuplesAlmostEqual(
                            self,
                            attributions_1[i],
                            attributions_2[i],
                            mode="max",
                            delta=dp_delta,
                        )
                else:
                    assertTensorTuplesAlmostEqual(self,
                                                  attributions_1,
                                                  attributions_2,
                                                  mode="max",
                                                  delta=dp_delta)