Example #1
    def test_simple_input_conv_no_grad(self) -> None:
        net = BasicModel_ConvNet_One_Conv()

        # This disables requires_grad. Some models explicitly
        # do that before interpreting the model.
        for param in net.parameters():
            param.requires_grad = False

        inp = torch.arange(16).view(1, 1, 4, 4).float()
        self._grad_cam_test_assert(net, net.conv1, inp,
                                   [[11.25, 13.5], [20.25, 22.5]])
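
For context, a minimal sketch of the direct LayerGradCam call this test exercises (target=0 and the use of Captum's public API are assumptions; the hidden helper _grad_cam_test_assert may differ):

import torch
from captum.attr import LayerGradCam

# net and inp as in the test above; BasicModel_ConvNet_One_Conv comes from
# Captum's test helpers (the exact import path is an assumption).
net = BasicModel_ConvNet_One_Conv()
inp = torch.arange(16).view(1, 1, 4, 4).float()

layer_gc = LayerGradCam(net, net.conv1)
# The attribution has the spatial shape of net.conv1's output; target=0 is
# an assumed choice mirroring what the test helper presumably asserts against.
attr = layer_gc.attribute(inp, target=0)

Freezing the parameters, as the test does, does not break this call: LayerGradCam needs gradients with respect to the layer's activations, not the model's parameters.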
Example #2
    def test_improper_stride(self) -> None:
        net = BasicModel_ConvNet_One_Conv()
        inp = torch.arange(16, dtype=torch.float).view(1, 1, 4, 4)
        occ = Occlusion(net)
        # Check error when too few stride dimensions
        with self.assertRaises(AssertionError):
            _ = occ.attribute(inp,
                              sliding_window_shapes=(1, 2, 2),
                              strides=(1, 2),
                              target=0)

        # Check error when too many stride dimensions
        with self.assertRaises(AssertionError):
            _ = occ.attribute(
                (inp, inp),
                sliding_window_shapes=((1, 1, 2), (1, 2, 2)),
                strides=((1, 1, 2), (2, 1, 2, 2)),
                target=0,
            )

        # Check error when too many stride tuples
        with self.assertRaises(AssertionError):
            _ = occ.attribute(
                (inp, inp),
                sliding_window_shapes=((1, 1, 2), (1, 2, 2)),
                strides=((1, 1, 2), (1, 2, 2), (1, 2, 2)),
                target=0,
            )
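
For contrast with the failing calls above, a well-formed single-input call would look roughly like this (target=0 is an assumed choice):

import torch
from captum.attr import Occlusion

net = BasicModel_ConvNet_One_Conv()  # from Captum's test helpers, as above
inp = torch.arange(16, dtype=torch.float).view(1, 1, 4, 4)

occ = Occlusion(net)
# Both sliding_window_shapes and strides cover every non-batch dimension
# of the (1, 4, 4) input, so the shape assertions pass.
attr = occ.attribute(
    inp,
    sliding_window_shapes=(1, 2, 2),
    strides=(1, 2, 2),
    target=0,
)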
Example #3
 def test_simple_multi_input_conv(self) -> None:
     net = BasicModel_ConvNet_One_Conv()
     inp = torch.arange(16, dtype=torch.float).view(1, 1, 4, 4)
     inp2 = torch.ones((1, 1, 4, 4))
     self._occlusion_test_assert(
         net,
         (inp, inp2),
         (67 * torch.ones_like(inp), 13 * torch.ones_like(inp2)),
         perturbations_per_eval=(1, 2, 4, 8, 12, 16),
         sliding_window_shapes=((1, 4, 4), (1, 4, 4)),
     )
     self._occlusion_test_assert(
         net,
         (inp, inp2),
         (
             [
                 [17.0, 17.0, 17.0, 17.0],
                 [17.0, 17.0, 17.0, 17.0],
                 [64.0, 65.5, 65.5, 67.0],
                 [64.0, 65.5, 65.5, 67.0],
             ],
             [
                 [3.0, 3.0, 3.0, 3.0],
                 [3.0, 3.0, 3.0, 3.0],
                 [3.0, 3.0, 3.0, 3.0],
                 [0.0, 0.0, 0.0, 0.0],
             ],
         ),
         perturbations_per_eval=(1, 3, 7, 14),
         sliding_window_shapes=((1, 2, 3), (1, 1, 2)),
         strides=((1, 2, 1), (1, 1, 2)),
     )
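
The helper presumably wraps a direct multi-input Occlusion call along these lines (the target and perturbations_per_eval values are assumptions):

import torch
from captum.attr import Occlusion

net = BasicModel_ConvNet_One_Conv()  # from Captum's test helpers, as above
inp = torch.arange(16, dtype=torch.float).view(1, 1, 4, 4)
inp2 = torch.ones((1, 1, 4, 4))

occ = Occlusion(net)
# One window shape (and optionally one stride tuple) per input tensor;
# the result is a tuple with one attribution tensor per input.
attr1, attr2 = occ.attribute(
    (inp, inp2),
    sliding_window_shapes=((1, 2, 3), (1, 1, 2)),
    strides=((1, 2, 1), (1, 1, 2)),
    target=0,
    perturbations_per_eval=3,
)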
Example #4
 def test_improper_method_multi_input_conv(self) -> None:
     net = BasicModel_ConvNet_One_Conv()
     inp = torch.arange(16, dtype=torch.float).view(1, 1, 4, 4)
     inp2 = torch.ones(1)
     self._guided_grad_cam_test_assert(
         net, net.conv1, (inp, inp2), ([], []), interpolate_mode="made_up_nonlinear"
     )
Example #5
 def test_simple_multi_input_conv(self) -> None:
     ablation_algo = FeatureAblation(BasicModel_ConvNet_One_Conv())
     inp = torch.arange(16, dtype=torch.float).view(1, 1, 4, 4)
     inp2 = torch.ones((1, 1, 4, 4))
     self._ablation_test_assert(
         ablation_algo,
         (inp, inp2),
         (67 * torch.ones_like(inp), 13 * torch.ones_like(inp2)),
         feature_mask=(torch.tensor(0), torch.tensor(1)),
         perturbations_per_eval=(1, 2, 4, 8, 12, 16),
     )
     self._ablation_test_assert(
         ablation_algo,
         (inp, inp2),
         (
             [[[
                 [0.0, 2.0, 4.0, 3.0],
                 [4.0, 9.0, 10.0, 7.0],
                 [4.0, 13.0, 14.0, 11.0],
                 [0.0, 0.0, 0.0, 0.0],
             ]]],
             [[[
                 [1.0, 2.0, 2.0, 1.0],
                 [1.0, 2.0, 2.0, 1.0],
                 [1.0, 2.0, 2.0, 1.0],
                 [0.0, 0.0, 0.0, 0.0],
             ]]],
         ),
         perturbations_per_eval=(1, 3, 7, 14),
     )
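
A direct FeatureAblation call matching the first assertion above might look like this (target=0 is an assumption):

import torch
from captum.attr import FeatureAblation

net = BasicModel_ConvNet_One_Conv()  # from Captum's test helpers, as above
inp = torch.arange(16, dtype=torch.float).view(1, 1, 4, 4)
inp2 = torch.ones((1, 1, 4, 4))

ablation = FeatureAblation(net)
# Scalar masks group each whole tensor into a single feature, so every
# element of an input receives the same attribution value.
attr1, attr2 = ablation.attribute(
    (inp, inp2),
    target=0,
    feature_mask=(torch.tensor(0), torch.tensor(1)),
    perturbations_per_eval=4,
)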
Example #6
    def test_convnet_multi_target_and_default_pert_func(self) -> None:
        r"""
        Similar to the previous example, but here we also test the default
        perturbation function.
        """
        model = BasicModel_ConvNet_One_Conv()
        gbp = GuidedBackprop(model)

        input = torch.stack([torch.arange(1, 17).float()] * 20, dim=0).view(20, 1, 4, 4)

        sens1 = self.sensitivity_max_assert(
            gbp.attribute,
            input,
            torch.zeros(20),
            perturb_func=default_perturb_func,
            target=torch.tensor([1] * 20),
            n_perturb_samples=10,
            max_examples_per_batch=40,
        )

        sens2 = self.sensitivity_max_assert(
            gbp.attribute,
            input,
            torch.zeros(20),
            perturb_func=default_perturb_func,
            target=torch.tensor([1] * 20),
            n_perturb_samples=10,
            max_examples_per_batch=5,
        )
        assertTensorAlmostEqual(self, sens1, sens2)
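
Stripped of the test scaffolding, the metric call being checked is roughly the following (target and batching values are taken from the test; omitting perturb_func falls back to the metric's default perturbation):

import torch
from captum.attr import GuidedBackprop
from captum.metrics import sensitivity_max

model = BasicModel_ConvNet_One_Conv()  # from Captum's test helpers, as above
inputs = torch.stack([torch.arange(1, 17).float()] * 20, dim=0).view(20, 1, 4, 4)

gbp = GuidedBackprop(model)
# Extra keyword arguments (here, target) are forwarded to gbp.attribute.
sens = sensitivity_max(
    gbp.attribute,
    inputs,
    target=torch.tensor([1] * 20),
    n_perturb_samples=10,
    max_examples_per_batch=40,
)

The point of comparing sens1 and sens2 above is that the result should not depend on max_examples_per_batch.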
Example #7
 def test_simple_input_conv(self) -> None:
     net = BasicModel_ConvNet_One_Conv()
     inp = 1.0 * torch.arange(16, dtype=torch.float).view(1, 1, 4, 4)
     ex = [
         [0.0, 0.0, 4.0, 4.0],
         [0.0, 0.0, 12.0, 8.0],
         [28.0, 84.0, 97.5, 65.0],
         [28.0, 56.0, 65.0, 32.5],
     ]
     self._guided_grad_cam_test_assert(net, net.relu1, inp, ex)
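
A minimal sketch of the corresponding direct GuidedGradCam call (target=0 is an assumption):

import torch
from captum.attr import GuidedGradCam

net = BasicModel_ConvNet_One_Conv()  # from Captum's test helpers, as above
inp = 1.0 * torch.arange(16, dtype=torch.float).view(1, 1, 4, 4)

ggc = GuidedGradCam(net, net.relu1)
# GradCAM attributions computed at net.relu1 are upsampled to the input size
# and multiplied element-wise with guided backpropagation attributions.
attr = ggc.attribute(inp, target=0)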
Example #8
 def test_simple_input_conv_without_final_relu(self) -> None:
     net = BasicModel_ConvNet_One_Conv()
     inp = torch.arange(16).view(1, 1, 4, 4).float()
     # Add a negative value to verify that the final ReLU is not applied by default
     inp[0, 0, 1, 1] = -4.0
     inp.requires_grad_()
     self._grad_cam_test_assert(net,
                                net.conv1,
                                inp, (0.5625 * inp, ),
                                attribute_to_layer_input=True)
Example #9
 def test_simple_input_conv_neuron_deconv(self) -> None:
     net = BasicModel_ConvNet_One_Conv()
     inp = 1.0 * torch.arange(16, dtype=torch.float).view(1, 1, 4, 4)
     exp = [
         [2.0, 3.0, 3.0, 1.0],
         [3.0, 5.0, 5.0, 2.0],
         [3.0, 5.0, 5.0, 2.0],
         [1.0, 2.0, 2.0, 1.0],
     ]
     self._neuron_deconv_test_assert(net, net.fc1, (0, ), (inp, ), (exp, ))
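
The neuron-level deconvolution call behind this assertion is roughly as follows (the selector is passed positionally because its keyword name has varied across Captum releases):

import torch
from captum.attr import NeuronDeconvolution

net = BasicModel_ConvNet_One_Conv()  # from Captum's test helpers, as above
inp = 1.0 * torch.arange(16, dtype=torch.float).view(1, 1, 4, 4)

neuron_deconv = NeuronDeconvolution(net, net.fc1)
# (0,) selects output neuron 0 of net.fc1; the attribution is shaped like inp.
attr = neuron_deconv.attribute(inp, (0,))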
Example #10
 def test_simple_input_conv_gb(self) -> None:
     net = BasicModel_ConvNet_One_Conv()
     inp = 1.0 * torch.arange(16, dtype=torch.float).view(1, 1, 4, 4)
     exp = [
         [0.0, 1.0, 1.0, 1.0],
         [1.0, 3.0, 3.0, 2.0],
         [1.0, 3.0, 3.0, 2.0],
         [1.0, 2.0, 2.0, 1.0],
     ]
     self._guided_backprop_test_assert(net, (inp, ), (exp, ))
Example #11
 def test_improper_dims_multi_input_conv(self) -> None:
     net = BasicModel_ConvNet_One_Conv()
     inp = torch.arange(16, dtype=torch.float).view(1, 1, 4, 4)
     inp2 = torch.ones(1)
     ex = [
         [14.5, 29.0, 38.0, 19.0],
         [29.0, 58.0, 76.0, 38.0],
         [65.0, 130.0, 148.0, 74.0],
         [32.5, 65.0, 74.0, 37.0],
     ]
     self._guided_grad_cam_test_assert(net, net.conv1, (inp, inp2), (ex, []))
Example #12
 def test_simple_multi_input_conv_deconv(self) -> None:
     net = BasicModel_ConvNet_One_Conv()
     inp = torch.arange(16, dtype=torch.float).view(1, 1, 4, 4)
     inp2 = torch.ones((1, 1, 4, 4))
     ex_attr = [
         [2.0, 3.0, 3.0, 1.0],
         [3.0, 5.0, 5.0, 2.0],
         [3.0, 5.0, 5.0, 2.0],
         [1.0, 2.0, 2.0, 1.0],
     ]
     self._deconv_test_assert(net, (inp, inp2), (ex_attr, ex_attr))
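
The multi-input Deconvolution call that this helper presumably wraps (target=0 is an assumption):

import torch
from captum.attr import Deconvolution

net = BasicModel_ConvNet_One_Conv()  # from Captum's test helpers, as above
inp = torch.arange(16, dtype=torch.float).view(1, 1, 4, 4)
inp2 = torch.ones((1, 1, 4, 4))

deconv = Deconvolution(net)
# Passing a tuple of inputs returns a tuple of attributions, one per input.
attr1, attr2 = deconv.attribute((inp, inp2), target=0)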
Example #13
 def test_simple_multi_input_conv_gb(self) -> None:
     net = BasicModel_ConvNet_One_Conv()
     inp = torch.arange(16, dtype=torch.float).view(1, 1, 4, 4)
     inp2 = torch.ones((1, 1, 4, 4))
     ex_attr = [[[
         [1.0, 2.0, 2.0, 1.0],
         [2.0, 4.0, 4.0, 2.0],
         [2.0, 4.0, 4.0, 2.0],
         [1.0, 2.0, 2.0, 1.0],
     ]]]
     self._guided_backprop_test_assert(net, (inp, inp2), (ex_attr, ex_attr))
Example #14
 def test_simple_input_conv_neuron_gb_agg_neurons(self) -> None:
     net = BasicModel_ConvNet_One_Conv()
     inp = 1.0 * torch.arange(16, dtype=torch.float).view(1, 1, 4, 4)
     exp = [[[
         [0.0, 1.0, 1.0, 1.0],
         [1.0, 3.0, 3.0, 2.0],
         [1.0, 3.0, 3.0, 2.0],
         [1.0, 2.0, 2.0, 1.0],
     ]]]
     self._neuron_guided_backprop_test_assert(net, net.fc1,
                                              (slice(0, 1, 1), ), (inp, ),
                                              (exp, ))
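
A rough sketch of the underlying NeuronGuidedBackprop call, assuming the selector accepts the same tuple-of-slices form the test passes:

import torch
from captum.attr import NeuronGuidedBackprop

net = BasicModel_ConvNet_One_Conv()  # from Captum's test helpers, as above
inp = 1.0 * torch.arange(16, dtype=torch.float).view(1, 1, 4, 4)

neuron_gbp = NeuronGuidedBackprop(net, net.fc1)
# A slice in the selector aggregates over a range of fc1 neurons; here
# slice(0, 1, 1) covers only neuron 0, matching the expected values above.
attr = neuron_gbp.attribute(inp, (slice(0, 1, 1),))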
Example #15
 def test_simple_multi_input_relu_input_inplace(self) -> None:
     net = BasicModel_ConvNet_One_Conv(inplace=True)
     inp = torch.arange(16, dtype=torch.float).view(1, 1, 4, 4)
     inp2 = torch.ones((1, 1, 4, 4))
     ex = [
         [14.5, 29.0, 38.0, 19.0],
         [29.0, 58.0, 76.0, 38.0],
         [65.0, 130.0, 148.0, 74.0],
         [32.5, 65.0, 74.0, 37.0],
     ]
     self._guided_grad_cam_test_assert(
         net, net.relu1, (inp, inp2), (ex, ex), attribute_to_layer_input=True
     )
Example #16
 def test_simple_input_conv_fc_with_final_relu(self) -> None:
     net = BasicModel_ConvNet_One_Conv()
     inp = torch.arange(16).view(1, 1, 4, 4).float()
     # Add a negative value to verify that the final ReLU is applied
     inp[0, 0, 1, 1] = -4.0
     inp.requires_grad_()
     exp = 0.5625 * inp
     exp[0, 0, 1, 1] = 0.0
     self._grad_cam_test_assert(
         net,
         net.conv1,
         inp,
         (exp, ),
         attribute_to_layer_input=True,
         relu_attributions=True,
     )
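
A sketch of the direct LayerGradCam call with the two flags this test exercises (target=0 is an assumption):

import torch
from captum.attr import LayerGradCam

net = BasicModel_ConvNet_One_Conv()  # from Captum's test helpers, as above
inp = torch.arange(16).view(1, 1, 4, 4).float()
inp[0, 0, 1, 1] = -4.0  # negative entry, as in the test
inp.requires_grad_()

layer_gc = LayerGradCam(net, net.conv1)
# attribute_to_layer_input attributes w.r.t. conv1's input (here the network
# input itself); relu_attributions clamps negative attributions to zero.
attr = layer_gc.attribute(
    inp, target=0, attribute_to_layer_input=True, relu_attributions=True
)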
Example #17
    def test_classification_infidelity_convnet_multi_targets(self) -> None:
        model = BasicModel_ConvNet_One_Conv()
        dl = DeepLift(model)

        input = torch.stack([torch.arange(1, 17).float()] * 20,
                            dim=0).view(20, 1, 4, 4)

        self.infidelity_assert(
            model,
            dl.attribute(input, target=torch.tensor([1] * 20)) / input,
            input,
            torch.zeros(20),
            target=torch.tensor([1] * 20),
            multi_input=False,
            n_perturb_samples=500,
            max_batch_size=120,
        )
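
Outside the test harness, the infidelity computation looks roughly like this; the Gaussian perturb_fn is an illustrative stand-in for whatever infidelity_assert uses internally:

import torch
from captum.attr import DeepLift
from captum.metrics import infidelity

model = BasicModel_ConvNet_One_Conv()  # from Captum's test helpers, as above
inputs = torch.stack([torch.arange(1, 17).float()] * 20,
                     dim=0).view(20, 1, 4, 4)
target = torch.tensor([1] * 20)

def perturb_fn(inputs):
    # Return the perturbation and the perturbed inputs, as infidelity expects.
    noise = torch.randn_like(inputs) * 0.01
    return noise, inputs - noise

attributions = DeepLift(model).attribute(inputs, target=target) / inputs
infid = infidelity(
    model,
    perturb_fn,
    inputs,
    attributions,
    target=target,
    n_perturb_samples=500,
    max_examples_per_batch=120,
)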
Example #18
 def test_simple_multi_input_conv_intermediate(self) -> None:
     net = BasicModel_ConvNet_One_Conv(inplace=True)
     inp = torch.arange(16, dtype=torch.float).view(1, 1, 4, 4)
     inp2 = torch.ones((1, 1, 4, 4))
     self._ablation_test_assert(
         net,
         net.relu1,
         (inp, inp2),
         (torch.zeros_like(inp), torch.zeros_like(inp2)),
         feature_mask=(torch.tensor(0), torch.tensor(1)),
         perturbations_per_eval=(1, 2, 4, 8, 12, 16),
         neuron_selector=(1, 0, 0),
     )
     self._ablation_test_assert(
         net,
         net.relu1,
         (inp, inp2),
         (45 * torch.ones_like(inp), 9 * torch.ones_like(inp2)),
         feature_mask=(torch.tensor(0), torch.tensor(1)),
         perturbations_per_eval=(1, 2, 4, 8, 12, 16),
         neuron_selector=(1, 0, 0),
         attribute_to_neuron_input=True,
     )
     self._ablation_test_assert(
         net,
         net.relu1,
         (inp, inp2),
         (
             [
                 [0.0, 1.0, 2.0, 0.0],
                 [4.0, 5.0, 6.0, 0.0],
                 [8.0, 9.0, 10.0, 0.0],
                 [0.0, 0.0, 0.0, 0.0],
             ],
             [
                 [1.0, 1.0, 1.0, 0.0],
                 [1.0, 1.0, 1.0, 0.0],
                 [1.0, 1.0, 1.0, 0.0],
                 [0.0, 0.0, 0.0, 0.0],
             ],
         ),
         perturbations_per_eval=(1, 3, 7, 14),
         neuron_selector=(1, 0, 0),
         attribute_to_neuron_input=True,
     )
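
The helper here presumably drives NeuronFeatureAblation; a hedged sketch of the second assertion's call (the positional neuron selector and the exact keyword set are assumptions):

import torch
from captum.attr import NeuronFeatureAblation

net = BasicModel_ConvNet_One_Conv(inplace=True)  # from Captum's test helpers
inp = torch.arange(16, dtype=torch.float).view(1, 1, 4, 4)
inp2 = torch.ones((1, 1, 4, 4))

neuron_ablation = NeuronFeatureAblation(net, net.relu1)
# Attributes both inputs with respect to neuron (1, 0, 0) of net.relu1;
# attribute_to_neuron_input switches to that neuron's input activation.
attr1, attr2 = neuron_ablation.attribute(
    (inp, inp2),
    (1, 0, 0),
    feature_mask=(torch.tensor(0), torch.tensor(1)),
    perturbations_per_eval=2,
    attribute_to_neuron_input=True,
)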
Example #19
    def test_convnet_multi_target(self) -> None:
        r"""
        Another test with Saliency, local sensitivity, and a more
        complex model with higher-dimensional input.
        """
        model = BasicModel_ConvNet_One_Conv()
        sa = Saliency(model)

        input = torch.stack([torch.arange(1, 17).float()] * 20, dim=0).view(20, 1, 4, 4)

        self.sensitivity_max_assert(
            sa.attribute,
            input,
            torch.zeros(20),
            target=torch.tensor([1] * 20),
            n_perturb_samples=10,
            max_examples_per_batch=40,
        )
Example #20
 def test_too_large_stride(self) -> None:
     net = BasicModel_ConvNet_One_Conv()
     inp = torch.arange(16, dtype=torch.float).view(1, 1, 4, 4)
     occ = Occlusion(net)
     with self.assertRaises(AssertionError):
         _ = occ.attribute(inp,
                           sliding_window_shapes=((1, 1, 2), ),
                           strides=2,
                           target=0)
     with self.assertRaises(AssertionError):
         _ = occ.attribute(
             (inp, inp),
             sliding_window_shapes=((1, 1, 2), (1, 4, 2)),
             strides=(2, (1, 2, 3)),
             target=0,
         )
     with self.assertRaises(AssertionError):
         _ = occ.attribute(inp,
                           sliding_window_shapes=((2, 1, 2), ),
                           strides=2,
                           target=0)
Example #21
def _get_basic_config() -> Tuple[Module, Tensor]:
    input = torch.arange(16).view(1, 1, 4, 4).float()
    return BasicModel_ConvNet_One_Conv(), input
Example #22
 def test_gb_matching(self) -> None:
     net = BasicModel_ConvNet_One_Conv()
     inp = 100.0 * torch.randn(1, 1, 4, 4)
     self._guided_backprop_matching_assert(net, net.relu2, inp)
Example #23
 def test_simple_multi_input_conv(self) -> None:
     net = BasicModel_ConvNet_One_Conv()
     inp = torch.arange(16).view(1, 1, 4, 4).float()
     inp2 = torch.ones((1, 1, 4, 4))
     self._grad_cam_test_assert(net, net.conv1, (inp, inp2),
                                [[14.5, 19.0], [32.5, 37.0]])
Example #24
 def test_deconv_matching(self) -> None:
     net = BasicModel_ConvNet_One_Conv()
     inp = 100.0 * torch.randn(1, 1, 4, 4)
     self._deconv_matching_assert(net, net.relu2, inp)
Example #25
 def test_simple_input_conv_relu(self) -> None:
     net = BasicModel_ConvNet_One_Conv()
     inp = torch.arange(16).view(1, 1, 4, 4).float()
     self._grad_cam_test_assert(net, net.relu1, inp,
                                [[0.0, 4.0], [28.0, 32.5]])
Example #26
 def test_sample_grads_conv_sum(self) -> None:
     model = BasicModel_ConvNet_One_Conv()
     inp = (123 * torch.randn(6, 1, 4, 4), )
     self._compare_sample_grads_per_sample(model, inp,
                                           lambda x: torch.sum(x), "sum")
Example #27
 def test_simple_input_conv(self) -> None:
     net = BasicModel_ConvNet_One_Conv()
     inp = torch.arange(16).view(1, 1, 4, 4).float()
     self._grad_cam_test_assert(net, net.conv1, inp,
                                [[11.25, 13.5], [20.25, 22.5]])
Example #28
 def test_sample_grads_conv_mean_multi_inp(self) -> None:
     model = BasicModel_ConvNet_One_Conv()
     inp = (20 * torch.randn(6, 1, 4, 4), 9 * torch.randn(6, 1, 4, 4))
     self._compare_sample_grads_per_sample(model, inp,
                                           lambda x: torch.mean(x))