Example #1
    def test_linear_with_beta_ignore_bias(self):
        # Prepare configuration of linear layer
        config = LRPConfiguration()
        config.set(LAYER.SPARSE_LINEAR, AlphaBetaConfiguration(alpha=2, beta=-1, bias_strategy=BIAS_STRATEGY.IGNORE))
        config.set(LAYER.LINEAR, AlphaBetaConfiguration(alpha=2, beta=-1, bias_strategy=BIAS_STRATEGY.IGNORE))

        expected_result = [[-83.6, 22.8, -57.79534884, 156.5953488]]

        self._do_test(expected_result, config)
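The _do_test helper these linear-layer tests call is not shown on this page. Below is a minimal sketch of what such a harness might look like; the import path, weights and input are placeholder assumptions (only lrp.lrp and the configuration classes appear verbatim in the examples), so the sketch will not reproduce the expected numbers above.

import numpy as np
import tensorflow as tf  # TF 1.x, matching the session-based examples on this page

from lrp import lrp  # assumed import path

def _do_test(expected_result, config):
    with tf.Graph().as_default():
        # Placeholder fixture: a fixed 4 -> 4 linear layer and a single input row.
        inp = tf.constant([[1., 2., 3., 4.]])
        weights = tf.constant(np.arange(16, dtype=np.float32).reshape(4, 4))
        bias = tf.constant([1., -1., 1., -1.])
        out = tf.matmul(inp, weights) + bias

        # Relevance of each input feature under the given LRP configuration.
        expl = lrp.lrp(inp, out, config)
        with tf.Session() as s:
            relevances = s.run(expl)
            np.testing.assert_allclose(expected_result, relevances, rtol=1e-3, atol=1e-3)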
Example #2
    def test_linear_lrp_alpha_beta_active_bias(self):
        # Prepare configuration of linear layer
        config = LRPConfiguration()
        config.set(LAYER.SPARSE_LINEAR, AlphaBetaConfiguration(alpha=2, beta=-1, bias_strategy=BIAS_STRATEGY.ACTIVE))
        config.set(LAYER.LINEAR, AlphaBetaConfiguration(alpha=2, beta=-1, bias_strategy=BIAS_STRATEGY.ACTIVE))

        expected_result = [[-83.6, 21.92307692, -47.5372549, 147.214178]]

        self._do_test(expected_result, config)
Example #3
    def test_linear_with_beta_no_bias(self):
        # Prepare configuration of linear layer
        config = LRPConfiguration()
        config.set(LAYER.SPARSE_LINEAR, AlphaBetaConfiguration(alpha=2, beta=-1))
        config.set(LAYER.LINEAR, AlphaBetaConfiguration(alpha=2, beta=-1))

        expected_result = [[-80.14545455, 9.566433566, -28.36791444, 125.5933087]]

        self._do_test(expected_result, config)
Example #4
    def test_alpha_beta_no_bias(self):
        # config
        config = LRPConfiguration()
        config.set(
            LAYER.CONVOLUTIONAL,
            AlphaBetaConfiguration(alpha=2,
                                   beta=-1,
                                   bias_strategy=BIAS_STRATEGY.NONE))

        # Shape (3, 2, 2, 4, 2)
        expected = [[[[[0., 0.], [0., 0.], [0.19799927, 0.58104888], [0., 0.]],
                      [[0., 0.], [0., 0.], [-0.89017772, 0.88278689], [0., 0.]]],
                     [[[0., 0.], [0., 0.], [0., 0.], [0., 0.]],
                      [[0., 0.], [0., 0.], [0., 0.], [0.20427209, 0.61883095]]]],
                    [[[[0., 0.], [0., 0.], [0., 0.], [0.64397426, 0.63812658]],
                      [[0., 0.], [0., 0.], [0., 0.], [-0.87641592, 0.33343329]]],
                     [[[0., 0.], [0., 0.], [0., 0.], [0., 0.]],
                      [[0., 0.], [0.3990402, 0.37736268], [0., 0.], [0., 0.]]]],
                    [[[[-0.35146675, 0.27085197], [0., 0.], [0., 0.], [0., 0.]],
                      [[0.02872034, 0.31778188], [0., 0.], [0., 0.], [0., 0.]]],
                     [[[0., 0.], [0., 0.], [0., 0.], [0., 0.]],
                      [[0., 0.], [0.44995264, 0.88703817], [0., 0.], [0., 0.]]]]]

        self._do_convolutional_test_with_config(config, expected)
Example #5
    def test_softmax(self):
        with tf.Graph().as_default():
            inp = tf.constant([[[1, 2, 3], [1, 2, 3]], [[1, 2, 3], [1, 2, 3]]], dtype=tf.float32)

            out = tf.nn.softmax(inp)

            config = LRPConfiguration()
            config.set(LAYER.SOFTMAX, AlphaBetaConfiguration(alpha=2, beta=-1))
            expl = lrp.lrp(inp, out, config)

            with tf.Session() as s:
                explanation = s.run(expl)

                expected = np.array([[[[-0.05989202454, -0.162803402, 0.8879363823],
                                       [0, 0, 0]],

                                      [[0, 0, 0],
                                       [-0.05989202454, -0.162803402, 0.8879363823]]],

                                     [[[-0.05989202454, -0.162803402, 0.8879363823],
                                       [0, 0, 0]],

                                      [[0, 0, 0],
                                       [-0.05989202454, -0.162803402, 0.8879363823]]]])

                # Check that the relevance scores are correct (the expected
                # values were calculated by hand)
                self.assertTrue(
                    np.allclose(expected, explanation, rtol=1e-03, atol=1e-03),
                    msg="Should be a good explanation")
Example #6
    def test_alpha_beta_active_bias(self):
        # config
        config = LRPConfiguration()
        config.set(
            LAYER.CONVOLUTIONAL,
            AlphaBetaConfiguration(alpha=2,
                                   beta=-1,
                                   bias_strategy=BIAS_STRATEGY.ACTIVE))

        # Shape (3, 2, 2, 4, 2)
        expected = [[[[[0., 0.], [0., 0.], [0.23750607, 0.62055568], [0., 0.]],
                      [[0., 0.], [0., 0.], [-0.89017772, 0.92229369], [0., 0.]]],
                     [[[0., 0.], [0., 0.], [0., 0.], [0., 0.]],
                      [[0., 0.], [0., 0.], [0., 0.], [0.25841425, 0.67297311]]]],
                    [[[[0., 0.], [0., 0.], [0., 0.], [0.68974016, 0.68389248]],
                      [[0., 0.], [0., 0.], [0., 0.], [-0.87641592, 0.37919919]]],
                     [[[0., 0.], [0., 0.], [0., 0.], [0., 0.]],
                      [[0., 0.], [0.48797808, 0.46630056], [0., 0.], [0., 0.]]]],
                    [[[[-0.35146675, 0.29937841], [0., 0.], [0., 0.], [0., 0.]],
                      [[0.05724678, 0.34630832], [0., 0.], [0., 0.], [0., 0.]]],
                     [[[0., 0.], [0., 0.], [0., 0.], [0., 0.]],
                      [[0., 0.], [0.53026941, 0.96735494], [0., 0.], [0., 0.]]]]]

        self._do_convolutional_test_with_config(config, expected)
Example #7
    def test_alpha_beta_all_bias(self):
        # config
        config = LRPConfiguration()
        config.set(
            LAYER.CONVOLUTIONAL,
            AlphaBetaConfiguration(alpha=2,
                                   beta=-1,
                                   bias_strategy=BIAS_STRATEGY.ALL))

        # Shape (3, 2, 2, 4, 2)
        expected = [[[[[0., 0.], [0., 0.], [0.22762937, 0.61067898], [0., 0.]],
                      [[0., 0.], [0., 0.], [-0.86054762, 0.91241699], [0., 0.]]],
                     [[[0., 0.], [0., 0.], [0., 0.], [0., 0.]],
                      [[0., 0.], [0., 0.], [0., 0.], [0.23134317, 0.64590203]]]],
                    [[[[0., 0.], [0., 0.], [0., 0.], [0.67829868, 0.67245101]],
                      [[0., 0.], [0., 0.], [0., 0.], [-0.84209149, 0.36775772]]],
                     [[[0., 0.], [0., 0.], [0., 0.], [0., 0.]],
                      [[0., 0.], [0.44350914, 0.42183162], [0., 0.], [0., 0.]]]],
                    [[[[-0.33007192, 0.2922468], [0., 0.], [0., 0.], [0., 0.]],
                      [[0.05011517, 0.33917671], [0., 0.], [0., 0.], [0., 0.]]],
                     [[[0., 0.], [0., 0.], [0., 0.], [0., 0.]],
                      [[0., 0.], [0.49011103, 0.92719656], [0., 0.], [0., 0.]]]]]

        self._do_convolutional_test_with_config(config, expected)
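Examples #4, #6 and #7 share the same zero pattern and differ only in how the convolution's bias relevance is redistributed. Comparing one positive and one negative entry (indices [0][0][0][2][0] and [0][0][1][2][0] of the arrays above) makes the difference concrete; the reading in the comments is an inference from these numbers, not documented behaviour.

import numpy as np

# The same two entries under the three bias strategies above.
none   = np.array([0.19799927, -0.89017772])  # BIAS_STRATEGY.NONE   (Example #4)
active = np.array([0.23750607, -0.89017772])  # BIAS_STRATEGY.ACTIVE (Example #6)
all_   = np.array([0.22762937, -0.86054762])  # BIAS_STRATEGY.ALL    (Example #7)

print(active - none)  # [0.0395068 0.       ]  only the positive entry moves
print(all_ - none)    # [0.0296301 0.0296301]  both entries move by the same amount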
Example #8
    def test_lstm_alpha_beta_active_bias(self):
        config = LRPConfiguration()
        config.set(
            LAYER.LSTM,
            AlphaBetaConfiguration(alpha=2,
                                   beta=-1,
                                   bias_strategy=BIAS_STRATEGY.ACTIVE))

        expected_result = np.array([[[[1.17243838, -1.42132187, 1.67020536],
                                      [0., 0., 0.]],
                                     [[1.47216972, -3.08726835, 4.70236698],
                                      [0.52473047, 0.76832693, 0.14187811]]]])
        self._do_test_with_config_and_expected_result(config, expected_result)
Example #9
    def test_lstm_alpha_beta_all_bias(self):
        config = LRPConfiguration()
        config.set(
            LAYER.LSTM,
            AlphaBetaConfiguration(alpha=2,
                                   beta=-1,
                                   bias_strategy=BIAS_STRATEGY.ALL))

        expected_result = np.array([[[[0.90404336, -1.24239185, 1.40181034],
                                      [0., 0., 0.]],
                                     [[0.58241955, -1.00368592, 3.04841668],
                                      [0.30076316, 0.54435962, -0.08208919]]]])
        self._do_test_with_config_and_expected_result(config, expected_result)
Example #10
    def test_lstm_alpha_beta_no_bias(self):
        config = LRPConfiguration()
        config.set(
            LAYER.LSTM,
            AlphaBetaConfiguration(alpha=2,
                                   beta=-1,
                                   bias_strategy=BIAS_STRATEGY.NONE))

        expected_result = np.array([[[[0.72511334, -1.42132187, 1.22288032],
                                      [0., 0., 0.]],
                                     [[1.17308824, -1.30891597, 4.40328549],
                                      [0.52473047, 0.76832693, 0.14187811]]]])
        self._do_test_with_config_and_expected_result(config, expected_result)
Example #11
    def test_linear_lrp_alpha_beta_equal_bias(self):
        # Prepare configuration of linear layer
        config = LRPConfiguration()
        config.set(
            LAYER.LINEAR,
            AlphaBetaConfiguration(alpha=2,
                                   beta=-1,
                                   bias_strategy=BIAS_STRATEGY.ALL))

        expected_result = [[-70.90120207, 14.94277618, -27.09395311, 121.052379]]

        self.do_test_with_config_and_result(expected_result, config)
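Taken together with Examples #1, #2 and #3, this test makes the bias strategies easy to compare: summing each expected vector shows that IGNORE, ACTIVE and ALL preserve the same total relevance of 38.0, while Example #3 (no bias_strategy argument) does not. A quick check over the listed numbers:

import numpy as np

ignore  = np.array([-83.6, 22.8, -57.79534884, 156.5953488])                # Example #1
active  = np.array([-83.6, 21.92307692, -47.5372549, 147.214178])           # Example #2
default = np.array([-80.14545455, 9.566433566, -28.36791444, 125.5933087])  # Example #3
all_    = np.array([-70.90120207, 14.94277618, -27.09395311, 121.052379])   # Example #11

for name, r in [("IGNORE", ignore), ("ACTIVE", active), ("ALL", all_), ("default", default)]:
    print(name, round(r.sum(), 4))
# IGNORE 38.0, ACTIVE 38.0, ALL 38.0, default 26.6464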
Example #12
    def test_alpha_beta_ignore_bias(self):
        config = LRPConfiguration()
        config.set(LAYER.LINEAR,
                   AlphaBetaConfiguration(bias_strategy=BIAS_STRATEGY.IGNORE))

        expected_result = [
            [[0, 0, 0, 0, 0], [0, 0, 0, 0, 0]],
            [[0.05056302043, 0.001782476642, 0.4564389581, 0, 0.2856364171],
             [
                 0.0006553275046, 0.2230518547, 0.1905802792, 0.09459906389,
                 0.08207159722
             ]],
            [[0, 0, 0, 0, 0],
             [0.1729084504, 0, 0.0773933278, 0.2900907282, 0.006641839018]]
        ]
        self._do_linear_test(config, np.array(expected_result))
Example #13
    def test_zb_ignore_bias(self):
        config = LRPConfiguration()
        config.set(LAYER.LINEAR,
                   AlphaBetaConfiguration(bias_strategy=BIAS_STRATEGY.IGNORE))
        config.set_first_layer_zb(_l(-1), _l(1), BIAS_STRATEGY.IGNORE)

        expected_result = [[[0, 0, 0, 0, 0], [0, 0, 0, 0, 0]],
                           [[
                               0.08941490911, 0.001847394735, 0.3857157564,
                               0.04130132575, 0.2761414863
                           ],
                            [
                                0.06524084895, 0.2049935833, 0.1593686046,
                                0.08435578325, 0.07699930241
                            ]],
                           [[0, 0, 0, 0, 0],
                            [
                                0.1842489119, 0.02888073415, 0.04590480826,
                                0.2341442052, 0.05385568583
                            ]]]

        self._do_linear_test(config, np.array(expected_result))
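The _l helper is not shown on this page; judging from its use it lifts the scalar bounds -1 and 1 into the shape the first layer expects. set_first_layer_zb(low, high, ...) corresponds to the zB rule for input layers with a bounded domain [low, high] known from the LRP literature (Montavon et al.): R_i = sum_j (x_i w_ij - low_i w_ij^+ - high_i w_ij^-) / sum_i' (x_i' w_i'j - low_i' w_i'j^+ - high_i' w_i'j^-) * R_j, where w^+ and w^- denote the positive and negative parts of the weights. That this library implements exactly this formula is an assumption based on the method name; Examples #15 and #16 below vary only the bias strategy applied to this first-layer rule.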
Example #14
    def test_one_prediction_per_sample(self):
        graph = tf.Graph()
        with graph.as_default():
            inp = tf.placeholder(tf.float32, (2, 1, 5))

            x = tf.contrib.layers.batch_norm(inp, is_training=False, scale=True)
            vars = tf.global_variables()
            beta = next(i for i in vars if 'beta' in i.name)
            gamma = next(i for i in vars if 'gamma' in i.name)
            mean = next(i for i in vars if 'mean' in i.name)
            variance = next(i for i in vars if 'variance' in i.name)

            b = tf.constant([0, 1, 0, 1, 0], dtype=tf.float32)
            assign_beta = tf.assign(beta, b)
            g = tf.constant([0.1, 0.2, 0.3, 0.4, 0.5], dtype=tf.float32)
            assign_gamma = tf.assign(gamma, g)
            m = tf.constant([1, 2, 3, 4, 4.5], dtype=tf.float32)
            assign_mean = tf.assign(mean, m)
            v = tf.constant([0.2, 0.2, 0.2, 0.2, 0.2], dtype=tf.float32)
            assign_variance = tf.assign(variance, v)

            # Get the explanation
            config = LRPConfiguration()
            config.set(LAYER.ELEMENTWISE_LINEAR, AlphaBetaConfiguration())
            explanation = lrp.lrp(inp, x, config)

            with tf.Session() as s:
                s.run(tf.global_variables_initializer())
                s.run([assign_beta, assign_gamma, assign_mean, assign_variance])

                # Shape: (2, 1, 1, 5)
                expected_relevances = np.array(
                    [[[[0, 0, 0, 1, 0]]],
                     [[[0, 0.8921994513, 0, 0, 0]]]])
                relevances = s.run(explanation, feed_dict={inp: [[[1, 0, 3, 4, 5]],
                                                                 [[1, 2, 3, 0, 4]]]})

                self.assertTrue(np.allclose(expected_relevances, relevances, rtol=1e-03, atol=1e-03),
                                msg="The relevances do not match the expected")
Example #15
    def test_zb_no_bias(self):
        config = LRPConfiguration()
        config.set(LAYER.LINEAR,
                   AlphaBetaConfiguration(bias_strategy=BIAS_STRATEGY.IGNORE))
        config.set_first_layer_zb(_l(-1), _l(1), BIAS_STRATEGY.NONE)

        expected_result = [[[0, 0, 0, 0, 0], [0, 0, 0, 0, 0]],
                           [[
                               0.1145575292, 0.002366864524, 0.4941753504,
                               0.05291486486, 0.3537898399
                           ],
                            [
                                0.07031890972, 0.2048122289, 0.1691654014,
                                0.1035978445, 0.08054911078
                            ]],
                           [[0, 0, 0, 0, 0],
                            [
                                0.2102658664, 0.0283220131, 0.04511325643,
                                0.2887187793, 0.05874777231
                            ]]]

        self._do_linear_test(config, np.array(expected_result))
Example #16
    def test_zb_all_bias(self):
        config = LRPConfiguration()
        config.set(LAYER.LINEAR,
                   AlphaBetaConfiguration(bias_strategy=BIAS_STRATEGY.IGNORE))
        config.set_first_layer_zb(_l(-1), _l(1), BIAS_STRATEGY.ALL)

        expected_result = [[[0, 0, 0, 0, 0], [0, 0, 0, 0, 0]],
                           [[
                               0.0698808139, -0.0423098508, 0.449498635,
                               0.008238149531, 0.3091131246
                           ],
                            [
                                0.0628218352, 0.1973151543, 0.1616683269,
                                0.09610076993, 0.07305203625
                            ]],
                           [[0, 0, 0, 0, 0],
                            [
                                0.193439198, 0.01149534468, 0.02828658801,
                                0.2718921108, 0.04192110389
                            ]]]

        self._do_linear_test(config, np.array(expected_result))
Example #17
    def test_four_predictions_per_sample(self):
        graph = tf.Graph()
        with graph.as_default():
            inp = tf.placeholder(tf.float32, (2, 4, 5))

            x = tf.contrib.layers.batch_norm(inp, is_training=False, scale=True)
            vars = tf.global_variables()
            beta = next(i for i in vars if 'beta' in i.name)
            gamma = next(i for i in vars if 'gamma' in i.name)
            mean = next(i for i in vars if 'mean' in i.name)
            variance = next(i for i in vars if 'variance' in i.name)

            b = tf.constant([0, 1, 0, 1, 0], dtype=tf.float32)
            assign_beta = tf.assign(beta, b)
            g = tf.constant([0.1, 0.2, 0.3, 0.4, 0.5], dtype=tf.float32)
            assign_gamma = tf.assign(gamma, g)
            m = tf.constant([1, 2, 3, 4, 4.5], dtype=tf.float32)
            assign_mean = tf.assign(mean, m)
            v = tf.constant([0.2, 0.2, 0.2, 0.2, 0.2], dtype=tf.float32)
            assign_variance = tf.assign(variance, v)

            # Get the explanation
            config = LRPConfiguration()
            config.set(LAYER.ELEMENTWISE_LINEAR, AlphaBetaConfiguration())
            explanation = lrp.lrp(inp, x, config)

            with tf.Session() as s:
                s.run(tf.global_variables_initializer())
                s.run([assign_beta, assign_gamma, assign_mean, assign_variance])

                input = np.array([[[-0.3597203655, 2.416366089, -0.7543762749, 0.8718006654, -2.221761776],
                                   [1.099448308, 0.4108163696, 1.798067039, -0.6544576652, -0.6968107745],
                                   [-0.5612699962, -0.2597267932, 0.06325442832, -1.236885473, 0.9369620591],
                                   [-0.06784464057, -0.004403155247, -1.195337879, -0.528265092, -0.1020843691]],
                                  [[0.8766405077, 0.522839272, 0.4197016166, -1.497174712, 0.05348117451],
                                   [0.08739119149, -0.9997059536, -0.6212993685, 0.04027413639, 0.3979749684],
                                   [0.06180908495, -0.5322826252, -1.585670194, -0.5220654844, -1.096597863],
                                   [1.003261811, -1.865129316, -1.134796217, -0.1038509194, -0.8933464003]]])

                # Shape: (2, 4, 4, 5)
                expected_relevances = np.array(
                    [[[[0.0, 1.07794025, 0.0, 0.0, 0.0],
                       [0.0, 0.0, 0.0, 0.0, 0.0],
                       [0.0, 0.0, 0.0, 0.0, 0.0],
                       [0.0, 0.0, 0.0, 0.0, 0.0]],

                      [[0.0, 0.0, 0.0, 0.0, 0.0],
                       [0.0, 0.1832650698, 0.0, 0.0, 0.0],
                       [0.0, 0.0, 0.0, 0.0, 0.0],
                       [0.0, 0.0, 0.0, 0.0, 0.0]],

                      [[0.0, 0.0, 0.0, 0.0, 0.0],
                       [0.0, 0.0, 0.0, 0.0, 0.0],
                       [0.0, 0.0, 0.0, 0.0, 0.0],
                       [0.0, 0.0, 0.0, 0.0, 0.0]],

                      [[0.0, 0.0, 0.0, 0.0, 0.0],
                       [0.0, 0.0, 0.0, 0.0, 0.0],
                       [0.0, 0.0, 0.0, 0.0, 0.0],
                       [0.0, 0.0, 0.0, 0.0, 0.0]]],
                     [[[0.0, 0.2332384558, 0.0, 0.0, 0.0],
                       [0.0, 0.0, 0.0, 0.0, 0.0],
                       [0.0, 0.0, 0.0, 0.0, 0.0],
                       [0.0, 0.0, 0.0, 0.0, 0.0]],
                      [[0.0, 0.0, 0.0, 0.0, 0.0],
                       [-0.2035572695, 0.0, 0.0, 0.0, 0.0],
                       [0.0, 0.0, 0.0, 0.0, 0.0],
                       [0.0, 0.0, 0.0, 0.0, 0.0]],
                      [[0.0, 0.0, 0.0, 0.0, 0.0],
                       [0.0, 0.0, 0.0, 0.0, 0.0],
                       [0.0, 0.0, 0.0, 0.0, 0.0],
                       [0.0, 0.0, 0.0, 0.0, 0.0]],
                      [[0.0, 0.0, 0.0, 0.0, 0.0],
                       [0.0, 0.0, 0.0, 0.0, 0.0],
                       [0.0, 0.0, 0.0, 0.0, 0.0],
                       [0.0007275465407, 0.0, 0.0, 0.0, 0.0]]]])
                relevances = s.run(explanation, feed_dict={inp: input})
                self.assertTrue(np.allclose(expected_relevances, relevances, rtol=1e-03, atol=1e-03),
                                msg="The relevances do not match the expected")