Example No. 1
    def test_alpha_beta_all_bias(self):
        # config
        config = LRPConfiguration()
        config.set(
            LAYER.CONVOLUTIONAL,
            AlphaBetaConfiguration(alpha=2,
                                   beta=-1,
                                   bias_strategy=BIAS_STRATEGY.ALL))

        # Shape (3, 2, 2, 4, 2)
        expected = [[[[[0., 0.], [0., 0.], [0.22762937, 0.61067898], [0., 0.]],
                      [[0., 0.], [0., 0.], [-0.86054762, 0.91241699], [0., 0.]]],
                     [[[0., 0.], [0., 0.], [0., 0.], [0., 0.]],
                      [[0., 0.], [0., 0.], [0., 0.], [0.23134317, 0.64590203]]]],
                    [[[[0., 0.], [0., 0.], [0., 0.], [0.67829868, 0.67245101]],
                      [[0., 0.], [0., 0.], [0., 0.], [-0.84209149, 0.36775772]]],
                     [[[0., 0.], [0., 0.], [0., 0.], [0., 0.]],
                      [[0., 0.], [0.44350914, 0.42183162], [0., 0.], [0., 0.]]]],
                    [[[[-0.33007192, 0.2922468], [0., 0.], [0., 0.], [0., 0.]],
                      [[0.05011517, 0.33917671], [0., 0.], [0., 0.], [0., 0.]]],
                     [[[0., 0.], [0., 0.], [0., 0.], [0., 0.]],
                      [[0., 0.], [0.49011103, 0.92719656], [0., 0.], [0., 0.]]]]]

        self._do_convolutional_test_with_config(config, expected)
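Note: the alpha-beta rule exercised here splits each neuron's pre-activation into positive and negative contributions and normalizes the two parts separately, with alpha + beta = 1 (here alpha=2, beta=-1). Below is a minimal NumPy sketch of that rule for a single dense layer, with bias left out for brevity; the function name and shapes are illustrative, not the library's API.

import numpy as np

def alpha_beta_dense(a, W, R_out, alpha=2.0, beta=-1.0):
    # Per-connection contributions z_ij = a_i * W_ij, shape (in, out).
    z = a[:, None] * W
    z_pos = np.maximum(z, 0.0)
    z_neg = np.minimum(z, 0.0)
    # Normalize positive and negative parts separately per output neuron;
    # the tiny stabilizers only guard against all-zero columns.
    frac_pos = z_pos / (z_pos.sum(axis=0, keepdims=True) + 1e-12)
    frac_neg = z_neg / (z_neg.sum(axis=0, keepdims=True) - 1e-12)
    # Since alpha + beta = 1, relevance is conserved per output neuron.
    return ((alpha * frac_pos + beta * frac_neg) * R_out).sum(axis=1)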
Example No. 2
    def test_flat(self):
        # config
        config = LRPConfiguration()
        config.set(LAYER.CONVOLUTIONAL, FlatConfiguration())

        # Shape (3, 2, 2, 4, 2)
        expected = [[[[[0., 0.], [0., 0.], [0.22254443, 0.22254443], [0., 0.]],
                      [[0., 0.], [0., 0.], [0.22254443, 0.22254443], [0., 0.]]],
                     [[[0., 0.], [0., 0.], [0., 0.], [0., 0.]],
                      [[0., 0.], [0., 0.], [0., 0.], [0.11642342, 0.11642342]]]],
                    [[[[0., 0.], [0., 0.], [0., 0.], [0.21910398, 0.21910398]],
                      [[0., 0.], [0., 0.], [0., 0.], [0.21910398, 0.21910398]]],
                     [[[0., 0.], [0., 0.], [0., 0.], [0., 0.]],
                      [[0., 0.], [0.11928483, 0.11928483], [0., 0.], [0., 0.]]]],
                    [[[[0.08786669, 0.08786669], [0., 0.], [0., 0.], [0., 0.]],
                      [[0.08786669, 0.08786669], [0., 0.], [0., 0.], [0., 0.]]],
                     [[[0., 0.], [0., 0.], [0., 0.], [0., 0.]],
                      [[0., 0.], [0.18720304, 0.18720304], [0., 0.], [0., 0.]]]]]

        self._do_convolutional_test_with_config(config, expected)
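Note: FlatConfiguration ignores both the input values and the weights; every input connected to an output neuron receives an equal share of that neuron's relevance, which is why the expected pairs above are constant within each receptive field. A hedged dense-layer sketch (illustrative helper, not the library's internals):

import numpy as np

def flat_dense(W, R_out):
    # Every connection counts equally; only the connectivity pattern matters.
    ones = np.ones_like(W)                          # (in, out)
    frac = ones / ones.sum(axis=0, keepdims=True)   # equal split per output
    return (frac * R_out).sum(axis=1)               # relevance per input, (in,)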
Example No. 3
    def test_epsilon_all_bias(self):
        # config
        config = LRPConfiguration()
        config.set(LAYER.CONVOLUTIONAL, EpsilonConfiguration())

        # Shape (3, 2, 2, 4, 2)
        expected = [[[[[0., 0.], [0., 0.], [0.11989002, 0.32163827], [0., 0.]],
                      [[0., 0.], [0., 0.], [-0.03191111, 0.48056054], [0., 0.]]],
                     [[[0., 0.], [0., 0.], [0., 0.], [0., 0.]],
                      [[0., 0.], [0., 0.], [0., 0.], [0.11567159, 0.32295102]]]],
                    [[[[0., 0.], [0., 0.], [0., 0.], [0.3551029, 0.35204153]],
                      [[0., 0.], [0., 0.], [0., 0.], [-0.02325702, 0.19252851]]],
                     [[[0., 0.], [0., 0.], [0., 0.], [0., 0.]],
                      [[0., 0.], [0.22175457, 0.21091581], [0., 0.], [0., 0.]]]],
                    [[[[0.00876684, 0.14695124], [0., 0.], [0., 0.], [0., 0.]],
                      [[0.02519954, 0.17054913], [0., 0.], [0., 0.], [0., 0.]]],
                     [[[0., 0.], [0., 0.], [0., 0.], [0., 0.]],
                      [[0., 0.], [0.24505551, 0.46359828], [0., 0.], [0., 0.]]]]]

        self._do_convolutional_test_with_config(config, expected)
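Note: EpsilonConfiguration corresponds to the standard LRP-epsilon rule, R_i = sum_j a_i W_ij / (z_j + eps * sign(z_j)) * R_j, where the stabilizer eps keeps small denominators from inflating relevance. A minimal dense-layer sketch under that reading (hypothetical helper; bias handling is simplified here and is controlled separately via BIAS_STRATEGY in the tests below):

import numpy as np

def epsilon_dense(a, W, b, R_out, epsilon=1e-12):
    z = a @ W + b                                 # pre-activations, shape (out,)
    stab = epsilon * np.where(z >= 0, 1.0, -1.0)  # sign-matched stabilizer
    s = R_out / (z + stab)                        # per-neuron relevance scale
    return a * (W @ s)                            # relevance per input, (in,)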
Example No. 4
    def test_ww(self):
        # config
        config = LRPConfiguration()
        config.set(LAYER.CONVOLUTIONAL, WWConfiguration())

        # Shape (3, 2, 2, 4, 2)
        expected = [[[[[0., 0.], [0., 0.], [0.2205872, 0.21228028], [0., 0.]],
                      [[0., 0.], [0., 0.], [0.08030722, 0.37700302], [0., 0.]]],
                     [[[0., 0.], [0., 0.], [0., 0.], [0., 0.]],
                      [[0., 0.], [0., 0.], [0., 0.], [0.1153995, 0.11105376]]]],
                    [[[[0., 0.], [0., 0.], [0., 0.], [0.217177, 0.20899851]],
                      [[0., 0.], [0., 0.], [0., 0.], [0.0790657, 0.3711747]]],
                     [[[0., 0.], [0., 0.], [0., 0.], [0., 0.]],
                      [[0., 0.], [0.11823574, 0.1137832], [0., 0.], [0., 0.]]]],
                    [[[[0.08709392, 0.08381412], [0., 0.], [0., 0.], [0., 0.]],
                      [[0.03170751, 0.14885121], [0., 0.], [0., 0.], [0., 0.]]],
                     [[[0., 0.], [0., 0.], [0., 0.], [0., 0.]],
                      [[0., 0.], [0.18555663, 0.1785689], [0., 0.], [0., 0.]]]]]

        self._do_convolutional_test_with_config(config, expected)
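Note: WWConfiguration is the w^2 rule: relevance is redistributed in proportion to squared weights and does not depend on the input values at all. Dense-layer sketch (illustrative, not the library's code):

import numpy as np

def ww_dense(W, R_out):
    W2 = W ** 2
    frac = W2 / W2.sum(axis=0, keepdims=True)   # squared-weight fractions per output
    return (frac * R_out).sum(axis=1)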
Example No. 5
    def test_epsilon_no_bias(self):
        # config
        config = LRPConfiguration()
        config.set(LAYER.CONVOLUTIONAL,
                   EpsilonConfiguration(bias_strategy=BIAS_STRATEGY.NONE))

        # Shape (3, 2, 2, 4, 2)
        expected = [[[[[0., 0.], [0., 0.], [0.10428416, 0.3060324], [0., 0.]],
                      [[0., 0.], [0., 0.], [-0.04751698, 0.46495467], [0., 0.]]],
                     [[[0., 0.], [0., 0.], [0., 0.], [0., 0.]],
                      [[0., 0.], [0., 0.], [0., 0.], [0.10213605, 0.30941548]]]],
                    [[[[0., 0.], [0., 0.], [0., 0.], [0.33713338, 0.33407201]],
                      [[0., 0.], [0., 0.], [0., 0.], [-0.04122655, 0.17455899]]],
                     [[[0., 0.], [0., 0.], [0., 0.], [0., 0.]],
                      [[0., 0.], [0.1995201, 0.18868134], [0., 0.], [0., 0.]]]],
                    [[[[-0.00199118, 0.13619322], [0., 0.], [0., 0.], [0., 0.]],
                      [[0.01444152, 0.15979111], [0., 0.], [0., 0.], [0., 0.]]],
                     [[[0., 0.], [0., 0.], [0., 0.], [0., 0.]],
                      [[0., 0.], [0.22497632, 0.44351909], [0., 0.], [0., 0.]]]]]

        self._do_convolutional_test_with_config(config, expected)
Example No. 6
    def test_softmax(self):
        with tf.Graph().as_default():
            inp = tf.constant([[[1, 2, 3], [1, 2, 3]], [[1, 2, 3], [1, 2, 3]]], dtype=tf.float32)

            out = tf.nn.softmax(inp)

            config = LRPConfiguration()
            config.set(LAYER.SOFTMAX, AlphaBetaConfiguration(alpha=2, beta=-1))
            expl = lrp.lrp(inp, out, config)

            with tf.Session() as s:
                explanation = s.run(expl)

                expected = np.array([[[[-0.05989202454, -0.162803402, 0.8879363823],
                                       [0, 0, 0]],

                                      [[0, 0, 0],
                                       [-0.05989202454, -0.162803402, 0.8879363823]]],

                                     [[[-0.05989202454, -0.162803402, 0.8879363823],
                                       [0, 0, 0]],

                                      [[0, 0, 0],
                                       [-0.05989202454, -0.162803402, 0.8879363823]]]])

                # Check if the relevance scores are correct (the correct values are found by
                # calculating the example by hand)
                self.assertTrue(
                    np.allclose(expected, explanation, rtol=1e-03, atol=1e-03),
                    msg="Should be a good explanation")
Example No. 7
    def test_epsilon_active_bias(self):
        # config
        config = LRPConfiguration()
        config.set(LAYER.CONVOLUTIONAL,
                   EpsilonConfiguration(bias_strategy=BIAS_STRATEGY.ACTIVE))

        # Shape (3, 2, 2, 4, 2)
        expected = [[[[[0., 0.], [0., 0.], [0.11989002, 0.32163827], [0., 0.]],
                      [[0., 0.], [0., 0.], [-0.03191111, 0.48056054], [0., 0.]]],
                     [[[0., 0.], [0., 0.], [0., 0.], [0., 0.]],
                      [[0., 0.], [0., 0.], [0., 0.], [0.12920713, 0.33648656]]]],
                    [[[[0., 0.], [0., 0.], [0., 0.], [0.3551029, 0.35204153]],
                      [[0., 0.], [0., 0.], [0., 0.], [-0.02325702, 0.19252851]]],
                     [[[0., 0.], [0., 0.], [0., 0.], [0., 0.]],
                      [[0., 0.], [0.24398904, 0.23315028], [0., 0.], [0., 0.]]]],
                    [[[[0.00876684, 0.14695124], [0., 0.], [0., 0.], [0., 0.]],
                      [[0.02519954, 0.17054913], [0., 0.], [0., 0.], [0., 0.]]],
                     [[[0., 0.], [0., 0.], [0., 0.], [0., 0.]],
                      [[0., 0.], [0.26513471, 0.48367747], [0., 0.], [0., 0.]]]]]

        self._do_convolutional_test_with_config(config, expected)
Example No. 8
    def test_alpha_beta_no_bias(self):
        # config
        config = LRPConfiguration()
        config.set(
            LAYER.CONVOLUTIONAL,
            AlphaBetaConfiguration(alpha=2,
                                   beta=-1,
                                   bias_strategy=BIAS_STRATEGY.NONE))

        # Shape (3, 2, 2, 4, 2)
        expected = [[[[[0., 0.], [0., 0.], [0.19799927, 0.58104888], [0., 0.]],
                      [[0., 0.], [0., 0.], [-0.89017772, 0.88278689], [0., 0.]]],
                     [[[0., 0.], [0., 0.], [0., 0.], [0., 0.]],
                      [[0., 0.], [0., 0.], [0., 0.], [0.20427209, 0.61883095]]]],
                    [[[[0., 0.], [0., 0.], [0., 0.], [0.64397426, 0.63812658]],
                      [[0., 0.], [0., 0.], [0., 0.], [-0.87641592, 0.33343329]]],
                     [[[0., 0.], [0., 0.], [0., 0.], [0., 0.]],
                      [[0., 0.], [0.3990402, 0.37736268], [0., 0.], [0., 0.]]]],
                    [[[[-0.35146675, 0.27085197], [0., 0.], [0., 0.], [0., 0.]],
                      [[0.02872034, 0.31778188], [0., 0.], [0., 0.], [0., 0.]]],
                     [[[0., 0.], [0., 0.], [0., 0.], [0., 0.]],
                      [[0., 0.], [0.44995264, 0.88703817], [0., 0.], [0., 0.]]]]]

        self._do_convolutional_test_with_config(config, expected)
Example No. 9
    def test_alpha_beta_active_bias(self):
        # config
        config = LRPConfiguration()
        config.set(
            LAYER.CONVOLUTIONAL,
            AlphaBetaConfiguration(alpha=2,
                                   beta=-1,
                                   bias_strategy=BIAS_STRATEGY.ACTIVE))

        # Shape (3, 2, 2, 4, 2)
        expected = [[[[[0., 0.], [0., 0.], [0.23750607, 0.62055568], [0., 0.]],
                      [[0., 0.], [0., 0.], [-0.89017772, 0.92229369], [0., 0.]]],
                     [[[0., 0.], [0., 0.], [0., 0.], [0., 0.]],
                      [[0., 0.], [0., 0.], [0., 0.], [0.25841425, 0.67297311]]]],
                    [[[[0., 0.], [0., 0.], [0., 0.], [0.68974016, 0.68389248]],
                      [[0., 0.], [0., 0.], [0., 0.], [-0.87641592, 0.37919919]]],
                     [[[0., 0.], [0., 0.], [0., 0.], [0., 0.]],
                      [[0., 0.], [0.48797808, 0.46630056], [0., 0.], [0., 0.]]]],
                    [[[[-0.35146675, 0.29937841], [0., 0.], [0., 0.], [0., 0.]],
                      [[0.05724678, 0.34630832], [0., 0.], [0., 0.], [0., 0.]]],
                     [[[0., 0.], [0., 0.], [0., 0.], [0., 0.]],
                      [[0., 0.], [0.53026941, 0.96735494], [0., 0.], [0., 0.]]]]]

        self._do_convolutional_test_with_config(config, expected)
Example No. 10
    def test_linear_with_ww(self):
        # Prepare configuration of linear layer
        config = LRPConfiguration()
        config.set(LAYER.LINEAR, WWConfiguration())

        expected_result = [[1.80952381, 13.68, 13.75238095, 8.758095238]]

        self.do_test_with_config_and_result(expected_result, config)
Example No. 11
    def test_linear_with_flat(self):
        # Prepare configuration of linear layer
        config = LRPConfiguration()
        config.set(LAYER.LINEAR, FlatConfiguration())

        expected_result = [[9.5, 9.5, 9.5, 9.5]]

        self.do_test_with_config_and_result(expected_result, config)
Example No. 12
    def test_flat(self):
        config = LRPConfiguration()
        config.set(LAYER.LSTM, FlatConfiguration())

        expected_result = np.array([[[[0.07106609, 0.07106609, 0.07106609],
                                      [0., 0., 0.]],
                                     [[0.06505566, 0.06505566, 0.06505566],
                                      [0.09293666, 0.09293666, 0.09293666]]]])
        self._do_test_with_config_and_expected_result(config, expected_result)
Example No. 13
    def test_ww(self):
        config = LRPConfiguration()
        config.set(LAYER.LSTM, WWConfiguration())

        expected_result = np.array([[[[0.26074915, 0.0391348, 0.01872167],
                                      [0., 0., 0.]],
                                     [[0.09823269, 0.04826224, 0.02665878],
                                      [0.04887697, 0.05858786, 0.03348749]]]])
        self._do_test_with_config_and_expected_result(config, expected_result)
Example No. 14
    def test_linear_with_epsilon_without_bias(self):
        # Prepare configuration of linear layer
        config = LRPConfiguration()
        config.set(LAYER.LINEAR,
                   EpsilonConfiguration(bias_strategy=BIAS_STRATEGY.NONE))

        expected_result = [[0., -12, 21, 32]]

        self.do_test_with_config_and_result(expected_result, config)
Example No. 15
    def test_linear_lrp_epsilon_active_bias(self):
        # Prepare configuration of linear layer
        config = LRPConfiguration()
        config.set(LAYER.LINEAR,
                   EpsilonConfiguration(bias_strategy=BIAS_STRATEGY.ACTIVE))

        expected_result = [[1.340986395, -19.125, 28.75255102, 27.03146259]]

        self.do_test_with_config_and_result(expected_result, config)
Example No. 16
    def test_linear_with_beta(self):
        # Prepare configuration of linear layer
        config = LRPConfiguration()
        config.set(LAYER.LINEAR, AlphaBetaConfiguration(alpha=2, beta=-1))

        expected_result = [[
            -80.14545455, 9.566433566, -28.36791444, 125.5933087
        ]]

        self.do_test_with_config_and_result(expected_result, config)
Example No. 17
    def test_lstm_epsilon_active_bias(self):
        config = LRPConfiguration()
        config.set(LAYER.LSTM,
                   EpsilonConfiguration(bias_strategy=BIAS_STRATEGY.ACTIVE))

        expected_result = np.array([[[[0.27125698, 0.03620247, 0.40320148],
                                      [0., 0., 0.]],
                                     [[0.03854221, -0.29613484, 0.82228568],
                                      [0.13618791, 0.2129638, 0.01552182]]]])
        self._do_test_with_config_and_expected_result(config, expected_result)
Example No. 18
    def test_lstm_epsilon_all_bias(self):
        config = LRPConfiguration()
        config.set(LAYER.LSTM,
                   EpsilonConfiguration(bias_strategy=BIAS_STRATEGY.ALL))

        expected_result = np.array([[[[0.23963733, 0.00458281, 0.37158183],
                                      [0., 0., 0.]],
                                     [[0.10851287, -0.22616417, 0.89225635],
                                      [0.13618791, 0.2129638, 0.01552182]]]])
        self._do_test_with_config_and_expected_result(config, expected_result)
Example No. 19
    def test_lstm_epsilon_no_bias(self):
        config = LRPConfiguration()
        config.set(LAYER.LSTM,
                   EpsilonConfiguration(bias_strategy=BIAS_STRATEGY.NONE))

        expected_result = np.array([[[[0.19220784, -0.04284667, 0.32415234],
                                      [0., 0., 0.]],
                                     [[0.23325387, -0.13045811, 1.0724654],
                                      [0.16538274, 0.24215863, 0.04471665]]]])
        self._do_test_with_config_and_expected_result(config, expected_result)
Example No. 20
    def test_linear_with_epsilon_and_bias(self):
        # Prepare configuration of linear layer
        config = LRPConfiguration()
        config.set(LAYER.LINEAR,
                   EpsilonConfiguration(bias_strategy=BIAS_STRATEGY.ALL))

        expected_result = [[
            1.333333333, -14.06802721, 21.0521542, 29.68253968
        ]]

        self.do_test_with_config_and_result(expected_result, config)
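Note: the BIAS_STRATEGY variants in these linear-layer tests differ only in where the bias term's share of relevance ends up. One plausible reading that matches the test names (the library's exact accounting may differ): NONE redistributes nothing for the bias, ALL splits its share evenly across all inputs, ACTIVE splits it across inputs that contributed, and IGNORE appears to drop the bias from the denominator entirely, which would explain the huge scores in the epsilon/IGNORE test below. Hedged sketch for the epsilon rule; every name and branch here is an assumption, not the library's implementation:

import numpy as np

def epsilon_dense_bias(a, W, b, R_out, strategy="ALL", epsilon=1e-12):
    # Assumption: IGNORE leaves the bias out of z entirely; the other
    # strategies keep it in the denominator.
    z = a @ W if strategy == "IGNORE" else a @ W + b
    s = R_out / (z + epsilon * np.where(z >= 0, 1.0, -1.0))
    R = (a[:, None] * W) * s               # per-connection relevance, (in, out)
    if strategy == "ALL":
        R = R + (b * s) / len(a)           # bias share split over every input
    elif strategy == "ACTIVE":
        active = (a[:, None] * W) != 0     # assumption: "active" = contributed
        R = R + np.where(active, (b * s) / np.maximum(active.sum(axis=0), 1), 0.0)
    # NONE and IGNORE: no explicit bias redistribution
    return R.sum(axis=1)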
Example No. 21
    def test_linear_lrp_alpha_beta_active_bias(self):
        # Prepare configuration of linear layer
        config = LRPConfiguration()
        config.set(
            LAYER.LINEAR,
            AlphaBetaConfiguration(alpha=2,
                                   beta=-1,
                                   bias_strategy=BIAS_STRATEGY.ACTIVE))

        expected_result = [[-83.6, 21.92307692, -47.5372549, 147.214178]]

        self.do_test_with_config_and_result(expected_result, config)
Example No. 22
    def test_linear_lrp_alpha_beta_ignore_bias(self):
        # Prepare configuration of linear layer
        config = LRPConfiguration()
        config.set(
            LAYER.LINEAR,
            AlphaBetaConfiguration(alpha=2,
                                   beta=-1,
                                   bias_strategy=BIAS_STRATEGY.IGNORE))

        expected_result = [[-83.6, 22.8, -57.79534884, 156.5953488]]

        self.do_test_with_config_and_result(expected_result, config)
Example No. 23
    def test_lstm_alpha_beta_active_bias(self):
        config = LRPConfiguration()
        config.set(
            LAYER.LSTM,
            AlphaBetaConfiguration(alpha=2,
                                   beta=-1,
                                   bias_strategy=BIAS_STRATEGY.ACTIVE))

        expected_result = np.array([[[[1.17243838, -1.42132187, 1.67020536],
                                      [0., 0., 0.]],
                                     [[1.47216972, -3.08726835, 4.70236698],
                                      [0.52473047, 0.76832693, 0.14187811]]]])
        self._do_test_with_config_and_expected_result(config, expected_result)
Example No. 24
    def test_lstm_alpha_beta_all_bias(self):
        config = LRPConfiguration()
        config.set(
            LAYER.LSTM,
            AlphaBetaConfiguration(alpha=2,
                                   beta=-1,
                                   bias_strategy=BIAS_STRATEGY.ALL))

        expected_result = np.array([[[[0.90404336, -1.24239185, 1.40181034],
                                      [0., 0., 0.]],
                                     [[0.58241955, -1.00368592, 3.04841668],
                                      [0.30076316, 0.54435962, -0.08208919]]]])
        self._do_test_with_config_and_expected_result(config, expected_result)
Example No. 25
    def test_lstm_alpha_beta_no_bias(self):
        config = LRPConfiguration()
        config.set(
            LAYER.LSTM,
            AlphaBetaConfiguration(alpha=2,
                                   beta=-1,
                                   bias_strategy=BIAS_STRATEGY.NONE))

        expected_result = np.array([[[[0.72511334, -1.42132187, 1.22288032],
                                      [0., 0., 0.]],
                                     [[1.17308824, -1.30891597, 4.40328549],
                                      [0.52473047, 0.76832693, 0.14187811]]]])
        self._do_test_with_config_and_expected_result(config, expected_result)
Example No. 26
    def test_linear_with_epsilon_ignore_bias(self):
        # Prepare configuration of linear layer
        config = LRPConfiguration()
        config.set(
            LAYER.LINEAR,
            EpsilonConfiguration(bias_strategy=BIAS_STRATEGY.IGNORE,
                                 epsilon=1e-12))

        expected_result = [[
            -5302325581397, 31813953488372, -47720930232555, 21209302325624
        ]]

        self.do_test_with_config_and_result(expected_result, config)
Example No. 27
    def test_linear_lrp_alpha_beta_equal_bias(self):
        # Prepare configuration of linear layer
        config = LRPConfiguration()
        config.set(
            LAYER.LINEAR,
            AlphaBetaConfiguration(alpha=2,
                                   beta=-1,
                                   bias_strategy=BIAS_STRATEGY.ALL))

        expected_result = [[
            -70.90120207, 14.94277618, -27.09395311, 121.052379
        ]]

        self.do_test_with_config_and_result(expected_result, config)
Example No. 28
    def test_alpha_beta_ignore_bias(self):
        config = LRPConfiguration()
        config.set(LAYER.LINEAR,
                   AlphaBetaConfiguration(bias_strategy=BIAS_STRATEGY.IGNORE))

        expected_result = [
            [[0, 0, 0, 0, 0], [0, 0, 0, 0, 0]],
            [[0.05056302043, 0.001782476642, 0.4564389581, 0, 0.2856364171],
             [0.0006553275046, 0.2230518547, 0.1905802792, 0.09459906389, 0.08207159722]],
            [[0, 0, 0, 0, 0],
             [0.1729084504, 0, 0.0773933278, 0.2900907282, 0.006641839018]]
        ]
        self._do_linear_test(config, np.array(expected_result))
Example No. 29
    def test_distribute(self):
        with tf.Graph().as_default():
            input = tf.constant([[0.3315922947, 1.053559579, 0.7477053648, 1.22290369,
                                  0.3730588596, -1.034354431, 0.9187013371, 1.478589349,
                                  -0.7325915066, -0.3569675024, -1.136600512, 0.5516666285,
                                  0.4834049101, -1.613833301, 0.1520745652, 0.117390006]],
                                dtype=tf.float32)
            i = tf.reshape(input, (1, 4, 4, 1))

            # Max pooling with 2x2 windows and stride 2; output shape (1, 2, 2, 1)
            activation = tf.nn.max_pool(i, [1, 2, 2, 1], [1, 2, 2, 1], "SAME")

            # Reshape the pooled output to shape (1, 4)
            output = tf.reshape(activation, (1, 4))

            config = LRPConfiguration()
            config.set(LAYER.MAX_POOLING, EpsilonConfiguration())

            explanation = lrp.lrp(input, output, config)

            with tf.Session() as s:
                expl = s.run(explanation)

                # Check if the explanation has the right shape
                self.assertEqual((1, 16), expl.shape,
                                 msg="Should be a wellformed explanation")

                # Expected explanation
                expected = np.array([[0, 0, 0.2531077301, 0.4139683781,
                                      0, 0, 0.3109920311, 0.50052121,
                                      0, 0, 0, 0,
                                      0, 0, 0, 0]])

                # Check if the relevance scores are correct (the correct values
                # are found by calculating the example by hand)
                self.assertTrue(
                    np.allclose(expected,
                                expl,
                                rtol=1e-03,
                                atol=1e-03),
                    msg="Should be a good explanation")

                self.assertTrue(True)
Example No. 30
    def test_distribute(self):
        with tf.Graph().as_default():
            input = tf.constant([[1., 2., 2., 3., 3., 3., 3., 2., 1]],
                                dtype=tf.float32)

            i = tf.reshape(input, (1, 3, 3, 1))

            # Max pooling with 2x2 windows and stride 2; output shape (1, 2, 2, 1)
            activation = tf.nn.max_pool(i, [1, 2, 2, 1], [1, 2, 2, 1], "SAME")

            # Reshape the pooled output to shape (2, 2)
            output = tf.reshape(activation, (2, 2))

            config = LRPConfiguration()
            config.set(LAYER.MAX_POOLING,
                       BaseConfiguration(RULE.WINNER_TAKES_ALL))

            explanation = lrp.lrp(input, output, config)

            with tf.Session() as s:
                expl = s.run(explanation)

                # Check if the explanation has the right shape
                self.assertEqual((1, 9),
                                 expl.shape,
                                 msg="Should be a wellformed explanation")

                # Expected explanation
                expected = np.array([[0., 0., 0., 3., 0., 0., 3., 0., 0]])

                # Check if the relevance scores are correct (the correct values
                # are found by calculating the example by hand)
                self.assertTrue(np.allclose(expected,
                                            expl,
                                            rtol=1e-03,
                                            atol=1e-03),
                                msg="Should be a good explanation")

                self.assertTrue(True)
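Note: RULE.WINNER_TAKES_ALL routes the entire relevance of each pooled output back to the single input that won the max, leaving every other position at zero, which matches the sparse expected arrays in both tests above. A tiny sketch for one flattened pooling window (hypothetical helper, not the library's code):

import numpy as np

def winner_takes_all(window, R_out):
    # All of R_out goes to the argmax of the window; ties go to the
    # first maximum, everything else stays zero.
    window = np.asarray(window, dtype=np.float64)
    R_in = np.zeros_like(window)
    R_in[np.argmax(window)] = R_out
    return R_in

# For the flattened 2x2 window [1., 2., 3., 3.] with output relevance 3.,
# this yields [0., 0., 3., 0.].
print(winner_takes_all([1., 2., 3., 3.], 3.))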