def test_ww(self):
    # config
    config = LRPConfiguration()
    config.set(LAYER.CONVOLUTIONAL, WWConfiguration())

    # Shape (3, 2, 2, 4, 2)
    expected = [[[[[0., 0.], [0., 0.], [0.2205872, 0.21228028], [0., 0.]],
                  [[0., 0.], [0., 0.], [0.08030722, 0.37700302], [0., 0.]]],
                 [[[0., 0.], [0., 0.], [0., 0.], [0., 0.]],
                  [[0., 0.], [0., 0.], [0., 0.], [0.1153995, 0.11105376]]]],
                [[[[0., 0.], [0., 0.], [0., 0.], [0.217177, 0.20899851]],
                  [[0., 0.], [0., 0.], [0., 0.], [0.0790657, 0.3711747]]],
                 [[[0., 0.], [0., 0.], [0., 0.], [0., 0.]],
                  [[0., 0.], [0.11823574, 0.1137832], [0., 0.], [0., 0.]]]],
                [[[[0.08709392, 0.08381412], [0., 0.], [0., 0.], [0., 0.]],
                  [[0.03170751, 0.14885121], [0., 0.], [0., 0.], [0., 0.]]],
                 [[[0., 0.], [0., 0.], [0., 0.], [0., 0.]],
                  [[0., 0.], [0.18555663, 0.1785689], [0., 0.], [0., 0.]]]]]

    self._do_convolutional_test_with_config(config, expected)
def test_flat(self):
    # config
    config = LRPConfiguration()
    config.set(LAYER.CONVOLUTIONAL, FlatConfiguration())

    # Shape (3, 2, 2, 4, 2)
    expected = [[[[[0., 0.], [0., 0.], [0.22254443, 0.22254443], [0., 0.]],
                  [[0., 0.], [0., 0.], [0.22254443, 0.22254443], [0., 0.]]],
                 [[[0., 0.], [0., 0.], [0., 0.], [0., 0.]],
                  [[0., 0.], [0., 0.], [0., 0.], [0.11642342, 0.11642342]]]],
                [[[[0., 0.], [0., 0.], [0., 0.], [0.21910398, 0.21910398]],
                  [[0., 0.], [0., 0.], [0., 0.], [0.21910398, 0.21910398]]],
                 [[[0., 0.], [0., 0.], [0., 0.], [0., 0.]],
                  [[0., 0.], [0.11928483, 0.11928483], [0., 0.], [0., 0.]]]],
                [[[[0.08786669, 0.08786669], [0., 0.], [0., 0.], [0., 0.]],
                  [[0.08786669, 0.08786669], [0., 0.], [0., 0.], [0., 0.]]],
                 [[[0., 0.], [0., 0.], [0., 0.], [0., 0.]],
                  [[0., 0.], [0.18720304, 0.18720304], [0., 0.], [0., 0.]]]]]

    self._do_convolutional_test_with_config(config, expected)
def test_alpha_beta_all_bias(self):
    # config
    config = LRPConfiguration()
    config.set(LAYER.CONVOLUTIONAL,
               AlphaBetaConfiguration(alpha=2, beta=-1, bias_strategy=BIAS_STRATEGY.ALL))

    # Shape (3, 2, 2, 4, 2)
    expected = [[[[[0., 0.], [0., 0.], [0.22762937, 0.61067898], [0., 0.]],
                  [[0., 0.], [0., 0.], [-0.86054762, 0.91241699], [0., 0.]]],
                 [[[0., 0.], [0., 0.], [0., 0.], [0., 0.]],
                  [[0., 0.], [0., 0.], [0., 0.], [0.23134317, 0.64590203]]]],
                [[[[0., 0.], [0., 0.], [0., 0.], [0.67829868, 0.67245101]],
                  [[0., 0.], [0., 0.], [0., 0.], [-0.84209149, 0.36775772]]],
                 [[[0., 0.], [0., 0.], [0., 0.], [0., 0.]],
                  [[0., 0.], [0.44350914, 0.42183162], [0., 0.], [0., 0.]]]],
                [[[[-0.33007192, 0.2922468], [0., 0.], [0., 0.], [0., 0.]],
                  [[0.05011517, 0.33917671], [0., 0.], [0., 0.], [0., 0.]]],
                 [[[0., 0.], [0., 0.], [0., 0.], [0., 0.]],
                  [[0., 0.], [0.49011103, 0.92719656], [0., 0.], [0., 0.]]]]]

    self._do_convolutional_test_with_config(config, expected)
def test_alpha_beta_active_bias(self):
    # config
    config = LRPConfiguration()
    config.set(LAYER.CONVOLUTIONAL,
               AlphaBetaConfiguration(alpha=2, beta=-1, bias_strategy=BIAS_STRATEGY.ACTIVE))

    # Shape (3, 2, 2, 4, 2)
    expected = [[[[[0., 0.], [0., 0.], [0.23750607, 0.62055568], [0., 0.]],
                  [[0., 0.], [0., 0.], [-0.89017772, 0.92229369], [0., 0.]]],
                 [[[0., 0.], [0., 0.], [0., 0.], [0., 0.]],
                  [[0., 0.], [0., 0.], [0., 0.], [0.25841425, 0.67297311]]]],
                [[[[0., 0.], [0., 0.], [0., 0.], [0.68974016, 0.68389248]],
                  [[0., 0.], [0., 0.], [0., 0.], [-0.87641592, 0.37919919]]],
                 [[[0., 0.], [0., 0.], [0., 0.], [0., 0.]],
                  [[0., 0.], [0.48797808, 0.46630056], [0., 0.], [0., 0.]]]],
                [[[[-0.35146675, 0.29937841], [0., 0.], [0., 0.], [0., 0.]],
                  [[0.05724678, 0.34630832], [0., 0.], [0., 0.], [0., 0.]]],
                 [[[0., 0.], [0., 0.], [0., 0.], [0., 0.]],
                  [[0., 0.], [0.53026941, 0.96735494], [0., 0.], [0., 0.]]]]]

    self._do_convolutional_test_with_config(config, expected)
def test_epsilon_no_bias(self):
    # config
    config = LRPConfiguration()
    config.set(LAYER.CONVOLUTIONAL, EpsilonConfiguration(bias_strategy=BIAS_STRATEGY.NONE))

    # Shape (3, 2, 2, 4, 2)
    expected = [[[[[0., 0.], [0., 0.], [0.10428416, 0.3060324], [0., 0.]],
                  [[0., 0.], [0., 0.], [-0.04751698, 0.46495467], [0., 0.]]],
                 [[[0., 0.], [0., 0.], [0., 0.], [0., 0.]],
                  [[0., 0.], [0., 0.], [0., 0.], [0.10213605, 0.30941548]]]],
                [[[[0., 0.], [0., 0.], [0., 0.], [0.33713338, 0.33407201]],
                  [[0., 0.], [0., 0.], [0., 0.], [-0.04122655, 0.17455899]]],
                 [[[0., 0.], [0., 0.], [0., 0.], [0., 0.]],
                  [[0., 0.], [0.1995201, 0.18868134], [0., 0.], [0., 0.]]]],
                [[[[-0.00199118, 0.13619322], [0., 0.], [0., 0.], [0., 0.]],
                  [[0.01444152, 0.15979111], [0., 0.], [0., 0.], [0., 0.]]],
                 [[[0., 0.], [0., 0.], [0., 0.], [0., 0.]],
                  [[0., 0.], [0.22497632, 0.44351909], [0., 0.], [0., 0.]]]]]

    self._do_convolutional_test_with_config(config, expected)
def test_epsilon_all_bias(self):
    # config
    config = LRPConfiguration()
    config.set(LAYER.CONVOLUTIONAL, EpsilonConfiguration())

    # Shape (3, 2, 2, 4, 2)
    expected = [[[[[0., 0.], [0., 0.], [0.11989002, 0.32163827], [0., 0.]],
                  [[0., 0.], [0., 0.], [-0.03191111, 0.48056054], [0., 0.]]],
                 [[[0., 0.], [0., 0.], [0., 0.], [0., 0.]],
                  [[0., 0.], [0., 0.], [0., 0.], [0.11567159, 0.32295102]]]],
                [[[[0., 0.], [0., 0.], [0., 0.], [0.3551029, 0.35204153]],
                  [[0., 0.], [0., 0.], [0., 0.], [-0.02325702, 0.19252851]]],
                 [[[0., 0.], [0., 0.], [0., 0.], [0., 0.]],
                  [[0., 0.], [0.22175457, 0.21091581], [0., 0.], [0., 0.]]]],
                [[[[0.00876684, 0.14695124], [0., 0.], [0., 0.], [0., 0.]],
                  [[0.02519954, 0.17054913], [0., 0.], [0., 0.], [0., 0.]]],
                 [[[0., 0.], [0., 0.], [0., 0.], [0., 0.]],
                  [[0., 0.], [0.24505551, 0.46359828], [0., 0.], [0., 0.]]]]]

    self._do_convolutional_test_with_config(config, expected)
def test_softmax(self):
    with tf.Graph().as_default():
        inp = tf.constant([[[1, 2, 3], [1, 2, 3]],
                           [[1, 2, 3], [1, 2, 3]]],
                          dtype=tf.float32)
        out = tf.nn.softmax(inp)

        config = LRPConfiguration()
        config.set(LAYER.SOFTMAX, AlphaBetaConfiguration(alpha=2, beta=-1))

        expl = lrp.lrp(inp, out, config)

        with tf.Session() as s:
            explanation = s.run(expl)

            expected = np.array([[[[-0.05989202454, -0.162803402, 0.8879363823],
                                   [0, 0, 0]],
                                  [[0, 0, 0],
                                   [-0.05989202454, -0.162803402, 0.8879363823]]],
                                 [[[-0.05989202454, -0.162803402, 0.8879363823],
                                   [0, 0, 0]],
                                  [[0, 0, 0],
                                   [-0.05989202454, -0.162803402, 0.8879363823]]]])

            # Check if the relevance scores are correct (the correct values are found by
            # calculating the example by hand)
            self.assertTrue(
                np.allclose(expected, explanation, rtol=1e-03, atol=1e-03),
                msg="Should be a good explanation")
def test_epsilon_active_bias(self):
    # config
    config = LRPConfiguration()
    config.set(LAYER.CONVOLUTIONAL, EpsilonConfiguration(bias_strategy=BIAS_STRATEGY.ACTIVE))

    # Shape (3, 2, 2, 4, 2)
    expected = [[[[[0., 0.], [0., 0.], [0.11989002, 0.32163827], [0., 0.]],
                  [[0., 0.], [0., 0.], [-0.03191111, 0.48056054], [0., 0.]]],
                 [[[0., 0.], [0., 0.], [0., 0.], [0., 0.]],
                  [[0., 0.], [0., 0.], [0., 0.], [0.12920713, 0.33648656]]]],
                [[[[0., 0.], [0., 0.], [0., 0.], [0.3551029, 0.35204153]],
                  [[0., 0.], [0., 0.], [0., 0.], [-0.02325702, 0.19252851]]],
                 [[[0., 0.], [0., 0.], [0., 0.], [0., 0.]],
                  [[0., 0.], [0.24398904, 0.23315028], [0., 0.], [0., 0.]]]],
                [[[[0.00876684, 0.14695124], [0., 0.], [0., 0.], [0., 0.]],
                  [[0.02519954, 0.17054913], [0., 0.], [0., 0.], [0., 0.]]],
                 [[[0., 0.], [0., 0.], [0., 0.], [0., 0.]],
                  [[0., 0.], [0.26513471, 0.48367747], [0., 0.], [0., 0.]]]]]

    self._do_convolutional_test_with_config(config, expected)
def test_alpha_beta_no_bias(self):
    # config
    config = LRPConfiguration()
    config.set(LAYER.CONVOLUTIONAL,
               AlphaBetaConfiguration(alpha=2, beta=-1, bias_strategy=BIAS_STRATEGY.NONE))

    # Shape (3, 2, 2, 4, 2)
    expected = [[[[[0., 0.], [0., 0.], [0.19799927, 0.58104888], [0., 0.]],
                  [[0., 0.], [0., 0.], [-0.89017772, 0.88278689], [0., 0.]]],
                 [[[0., 0.], [0., 0.], [0., 0.], [0., 0.]],
                  [[0., 0.], [0., 0.], [0., 0.], [0.20427209, 0.61883095]]]],
                [[[[0., 0.], [0., 0.], [0., 0.], [0.64397426, 0.63812658]],
                  [[0., 0.], [0., 0.], [0., 0.], [-0.87641592, 0.33343329]]],
                 [[[0., 0.], [0., 0.], [0., 0.], [0., 0.]],
                  [[0., 0.], [0.3990402, 0.37736268], [0., 0.], [0., 0.]]]],
                [[[[-0.35146675, 0.27085197], [0., 0.], [0., 0.], [0., 0.]],
                  [[0.02872034, 0.31778188], [0., 0.], [0., 0.], [0., 0.]]],
                 [[[0., 0.], [0., 0.], [0., 0.], [0., 0.]],
                  [[0., 0.], [0.44995264, 0.88703817], [0., 0.], [0., 0.]]]]]

    self._do_convolutional_test_with_config(config, expected)
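# The convolutional tests above all delegate to `_do_convolutional_test_with_config`, a shared
# helper defined elsewhere in this test class and not shown here. As an illustration only, a
# minimal sketch of the pattern such a helper could follow is given below; the placeholder
# shape, filter, strides, and `self._input` are made-up assumptions, not the fixture that
# produces the expected relevances above.
#
# def _do_convolutional_test_with_config(self, config, expected):
#     with tf.Graph().as_default():
#         # (batch, height, width, channels) -- shape chosen purely for illustration
#         inp = tf.placeholder(tf.float32, (3, 2, 4, 2))
#         filters = tf.constant(np.ones((2, 2, 2, 2)), dtype=tf.float32)
#         out = tf.nn.conv2d(inp, filters, strides=[1, 1, 1, 1], padding='SAME')
#
#         # Relevances of the input w.r.t. the output, computed with the rules in `config`
#         expl = lrp.lrp(inp, out, config)
#
#         with tf.Session() as s:
#             relevances = s.run(expl, feed_dict={inp: self._input})
#             self.assertTrue(np.allclose(expected, relevances, rtol=1e-03, atol=1e-03),
#                             msg="The relevances do not match the expected")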
def test_linear_with_flat(self):
    # Prepare configuration of linear layer
    config = LRPConfiguration()
    config.set(LAYER.LINEAR, FlatConfiguration())

    expected_result = [[9.5, 9.5, 9.5, 9.5]]

    self.do_test_with_config_and_result(expected_result, config)
def test_linear_with_ww(self):
    # Prepare configuration of linear layer
    config = LRPConfiguration()
    config.set(LAYER.LINEAR, WWConfiguration())

    expected_result = [[1.80952381, 13.68, 13.75238095, 8.758095238]]

    self.do_test_with_config_and_result(expected_result, config)
def test_linear_with_epsilon_ignore_bias(self):
    # Prepare configuration of linear layer
    config = LRPConfiguration()
    config.set(LAYER.SPARSE_LINEAR, EpsilonConfiguration(bias_strategy=BIAS_STRATEGY.IGNORE, epsilon=1e-12))
    config.set(LAYER.LINEAR, EpsilonConfiguration(bias_strategy=BIAS_STRATEGY.IGNORE, epsilon=1e-12))

    expected_result = [[-5302325581397, 31813953488372, -47720930232555, 21209302325624]]

    self._do_test(expected_result, config)
def test_linear_with_epsilon_without_bias(self):
    # Prepare configuration of linear layer
    config = LRPConfiguration()
    config.set(LAYER.SPARSE_LINEAR, EpsilonConfiguration(bias_strategy=BIAS_STRATEGY.NONE))
    config.set(LAYER.LINEAR, EpsilonConfiguration(bias_strategy=BIAS_STRATEGY.NONE))

    expected_result = [[0., -12, 21, 32]]

    self._do_test(expected_result, config)
def test_linear_with_beta_ignore_bias(self):
    # Prepare configuration of linear layer
    config = LRPConfiguration()
    config.set(LAYER.SPARSE_LINEAR, AlphaBetaConfiguration(alpha=2, beta=-1, bias_strategy=BIAS_STRATEGY.IGNORE))
    config.set(LAYER.LINEAR, AlphaBetaConfiguration(alpha=2, beta=-1, bias_strategy=BIAS_STRATEGY.IGNORE))

    expected_result = [[-83.6, 22.8, -57.79534884, 156.5953488]]

    self._do_test(expected_result, config)
def test_linear_lrp_alpha_beta_active_bias(self):
    # Prepare configuration of linear layer
    config = LRPConfiguration()
    config.set(LAYER.SPARSE_LINEAR, AlphaBetaConfiguration(alpha=2, beta=-1, bias_strategy=BIAS_STRATEGY.ACTIVE))
    config.set(LAYER.LINEAR, AlphaBetaConfiguration(alpha=2, beta=-1, bias_strategy=BIAS_STRATEGY.ACTIVE))

    expected_result = [[-83.6, 21.92307692, -47.5372549, 147.214178]]

    self._do_test(expected_result, config)
def test_flat(self):
    config = LRPConfiguration()
    config.set(LAYER.LSTM, FlatConfiguration())

    expected_result = np.array([[[[0.07106609, 0.07106609, 0.07106609],
                                  [0., 0., 0.]],
                                 [[0.06505566, 0.06505566, 0.06505566],
                                  [0.09293666, 0.09293666, 0.09293666]]]])

    self._do_test_with_config_and_expected_result(config, expected_result)
def test_linear_with_beta_no_bias(self):
    # Prepare configuration of linear layer
    config = LRPConfiguration()
    config.set(LAYER.SPARSE_LINEAR, AlphaBetaConfiguration(alpha=2, beta=-1))
    config.set(LAYER.LINEAR, AlphaBetaConfiguration(alpha=2, beta=-1))

    expected_result = [[-80.14545455, 9.566433566, -28.36791444, 125.5933087]]

    self._do_test(expected_result, config)
def test_linear_lrp_epsilon_active_bias(self):
    # Prepare configuration of linear layer
    config = LRPConfiguration()
    config.set(LAYER.SPARSE_LINEAR, EpsilonConfiguration(bias_strategy=BIAS_STRATEGY.ACTIVE))
    config.set(LAYER.LINEAR, EpsilonConfiguration(bias_strategy=BIAS_STRATEGY.ACTIVE))

    expected_result = [[1.340986395, -19.125, 28.75255102, 27.03146259]]

    self._do_test(expected_result, config)
def test_ww(self):
    config = LRPConfiguration()
    config.set(LAYER.LSTM, WWConfiguration())

    expected_result = np.array([[[[0.26074915, 0.0391348, 0.01872167],
                                  [0., 0., 0.]],
                                 [[0.09823269, 0.04826224, 0.02665878],
                                  [0.04887697, 0.05858786, 0.03348749]]]])

    self._do_test_with_config_and_expected_result(config, expected_result)
def test_lstm_epsilon_all_bias(self):
    config = LRPConfiguration()
    config.set(LAYER.LSTM, EpsilonConfiguration(bias_strategy=BIAS_STRATEGY.ALL))

    expected_result = np.array([[[[0.23963733, 0.00458281, 0.37158183],
                                  [0., 0., 0.]],
                                 [[0.10851287, -0.22616417, 0.89225635],
                                  [0.13618791, 0.2129638, 0.01552182]]]])

    self._do_test_with_config_and_expected_result(config, expected_result)
def test_lstm_epsilon_active_bias(self):
    config = LRPConfiguration()
    config.set(LAYER.LSTM, EpsilonConfiguration(bias_strategy=BIAS_STRATEGY.ACTIVE))

    expected_result = np.array([[[[0.27125698, 0.03620247, 0.40320148],
                                  [0., 0., 0.]],
                                 [[0.03854221, -0.29613484, 0.82228568],
                                  [0.13618791, 0.2129638, 0.01552182]]]])

    self._do_test_with_config_and_expected_result(config, expected_result)
def test_lstm_epsilon_no_bias(self):
    config = LRPConfiguration()
    config.set(LAYER.LSTM, EpsilonConfiguration(bias_strategy=BIAS_STRATEGY.NONE))

    expected_result = np.array([[[[0.19220784, -0.04284667, 0.32415234],
                                  [0., 0., 0.]],
                                 [[0.23325387, -0.13045811, 1.0724654],
                                  [0.16538274, 0.24215863, 0.04471665]]]])

    self._do_test_with_config_and_expected_result(config, expected_result)
def test_linear_with_epsilon_and_bias(self):
    # Prepare configuration of linear layer
    config = LRPConfiguration()
    config.set(LAYER.LINEAR, EpsilonConfiguration(bias_strategy=BIAS_STRATEGY.ALL))

    expected_result = [[1.333333333, -14.06802721, 21.0521542, 29.68253968]]

    self.do_test_with_config_and_result(expected_result, config)
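# The linear tests above delegate to `do_test_with_config_and_result` / `_do_test`, shared
# helpers defined elsewhere in this test class and not shown here. For illustration only, they
# presumably follow the same pattern as the batch-norm test below: build a small fixed-weight
# dense layer, run lrp.lrp on it, and compare against the expected relevances with np.allclose.
# The weights, bias, and input in this sketch are assumptions, not the fixture that produces
# the expected values above.
#
# def do_test_with_config_and_result(self, expected_result, config):
#     with tf.Graph().as_default():
#         inp = tf.placeholder(tf.float32, (1, 4))
#         W = tf.constant(np.arange(16).reshape((4, 4)), dtype=tf.float32)
#         b = tf.constant([1., 2., 3., 4.], dtype=tf.float32)
#         out = inp @ W + b
#
#         expl = lrp.lrp(inp, out, config)
#
#         with tf.Session() as s:
#             relevances = s.run(expl, feed_dict={inp: [[1., 2., 3., 4.]]})
#             self.assertTrue(np.allclose(expected_result, relevances, rtol=1e-03, atol=1e-03),
#                             msg="The relevances do not match the expected")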
def test_flat(self):
    g = tf.Graph()
    with g.as_default():
        inp = tf.placeholder(tf.float32, (3, 5))

        W = tf.constant([[1.493394546, 0.5987773779],
                         [0.7321155851, 1.23063763],
                         [2.488971816, 0.9885881838],
                         [0.9965223115, 0.8397688134],
                         [2.089138346, 0.8398492639]],
                        dtype=tf.float32)
        b = [[2.665864718, 0.8793648172]]

        to_normalize = inp @ W + b
        x = tf.contrib.layers.batch_norm(to_normalize, is_training=False, scale=True)

        vars = tf.global_variables()
        beta = next(i for i in vars if 'beta' in i.name)
        gamma = next(i for i in vars if 'gamma' in i.name)
        mean = next(i for i in vars if 'mean' in i.name)
        variance = next(i for i in vars if 'variance' in i.name)

        b = tf.constant([0.8481817169, -1.118752611], dtype=tf.float32)
        assign_beta = tf.assign(beta, b)
        g = tf.constant([0.1005506696, 0.308355701], dtype=tf.float32)
        assign_gamma = tf.assign(gamma, g)
        m = tf.constant([-0.8224766215, 0.9257031289], dtype=tf.float32)
        assign_mean = tf.assign(mean, m)
        v = tf.constant([0.7134228722, 1.065337135], dtype=tf.float32)
        assign_variance = tf.assign(variance, v)

        # Get the explanation
        config = LRPConfiguration()
        config.set(LAYER.LINEAR, EpsilonConfiguration(bias_strategy=BIAS_STRATEGY.NONE))
        # Batch normalization at inference time is an elementwise affine transform, so it is
        # configured via the ELEMENTWISE_LINEAR layer type; the IDENTITY rule passes relevance
        # straight through it.
        config.set(LAYER.ELEMENTWISE_LINEAR, BaseConfiguration(RULE.IDENTITY))

        explanation = lrp.lrp(inp, x, config)

        with tf.Session() as s:
            s.run(tf.global_variables_initializer())
            s.run([assign_beta, assign_gamma, assign_mean, assign_variance])

            expected_relevances = np.array(
                [[0.3331443396, 0.5080297851, 0.2213932963, 0.1821642756, 0.832728544],
                 [0.3800239444, 0.1179592254, 0.5207348458, 0.5280974564, 0.3505242006],
                 [0.5231591662, 0.0895191051, 0.6016423127, 0.423324388, -0.1957729137]])

            out, relevances = s.run(
                [x, explanation],
                feed_dict={inp: [[1.187187323, 3.692928471, 0.4733755909, 0.9728313491, 2.121278175],
                                 [1.302276662, 0.8245540266, 1.070689437, 2.712026357, 0.8586529453],
                                 [1.57555465, 0.5499336234, 1.087157802, 1.910559011, -0.4214631393]]})

            self.assertTrue(np.allclose(expected_relevances, relevances, rtol=1e-03, atol=1e-03),
                            msg="The relevances do not match the expected")
def test_lstm_alpha_beta_active_bias(self):
    config = LRPConfiguration()
    config.set(LAYER.LSTM,
               AlphaBetaConfiguration(alpha=2, beta=-1, bias_strategy=BIAS_STRATEGY.ACTIVE))

    expected_result = np.array([[[[1.17243838, -1.42132187, 1.67020536],
                                  [0., 0., 0.]],
                                 [[1.47216972, -3.08726835, 4.70236698],
                                  [0.52473047, 0.76832693, 0.14187811]]]])

    self._do_test_with_config_and_expected_result(config, expected_result)
def test_lstm_alpha_beta_all_bias(self):
    config = LRPConfiguration()
    config.set(LAYER.LSTM,
               AlphaBetaConfiguration(alpha=2, beta=-1, bias_strategy=BIAS_STRATEGY.ALL))

    expected_result = np.array([[[[0.90404336, -1.24239185, 1.40181034],
                                  [0., 0., 0.]],
                                 [[0.58241955, -1.00368592, 3.04841668],
                                  [0.30076316, 0.54435962, -0.08208919]]]])

    self._do_test_with_config_and_expected_result(config, expected_result)
def test_lstm_alpha_beta_no_bias(self):
    config = LRPConfiguration()
    config.set(LAYER.LSTM,
               AlphaBetaConfiguration(alpha=2, beta=-1, bias_strategy=BIAS_STRATEGY.NONE))

    expected_result = np.array([[[[0.72511334, -1.42132187, 1.22288032],
                                  [0., 0., 0.]],
                                 [[1.17308824, -1.30891597, 4.40328549],
                                  [0.52473047, 0.76832693, 0.14187811]]]])

    self._do_test_with_config_and_expected_result(config, expected_result)
def test_linear_lrp_alpha_beta_equal_bias(self):
    # Prepare configuration of linear layer
    config = LRPConfiguration()
    config.set(LAYER.LINEAR,
               AlphaBetaConfiguration(alpha=2, beta=-1, bias_strategy=BIAS_STRATEGY.ALL))

    expected_result = [[-70.90120207, 14.94277618, -27.09395311, 121.052379]]

    self.do_test_with_config_and_result(expected_result, config)
def test_alpha_beta_ignore_bias(self):
    config = LRPConfiguration()
    config.set(LAYER.LINEAR, AlphaBetaConfiguration(bias_strategy=BIAS_STRATEGY.IGNORE))

    expected_result = [[[0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0]],
                       [[0.05056302043, 0.001782476642, 0.4564389581, 0, 0.2856364171],
                        [0.0006553275046, 0.2230518547, 0.1905802792, 0.09459906389, 0.08207159722]],
                       [[0, 0, 0, 0, 0],
                        [0.1729084504, 0, 0.0773933278, 0.2900907282, 0.006641839018]]]

    self._do_linear_test(config, np.array(expected_result))
def test_conv_zb_active_bias(self):
    expected_result = [[[[0, 0], [0, 0], [-4.232906357e-05, -4.264118594e-05]],
                        [[0, 0], [0, 0], [-0.0002441027169, -0.0001287812181]],
                        [[0.0511805643, 0.03121454034], [0.1487963475, 0.1636981056], [-0.001490325286, -0.0005114089294]]]]

    config = LRPConfiguration()
    config.set(LAYER.LINEAR,
               EpsilonConfiguration(epsilon=1, bias_strategy=BIAS_STRATEGY.IGNORE))
    config.set_first_layer_zb(_c(-0.5), _c(2.), bias_strategy=BIAS_STRATEGY.ACTIVE)

    self._do_convolutional_test(config, expected_result)
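# Note on the z^B ("zb") configuration in test_conv_zb_active_bias: set_first_layer_zb applies
# the z^B rule of Montavon et al. to the first layer, which is intended for inputs confined to
# a box [low, high] (here low = -0.5 and high = 2.0). Under that rule the contribution of input
# x_i to output j is x_i * w_ij - low_i * w_ij^+ - high_i * w_ij^-, where w^+ and w^- are the
# positive and negative parts of the weights. `_c` is presumably a small helper defined
# elsewhere in this test module that wraps its argument as a constant of the appropriate shape;
# it is not shown here.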