Example #1
0
 def test_influences_from(self):
     """influences_from(node) lists exactly the edges leaving that node."""
     graph = InfluenceGraph()
     # Three nodes, two chained influences: 1 -> 2 -> 3.
     for node_id, strength in ((1, 12), (2, 13), (3, 0)):
         graph.add_node(node_id, activation=strength)
     graph.add_influence(1, 2, thing=1)
     graph.add_influence(2, 3, thing=2)
     # Only the 1 -> 2 edge (with its attributes) leaves node 1.
     self.assertEqual(graph.influences_from(1), [(1, 2, {'thing': 1})])
Example #2
0
 def test_typed_nodes(self):
     """get_typed_nodes() partitions a chain into start, mid and terminal nodes."""
     graph = InfluenceGraph()
     # Three nodes, two chained influences: 1 -> 2 -> 3.
     for node_id, strength in ((1, 12), (2, 13), (3, 0)):
         graph.add_node(node_id, activation=strength)
     graph.add_influence(1, 2, thing=1)
     graph.add_influence(2, 3, thing=2)
     start, mid, terminal = graph.get_typed_nodes()
     # Node 1 only emits, node 2 both emits and receives, node 3 only receives.
     self.assertEqual(start, [1])
     self.assertEqual(mid, [2])
     self.assertEqual(terminal, [3])
Example #3
0
 def test_add_node(self):
     """Nodes added via add_node appear in nodes() with their keyword attributes."""
     graph = InfluenceGraph()
     expected = {
         1: {'activation': 12},
         2: {'activation': 13},
         3: {'activation': 0},
     }
     # Insert the nodes in the same order the mapping declares them.
     for node_id, attrs in expected.items():
         graph.add_node(node_id, **attrs)
     self.assertEqual(graph.nodes(), expected)
Example #4
0
    def apply(self, model, x):
        """Explain a prediction via gradients of the pre-head output w.r.t. x.

        Builds an influence graph with one node per input feature, each
        linked to a single prediction node and annotated with its gradient.

        Returns:
            (InfluenceGraph, predicted class index as int, confidence).

        NOTE(review): confidence is hard-coded to 0.7 here, unlike the
        sibling explainer that derives it from a softmax — confirm intent.
        """
        graph = InfluenceGraph()
        inputs = tf.convert_to_tensor([x], dtype=tf.float32)

        # Strip the final (head) layer and record d output / d inputs.
        trunk = Sequential(model.layers[:-1])
        with tf.GradientTape() as tape:
            tape.watch(inputs)
            output = tf.squeeze(trunk(inputs))
        grads = tf.squeeze(tape.gradient(output, inputs))

        prediction = tf.math.argmax(output)
        pred_label = f"Predicted {prediction.numpy()}"
        graph.add_node(pred_label)

        # One node per input feature, weighted by its gradient.
        for idx, value in enumerate(x):
            node_name = f"Feature {idx}"
            graph.add_node(node_name, grad=grads[idx].numpy(), value=value)
            graph.add_influence(node_name, pred_label)

        return graph, int(prediction.numpy()), 0.7
Example #5
0
    def apply(self, model, x):
        """Gradient-based explainer: Input -> Feature i -> Prediction chains.

        Each feature node carries its gradient, raw value and index; the
        predicted label and a softmax-derived confidence are returned with
        the graph.

        Returns:
            (InfluenceGraph, predicted label as str, confidence).

        NOTE(review): `max(keras.backend.softmax(output.numpy()))` applies the
        builtin max over a tensor — presumably meant to pick the top softmax
        probability; confirm it yields a plain scalar for callers.
        """
        graph = InfluenceGraph()
        inputs = tf.convert_to_tensor([x], dtype=tf.float32)

        # Drop the head layer and record d output / d inputs.
        trunk = Sequential(model.layers[:-1])
        with tf.GradientTape() as tape:
            tape.watch(inputs)
            output = tf.squeeze(trunk(inputs))
        grads = tf.squeeze(tape.gradient(output, inputs))

        pred_label = tf.math.argmax(output).numpy()
        confidence = max(keras.backend.softmax(output.numpy()))

        graph.add_node('Prediction')
        graph.add_node('Input', grad=0)

        # One Input -> Feature i -> Prediction chain per feature.
        for idx, value in enumerate(x):
            node_name = f'Feature {idx}'
            graph.add_node(node_name,
                           grad=grads[idx].numpy(),
                           value=value,
                           idx=idx)
            graph.add_influence(node_name, 'Prediction')
            graph.add_influence('Input', node_name)

        return graph, str(pred_label), confidence
Example #6
0
 def apply(self, model, x):
     """Toy explainer returning a fixed three-node chain ending at 'carrot'.

     The model and input are ignored; the canned graph, the label 'carrot'
     and a confidence of 0.9 are always returned.
     """
     graph = InfluenceGraph()
     # Nodes and their attributes, in insertion order.
     for name, attrs in (('has leaves', {'value': 1}),
                         ('orange plant', {'value': 2}),
                         ('carrot', {})):
         graph.add_node(name, **attrs)
     # Chain: has leaves -> orange plant -> carrot.
     for source, target in (('has leaves', 'orange plant'),
                            ('orange plant', 'carrot')):
         graph.add_influence(source, target)
     return graph, 'carrot', 0.9
Example #7
0
 def test_remove_node(self):
     """remove_node drops the given node and leaves the others untouched."""
     graph = InfluenceGraph()
     graph.add_node(1, activation=12)
     graph.add_node(2, activation=13)
     # Removing node 2 must leave only node 1 and its attributes.
     graph.remove_node(2)
     self.assertEqual(graph.nodes(), {1: {'activation': 12}})
Example #8
0
    def apply(self, model, x):
        """Explain a conv-net prediction via its last convolutional layer.

        Scans model.layers from the output backwards to split the network
        into the last Conv2D layer and the classifier layers above it,
        scores the conv filters with self.get_relevant, and builds an
        influence graph of Input -> Filter i -> Prediction chains.

        Args:
            model: a Keras model containing at least one Conv2D layer.
            x: the input batch to explain (forwarded to model.predict).

        Returns:
            (InfluenceGraph, predicted class label, confidence).

        Raises:
            Exception: if the model has no Conv2D layer.
        """
        influences = InfluenceGraph()

        # Walk the layers from the output backwards: everything above the
        # last Conv2D is the "classifier" head.
        classifier_layers = []
        last_conv_layer = None
        for layer in reversed(model.layers):
            if isinstance(layer, keras.layers.Conv2D):
                last_conv_layer = layer
                break
            classifier_layers.append(layer)
        classifier_layers.reverse()  # restore bottom-up order for the head

        if last_conv_layer is None:
            raise Exception('Could not detect any convolutional layers')

        relevant_features = self.get_relevant(model, x, last_conv_layer,
                                              classifier_layers)

        preds = model.predict(x)
        influences.add_node('Prediction')
        if self.decoder_function is not None:
            # BUG FIX: self.decoder_function was checked for None but never
            # called — the module-level decode_predictions was used instead,
            # silently ignoring a user-supplied decoder.
            _, predicted_class, confidence = self.decoder_function(
                preds, top=1)[0][0]
        else:
            predicted_class, confidence = default_decoder_function(
                preds, model, x)

        influences.add_node('Input', grad=0)

        # One node per relevant conv filter, linked Input -> Filter -> Prediction.
        for layer, idx, grad in relevant_features:
            node_name = f'Filter {idx}'
            influences.add_node(node_name,
                                layer=layer,
                                filter_idx=idx,
                                grad=grad)
            influences.add_influence(node_name, 'Prediction')
            influences.add_influence('Input', node_name)

        return influences, predicted_class, confidence