def test_should_save_output_grid(output_dir):
    """Saving an activation grid must create exactly one file in output_dir."""
    random_grid = np.random.random((208, 208))

    ExtractActivations().save(random_grid, output_dir, "output.png")

    saved = list(output_dir.glob("output.png"))
    assert len(saved) == 1
# --- Example 2 ---
def save_all_explainer(validation_data,
                       model,
                       name_conv,
                       n_conv=1,
                       dir_save_im='./',
                       save_name='outputs'):
    """Run every tf-explain explainer on ``validation_data`` and save images.

    For each of the ``n_conv`` convolution layers named ``'{name_conv}_{i}'``
    (``i`` from 1 to ``n_conv``), saves the extracted activations and the
    Grad-CAM heatmap for class 0. Then saves SmoothGrad maps for class
    indices 0 and 1, and Occlusion Sensitivity maps (patch size 5) for class
    indices 0 and 1.

    Args:
        validation_data: Data tuple accepted by the explainers' ``explain``.
        model: Keras model to explain.
        name_conv: Base name of the convolution layers (suffix ``_i`` added).
        n_conv: Number of convolution layers to process (default 1).
        dir_save_im: Directory where images are written (default ``'./'``).
        save_name: Filename prefix for every saved image.
    """
    explainerGradCam = GradCAM()
    explainerActiv = ExtractActivations()
    explainerOccl = OcclusionSensitivity()
    explainerSmoothGrad = SmoothGrad()

    for i in range(1, n_conv + 1):
        # Hoist the layer name shared by both per-layer explainers.
        layer_name = '{}_{}'.format(name_conv, i)

        output = explainerActiv.explain(validation_data, model, layer_name)
        explainerActiv.save(output, dir_save_im,
                            '{}-activ-conv{}.jpg'.format(save_name, i))

        output = explainerGradCam.explain(validation_data, model,
                                          layer_name, 0)
        explainerGradCam.save(output, dir_save_im,
                              '{}-gradCam0-conv{}.jpg'.format(save_name, i))

    # SmoothGrad for class indices 0 and 1 (previously duplicated code).
    for class_index in (0, 1):
        output = explainerSmoothGrad.explain(validation_data, model,
                                             class_index)
        explainerSmoothGrad.save(
            output, dir_save_im,
            '{}-smooth{}.jpg'.format(save_name, class_index))

    # Occlusion Sensitivity (patch size 5) for class indices 0 and 1.
    for class_index in (0, 1):
        output = explainerOccl.explain(validation_data, model, class_index, 5)
        explainerOccl.save(
            output, dir_save_im,
            '{}-occlSens{}.jpg'.format(save_name, class_index))
# --- Example 3 ---
import numpy as np
import tensorflow as tf

from tf_explain.core.activations import ExtractActivations

# Layer(s) whose activations to visualize — can be a Conv2D output or an
# activation layer.
target_layers = ['activation_6']
IMAGE_PATH = './cat.jpg'

if __name__ == '__main__':
    # Pre-trained ImageNet classifier (weights download on first use).
    model = tf.keras.applications.resnet50.ResNet50(
        weights='imagenet', include_top=True)

    # Load the sample image and convert it to a (224, 224, 3) float array.
    img = tf.keras.preprocessing.image.load_img(
        IMAGE_PATH, target_size=(224, 224))
    img = tf.keras.preprocessing.image.img_to_array(img)

    model.summary()
    # The explainer expects an (inputs, labels) tuple; labels are unused here.
    data = (np.array([img]), None)

    explainer = ExtractActivations()
    # Compute the activations of the target layer and save them as a grid.
    grid = explainer.explain(data, model, target_layers)
    explainer.save(grid, '.', 'activations.png')
# Pick a random window of 10 consecutive test samples to explain.
index = np.random.randint(0, len(X_test_reshaped))
image = X_test_reshaped[index:index + 10]
# NOTE(review): label is set to the image itself — looks like an
# autoencoder-style target; confirm this is intentional.
label = image
print('val:', image.shape)

data = ([image])
explainer = ExtractActivations()

# Layers to visualize; one activation grid image is saved per layer.
layers_of_interest = ['conv2d_1', 'conv2d_2', 'conv2d_3']
for layer_name in layers_of_interest:
    grid = explainer.explain(validation_data=data,
                             model=complete_model,
                             layers_name=[layer_name])
    print(grid.shape)
    explainer.save(grid, '.', '{}.png'.format(layer_name))

grid = explainer.explain(validation_data=data,
                         model=complete_model,
import numpy as np
import tensorflow as tf

from tf_explain.core.activations import ExtractActivations

target_layers = ["conv1_relu"
                 ]  # Could be either the output of a Conv2D, or an activation
IMAGE_PATH = "./cat.jpg"

if __name__ == "__main__":
    model = tf.keras.applications.resnet50.ResNet50(weights="imagenet",
                                                    include_top=True)

    img = tf.keras.preprocessing.image.load_img(IMAGE_PATH,
                                                target_size=(224, 224))
    img = tf.keras.preprocessing.image.img_to_array(img)

    model.summary()
    data = (np.array([img]), None)

    explainer = ExtractActivations()
    # Compute Activations of layer activation_1
    grid = explainer.explain(data, model, target_layers)
    explainer.save(grid, ".", "activations.png")
# --- Example 6 ---
# Fit data to model. Hyperparameters (batch_size, no_epochs, verbosity,
# validation_split) are defined in an earlier cell not shown here.
history = model.fit(input_train,
                    target_train,
                    batch_size=batch_size,
                    epochs=no_epochs,
                    verbose=verbosity,
                    validation_split=validation_split)
# Callbacks intentionally disabled for this run:
#callbacks=callbacks)

# In[10]:

# Define the Activation Visualization explainer.
# Explain a single test sample: reshape to a batch of one 32x32 RGB image.
# NOTE(review): assumes input_test holds flattened/compatible 32x32x3
# samples — confirm against the preprocessing cell.
index = 250
image = input_test[index].reshape((1, 32, 32, 3))
label = target_test[index]
data = ([image], [label])
explainer = ExtractActivations()
# 'visualization_layer' must match a layer name in the model definition.
grid = explainer.explain(data, model, layers_name='visualization_layer')
explainer.save(grid, '.', 'act.png')

# In[11]:

# Generate generalization metrics on the held-out test set.
score = model.evaluate(input_test, target_test, verbose=0)
print(f'Test loss: {score[0]} / Test accuracy: {score[1]}')

# In[ ]:

# In[ ]:
# --- Example 7 ---
 def extract_activations(self, value, index):
     """Explain ``value`` with ExtractActivations and save the grid to disk.

     Uses ``self.setY[index]`` as the label, and ``self.model`` /
     ``self.target_layers`` as the explanation targets. The image is written
     under ``sg.PATH_ACTIVATION``.
     """
     explainer = ExtractActivations()
     activation_grid = explainer.explain((value, self.setY[index]),
                                         self.model, self.target_layers)
     file_name = "n_{}_gradients_input.png".format(index)
     explainer.save(activation_grid, ".", sg.PATH_ACTIVATION + file_name)