def test_should_extract_activations(random_data, convolutional_model, mocker):
    """explain() should min-max scale the filter grid to uint8 [0, 255]."""
    raw_grid = np.array([[1, 2], [1, 2]])
    # Stub the grid builder so only the normalization step is under test.
    mocker.patch(
        "tf_explain.core.activations.filter_display",
        return_value=raw_grid,
    )

    explainer = ExtractActivations()
    result = explainer.explain(random_data, convolutional_model, ["activation_1"])

    # [1, 2] normalized over its own range becomes [0, 255] as uint8.
    expected = np.array([[0, 255], [0, 255]]).astype("uint8")
    np.testing.assert_array_equal(result, expected)
    def on_epoch_end(self, epoch, logs=None):
        """
        Draw activations outputs at each epoch end to Tensorboard.

        Args:
            epoch (int): Epoch index
            logs (dict): Additional information on epoch
        """
        activations_grid = ExtractActivations().explain(
            self.validation_data, self.model, self.layers_name
        )

        # tf.summary.image expects a rank-4 batch: add a channel axis to the
        # 2-D grid, then wrap it in a single-element batch.
        image_batch = np.array([np.expand_dims(activations_grid, axis=-1)])
        with self.file_writer.as_default():
            tf.summary.image("Activations Visualization", image_batch, step=epoch)
# --- 示例#3 (Example #3) ---
def save_all_explainer(validation_data,
                       model,
                       name_conv,
                       n_conv=1,
                       dir_save_im='./',
                       save_name='outputs'):
    """Run every tf-explain explainer on *model* and save the result images.

    Per-layer explainers (activations, Grad-CAM) run once for each of the
    n_conv layers named '<name_conv>_<i>'; the image-level explainers
    (SmoothGrad, occlusion sensitivity) run once per class index 0 and 1.
    """
    grad_cam = GradCAM()
    activations = ExtractActivations()
    occlusion = OcclusionSensitivity()
    smooth = SmoothGrad()

    # Layer-indexed explainers: conv layers are assumed to be named
    # '<name_conv>_1' .. '<name_conv>_<n_conv>'.
    for conv_idx in range(1, n_conv + 1):
        layer = '{}_{}'.format(name_conv, conv_idx)

        grid = activations.explain(validation_data, model, layer)
        activations.save(grid, dir_save_im,
                         '{}-activ-conv{}.jpg'.format(save_name, conv_idx))

        grid = grad_cam.explain(validation_data, model, layer, 0)
        grad_cam.save(grid, dir_save_im,
                      '{}-gradCam0-conv{}.jpg'.format(save_name, conv_idx))

    grid = smooth.explain(validation_data, model, 0)
    smooth.save(grid, dir_save_im, '{}-smooth0.jpg'.format(save_name))

    grid = smooth.explain(validation_data, model, 1)
    smooth.save(grid, dir_save_im, '{}-smooth1.jpg'.format(save_name))

    grid = occlusion.explain(validation_data, model, 0, 5)
    occlusion.save(grid, dir_save_im, '{}-occlSens0.jpg'.format(save_name))

    grid = occlusion.explain(validation_data, model, 1, 5)
    occlusion.save(grid, dir_save_im, '{}-occlSens1.jpg'.format(save_name))
# --- 示例#4 (Example #4) ---
import numpy as np
import tensorflow as tf

from tf_explain.core.activations import ExtractActivations

target_layers = ['activation_6']  # Could be either the output of a Conv2D, or an activation
IMAGE_PATH = './cat.jpg'

if __name__ == '__main__':
    # Pre-trained ImageNet classifier with its classification head kept.
    model = tf.keras.applications.resnet50.ResNet50(weights='imagenet', include_top=True)
    model.summary()

    # Load the sample image at ResNet50's expected 224x224 input size.
    raw_image = tf.keras.preprocessing.image.load_img(IMAGE_PATH, target_size=(224, 224))
    input_array = tf.keras.preprocessing.image.img_to_array(raw_image)

    # tf-explain takes (images, labels); labels are unused for activations.
    data = (np.array([input_array]), None)

    explainer = ExtractActivations()
    grid = explainer.explain(data, model, target_layers)
    explainer.save(grid, '.', 'activations.png')
from tf_explain.core.activations import ExtractActivations

# Define the Activation Visualization explainer.
# Pick a random starting index and take a batch of 10 consecutive test images.
index = np.random.randint(0, len(X_test_reshaped))
image = X_test_reshaped[index:index + 10]
# NOTE(review): label is just an alias of the images; labels are unused by
# ExtractActivations, so this is harmless — confirm intent.
label = image
print('val:', image.shape)

data = [image]
explainer = ExtractActivations()

layers_of_interest = ['conv2d_1']
# Same explain/save sequence for each conv layer, in the original order.
for layer in ('conv2d_1', 'conv2d_2', 'conv2d_3'):
    grid = explainer.explain(validation_data=data,
                             model=complete_model,
                             layers_name=[layer])
    print(grid.shape)
    explainer.save(grid, '.', layer + '.png')
# --- 示例#6 (Example #6) ---
def extractActivations(X, model, layer_name):
    """Return the activation grid of *layer_name* for input batch *X* (labels unused)."""
    return ExtractActivations().explain((X, None), model, layer_name)
import numpy as np
import tensorflow as tf

from tf_explain.core.activations import ExtractActivations

layers_name = ['activation_6']
IMAGE_PATH = './cat.jpg'

if __name__ == '__main__':
    # Pre-trained ImageNet ResNet50, classification head included.
    model = tf.keras.applications.resnet50.ResNet50(weights='imagenet',
                                                    include_top=True)
    model.summary()

    # Load and convert the sample image at the network's 224x224 input size.
    raw_image = tf.keras.preprocessing.image.load_img(IMAGE_PATH,
                                                      target_size=(224, 224))
    input_array = tf.keras.preprocessing.image.img_to_array(raw_image)

    # (images, labels) pair; labels are irrelevant for activation extraction.
    data = (np.array([input_array]), None)

    explainer = ExtractActivations()
    grid = explainer.explain(data, model, ['activation_6'])
    explainer.save(grid, '.', 'activations.png')
# --- 示例#8 (Example #8) ---
# Fit data to model
# (hyperparameters batch_size/no_epochs/verbosity/validation_split and the
# data arrays are defined earlier, outside this fragment)
history = model.fit(input_train,
                    target_train,
                    batch_size=batch_size,
                    epochs=no_epochs,
                    verbose=verbosity,
                    validation_split=validation_split)
#callbacks=callbacks)

# In[10]:

# Define the Activation Visualization explainer
index = 250
# Single test image reshaped into a batch of one 32x32 RGB image.
image = input_test[index].reshape((1, 32, 32, 3))
label = target_test[index]
data = ([image], [label])
explainer = ExtractActivations()
# NOTE(review): layers_name is passed as a bare string here, not a list —
# tf-explain examples elsewhere in this file pass a list of names; confirm
# a plain string is accepted.
grid = explainer.explain(data, model, layers_name='visualization_layer')
explainer.save(grid, '.', 'act.png')

# In[11]:

# Generate generalization metrics
score = model.evaluate(input_test, target_test, verbose=0)
print(f'Test loss: {score[0]} / Test accuracy: {score[1]}')

# In[ ]:

# In[ ]:
# --- 示例#9 (Example #9) ---
def _layer_activation_tile(explainer, batch, model, layer_index):
    """Build one annotated activation tile for model.layers[layer_index].

    Explains the given image batch, applies the HOT colormap, resizes to
    400x400, adds a white title banner and stamps the layer name on it.
    """
    layer_name = model.layers[layer_index].name
    grid = explainer.explain((batch, None), model, [layer_name])
    # BUGFIX: cv.applyColorMap returns a new image (it does not modify its
    # input); the original discarded the return value, so no colormap was
    # ever applied.
    grid = cv.applyColorMap(grid, cv.COLORMAP_HOT)
    grid = cv.resize(grid, dsize=(400, 400))
    # 80-px top border acts as a white banner for the title text.
    grid = cv.copyMakeBorder(grid,
                             80,
                             1,
                             1,
                             1,
                             cv.BORDER_CONSTANT,
                             value=[255, 255, 255])
    cv.putText(grid,
               'layer ' + str(layer_index + 1) + ' : ' + layer_name,
               (0, 50), cv.FONT_HERSHEY_COMPLEX, 0.6, (0, 0, 0))
    return grid


def visualise_activations(model, input_data, datadir):
    '''
    function: to visualize output after the first four layers of model

    param: model - saved model
           input_data - image file path, or a list of image arrays
           datadir - directory prefix where plots are written
    returns: string file-path to output plot (single input) OR
             list of file-paths to plots (list input)
    '''
    explainer = ExtractActivations()

    if not isinstance(input_data, list):
        # Single image given as a file path: load it at 32x32.
        image = img_to_array(load_img(input_data, target_size=(32, 32)))
        batch = np.array([image])
        tiles = [_layer_activation_tile(explainer, batch, model, layer_idx)
                 for layer_idx in range(4)]
        final = np.concatenate(tiles, axis=0)
        plot_save_path = datadir + 'AL0' + '.png'
        cv.imwrite(plot_save_path, final)
        return plot_save_path

    # List of pre-loaded image arrays.
    batch = np.array(input_data)
    # NOTE(review): as in the original, the FULL batch is explained for every
    # image index, so all saved plots are identical; presumably
    # input_data[image_idx] was intended — confirm before changing. Since the
    # tiles don't depend on the index, compute them once.
    tiles = [_layer_activation_tile(explainer, batch, model, layer_idx)
             for layer_idx in range(4)]
    final = np.concatenate(tiles, axis=0)

    plot_save_path_l = []
    # BUGFIX: the original reused loop variable `i` for both the image index
    # and the layer index, so every file was written as 'AL3.png' and the
    # returned list repeated that single path.
    for image_idx in range(len(input_data)):
        plot_save_path = datadir + 'AL' + str(image_idx) + '.png'
        cv.imwrite(plot_save_path, final)
        plot_save_path_l.append(plot_save_path)

    return plot_save_path_l
# --- 示例#10 (Example #10) ---
 def extract_activations(self, value, index):
     """Save the activation grid for sample *value* (label self.setY[index])."""
     explainer = ExtractActivations()
     grid = explainer.explain((value, self.setY[index]),
                              self.model, self.target_layers)
     file_name = "n_{}_gradients_input.png".format(index)
     explainer.save(grid, ".", sg.PATH_ACTIVATION + file_name)