Code Example #1
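This pytest test saves a random 208x208 grid with ExtractActivations.save and asserts that the PNG shows up in the output directory (output_dir is presumably a temporary-directory fixture).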
import numpy as np

from tf_explain.core.activations import ExtractActivations


def test_should_save_output_grid(output_dir):
    grid = np.random.random((208, 208))

    explainer = ExtractActivations()
    explainer.save(grid, output_dir, "output.png")

    assert len(list(output_dir.glob("output.png"))) == 1
Code Example #2
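This pytest test patches filter_display through the pytest-mock mocker fixture so that explain receives a known, non-normalized grid, then asserts the result is min-max scaled to the uint8 range [0, 255].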
import numpy as np

from tf_explain.core.activations import ExtractActivations


def test_should_extract_activations(random_data, convolutional_model, mocker):
    non_normalized_grid = np.array([[1, 2], [1, 2]])
    mocker.patch("tf_explain.core.activations.filter_display",
                 return_value=non_normalized_grid)
    explainer = ExtractActivations()
    grid = explainer.explain(random_data, convolutional_model,
                             ["activation_1"])

    expected_output = np.array([[0, 255], [0, 255]]).astype("uint8")

    np.testing.assert_array_equal(grid, expected_output)
Code Example #3
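An on_epoch_end hook, apparently from a tf.keras.callbacks.Callback subclass: it recomputes the activation grid on held-out data after every epoch and logs it to TensorBoard via tf.summary.image.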
    def on_epoch_end(self, epoch, logs=None):
        """
        Draw activation outputs at each epoch end to TensorBoard.

        Args:
            epoch (int): Epoch index
            logs (dict): Additional information on epoch
        """
        explainer = ExtractActivations()
        grid = explainer.explain(self.validation_data, self.model,
                                 self.layers_name)

        # Using the file writer, log the reshaped image.
        with self.file_writer.as_default():
            tf.summary.image(
                "Activations Visualization",
                np.array([np.expand_dims(grid, axis=-1)]),
                step=epoch,
            )
Code Example #4
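A batch helper that runs four tf-explain explainers over the same validation data and saves each output as a JPEG. Note that Grad-CAM is called with the layer name before the class index, which matches the argument order of older tf-explain releases.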
from tf_explain.core.activations import ExtractActivations
from tf_explain.core.grad_cam import GradCAM
from tf_explain.core.occlusion_sensitivity import OcclusionSensitivity
from tf_explain.core.smoothgrad import SmoothGrad


def save_all_explainer(validation_data,
                       model,
                       name_conv,
                       n_conv=1,
                       dir_save_im='./',
                       save_name='outputs'):
    explainerGradCam = GradCAM()
    explainerActiv = ExtractActivations()
    explainerOccl = OcclusionSensitivity()
    explainerSmoothGrad = SmoothGrad()

    for i in range(1, n_conv + 1):
        output = explainerActiv.explain(validation_data, model,
                                        ['{}_{}'.format(name_conv, i)])
        explainerActiv.save(output, dir_save_im,
                            '{}-activ-conv{}.jpg'.format(save_name, i))

        output = explainerGradCam.explain(validation_data, model,
                                          '{}_{}'.format(name_conv, i), 0)
        explainerGradCam.save(output, dir_save_im,
                              '{}-gradCam0-conv{}.jpg'.format(save_name, i))

    output = explainerSmoothGrad.explain(validation_data, model, 0)
    explainerSmoothGrad.save(output, dir_save_im,
                             '{}-smooth0.jpg'.format(save_name))

    output = explainerSmoothGrad.explain(validation_data, model, 1)
    explainerSmoothGrad.save(output, dir_save_im,
                             '{}-smooth1.jpg'.format(save_name))

    output = explainerOccl.explain(validation_data, model, 0, 5)
    explainerOccl.save(output, dir_save_im,
                       '{}-occlSens0.jpg'.format(save_name))
    output = explainerOccl.explain(validation_data, model, 1, 5)
    explainerOccl.save(output, dir_save_im,
                       '{}-occlSens1.jpg'.format(save_name))
Code Example #5
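A minimal wrapper around ExtractActivations.explain; labels are unused by the explainer, so None is passed in their place.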
from tf_explain.core.activations import ExtractActivations


def extractActivations(X, model, layer_name):
    explainerActiv = ExtractActivations()
    outputs = explainerActiv.explain((X, None), model, layer_name)
    return outputs
Code Example #6
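A complete, standalone script: load ResNet50 with ImageNet weights, read a 224x224 image, and save the activation grid of layer activation_6 to activations.png.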
import numpy as np
import tensorflow as tf

from tf_explain.core.activations import ExtractActivations

layers_name = ['activation_6']
IMAGE_PATH = './cat.jpg'

if __name__ == '__main__':
    model = tf.keras.applications.resnet50.ResNet50(weights='imagenet',
                                                    include_top=True)

    img = tf.keras.preprocessing.image.load_img(IMAGE_PATH,
                                                target_size=(224, 224))
    img = tf.keras.preprocessing.image.img_to_array(img)

    model.summary()
    data = (np.array([img]), None)

    explainer = ExtractActivations()
    # Compute activations of layer activation_6
    grid = explainer.explain(data, model, layers_name)
    explainer.save(grid, '.', 'activations.png')
Code Example #7
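A fragment exported from a Jupyter notebook: after fitting the model, it extracts the activations of a layer named visualization_layer for a single 32x32x3 test image, saves the grid, and then evaluates the model on the test set.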
# Fit data to model
history = model.fit(input_train,
                    target_train,
                    batch_size=batch_size,
                    epochs=no_epochs,
                    verbose=verbosity,
                    validation_split=validation_split)
# (Optional) pass callbacks=callbacks to model.fit above to attach callbacks.

# Define the Activation Visualization explainer
index = 250
image = input_test[index].reshape((1, 32, 32, 3))
label = target_test[index]
data = ([image], [label])
explainer = ExtractActivations()
grid = explainer.explain(data, model, layers_name='visualization_layer')
explainer.save(grid, '.', 'act.png')

# Generate generalization metrics
score = model.evaluate(input_test, target_test, verbose=0)
print(f'Test loss: {score[0]} / Test accuracy: {score[1]}')

Code Example #8
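A plotting utility that renders the activation grids of a model's first four layers with OpenCV (heat colormap, resize, labeled border), stacks them vertically, and writes the result to disk. It accepts either a single image path or a list of image arrays.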
import cv2 as cv
import numpy as np
from tensorflow.keras.preprocessing.image import load_img, img_to_array

from tf_explain.core.activations import ExtractActivations


def visualise_activations(model, input_data, datadir):
    '''
    function: visualize the outputs of the first four layers of the model
    param: model - saved model
           input_data - path to a single image, OR a list of image arrays
           datadir - directory in which to save the plot(s)
    returns: string file-path to output plot OR list of file-paths to plots
    '''
    if not isinstance(input_data, list):
        image = load_img(input_data, target_size=(32, 32))
        image = img_to_array(image)
        image_l = [image]
        explainer = ExtractActivations()
        arr = []
        for i in range(4):
            grid = explainer.explain((np.array(image_l), None), model,
                                     [model.layers[i].name])
            # applyColorMap returns a new image; keep the result.
            grid = cv.applyColorMap(grid, cv.COLORMAP_HOT)
            grid = cv.resize(grid, dsize=(400, 400))
            grid = cv.copyMakeBorder(grid,
                                     80,
                                     1,
                                     1,
                                     1,
                                     cv.BORDER_CONSTANT,
                                     value=[255, 255, 255])
            cv.putText(grid,
                       'layer ' + str(i + 1) + ' : ' + model.layers[i].name,
                       (0, 50), cv.FONT_HERSHEY_COMPLEX, 0.6, (0, 0, 0))
            arr.append(grid)
        final = np.concatenate(arr, axis=0)
        plot_save_path = datadir + 'AL0' + '.png'
        cv.imwrite(plot_save_path, final)

        return plot_save_path

    else:
        image_l = input_data

        explainer = ExtractActivations()
        plot_save_path_l = []
        for i in range(len(image_l)):
            arr = []
            # Use a separate inner index so the outer image index i
            # is not shadowed.
            for j in range(4):
                # Explain one image at a time.
                grid = explainer.explain((np.array([image_l[i]]), None),
                                         model, [model.layers[j].name])
                grid = cv.applyColorMap(grid, cv.COLORMAP_HOT)
                grid = cv.resize(grid, dsize=(400, 400))
                grid = cv.copyMakeBorder(grid,
                                         80,
                                         1,
                                         1,
                                         1,
                                         cv.BORDER_CONSTANT,
                                         value=[255, 255, 255])
                cv.putText(
                    grid,
                    'layer ' + str(j + 1) + ' : ' + model.layers[j].name,
                    (0, 50), cv.FONT_HERSHEY_COMPLEX, 0.6, (0, 0, 0))
                arr.append(grid)
            final = np.concatenate(arr, axis=0)
            plot_save_path = datadir + 'AL' + str(i) + '.png'
            cv.imwrite(plot_save_path, final)
            plot_save_path_l.append(plot_save_path)

        return plot_save_path_l
Code Example #9
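This pytest test checks that ExtractActivations.generate_activations_graph builds a sub-model whose final layer is the requested activation layer.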
from tf_explain.core.activations import ExtractActivations


def test_should_generate_subgraph(convolutional_model):
    activations_model = ExtractActivations.generate_activations_graph(
        convolutional_model, ["activation_1"])

    assert activations_model.layers[-1].name == "activation_1"
Code Example #10
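A Flask endpoint that decodes a base64-encoded image, predicts its class, and dispatches to one of several tf-explain explainers selected by the explainer query parameter, returning the explanation image base64-encoded in a JSON payload. prepare_image, getTopXpredictions, pilimage (PIL's Image), and inputoutput (io) are helpers and aliases defined elsewhere in the source application.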
def explain_tfexplain():

    data = {"success": "failed"}
    # Ensure an image was properly uploaded to our endpoint.
    if flask.request.method == "POST":
        if flask.request.form.get("image"):
            explainer = flask.request.args.get("explainer")
            model_path = flask.request.form.get("model_path")
            model = load_model(model_path)

            # Read the image in PIL format and preprocess it.
            image64 = flask.request.form.get("image")
            image = base64.b64decode(image64)
            image = Image.open(io.BytesIO(image))
            image = prepare_image(image, target=(224, 224))
            image = image * (1. / 255)
            prediction = model.predict(image)
            topClass = getTopXpredictions(prediction, 1)
            print(topClass[0])
            image = np.squeeze(image)

            if explainer == "GRADCAM":
                im = ([image], None)
                from tf_explain.core.grad_cam import GradCAM
                exp = GradCAM()
                imgFinal = exp.explain(im, model, class_index=topClass[0][0])

            elif explainer == "OCCLUSIONSENSITIVITY":
                im = ([image], None)
                from tf_explain.core.occlusion_sensitivity import OcclusionSensitivity
                exp = OcclusionSensitivity()
                imgFinal = exp.explain(im, model, class_index=topClass[0][0],
                                       patch_size=10)

            elif explainer == "GRADIENTSINPUTS":
                im = (np.array([image]), None)
                from tf_explain.core.gradients_inputs import GradientsInputs
                exp = GradientsInputs()
                imgFinal = exp.explain(im, model, class_index=topClass[0][0])

            elif explainer == "VANILLAGRADIENTS":
                im = (np.array([image]), None)
                from tf_explain.core.vanilla_gradients import VanillaGradients
                exp = VanillaGradients()
                imgFinal = exp.explain(im, model, class_index=topClass[0][0])

            elif explainer == "SMOOTHGRAD":
                im = (np.array([image]), None)
                from tf_explain.core.smoothgrad import SmoothGrad
                exp = SmoothGrad()
                imgFinal = exp.explain(im, model, class_index=topClass[0][0])

            elif explainer == "INTEGRATEDGRADIENTS":
                im = (np.array([image]), None)
                from tf_explain.core.integrated_gradients import IntegratedGradients
                exp = IntegratedGradients()
                imgFinal = exp.explain(im, model, class_index=topClass[0][0])

            elif explainer == "ACTIVATIONVISUALIZATION":
                # TODO: needs a way to discover and submit layer names
                im = (np.array([image]), None)
                from tf_explain.core.activations import ExtractActivations
                exp = ExtractActivations()
                imgFinal = exp.explain(im, model, layers_name=["activation_1"])

            else:
                # Unknown explainer name: fail early so imgFinal is never
                # referenced while undefined.
                data["error"] = "unknown explainer: {}".format(explainer)
                return flask.Response(json.dumps(data), mimetype="text/plain")

            img = pilimage.fromarray(imgFinal)
            imgByteArr = inputoutput.BytesIO()
            img.save(imgByteArr, format='JPEG')
            imgByteArr = imgByteArr.getvalue()

            img64 = base64.b64encode(imgByteArr)
            img64_string = img64.decode("utf-8")

            data["explanation"] = img64_string
            data["prediction"] = str(topClass[0][0])
            data["prediction_score"] = str(topClass[0][1])
            data["success"] = "success"
                    
    return flask.Response(json.dumps(data), mimetype="text/plain")
Code Example #11
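A method from a wrapper class that presumably holds self.model, self.setY (labels), and self.target_layers: it explains one sample and saves the grid under sg.PATH_ACTIVATION.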
    def extract_activations(self, value, index):
        extract_activations = ExtractActivations()
        grid = extract_activations.explain((value, self.setY[index]),
                                           self.model, self.target_layers)
     name = "n_{}_gradients_input.png".format(index)
     extract_activations.save(grid, ".", sg.PATH_ACTIVATION + name)