Example 1
def process_image(image_path, output_path):
    explainer = GradCAM()

    img = image.load_img(image_path, target_size=(224, 224))
    img = image.img_to_array(img)
    data = ([img], None)

    original_image = Image.open(image_path)
    width = int(original_image.size[0] / 4)
    height = int(original_image.size[1] / 4)
    # Image.ANTIALIAS was removed in Pillow 10; LANCZOS is the equivalent filter
    original_image.thumbnail((width, height), Image.LANCZOS)

    class_index, class_name, prob_value = get_predictions(img, model)
    print('class_index:', class_index)
    heatmap = explainer.explain(data, model, "block5_conv3", class_index)

    # overlay the text prediction on the heatmap overlay
    heatmap_with_prediction_overlayed = overlay_prediction_on_image(
        array_to_PIL(heatmap), class_name[-1], prob_value[0] * 100, width,
        height)

    # place the images side by side
    joined_image = join_images(original_image,
                               heatmap_with_prediction_overlayed)
    joined_image.save(output_path)
Example 2
    def grad_cam(self, value, index):
        # Instantiate the explainer once and reuse it for every class label
        explainer = GradCAM()
        for classe in self.labels:
            grid = explainer.explain((value, self.setY[index]),
                                     self.model,
                                     class_index=classe,
                                     layer_name=self.target_layers)
            name = "n_{}_{}_grad_cam.png".format(index, classe)
            explainer.save(grid, ".", sg.PATH_GRAD + name)
Example 3
    def on_epoch_end(self, epoch, logs=None):
        """
        Draw GradCAM outputs at each epoch end to Tensorboard.

        Args:
            epoch (int): Epoch index
            logs (dict): Additional information on epoch
        """
        explainer = GradCAM()
        heatmap = explainer.explain(self.validation_data, self.model,
                                    self.layer_name, self.class_index)

        # Using the file writer, log the reshaped image.
        with self.file_writer.as_default():
            tf.summary.image("Grad CAM", np.array([heatmap]), step=epoch)
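For comparison, tf-explain also ships a ready-made callback that does essentially this. A minimal sketch, assuming the keyword arguments accepted by recent tf-explain releases (x_train, y_train, x_val, y_val are placeholders):

from tf_explain.callbacks.grad_cam import GradCAMCallback

callbacks = [
    GradCAMCallback(validation_data=(x_val, y_val),
                    class_index=0,
                    layer_name="block5_conv3",
                    output_dir="./logs")
]
model.fit(x_train, y_train, epochs=5, callbacks=callbacks)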
Example 4
def test_should_explain_output(mocker):
    mock_get_gradients = mocker.patch(
        "tf_explain.core.grad_cam.GradCAM.get_gradients_and_filters",
        return_value=(
            [mocker.sentinel.conv_output_1, mocker.sentinel.conv_output_2],
            [mocker.sentinel.guided_grads_1, mocker.sentinel.guided_grads_2],
        ),
    )
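    # explain() calls .numpy() on each cam, so stub it to pass the sentinels through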
    mocker.sentinel.cam_1.numpy = lambda: mocker.sentinel.cam_1
    mocker.sentinel.cam_2.numpy = lambda: mocker.sentinel.cam_2
    mock_generate_output = mocker.patch(
        "tf_explain.core.grad_cam.GradCAM.generate_ponderated_output",
        return_value=[mocker.sentinel.cam_1, mocker.sentinel.cam_2],
    )
    mocker.patch(
        "tf_explain.core.grad_cam.heatmap_display",
        side_effect=[mocker.sentinel.heatmap_1, mocker.sentinel.heatmap_2],
    )
    mocker.patch("tf_explain.core.grad_cam.grid_display",
                 side_effect=lambda x: x)

    explainer = GradCAM()
    data = ([mocker.sentinel.image_1,
             mocker.sentinel.image_2], mocker.sentinel.labels)
    grid = explainer.explain(
        data,
        mocker.sentinel.model,
        mocker.sentinel.class_index,
        mocker.sentinel.layer_name,
        mocker.sentinel.use_guided_grads,
    )

    for heatmap, expected_heatmap in zip(
            grid, [mocker.sentinel.heatmap_1, mocker.sentinel.heatmap_2]):
        assert heatmap == expected_heatmap

    mock_get_gradients.assert_called_once_with(
        mocker.sentinel.model,
        [mocker.sentinel.image_1, mocker.sentinel.image_2],
        mocker.sentinel.layer_name,
        mocker.sentinel.class_index,
        mocker.sentinel.use_guided_grads,
    )

    mock_generate_output.assert_called_once_with(
        [mocker.sentinel.conv_output_1, mocker.sentinel.conv_output_2],
        [mocker.sentinel.guided_grads_1, mocker.sentinel.guided_grads_2],
    )
Example 5
def predict_with_gradcam(model,
                         imgNP,
                         labels,
                         selected_labels,
                         layer_name='bn',
                         expected=None,
                         predictions=None,
                         showPlot=False):
    '''
    TODO: The explainer requires the model to be sent on every call, which may
          be expensive. Find a way to initialize it once or share it with the
          predict engine; otherwise the memory required may double.
    '''
    if predictions is None:
        predictions = model.predict(imgNP)

    #print("Loading original image")
    plt.figure(figsize=(15, 10))
    plt.subplot(151)
    plt.title(("Original - " +
               expected) if expected is not None else "Original")
    plt.axis('off')
    plt.imshow(imgNP[0], cmap='gray')

    explainer = GradCAM()

    j = 1
    for i in range(len(labels)):
        if labels[i] in selected_labels:
            #print("Generating gradcam for class",labels[i])
            #gradcam = grad_cam(model, imgNP, i, layer_name)
            #print("the class index is :", i)
            gradcam = explainer.explain(validation_data=(imgNP, labels),
                                        model=model,
                                        layer_name=layer_name,
                                        class_index=i)
            plt.subplot(151 + j)
            plt.title("{:}: p={:.2f}%".format(labels[i],
                                              predictions[0][i] * 100))
            plt.axis('off')
            plt.imshow(imgNP[0], cmap='gray')
            plt.imshow(gradcam, cmap='jet', alpha=min(0.5, predictions[0][i]))
            j += 1
    if showPlot:
        plt.show()
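One way to address the TODO above is to create the explainer once at module scope and reuse it, since GradCAM's explain method takes the model as an argument rather than storing it. A sketch with hypothetical names:

_EXPLAINER = GradCAM()  # created once, reused across calls

def explain_single_class(model, imgNP, class_index, layer_name='bn'):
    # tf-explain takes an (images, labels) tuple; the labels slot may be None
    return _EXPLAINER.explain(validation_data=(imgNP, None),
                              model=model,
                              layer_name=layer_name,
                              class_index=class_index)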
Example 6
def test_should_raise_error_if_grad_cam_layer_cannot_be_found():
    model = tf.keras.Sequential([
        tf.keras.layers.Dense(10, input_shape=(10, ), name="dense_1"),
        tf.keras.layers.Dense(1, name="dense_2"),
    ])

    with pytest.raises(ValueError):
        layer_name = GradCAM.infer_grad_cam_target_layer(model)
Example 7
def test_should_produce_gradients_and_filters(convolutional_model,
                                              random_data):
    images, _ = random_data
    layer_name = "activation_1"
    output, grads = GradCAM.get_gradients_and_filters(convolutional_model,
                                                      images, layer_name, 0)

    assert output.shape == [len(images)] + list(
        convolutional_model.get_layer(layer_name).output.shape[1:])
    assert grads.shape == output.shape
Example 8
def test_should_ponderate_output():
    grad = np.concatenate(
        [np.ones((3, 3, 1)), 2 * np.ones((3, 3, 1)), 3 * np.ones((3, 3, 1))],
        axis=-1)

    output = np.concatenate(
        [np.ones((3, 3, 1)), 2 * np.ones((3, 3, 1)), 4 * np.ones((3, 3, 1))],
        axis=-1)

    ponderated_output = GradCAM.ponderate_output(output, grad)

    ponderated_sum = 1 * 1 + 2 * 2 + 3 * 4
    expected_output = ponderated_sum * np.ones((3, 3))

    np.testing.assert_almost_equal(expected_output, ponderated_output)
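For reference, the arithmetic the test checks corresponds to averaging each gradient channel and using the result as the weight of the matching filter map; a minimal sketch, assuming that is what ponderate_output does:

import numpy as np

def ponderate_output_sketch(output, grad):
    weights = np.mean(grad, axis=(0, 1))      # per-channel weights: here (1, 2, 3)
    return np.sum(weights * output, axis=-1)  # weighted sum: 1*1 + 2*2 + 3*4 = 17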
Example 9
def save_all_explainer(validation_data,
                       model,
                       name_conv,
                       n_conv=1,
                       dir_save_im='./',
                       save_name='outputs'):
    explainerGradCam = GradCAM()
    explainerActiv = ExtractActivations()
    explainerOccl = OcclusionSensitivity()
    explainerSmoothGrad = SmoothGrad()

    for i in range(1, n_conv + 1):
        output = explainerActiv.explain(validation_data, model,
                                        '{}_{}'.format(name_conv, i))
        explainerActiv.save(output, dir_save_im,
                            '{}-activ-conv{}.jpg'.format(save_name, i))

        output = explainerGradCam.explain(validation_data, model,
                                          '{}_{}'.format(name_conv, i), 0)
        explainerGradCam.save(output, dir_save_im,
                              '{}-gradCam0-conv{}.jpg'.format(save_name, i))

    output = explainerSmoothGrad.explain(validation_data, model, 0)
    explainerSmoothGrad.save(output, dir_save_im,
                             '{}-smooth0.jpg'.format(save_name))

    output = explainerSmoothGrad.explain(validation_data, model, 1)
    explainerSmoothGrad.save(output, dir_save_im,
                             '{}-smooth1.jpg'.format(save_name))

    output = explainerOccl.explain(validation_data, model, 0, 5)
    explainerOccl.save(output, dir_save_im,
                       '{}-occlSens0.jpg'.format(save_name))
    output = explainerOccl.explain(validation_data, model, 1, 5)
    explainerOccl.save(output, dir_save_im,
                       '{}-occlSens1.jpg'.format(save_name))
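A hypothetical call, assuming an (images, labels) validation tuple and convolutional layers named conv_1 .. conv_3:

save_all_explainer((x_val, y_val), model, 'conv', n_conv=3,
                   dir_save_im='./explanations', save_name='run1')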
Example 10
def test_should_generate_ponderated_output(mocker):
    mocker.patch(
        "tf_explain.core.grad_cam.GradCAM.ponderate_output",
        side_effect=[
            mocker.sentinel.ponderated_1, mocker.sentinel.ponderated_2
        ],
    )

    expected_output = [
        mocker.sentinel.ponderated_1, mocker.sentinel.ponderated_2
    ]

    outputs = [mocker.sentinel.output_1, mocker.sentinel.output_2]
    grads = [mocker.sentinel.grads_1, mocker.sentinel.grads_2]

    output = GradCAM.generate_ponderated_output(outputs, grads)

    for real, expected in zip(output, expected_output):
        assert real == expected
Example 11
    def explain(self,
                model_input,
                model,
                layer_name,
                class_index,
                colormap=cv2.COLORMAP_INFERNO):
        """
        Compute GradCAM for a specific class index.

        Args:
            model_input (tf.tensor): Data to perform the evaluation on.
            model (tf.keras.Model): tf.keras model to inspect
            layer_name (str): Targeted layer for GradCAM
            class_index (int, None): Index of targeted class
            colormap (int): Used in parent method signature, but ignored here

        Returns:
            tf.cams: The gradcams
        """
        outputs, guided_grads, predictions = FEGradCAM.get_gradients_and_filters(
            model, model_input, layer_name, class_index)
        cams = GradCAM.generate_ponderated_output(outputs, guided_grads)

        input_min = tf.reduce_min(model_input)
        input_max = tf.reduce_max(model_input)

        # Need to move input image into the 0-255 range
        adjust_sum = 0.0
        adjust_factor = 1.0
        if input_min < 0:
            adjust_sum = 1.0
            adjust_factor /= 2.0
        if input_max <= 1:
            adjust_factor *= 255.0
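        # Net effect of the two checks above:
        #   inputs in [-1, 1]  -> (x + 1) * 127.5 -> [0, 255]
        #   inputs in [0, 1]   ->  x * 255        -> [0, 255]
        #   inputs in [0, 255] ->  unchanged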

        heatmaps = [
            heatmap_display(cam.numpy(),
                            (inp.numpy() + adjust_sum) * adjust_factor,
                            colormap) for cam, inp in zip(cams, model_input)
        ]

        return heatmaps, predictions
Example 12

if method_number==1:  # Occlusion method - results were disappointing.

    ########### Method: Occlusion ################
    explainer = OcclusionSensitivity() # define which type of heatmap we want
    occlusion_patch_width = 20
    heatmap = explainer.explain( my_data, model, my_index, occlusion_patch_width, colormap=cv2.COLORMAP_PINK )

    my_title_text = 'Occlusion - patch = ' + repr(occlusion_patch_width) + ' pixels'
    my_file_text = 'tf_explain_occlusion' + '_' + repr(occlusion_patch_width)

elif method_number==2:
    ########### Method: Grad CAM ################
    ### Apply GradCAM (class activation map) to the last convolution layer of the network.
    explainer = GradCAM()

    # find last convolution layer in model (typically 1x1 convolution)
    n_layers = len(model.layers)
    layer_names = [layer.name for layer in model.layers]
    my_layer_name = ''  # start with empty string
    for i_layer in range(n_layers):
        this_name = layer_names[i_layer]
        if "conv" in this_name:
            my_layer_name = this_name
    print(my_layer_name)

    # Generate result
    heatmap = explainer.explain(my_data, model, my_layer_name, my_index, colormap=cv2.COLORMAP_HOT )

    my_title_text = 'GradCam - Layer:' + my_layer_name
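Note that tf-explain can also infer the target layer: as Examples 6 and 17 show, GradCAM.infer_grad_cam_target_layer(model) returns the name of the last suitable layer, so the manual scan above can often be replaced by:

my_layer_name = GradCAM.infer_grad_cam_target_layer(model)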
Example 13
            (mask, withoutMask) = model.predict(expanded_face)[0]

            predicted_class = 0
            label = "No Mask"
            if mask > withoutMask:
                label = "Mask"
                predicted_class = 1

            color = (0, 255, 0) if label == "Mask" else (0, 0, 255)
            label = "{}: {:.2f}%".format(label, max(mask, withoutMask) * 100)
            cv2.putText(image, label, (startX, startY - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.45, color, 2)
            cv2.rectangle(image, (startX, startY), (endX, endY), color, 2)
            st.image(image, width=640)
            st.write('### ' + label)

    if grad_cam_button:
        data = ([face], None)
        explainer = GradCAM()
        grad_cam_grid = explainer.explain(
            data, model, class_index=predicted_class, layer_name="Conv_1"
        )
        st.image(grad_cam_grid)

    if occlusion_sensitivity_button:
        data = ([face], None)
        explainer = OcclusionSensitivity()
        sensitivity_occlusion_grid = explainer.explain(data, model, predicted_class, patch_size_value)
        st.image(sensitivity_occlusion_grid)
Example 14
plt.plot(history.history['val_accuracy'], label='val_accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.ylim([0.5, 1])
plt.legend(loc='lower right')

test_loss, test_acc = model.evaluate(images_vali, labels_vali, verbose=2)

print(test_acc)
"""Our simple CNN has achieved a test accuracy of over 70%. Not bad for a few lines of code! For another CNN style, 
see an example using the Keras subclassing API and a `tf.GradientTape` 
[here](https://www.tensorflow.org/tutorials/quickstart/advanced)."""

#%%
# Instantiation of the explainer
explainer = GradCAM()

for class_index in range(7):
    ind_in_images_vali = np.where(labels_vali == class_index)[0]

    img_explain = images_vali[ind_in_images_vali[0:25], :, :, :]

    data = (img_explain, None)

    # Save output
    output_dir = '.'
    output_name = 'grad_cam_class_%d.png' % class_index

    output = explainer.explain(data, model, "conv3", class_index)
    explainer.save(output, output_dir, output_name)
Example 15
    'Do not see a bird here :/', 'Black capped chickadee', 'Blue jay',
    'Brown headed cowbird F', 'Brown headed cowbird M', 'Carolina wren',
    'Common Grackle', 'Downy woodpecker', 'Eastern Bluebird',
    'Eu starling on-duty Ad', 'Eu starling off-duty Ad', 'House finch M',
    'House finch F', 'House sparrow F/Im', 'House sparrow M', 'Mourning dove',
    'Cardinal M', 'Cardinal F', 'Northern flicker (red)', 'Pileated woodpecker',
    'Red winged blackbird F/Im', 'Red winged blackbird M', 'Squirrel!',
    'Tufted titmouse', 'White breasted nuthatch'
]

file_list = []
label_list = []
filename_classes = "C:/Users/alert/Google Drive/ML/Databases/Birds_dB/Mappings/minimal_bird_list.txt"
LIST_OF_CLASSES = [line.strip() for line in open(filename_classes, 'r')]

explainer = GradCAM()
explainer_occ = OcclusionSensitivity()

model = load_model("E:\\KerasOutput\\run_2019_11_25_07_33\\my_keras_model.h5")
model.summary()

testDir = "C:\\Users\\alert\\Google Drive\\ML\\Electric Bird Caster\\Classified\Carolina wren\\"

files = os.listdir(testDir)
shuffle(files)

# f = "2019-11-12_07-25-14_840.jpg"
for f in files:

    img_pil = image.load_img(path=testDir + f, target_size=(224, 224, 3))
Example 16
    expanded_image = np.expand_dims(image, axis=0)
    
    class_names = ['Focal Leakage', 'Normal', 'Papilledema', 'Punctuate Leakage', 'Vessel Leakage']
    classes = model.predict(expanded_image)[0]
    (focal, normal, papilledema, punctuate, vessel) = classes
    pred = list(classes)
    x = max(pred)          # highest probability
    y = pred.index(x)      # index of the predicted class
    predicted_class_index = y
    classes = class_names[y]
    x = int(x * 100)
    st.subheader(classes + ':  ' + str(x) + '%')

    if grad_cam_button:
        data = ([image], None)
        explainer = GradCAM()
        grad_cam_grid = explainer.explain(
            data, model, class_index=y, layer_name="mixed7"
        )
        st.image(grad_cam_grid, width=200)

    if occlusion_sensitivity_button:
        data = ([image], None)
        explainer = OcclusionSensitivity()
        sensitivity_occlusion_grid = explainer.explain(data, model, y,
                                                       patch_size_value)
        st.image(sensitivity_occlusion_grid, width=200)

Example 17
def test_should_infer_layer_name_for_grad_cam(model, expected_layer_name):
    layer_name = GradCAM.infer_grad_cam_target_layer(model)

    assert layer_name == expected_layer_name
Example 18
from tensorflow.keras.preprocessing.image import ImageDataGenerator

from tensorflow.keras.applications.inception_v3 import preprocess_input
from matplotlib import pyplot as plt
import os
import skimage
from tensorflow.keras.preprocessing import image
from random import shuffle
from tensorflow.keras.models import load_model  # required for load_model below
from tf_explain.core.grad_cam import GradCAM

# model.summary()
model = load_model("E:\\KerasOutput\\run_2019_11_25_07_33\\my_keras_model.h5")

doImageGen = True

explainer = GradCAM()

if doImageGen:
    eval_test_datagen = ImageDataGenerator(
        preprocessing_function=preprocess_input)

    # testDir = 'C:/Users/alert/Google Drive/ML/Databases/Birds_dB/Keras2/test/'
    testDir = "E:\\ML Training Data\\Keras\\test\\"
    # 'C:\\Users\\alert\\Google Drive\\ML\\Databases\\Photo Booth User Group Photos\\'
    # test_path = "E:\\ML Training Data\\Keras\\eval\\"

    label_list = []
    filename_classes = "C:/Users/alert/Google Drive/ML/Databases/Birds_dB/Mappings/minimal_bird_list.txt"
    LIST_OF_CLASSES = [line.strip() for line in open(filename_classes, 'r')]

    test_generator = eval_test_datagen.flow_from_directory(
Example 19
def explain_tfexplain():

    global graph
    data = {"success": "failed"}
    # ensure an image was properly uploaded to our endpoint
    if flask.request.method == "POST":
        if flask.request.form.get("image"):
            explainer = flask.request.args.get("explainer")
            #with graph.as_default():
            model_path = flask.request.form.get("model_path")
            model = load_model(model_path)

            # read the image in PIL format
            image64 = flask.request.form.get("image")
            image = base64.b64decode(image64)
            image = Image.open(io.BytesIO(image))
            image = prepare_image(image, target=(224, 224))
            image = image * (1. / 255)
            prediction = model.predict(image)
            topClass = getTopXpredictions(prediction, 1)
            print(topClass[0])
            image = np.squeeze(image)

            if explainer == "GRADCAM":
                im = ([image], None)
                from tf_explain.core.grad_cam import GradCAM
                exp = GradCAM()
                imgFinal = exp.explain(im, model, class_index=topClass[0][0])  
                #exp.save(imgFinal, ".", "grad_cam.png")  
                   
            elif explainer == "OCCLUSIONSENSITIVITY":
                im = ([image], None)
                from tf_explain.core.occlusion_sensitivity import OcclusionSensitivity
                exp = OcclusionSensitivity()   
                imgFinal = exp.explain(im, model,class_index=topClass[0][0], patch_size=10)  
                #exp.save(imgFinal, ".", "grad_cam.png")  

            elif explainer == "GRADIENTSINPUTS":
                im = (np.array([image]), None)
                from tf_explain.core.gradients_inputs import GradientsInputs
                exp = GradientsInputs()
                imgFinal = exp.explain(im, model, class_index=topClass[0][0])
                #exp.save(imgFinal, ".", "gradients_inputs.png")

            elif explainer == "VANILLAGRADIENTS":
                im = (np.array([image]), None)
                from tf_explain.core.vanilla_gradients import VanillaGradients
                exp = VanillaGradients()
                imgFinal = exp.explain(im, model, class_index=topClass[0][0])
                #exp.save(imgFinal, ".", "gradients_inputs.png")

            elif explainer == "SMOOTHGRAD":
                im = (np.array([image]), None)
                from tf_explain.core.smoothgrad  import SmoothGrad
                exp = SmoothGrad()
                imgFinal = exp.explain(im, model, class_index=topClass[0][0])
                #exp.save(imgFinal, ".", "gradients_inputs.png")

            elif explainer == "INTEGRATEDGRADIENTS":
                im = (np.array([image]), None)
                from tf_explain.core.integrated_gradients  import IntegratedGradients
                exp = IntegratedGradients()
                imgFinal = exp.explain(im, model, class_index=topClass[0][0])
                #exp.save(imgFinal, ".", "gradients_inputs.png")
            
            elif explainer == "ACTIVATIONVISUALIZATION":
                #need some solution to find out and submit layers name
                im = (np.array([image]), None)
                from tf_explain.core.activations  import ExtractActivations
                exp = ExtractActivations()
                imgFinal = exp.explain(im, model, layers_name=["activation_1"])
                #exp.save(imgFinal, ".", "gradients_inputs.png")


         




          

            img = pilimage.fromarray(imgFinal)
            imgByteArr = inputoutput.BytesIO()
            img.save(imgByteArr, format='JPEG')
            imgByteArr = imgByteArr.getvalue()

            img64 = base64.b64encode(imgByteArr)
            img64_string = img64.decode("utf-8")

            data["explanation"] = img64_string
            data["prediction"] = str(topClass[0][0])
            data["prediction_score"] = str(topClass[0][1])
            data["success"] = "success"
                    
    return flask.Response(json.dumps(data), mimetype="text/plain")
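A hypothetical client for the endpoint above, using the form fields the handler reads (the /explain route and the port are assumptions):

import base64
import json
import requests

with open("cat.jpg", "rb") as f:
    payload = {"image": base64.b64encode(f.read()).decode("utf-8"),
               "model_path": "my_keras_model.h5"}

resp = requests.post("http://localhost:5000/explain?explainer=GRADCAM", data=payload)
result = json.loads(resp.text)  # the handler returns JSON with mimetype text/plain
print(result["prediction"], result["prediction_score"])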
Example 20
import tensorflow as tf

from tf_explain.core.grad_cam import GradCAM

IMAGE_PATH = "./cat.jpg"

if __name__ == "__main__":
    model = tf.keras.applications.vgg16.VGG16(weights="imagenet",
                                              include_top=True)

    img = tf.keras.preprocessing.image.load_img(IMAGE_PATH,
                                                target_size=(224, 224))
    img = tf.keras.preprocessing.image.img_to_array(img)

    model.summary()
    data = ([img], None)

    tabby_cat_class_index = 281
    explainer = GradCAM()
    # Compute GradCAM on VGG16
    grid = explainer.explain(data,
                             model,
                             class_index=tabby_cat_class_index,
                             layer_name="block5_conv3")
    explainer.save(grid, ".", "grad_cam.png")
Example 21
img /= 255.0

# Show image
plt.figure()
plt.imshow(img)
plt.axis('off')
plt.show()

# Reshape data (add a leading batch dimension)
img = img.reshape(-1, *img.shape)

# Prepare data
data = (img, None)
label_idx = np.where(np.array(classes) == label)[0][0]

explainer = GradCAM()
grid = explainer.explain(validation_data=data,
                         model=model.model,
                         class_index=label_idx,
                         layer_name='conv5_block3_3_conv')

plt.figure()
plt.imshow(grid)
plt.axis('off')
plt.show()

explainer = OcclusionSensitivity()
grid = explainer.explain(validation_data=data,
                         model=model.model,
                         class_index=label_idx,
                         patch_size=10)
Example 22
import tensorflow as tf

from tf_explain.core.grad_cam import GradCAM

IMAGE_PATH = './cat.jpg'

if __name__ == '__main__':
    model = tf.keras.applications.vgg16.VGG16(weights='imagenet',
                                              include_top=True)

    img = tf.keras.preprocessing.image.load_img(IMAGE_PATH,
                                                target_size=(224, 224))
    img = tf.keras.preprocessing.image.img_to_array(img)

    model.summary()
    data = ([img], None)

    tabby_cat_class_index = 281
    explainer = GradCAM()
    # Compute GradCAM on VGG16
    grid = explainer.explain(data, model, 'block5_conv3',
                             tabby_cat_class_index)
    explainer.save(grid, '.', 'grad_cam.png')
Example 23
import tensorflow as tf

from tf_explain.core.grad_cam import GradCAM

IMAGE_PATH = "./cat.jpg"

if __name__ == "__main__":
    model = tf.keras.applications.vgg16.VGG16(weights="imagenet",
                                              include_top=True)

    img = tf.keras.preprocessing.image.load_img(IMAGE_PATH,
                                                target_size=(224, 224))
    img = tf.keras.preprocessing.image.img_to_array(img)

    print(img.shape)

    model.summary()
    data = ([img], None)

    tabby_cat_class_index = 281
    explainer = GradCAM()
    # Compute GradCAM on VGG16
    grid = explainer.explain(data, model, "block5_conv3",
                             tabby_cat_class_index)
    explainer.save(grid, ".", "grad_cam.png")
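Note that this example and Example 22 pass the layer name as the third positional argument, while Example 20 passes class_index and layer_name by keyword; the positional order of these two parameters appears to differ between tf-explain releases, so keyword arguments are the safer form:

grid = explainer.explain(data, model,
                         class_index=tabby_cat_class_index,
                         layer_name="block5_conv3")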
Example 24
plt.ylim([0.5, 1])
plt.legend(loc='lower right')

test_loss, test_acc = model.evaluate(
    x=[data_input1_vali, data_input2_vali, data_input3_vali, data_input4_vali],
    y=[data_output_vali],
    verbose=1)

print(test_acc)
"""Our simple CNN has achieved a test accuracy of over 70%. Not bad for a few lines of code! For another CNN style, 
see an example using the Keras subclassing API and a `tf.GradientTape` 
[here](https://www.tensorflow.org/tutorials/quickstart/advanced)."""

#%%
# Instantiation of the explainer
explainer = GradCAM()

for class_index in range(7):
    for input_index, name_of_conv in zip(
            range(4),
        ['Conv_time_3', 'Conv_freq_3', 'Conv_spec_3', 'Conv_recc_3']):
        ind_in_images_vali = np.where(labels_vali == class_index)[0]

        img_explain1 = data_input1_vali[
            ind_in_images_vali[0:25], :, :, :].astype('float32')
        img_explain2 = data_input2_vali[
            ind_in_images_vali[0:25], :, :, :].astype('float32')
        img_explain3 = data_input3_vali[
            ind_in_images_vali[0:25], :, :, :].astype('float32')
        img_explain4 = data_input4_vali[
            ind_in_images_vali[0:25], :, :, :].astype('float32')
Example 25
def gradCAM(X, model, layer_name, class_index):
    explainerGradCam = GradCAM()
    outputs = explainerGradCam.explain((X, None), model, layer_name,
                                       class_index)
    return cv2.cvtColor(outputs, cv2.COLOR_RGB2BGR)


def write():
    st.title(' Face Mask Detector')

    net = load_face_detector_and_model()
    model = load_cnn_model()

    selected_option = st.radio("Choose", ('File', 'Webcam'))

    if selected_option == 'File':
        uploaded_image = st.sidebar.file_uploader("Choose a JPG file",
                                                  type=FILE_TYPES)
        confidence_value = st.sidebar.slider('Confidence:', 0.0, 1.0, 0.5, 0.1)
        if uploaded_image:
            image1 = Image.open(uploaded_image)
            st.sidebar.image(image1,
                             caption='Uploaded Image.',
                             use_column_width=True)
            show_file = st.empty()
            file_type = get_file_type(uploaded_image)
            if file_type == FileType.IMAGE:
                show_file.image(image1)
            elif file_type == FileType.PYTHON:
                st.code(uploaded_image.getvalue())
            else:
                data = pd.read_csv(uploaded_image)
                st.dataframe(data.head(10))

            f = open(get_file_name(uploaded_image), 'rb')
            img_bytes = f.read()
            f.close()

            grad_cam_button = st.sidebar.button('Grad CAM')
            patch_size_value = st.sidebar.slider('Patch size:', 10, 90, 20, 10)
            occlusion_sensitivity_button = st.sidebar.button(
                'Occlusion Sensitivity')
            # np.fromstring is deprecated for binary data; frombuffer is the replacement
            image = cv2.imdecode(np.frombuffer(img_bytes, np.uint8), 1)
            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
            orig = image.copy()
            (h, w) = image.shape[:2]
            blob = cv2.dnn.blobFromImage(image, 1.0, (300, 300),
                                         (104.0, 177.0, 123.0))
            net.setInput(blob)
            detections = net.forward()

            for i in range(0, detections.shape[2]):
                confidence = detections[0, 0, i, 2]
                if confidence > confidence_value:
                    box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
                    (startX, startY, endX, endY) = box.astype("int")
                    (startX, startY) = (max(0, startX), max(0, startY))
                    (endX, endY) = (min(w - 1, endX), min(h - 1, endY))

                    face = image[startY:endY, startX:endX]
                    face = cv2.cvtColor(face, cv2.COLOR_BGR2RGB)
                    face = cv2.resize(face, (224, 224))
                    face = img_to_array(face)
                    face = preprocess_input(face)
                    expanded_face = np.expand_dims(face, axis=0)

                    (mask, withoutMask) = model.predict(expanded_face)[0]

                    predicted_class = 0
                    label = "No Mask"
                    if mask > withoutMask:
                        label = "Mask"
                        predicted_class = 1

                    color = (0, 255, 0) if label == "Mask" else (0, 0, 255)
                    label = "{}: {:.2f}%".format(label,
                                                 max(mask, withoutMask) * 100)
                    cv2.putText(image, label, (startX, startY - 10),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.45, color, 2)
                    cv2.rectangle(image, (startX, startY), (endX, endY), color,
                                  2)
                    st.image(image, width=640)
                    st.write('### ' + label)

            if grad_cam_button:
                data = ([face], None)
                explainer = GradCAM()
                grad_cam_grid = explainer.explain(data,
                                                  model,
                                                  class_index=predicted_class,
                                                  layer_name="Conv_1")
                st.image(grad_cam_grid)

            if occlusion_sensitivity_button:
                data = ([face], None)
                explainer = OcclusionSensitivity()
                sensitivity_occlusion_grid = explainer.explain(
                    data, model, predicted_class, patch_size_value)
                st.image(sensitivity_occlusion_grid)

    # PROGRAM FOR WEB CAM
    if selected_option == 'Webcam':
        labels_dict = {0: 'without_mask', 1: 'with_mask'}
        color_dict = {0: (0, 0, 255), 1: (0, 255, 0)}
        size = 4

        webcam = cv2.VideoCapture(0)  #Use camera 0
        st.write("Webcam On")
        stframe_cam = st.empty()
        # We load the xml file
        classifier = cv2.CascadeClassifier(
            'src/pages/Services/frecog/haarcascade_frontalface_default.xml')

        while True:
            (rval, im) = webcam.read()
            stframe_cam.image(im)
            im = cv2.flip(im, 1, 1)  # flip to act as a mirror

            # Resize the image to speed up detection
            mini = cv2.resize(im, (im.shape[1] // size, im.shape[0] // size))

            # detect MultiScale / faces
            faces = classifier.detectMultiScale(mini)

            # Draw rectangles around each face
            for f in faces:
                (x, y, w, h) = [v * size for v in f]  # scale the shape size back up
                #Save just the rectangle faces in SubRecFaces
                face_img = im[y:y + h, x:x + w]
                resized = cv2.resize(face_img, (150, 150))
                normalized = resized / 255.0
                reshaped = np.reshape(normalized, (1, 150, 150, 3))
                reshaped = np.vstack([reshaped])
                result = model.predict(reshaped)
                #print(result)

                label = np.argmax(result, axis=1)[0]

                cv2.rectangle(im, (x, y), (x + w, y + h), color_dict[label], 2)
                cv2.rectangle(im, (x, y - 40), (x + w, y), color_dict[label],
                              -1)
                cv2.putText(im, labels_dict[label], (x, y - 10),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255, 255, 255), 2)

                # Show the annotated frame; st.image takes the image first, then the caption
                stframe_cam.image(im, caption='LIVE')
                key = cv2.waitKey(10)
                # if Esc key is press then break out of the loop
                if key == 27:  #The Esc key
                    break
        # Stop video
        webcam.release()

        # Close all started windows
        cv2.destroyAllWindows()


#write()
#uploaded_image.close()
Example 27
####################################  Load Data #####################################
folder    = './drive/MyDrive/processed_data/'
te_data   = np.load(folder+'data_test.npy')
FOV       = np.load(folder+'FOV_te.npy')
te_mask   = np.load(folder+'mask_test.npy')

te_data = np.expand_dims(te_data, axis=3)

print('Dataset loaded')
te_data2 = te_data / 255.

explainer = GradCAM()

# Single test image reshaped to a (1, 512, 512, 1) batch
data = (te_data2[1].reshape(1, 512, 512, 1), None)
grid = explainer.explain(data, model, class_index=1)

explainer.save(grid, ".", "grad_cam.png")

explainer = OcclusionSensitivity()

# patch_size is required by OcclusionSensitivity.explain; 10 is an assumed value here
grid = explainer.explain(data, model, class_index=1, patch_size=10)

explainer.save(grid, ".", "occ_cam.png")