Example #1
    def occlusion_sensitivity(self, value, index):
        # Save one occlusion-sensitivity map per label for the given sample.
        for classe in self.labels:
            occlusion_sensitivity = OcclusionSensitivity()
            grid = occlusion_sensitivity.explain((value, self.setY[index]),
                                                 self.model,
                                                 class_index=classe,
                                                 patch_size=20)
            name = "n_{}_{}_occlusion_sensitivity.png".format(index, classe)
            occlusion_sensitivity.save(grid, ".", sg.PATH_OCCLUSION + name)
def test_should_produce_heatmap(convolutional_model, random_data, mocker):
    mocker.patch(
        "tf_explain.core.occlusion_sensitivity.grid_display",
        return_value=mocker.sentinel.grid,
    )

    explainer = OcclusionSensitivity()
    grid = explainer.explain(random_data, convolutional_model, 0, 10)

    assert grid == mocker.sentinel.grid
    def on_epoch_end(self, epoch, logs=None):
        """
        Draw Occlusion Sensitivity outputs at each epoch end to Tensorboard.

        Args:
            epoch (int): Epoch index
            logs (dict): Additional information on epoch
        """
        explainer = OcclusionSensitivity()
        grid = explainer.explain(
            self.validation_data, self.model, self.class_index, self.patch_size
        )

        # Using the file writer, log the reshaped image.
        with self.file_writer.as_default():
            tf.summary.image("Occlusion Sensitivity", np.array([grid]), step=epoch)
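The method above is the epoch hook of tf-explain's OcclusionSensitivityCallback. A minimal sketch of attaching the packaged callback to training, assuming its constructor takes validation_data, class_index, patch_size and output_dir (x_train, y_train, x_val and y_val are hypothetical arrays):

from tf_explain.callbacks.occlusion_sensitivity import OcclusionSensitivityCallback

# Log an occlusion-sensitivity map for class 0 at the end of every epoch.
callback = OcclusionSensitivityCallback(
    validation_data=(x_val, y_val),
    class_index=0,
    patch_size=4,
    output_dir="./logs/occlusion",
)
model.fit(x_train, y_train, epochs=5, callbacks=[callback])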
def test_should_get_sensitivity_map(convolutional_model, random_data, mocker):
    x, y = random_data
    patch_size = 4

    predict_return_value = np.ones((
        math.ceil(x[0].shape[0] / patch_size) *
        math.ceil(x[0].shape[1] / patch_size),
        1,
    )) * np.expand_dims([0.6, 0.4], axis=0)
    convolutional_model.predict = mocker.MagicMock(
        return_value=predict_return_value)
    mocker.patch(
        "tf_explain.core.occlusion_sensitivity.apply_grey_patch",
        return_value=np.random.randint(
            low=0, high=255, size=convolutional_model.inputs[0].shape[1:]),
    )
    mocker.patch("tf_explain.core.occlusion_sensitivity.cv2.resize",
                 side_effect=lambda x, _: x)

    output = OcclusionSensitivity().get_sensitivity_map(
        model=convolutional_model,
        image=x[0],
        class_index=0,
        patch_size=patch_size)

    expected_output = 0.4 * np.ones(
        (x[0].shape[0] // patch_size, x[0].shape[1] // patch_size))

    np.testing.assert_almost_equal(output, expected_output)
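The apply_grey_patch helper mocked above is the heart of the technique: each forward pass sees the image with one square region blanked out, and the drop in the target-class score becomes that region's sensitivity. A minimal sketch of such a helper, assuming a NumPy height-width-channels image and a mid-grey fill (tf-explain's own implementation may differ in detail):

import numpy as np

def apply_grey_patch(image, top_left_x, top_left_y, patch_size):
    # Return a copy of `image` with a grey square pasted at (x, y).
    patched = image.copy()
    patched[top_left_y:top_left_y + patch_size,
            top_left_x:top_left_x + patch_size, :] = 127.5
    return patched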
Example #5
def visualise_occlusion_sensitivity(model,
                                    class_label,
                                    image_path,
                                    datadir,
                                    patch_size=3,
                                    img_size=32):
    '''
    function: visualise occlusion sensitivity for one image and one class
    param: model - saved model
           class_label - index of the class to explain
           image_path - path to the input image
           datadir - directory in which the heatmap is saved
           patch_size - side of the occluding patch, in pixels
           img_size - size the image is resized to before inference
    returns: string file-path to output plot
    '''
    image = load_img(image_path, target_size=(img_size, img_size))
    image = img_to_array(image)

    explainer = OcclusionSensitivity()
    grid = explainer.explain(([image], None), model, class_label, patch_size)
    grid = cv.resize(grid, dsize=(10, 10))

    plot_save_path = datadir + "OS.png"
    cv.imwrite(plot_save_path, grid)

    return plot_save_path
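A hypothetical call, assuming a 32x32 classifier and an existing output directory:

plot_path = visualise_occlusion_sensitivity(model, class_label=3,
                                            image_path='./frog.png',
                                            datadir='./plots/')
print(plot_path)  # ./plots/OS.png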
Example #6
def save_all_explainer(validation_data,
                       model,
                       name_conv,
                       n_conv=1,
                       dir_save_im='./',
                       save_name='outputs'):
    explainerGradCam = GradCAM()
    explainerActiv = ExtractActivations()
    explainerOccl = OcclusionSensitivity()
    explainerSmoothGrad = SmoothGrad()

    for i in range(1, n_conv + 1):
        output = explainerActiv.explain(validation_data, model,
                                        '{}_{}'.format(name_conv, i))
        explainerActiv.save(output, dir_save_im,
                            '{}-activ-conv{}.jpg'.format(save_name, i))

        output = explainerGradCam.explain(validation_data, model,
                                          '{}_{}'.format(name_conv, i), 0)
        explainerGradCam.save(output, dir_save_im,
                              '{}-gradCam0-conv{}.jpg'.format(save_name, i))

    output = explainerSmoothGrad.explain(validation_data, model, 0)
    explainerSmoothGrad.save(output, dir_save_im,
                             '{}-smooth0.jpg'.format(save_name))

    output = explainerSmoothGrad.explain(validation_data, model, 1)
    explainerSmoothGrad.save(output, dir_save_im,
                             '{}-smooth1.jpg'.format(save_name))

    output = explainerOccl.explain(validation_data, model, 0, 5)
    explainerOccl.save(output, dir_save_im,
                       '{}-occlSens0.jpg'.format(save_name))
    output = explainerOccl.explain(validation_data, model, 1, 5)
    explainerOccl.save(output, dir_save_im,
                       '{}-occlSens1.jpg'.format(save_name))
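A hypothetical call, assuming a model whose convolution layers are named conv2d_1 and conv2d_2 and a held-out batch (x_val, y_val):

save_all_explainer((x_val, y_val), model,
                   name_conv='conv2d', n_conv=2,
                   dir_save_im='./explanations/', save_name='run1')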
Example #7
import tensorflow as tf

from tf_explain.core.occlusion_sensitivity import OcclusionSensitivity

IMAGE_PATH = './cat.jpg'

if __name__ == '__main__':
    model = tf.keras.applications.resnet50.ResNet50(weights='imagenet',
                                                    include_top=True)

    img = tf.keras.preprocessing.image.load_img(IMAGE_PATH,
                                                target_size=(224, 224))
    img = tf.keras.preprocessing.image.img_to_array(img)

    model.summary()
    data = ([img], None)

    tabby_cat_class_index = 281
    explainer = OcclusionSensitivity()
    # Compute Occlusion Sensitivity for patch_size 20
    grid = explainer.explain(data, model, tabby_cat_class_index, 20)
    explainer.save(grid, '.', 'occlusion_sensitivity_20.png')
    # Compute Occlusion Sensitivity for patch_size 10
    grid = explainer.explain(data, model, tabby_cat_class_index, 10)
    explainer.save(grid, '.', 'occlusion_sensitivity_10.png')
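If the target class index is not known up front, it can be read off the model's own prediction instead of hard-coding 281; a short sketch using Keras' bundled ImageNet decoding (whether to apply resnet50.preprocess_input first depends on how the explanation should treat preprocessing):

import numpy as np

preds = model.predict(np.array([img]))
print(tf.keras.applications.resnet50.decode_predictions(preds, top=1)[0])
class_index = int(np.argmax(preds[0]))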
Example #8
    'Brown headed cowbird F', 'brown headed cowbird M', 'Carolina wren',
    'Common Grakle', 'Downy woodpecker', 'Eatern Bluebird',
    'Eu starling on-duty Ad', 'Eu starling off-duty Ad', 'House finch M',
    'House finch F', 'House sparrow F/Im', 'House sparrow M', 'Mourning dove',
    'Caridal M', 'Cardinal F', 'Norhtern flicker (red)', 'Pileated woodpecker',
    'Red winged blackbird F/Im', 'Red winged blackbird M', 'Squirrel!',
    'Tufted titmouse', 'White breasted nuthatch'
]

file_list = []
label_list = []
filename_classes = "C:/Users/alert/Google Drive/ML/Databases/Birds_dB/Mappings/minimal_bird_list.txt"
LIST_OF_CLASSES = [line.strip() for line in open(filename_classes, 'r')]

explainer = GradCAM()
explainer_occ = OcclusionSensitivity()

model = load_model("E:\\KerasOutput\\run_2019_11_25_07_33\\my_keras_model.h5")
model.summary()

testDir = "C:\\Users\\alert\\Google Drive\\ML\\Electric Bird Caster\\Classified\\Carolina wren\\"

files = os.listdir(testDir)
shuffle(files)

# f = "2019-11-12_07-25-14_840.jpg"
for f in files:

    img_pil = image.load_img(path=testDir + f, target_size=(224, 224, 3))

    img = image.img_to_array(img_pil)
Example #9
#    print( '\n --- Warning - estimate for LRP might not be correct! --- \n --- Reason: estimated output at pixel={}. ---'.format(pixel_value))

my_data = ( [my_sample], None )

# CV colormaps to choose from:
# See https://docs.opencv.org/master/d3/d50/group__imgproc__colormap.html#gga9a805d8262bcbe273f16be9ea2055a65afdb81862da35ea4912a75f0e8f274aeb
#   for list of CV colormaps
# COLORMAP_HOT  # white to red
# COLORMAP_BONE # white to gray
# COLORMAP_PINK # white to reddish


if method_number==1:  # Occlusion method - results were disappointing.

    ########### Method: Occlusion ################
    explainer = OcclusionSensitivity() # define which type of heatmap we want
    occlusion_patch_width = 20
    heatmap = explainer.explain( my_data, model, my_index, occlusion_patch_width, colormap=cv2.COLORMAP_PINK )

    my_title_text = 'Occlusion - patch = ' + repr(occlusion_patch_width) + ' pixels'
    my_file_text = 'tf_explain_occlusion' + '_' + repr(occlusion_patch_width)

elif method_number==2:
    ########### Method: Grad CAM ################
    ### Apply GradCAM (class activation map) to the last convolution layer of the network.
    explainer = GradCAM()

    # find last convolution layer in model (typically 1x1 convolution)
    n_layers = len(model.layers)
    layer_names = [layer.name for layer in model.layers]
    my_layer_name = ''  # start with empty string
Example #10
def occlSensitivity(X, model, class_index, patch_size):
    # tf-explain returns an RGB heatmap; convert to BGR for OpenCV consumers.
    explainerOccl = OcclusionSensitivity()
    outputs = explainerOccl.explain((X, None), model, class_index, patch_size)
    return cv2.cvtColor(outputs, cv2.COLOR_RGB2BGR)
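A hypothetical call, assuming img is a single height-width-channels array matching the model's input shape:

heatmap_bgr = occlSensitivity([img], model, class_index=0, patch_size=8)
cv2.imwrite('occlusion.png', heatmap_bgr)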
Example #11
####################################  Load Data #####################################
folder    = './drive/MyDrive/processed_data/'
te_data   = np.load(folder+'data_test.npy')
FOV       = np.load(folder+'FOV_te.npy')
te_mask   = np.load(folder+'mask_test.npy')

te_data  = np.expand_dims(te_data, axis=3)


print('Dataset loaded')
#te_data2  = dataset_normalized(te_data)
te_data2 = te_data / 255.

explainer = GradCAM()


dataaa = (te_data2[1].reshape(1, 512, 512, 1), None)
grid = explainer.explain(dataaa, model, class_index=1)

explainer.save(grid, ".", "grad_cam.png")

explainer = OcclusionSensitivity()

# explain() requires a patch size for occlusion; 20 px is an arbitrary choice.
grid = explainer.explain(dataaa, model, class_index=1, patch_size=20)

explainer.save(grid, ".", "occ_cam.png")
Example #12
def explain_tfexplain():

    global graph
    data = {"success": "failed"}
#      # ensure an image was properly uploaded to our endpoint
    if flask.request.method == "POST":
        if flask.request.form.get("image"):
            
            
            

            explainer = flask.request.args.get("explainer")
            #with graph.as_default():
            model_path = flask.request.form.get("model_path")
            model = load_model(model_path)
            # read the image in PIL format
            image64 = flask.request.form.get("image")
            image = base64.b64decode(image64)
            image = Image.open(io.BytesIO(image))
            image = prepare_image(image, target=(224, 224))
            image = image*(1./255)
            #img = tf.keras.preprocessing.image.img_to_array(image)
            prediction = model.predict(image)
            topClass = getTopXpredictions(prediction, 1)
            print(topClass[0])
            image = np.squeeze(image)
            

            if explainer == "GRADCAM":
                im = ([image], None)
                from tf_explain.core.grad_cam import GradCAM
                exp = GradCAM()
                imgFinal = exp.explain(im, model, class_index=topClass[0][0])  
                #exp.save(imgFinal, ".", "grad_cam.png")  
                   
            elif explainer == "OCCLUSIONSENSITIVITY":
                im = ([image], None)
                from tf_explain.core.occlusion_sensitivity import OcclusionSensitivity
                exp = OcclusionSensitivity()   
                imgFinal = exp.explain(im, model,class_index=topClass[0][0], patch_size=10)  
                #exp.save(imgFinal, ".", "grad_cam.png")  

            elif explainer == "GRADIENTSINPUTS":
                im = (np.array([image]), None)
                from tf_explain.core.gradients_inputs import GradientsInputs
                exp = GradientsInputs()
                imgFinal = exp.explain(im, model, class_index=topClass[0][0])
                #exp.save(imgFinal, ".", "gradients_inputs.png")

            elif explainer == "VANILLAGRADIENTS":
                im = (np.array([image]), None)
                from tf_explain.core.vanilla_gradients import VanillaGradients
                exp = VanillaGradients()
                imgFinal = exp.explain(im, model, class_index=topClass[0][0])
                #exp.save(imgFinal, ".", "gradients_inputs.png")

            elif explainer == "SMOOTHGRAD":
                im = (np.array([image]), None)
                from tf_explain.core.smoothgrad  import SmoothGrad
                exp = SmoothGrad()
                imgFinal = exp.explain(im, model, class_index=topClass[0][0])
                #exp.save(imgFinal, ".", "gradients_inputs.png")

            elif explainer == "INTEGRATEDGRADIENTS":
                im = (np.array([image]), None)
                from tf_explain.core.integrated_gradients  import IntegratedGradients
                exp = IntegratedGradients()
                imgFinal = exp.explain(im, model, class_index=topClass[0][0])
                #exp.save(imgFinal, ".", "gradients_inputs.png")
            
            elif explainer == "ACTIVATIONVISUALIZATION":
                #need some solution to find out and submit layers name
                im = (np.array([image]), None)
                from tf_explain.core.activations  import ExtractActivations
                exp = ExtractActivations()
                imgFinal = exp.explain(im, model, layers_name=["activation_1"])
                #exp.save(imgFinal, ".", "gradients_inputs.png")

            img = pilimage.fromarray(imgFinal)
            imgByteArr = inputoutput.BytesIO()
            img.save(imgByteArr, format='JPEG')
            imgByteArr = imgByteArr.getvalue()

            img64 = base64.b64encode(imgByteArr)
            img64_string = img64.decode("utf-8")

            data["explanation"] = img64_string
            data["prediction"] = str(topClass[0][0])
            data["prediction_score"] = str(topClass[0][1])
            data["success"] = "success"
                    
    return flask.Response(json.dumps(data), mimetype="text/plain")
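A hypothetical client for this endpoint, assuming it is exposed at /explain_tfexplain on a local server (the route decorator sits outside this excerpt); note that explainer travels in the query string while image and model_path are form fields:

import base64
import requests

with open("cat.jpg", "rb") as fh:
    image64 = base64.b64encode(fh.read()).decode("utf-8")

resp = requests.post(
    "http://localhost:5000/explain_tfexplain",
    params={"explainer": "OCCLUSIONSENSITIVITY"},
    data={"image": image64, "model_path": "./my_model.h5"},
)
result = resp.json()
print(result["success"], result.get("prediction"))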
def write():
    st.title('Face Mask Detector')

    net = load_face_detector_and_model()
    model = load_cnn_model()

    selected_option = st.radio("Choose", ('File', 'Webcam'))

    if selected_option == 'File':
        uploaded_image = st.sidebar.file_uploader("Choose a JPG file",
                                                  type=FILE_TYPES)
        confidence_value = st.sidebar.slider('Confidence:', 0.0, 1.0, 0.5, 0.1)
        if uploaded_image:
            image1 = Image.open(uploaded_image)
            st.sidebar.image(image1,
                             caption='Uploaded Image.',
                             use_column_width=True)
            show_file = st.empty()
            if not uploaded_image:
                show_file.info("Please upload a file of type: " +
                               ", ".join(FILE_TYPES))
                return
            file_type = get_file_type(uploaded_image)
            if file_type == FileType.IMAGE:
                show_file.image(image1)
            elif file_type == FileType.PYTHON:
                st.code(uploaded_image.getvalue())
            else:
                data = pd.read_csv(uploaded_image)
                st.dataframe(data.head(10))

            f = open(get_file_name(uploaded_image), 'rb')
            img_bytes = f.read()
            f.close()

            grad_cam_button = st.sidebar.button('Grad CAM')
            patch_size_value = st.sidebar.slider('Patch size:', 10, 90, 20, 10)
            occlusion_sensitivity_button = st.sidebar.button(
                'Occlusion Sensitivity')
            image = cv2.imdecode(np.frombuffer(img_bytes, np.uint8), 1)
            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
            orig = image.copy()
            (h, w) = image.shape[:2]
            blob = cv2.dnn.blobFromImage(image, 1.0, (300, 300),
                                         (104.0, 177.0, 123.0))
            net.setInput(blob)
            detections = net.forward()

            for i in range(0, detections.shape[2]):
                confidence = detections[0, 0, i, 2]
                if confidence > confidence_value:
                    box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
                    (startX, startY, endX, endY) = box.astype("int")
                    (startX, startY) = (max(0, startX), max(0, startY))
                    (endX, endY) = (min(w - 1, endX), min(h - 1, endY))

                    face = image[startY:endY, startX:endX]
                    face = cv2.cvtColor(face, cv2.COLOR_BGR2RGB)
                    face = cv2.resize(face, (224, 224))
                    face = img_to_array(face)
                    face = preprocess_input(face)
                    expanded_face = np.expand_dims(face, axis=0)

                    (mask, withoutMask) = model.predict(expanded_face)[0]

                    predicted_class = 0
                    label = "No Mask"
                    if mask > withoutMask:
                        label = "Mask"
                        predicted_class = 1

                    color = (0, 255, 0) if label == "Mask" else (0, 0, 255)
                    label = "{}: {:.2f}%".format(label,
                                                 max(mask, withoutMask) * 100)
                    cv2.putText(image, label, (startX, startY - 10),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.45, color, 2)
                    cv2.rectangle(image, (startX, startY), (endX, endY), color,
                                  2)
                    st.image(image, width=640)
                    st.write('### ' + label)

            if grad_cam_button:
                data = ([face], None)
                explainer = GradCAM()
                grad_cam_grid = explainer.explain(data,
                                                  model,
                                                  class_index=predicted_class,
                                                  layer_name="Conv_1")
                st.image(grad_cam_grid)

            if occlusion_sensitivity_button:
                data = ([face], None)
                explainer = OcclusionSensitivity()
                sensitivity_occlusion_grid = explainer.explain(
                    data, model, predicted_class, patch_size_value)
                st.image(sensitivity_occlusion_grid)

    # PROGRAM FOR WEB CAM
    if selected_option == 'Webcam':
        labels_dict = {0: 'without_mask', 1: 'with_mask'}
        color_dict = {0: (0, 0, 255), 1: (0, 255, 0)}
        size = 4

        webcam = cv2.VideoCapture(0)  #Use camera 0
        st.write("Webcam On")
        stframe_cam = st.empty()
        # We load the xml file
        classifier = cv2.CascadeClassifier(
            'src/pages/Services/frecog/haarcascade_frontalface_default.xml')

        while True:
            (rval, im) = webcam.read()
            stframe_cam.image(im)
            # st.write("Webcam Read")
            #if im:
            #ret, framecar = vf.read()
            im = cv2.flip(im, 1)  # flip horizontally to act as a mirror

            # Resize the image to speed up detection
            mini = cv2.resize(im, (im.shape[1] // size, im.shape[0] // size))

            # detect MultiScale / faces
            faces = classifier.detectMultiScale(mini)

            # Draw rectangles around each face
            for f in faces:
                (x, y, w, h) = [v * size
                                for v in f]  # scale the shape size back up
                #Save just the rectangle faces in SubRecFaces
                face_img = im[y:y + h, x:x + w]
                resized = cv2.resize(face_img, (150, 150))
                normalized = resized / 255.0
                reshaped = np.reshape(normalized, (1, 150, 150, 3))
                reshaped = np.vstack([reshaped])
                result = model.predict(reshaped)
                #print(result)

                label = np.argmax(result, axis=1)[0]

                cv2.rectangle(im, (x, y), (x + w, y + h), color_dict[label], 2)
                cv2.rectangle(im, (x, y - 40), (x + w, y), color_dict[label],
                              -1)
                cv2.putText(im, labels_dict[label], (x, y - 10),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255, 255, 255), 2)

                # Show the image
                stframe_cam.image(im, caption='LIVE')
                #cv2.imshow('LIVE',   im)
                key = cv2.waitKey(10)
                # if Esc key is press then break out of the loop
                if key == 27:  #The Esc key
                    break
        # Stop video
        webcam.release()

        # Close all started windows
        cv2.destroyAllWindows()


#write()
#uploaded_image.close()